diff -Nur linux-3.15-rc4.orig/arch/arm/boot/dts/imx6dl-hummingboard.dts linux-3.15-rc4/arch/arm/boot/dts/imx6dl-hummingboard.dts --- linux-3.15-rc4.orig/arch/arm/boot/dts/imx6dl-hummingboard.dts 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/boot/dts/imx6dl-hummingboard.dts 2014-05-07 17:05:41.464107257 +0200 @@ -67,6 +67,14 @@ status = "okay"; }; +&hdmi { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_hummingboard_hdmi>; + ddc-i2c-bus = <&i2c2>; + status = "okay"; + crtcs = <&ipu1 0>; +}; + &i2c1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_hummingboard_i2c1>; @@ -82,6 +90,13 @@ */ }; +&i2c2 { + clock-frequency = <100000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_hummingboard_i2c2>; + status = "okay"; +}; + &iomuxc { hummingboard { pinctrl_hummingboard_flexcan1: hummingboard-flexcan1 { @@ -97,6 +112,12 @@ >; }; + pinctrl_hummingboard_hdmi: hummingboard-hdmi { + fsl,pins = < + MX6QDL_PAD_KEY_ROW2__HDMI_TX_CEC_LINE 0x1f8b0 + >; + }; + pinctrl_hummingboard_i2c1: hummingboard-i2c1 { fsl,pins = < MX6QDL_PAD_EIM_D21__I2C1_SCL 0x4001b8b1 @@ -104,6 +125,13 @@ >; }; + pinctrl_hummingboard_i2c2: hummingboard-i2c2 { + fsl,pins = < + MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1 + MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1 + >; + }; + pinctrl_hummingboard_spdif: hummingboard-spdif { fsl,pins = ; }; diff -Nur linux-3.15-rc4.orig/arch/arm/boot/dts/imx6q-cubox-i.dts linux-3.15-rc4/arch/arm/boot/dts/imx6q-cubox-i.dts --- linux-3.15-rc4.orig/arch/arm/boot/dts/imx6q-cubox-i.dts 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/boot/dts/imx6q-cubox-i.dts 2014-05-07 17:05:41.464107257 +0200 @@ -13,4 +13,8 @@ &sata { status = "okay"; + fsl,transmit-level-mV = <1104>; + fsl,transmit-boost-mdB = <0>; + fsl,transmit-atten-16ths = <9>; + fsl,no-spread-spectrum; }; diff -Nur linux-3.15-rc4.orig/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi linux-3.15-rc4/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi --- linux-3.15-rc4.orig/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi 2014-05-07 17:05:41.464107257 +0200 @@ -12,6 +12,19 @@ pinctrl-0 = <&pinctrl_cubox_i_ir>; }; + pwmleds { + compatible = "pwm-leds"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_cubox_i_pwm1>; + + front { + active-low; + label = "imx6:red:front"; + max-brightness = <248>; + pwms = <&pwm1 0 50000>; + }; + }; + regulators { compatible = "simple-bus"; @@ -55,6 +68,21 @@ }; }; +&hdmi { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_cubox_i_hdmi>; + ddc-i2c-bus = <&i2c2>; + status = "okay"; + crtcs = <&ipu1 0>; +}; + +&i2c2 { + clock-frequency = <100000>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_cubox_i_i2c2>; + status = "okay"; +}; + &i2c3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_cubox_i_i2c3>; @@ -69,6 +97,19 @@ &iomuxc { cubox_i { + pinctrl_cubox_i_hdmi: cubox-i-hdmi { + fsl,pins = < + MX6QDL_PAD_KEY_ROW2__HDMI_TX_CEC_LINE 0x1f8b0 + >; + }; + + pinctrl_cubox_i_i2c2: cubox-i-i2c2 { + fsl,pins = < + MX6QDL_PAD_KEY_COL3__I2C2_SCL 0x4001b8b1 + MX6QDL_PAD_KEY_ROW3__I2C2_SDA 0x4001b8b1 + >; + }; + pinctrl_cubox_i_i2c3: cubox-i-i2c3 { fsl,pins = < MX6QDL_PAD_EIM_D17__I2C3_SCL 0x4001b8b1 @@ -82,6 +123,10 @@ >; }; + pinctrl_cubox_i_pwm1: cubox-i-pwm1-front-led { + fsl,pins = ; + }; + pinctrl_cubox_i_spdif: cubox-i-spdif { fsl,pins = ; }; @@ -111,6 +156,28 @@ MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x13059 >; }; + + pinctrl_cubox_i_usdhc2_100mhz: cubox-i-usdhc2-100mhz { + fsl,pins = < + MX6QDL_PAD_SD2_CMD__SD2_CMD 
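
The "ddc-i2c-bus = <&i2c2>" property added to the &hdmi nodes points the HDMI encoder at the I2C controller that carries the display's DDC/EDID lines. For reference, a driver resolves such a phandle roughly as in the sketch below; example_get_ddc_adapter() is an invented name, and this shows the generic OF/I2C pattern, not the literal i.MX6 HDMI driver code.

    #include <linux/i2c.h>
    #include <linux/of.h>

    /*
     * Illustrative only: turn an optional "ddc-i2c-bus" phandle into the
     * I2C adapter that EDID can later be read from (slave address 0x50).
     */
    static struct i2c_adapter *example_get_ddc_adapter(struct device_node *np)
    {
            struct device_node *ddc_node;
            struct i2c_adapter *ddc;

            ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0);
            if (!ddc_node)
                    return NULL;            /* property is optional */

            ddc = of_find_i2c_adapter_by_node(ddc_node);
            of_node_put(ddc_node);

            return ddc;                     /* NULL until &i2c2 has probed */
    }
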
0x170b9 + MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100b9 + MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170b9 + MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170b9 + MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170b9 + MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x130b9 + >; + }; + + pinctrl_cubox_i_usdhc2_200mhz: cubox-i-usdhc2-200mhz { + fsl,pins = < + MX6QDL_PAD_SD2_CMD__SD2_CMD 0x170f9 + MX6QDL_PAD_SD2_CLK__SD2_CLK 0x100f9 + MX6QDL_PAD_SD2_DAT0__SD2_DATA0 0x170f9 + MX6QDL_PAD_SD2_DAT1__SD2_DATA1 0x170f9 + MX6QDL_PAD_SD2_DAT2__SD2_DATA2 0x170f9 + MX6QDL_PAD_SD2_DAT3__SD2_DATA3 0x130f9 + >; + }; }; }; @@ -130,9 +197,19 @@ status = "okay"; }; +&uart4 { + status = "okay"; +}; + +&usdhc1 { + status = "okay"; +}; + &usdhc2 { - pinctrl-names = "default"; + pinctrl-names = "default", "state_100mhz", "state_200mhz"; pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>; + pinctrl-1 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2_100mhz>; + pinctrl-2 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2_200mhz>; vmmc-supply = <®_3p3v>; cd-gpios = <&gpio1 4 0>; status = "okay"; diff -Nur linux-3.15-rc4.orig/arch/arm/boot/dts/imx6qdl.dtsi linux-3.15-rc4/arch/arm/boot/dts/imx6qdl.dtsi --- linux-3.15-rc4.orig/arch/arm/boot/dts/imx6qdl.dtsi 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/boot/dts/imx6qdl.dtsi 2014-05-07 17:05:41.464107257 +0200 @@ -128,6 +128,8 @@ cache-level = <2>; arm,tag-latency = <4 2 3>; arm,data-latency = <4 2 3>; + arm,dynamic-clk-gating; + arm,standby-mode; }; pcie: pcie@0x01000000 { diff -Nur linux-3.15-rc4.orig/arch/arm/boot/dts/imx6qdl-microsom.dtsi linux-3.15-rc4/arch/arm/boot/dts/imx6qdl-microsom.dtsi --- linux-3.15-rc4.orig/arch/arm/boot/dts/imx6qdl-microsom.dtsi 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/boot/dts/imx6qdl-microsom.dtsi 2014-05-07 17:05:49.488157029 +0200 @@ -1,9 +1,69 @@ /* * Copyright (C) 2013,2014 Russell King */ +#include +/ { + regulators { + compatible = "simple-bus"; + + reg_brcm_osc: brcm-osc-reg { + compatible = "regulator-fixed"; + enable-active-high; + gpio = <&gpio5 5 0>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_microsom_brcm_osc_reg>; + regulator-name = "brcm_osc_reg"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + regulator-always-on; + regulator-boot-on; + }; + + reg_brcm: brcm-reg { + compatible = "regulator-fixed"; + enable-active-high; + gpio = <&gpio3 19 0>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_microsom_brcm_reg>; + regulator-name = "brcm_reg"; + regulator-min-microvolt = <3300000>; + regulator-max-microvolt = <3300000>; + startup-delay-us = <200000>; + }; + }; +}; &iomuxc { microsom { + pinctrl_microsom_brcm_osc_reg: microsom-brcm-osc-reg { + fsl,pins = < + MX6QDL_PAD_DISP0_DAT11__GPIO5_IO05 0x40013070 + >; + }; + + pinctrl_microsom_brcm_reg: microsom-brcm-reg { + fsl,pins = < + MX6QDL_PAD_EIM_D19__GPIO3_IO19 0x40013070 + >; + }; + + pinctrl_microsom_brcm_wifi: microsom-brcm-wifi { + fsl,pins = < + MX6QDL_PAD_GPIO_8__XTALOSC_REF_CLK_32K 0x1b0b0 + MX6QDL_PAD_CSI0_DATA_EN__GPIO5_IO20 0x40013070 + MX6QDL_PAD_CSI0_DAT8__GPIO5_IO26 0x40013070 + MX6QDL_PAD_CSI0_DAT9__GPIO5_IO27 0x40013070 + >; + }; + + pinctrl_microsom_brcm_bt: microsom-brcm-bt { + fsl,pins = < + MX6QDL_PAD_CSI0_DAT14__GPIO6_IO00 0x40013070 + MX6QDL_PAD_CSI0_DAT15__GPIO6_IO01 0x40013070 + MX6QDL_PAD_CSI0_DAT18__GPIO6_IO04 0x40013070 + >; + }; + pinctrl_microsom_uart1: microsom-uart1 { fsl,pins = < MX6QDL_PAD_CSI0_DAT10__UART1_TX_DATA 0x1b0b1 @@ -11,6 +71,15 @@ >; }; + pinctrl_microsom_uart4_1: microsom-uart4 { + 
fsl,pins = < + MX6QDL_PAD_CSI0_DAT12__UART4_TX_DATA 0x1b0b1 + MX6QDL_PAD_CSI0_DAT13__UART4_RX_DATA 0x1b0b1 + MX6QDL_PAD_CSI0_DAT16__UART4_RTS_B 0x1b0b1 + MX6QDL_PAD_CSI0_DAT17__UART4_CTS_B 0x1b0b1 + >; + }; + pinctrl_microsom_usbotg: microsom-usbotg { /* * Similar to pinctrl_usbotg_2, but we want it @@ -18,6 +87,17 @@ */ fsl,pins = ; }; + + pinctrl_microsom_usdhc1: microsom-usdhc1 { + fsl,pins = < + MX6QDL_PAD_SD1_CMD__SD1_CMD 0x17059 + MX6QDL_PAD_SD1_CLK__SD1_CLK 0x10059 + MX6QDL_PAD_SD1_DAT0__SD1_DATA0 0x17059 + MX6QDL_PAD_SD1_DAT1__SD1_DATA1 0x17059 + MX6QDL_PAD_SD1_DAT2__SD1_DATA2 0x17059 + MX6QDL_PAD_SD1_DAT3__SD1_DATA3 0x17059 + >; + }; }; }; @@ -27,7 +107,25 @@ status = "okay"; }; +/* UART4 - Connected to optional BRCM Wifi/BT/FM */ +&uart4 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_microsom_brcm_bt &pinctrl_microsom_uart4_1>; + fsl,uart-has-rtscts; +}; + &usbotg { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_microsom_usbotg>; }; + +/* USDHC1 - Connected to optional BRCM Wifi/BT/FM */ +&usdhc1 { + card-external-vcc-supply = <®_brcm>; + card-reset-gpios = <&gpio5 26 GPIO_ACTIVE_LOW>, <&gpio6 0 GPIO_ACTIVE_LOW>; + keep-power-in-suspend; + non-removable; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_microsom_brcm_wifi &pinctrl_microsom_usdhc1>; + vmmc-supply = <®_brcm>; +}; diff -Nur linux-3.15-rc4.orig/arch/arm/boot/dts/imx6sl.dtsi linux-3.15-rc4/arch/arm/boot/dts/imx6sl.dtsi --- linux-3.15-rc4.orig/arch/arm/boot/dts/imx6sl.dtsi 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/boot/dts/imx6sl.dtsi 2014-05-07 17:05:49.488157029 +0200 @@ -111,6 +111,8 @@ cache-level = <2>; arm,tag-latency = <4 2 3>; arm,data-latency = <4 2 3>; + arm,dynamic-clk-gating; + arm,standby-mode; }; pmu { diff -Nur linux-3.15-rc4.orig/arch/arm/boot/dts/marco.dtsi linux-3.15-rc4/arch/arm/boot/dts/marco.dtsi --- linux-3.15-rc4.orig/arch/arm/boot/dts/marco.dtsi 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/boot/dts/marco.dtsi 2014-05-07 17:05:49.488157029 +0200 @@ -36,7 +36,7 @@ ranges = <0x40000000 0x40000000 0xa0000000>; l2-cache-controller@c0030000 { - compatible = "sirf,marco-pl310-cache", "arm,pl310-cache"; + compatible = "arm,pl310-cache"; reg = <0xc0030000 0x1000>; interrupts = <0 59 0>; arm,tag-latency = <1 1 1>; diff -Nur linux-3.15-rc4.orig/arch/arm/boot/dts/prima2.dtsi linux-3.15-rc4/arch/arm/boot/dts/prima2.dtsi --- linux-3.15-rc4.orig/arch/arm/boot/dts/prima2.dtsi 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/boot/dts/prima2.dtsi 2014-05-07 17:05:49.488157029 +0200 @@ -48,7 +48,7 @@ ranges = <0x40000000 0x40000000 0x80000000>; l2-cache-controller@80040000 { - compatible = "arm,pl310-cache", "sirf,prima2-pl310-cache"; + compatible = "arm,pl310-cache"; reg = <0x80040000 0x1000>; interrupts = <59>; arm,tag-latency = <1 1 1>; diff -Nur linux-3.15-rc4.orig/arch/arm/configs/imx_v6_v7_defconfig linux-3.15-rc4/arch/arm/configs/imx_v6_v7_defconfig --- linux-3.15-rc4.orig/arch/arm/configs/imx_v6_v7_defconfig 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/configs/imx_v6_v7_defconfig 2014-05-07 17:05:49.488157029 +0200 @@ -245,6 +245,7 @@ CONFIG_DRM_IMX_LDB=y CONFIG_DRM_IMX_IPUV3_CORE=y CONFIG_DRM_IMX_IPUV3=y +CONFIG_DRM_IMX_HDMI=y CONFIG_COMMON_CLK_DEBUG=y # CONFIG_IOMMU_SUPPORT is not set CONFIG_PWM=y diff -Nur linux-3.15-rc4.orig/arch/arm/include/asm/hardware/cache-l2x0.h linux-3.15-rc4/arch/arm/include/asm/hardware/cache-l2x0.h --- linux-3.15-rc4.orig/arch/arm/include/asm/hardware/cache-l2x0.h 2014-05-05 
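
The usdhc2 node above now carries three pinctrl states ("default", "state_100mhz", "state_200mhz") so the pad drive strength can follow the SD bus speed, and uart4/usdhc1 gain their own default states for the optional BRCM WiFi/BT module. A minimal sketch of how a consumer driver switches between named pinctrl states follows; example_set_pad_state() and its frequency thresholds are invented for illustration and are not the actual sdhci-esdhc-imx logic.

    #include <linux/err.h>
    #include <linux/pinctrl/consumer.h>

    /* Hypothetical helper: pick pad settings to match the card clock. */
    static int example_set_pad_state(struct device *dev, unsigned int clk_hz)
    {
            struct pinctrl *p;
            struct pinctrl_state *s;
            const char *name;

            p = devm_pinctrl_get(dev);
            if (IS_ERR(p))
                    return PTR_ERR(p);

            if (clk_hz > 100000000)
                    name = "state_200mhz";
            else if (clk_hz > 50000000)
                    name = "state_100mhz";
            else
                    name = PINCTRL_STATE_DEFAULT;   /* "default" */

            s = pinctrl_lookup_state(p, name);
            if (IS_ERR(s))
                    return PTR_ERR(s);

            return pinctrl_select_state(p, s);
    }
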
03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/include/asm/hardware/cache-l2x0.h 2014-05-07 17:05:49.488157029 +0200 @@ -26,8 +26,8 @@ #define L2X0_CACHE_TYPE 0x004 #define L2X0_CTRL 0x100 #define L2X0_AUX_CTRL 0x104 -#define L2X0_TAG_LATENCY_CTRL 0x108 -#define L2X0_DATA_LATENCY_CTRL 0x10C +#define L310_TAG_LATENCY_CTRL 0x108 +#define L310_DATA_LATENCY_CTRL 0x10C #define L2X0_EVENT_CNT_CTRL 0x200 #define L2X0_EVENT_CNT1_CFG 0x204 #define L2X0_EVENT_CNT0_CFG 0x208 @@ -54,53 +54,93 @@ #define L2X0_LOCKDOWN_WAY_D_BASE 0x900 #define L2X0_LOCKDOWN_WAY_I_BASE 0x904 #define L2X0_LOCKDOWN_STRIDE 0x08 -#define L2X0_ADDR_FILTER_START 0xC00 -#define L2X0_ADDR_FILTER_END 0xC04 +#define L310_ADDR_FILTER_START 0xC00 +#define L310_ADDR_FILTER_END 0xC04 #define L2X0_TEST_OPERATION 0xF00 #define L2X0_LINE_DATA 0xF10 #define L2X0_LINE_TAG 0xF30 #define L2X0_DEBUG_CTRL 0xF40 -#define L2X0_PREFETCH_CTRL 0xF60 -#define L2X0_POWER_CTRL 0xF80 -#define L2X0_DYNAMIC_CLK_GATING_EN (1 << 1) -#define L2X0_STNDBY_MODE_EN (1 << 0) +#define L310_PREFETCH_CTRL 0xF60 +#define L310_POWER_CTRL 0xF80 +#define L310_DYNAMIC_CLK_GATING_EN (1 << 1) +#define L310_STNDBY_MODE_EN (1 << 0) /* Registers shifts and masks */ #define L2X0_CACHE_ID_PART_MASK (0xf << 6) #define L2X0_CACHE_ID_PART_L210 (1 << 6) +#define L2X0_CACHE_ID_PART_L220 (2 << 6) #define L2X0_CACHE_ID_PART_L310 (3 << 6) #define L2X0_CACHE_ID_RTL_MASK 0x3f -#define L2X0_CACHE_ID_RTL_R0P0 0x0 -#define L2X0_CACHE_ID_RTL_R1P0 0x2 -#define L2X0_CACHE_ID_RTL_R2P0 0x4 -#define L2X0_CACHE_ID_RTL_R3P0 0x5 -#define L2X0_CACHE_ID_RTL_R3P1 0x6 -#define L2X0_CACHE_ID_RTL_R3P2 0x8 - -#define L2X0_AUX_CTRL_MASK 0xc0000fff +#define L210_CACHE_ID_RTL_R0P2_02 0x00 +#define L210_CACHE_ID_RTL_R0P1 0x01 +#define L210_CACHE_ID_RTL_R0P2_01 0x02 +#define L210_CACHE_ID_RTL_R0P3 0x03 +#define L210_CACHE_ID_RTL_R0P4 0x0b +#define L210_CACHE_ID_RTL_R0P5 0x0f +#define L220_CACHE_ID_RTL_R1P7_01REL0 0x06 +#define L310_CACHE_ID_RTL_R0P0 0x00 +#define L310_CACHE_ID_RTL_R1P0 0x02 +#define L310_CACHE_ID_RTL_R2P0 0x04 +#define L310_CACHE_ID_RTL_R3P0 0x05 +#define L310_CACHE_ID_RTL_R3P1 0x06 +#define L310_CACHE_ID_RTL_R3P1_50REL0 0x07 +#define L310_CACHE_ID_RTL_R3P2 0x08 +#define L310_CACHE_ID_RTL_R3P3 0x09 + +/* L2C auxiliary control register - bits common to L2C-210/220/310 */ +#define L2C_AUX_CTRL_WAY_SIZE_SHIFT 17 +#define L2C_AUX_CTRL_WAY_SIZE_MASK (7 << 17) +#define L2C_AUX_CTRL_WAY_SIZE(n) ((n) << 17) +#define L2C_AUX_CTRL_EVTMON_ENABLE BIT(20) +#define L2C_AUX_CTRL_PARITY_ENABLE BIT(21) +#define L2C_AUX_CTRL_SHARED_OVERRIDE BIT(22) +/* L2C-210/220 common bits */ #define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT 0 -#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK 0x7 +#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK (7 << 0) #define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT 3 -#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (0x7 << 3) +#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (7 << 3) #define L2X0_AUX_CTRL_TAG_LATENCY_SHIFT 6 -#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (0x7 << 6) +#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (7 << 6) #define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT 9 -#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (0x7 << 9) -#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16 -#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17 -#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17) -#define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT 22 -#define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT 26 -#define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT 27 -#define L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT 28 -#define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT 29 -#define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT 
30 - -#define L2X0_LATENCY_CTRL_SETUP_SHIFT 0 -#define L2X0_LATENCY_CTRL_RD_SHIFT 4 -#define L2X0_LATENCY_CTRL_WR_SHIFT 8 - -#define L2X0_ADDR_FILTER_EN 1 +#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (7 << 9) +#define L2X0_AUX_CTRL_ASSOC_SHIFT 13 +#define L2X0_AUX_CTRL_ASSOC_MASK (15 << 13) +/* L2C-210 specific bits */ +#define L210_AUX_CTRL_WRAP_DISABLE BIT(12) +#define L210_AUX_CTRL_WA_OVERRIDE BIT(23) +#define L210_AUX_CTRL_EXCLUSIVE_ABORT BIT(24) +/* L2C-220 specific bits */ +#define L220_AUX_CTRL_EXCLUSIVE_CACHE BIT(12) +#define L220_AUX_CTRL_FWA_SHIFT 23 +#define L220_AUX_CTRL_FWA_MASK (3 << 23) +#define L220_AUX_CTRL_NS_LOCKDOWN BIT(26) +#define L220_AUX_CTRL_NS_INT_CTRL BIT(27) +/* L2C-310 specific bits */ +#define L310_AUX_CTRL_FULL_LINE_ZERO BIT(0) /* R2P0+ */ +#define L310_AUX_CTRL_HIGHPRIO_SO_DEV BIT(10) /* R2P0+ */ +#define L310_AUX_CTRL_STORE_LIMITATION BIT(11) /* R2P0+ */ +#define L310_AUX_CTRL_EXCLUSIVE_CACHE BIT(12) +#define L310_AUX_CTRL_ASSOCIATIVITY_16 BIT(16) +#define L310_AUX_CTRL_CACHE_REPLACE_RR BIT(25) /* R2P0+ */ +#define L310_AUX_CTRL_NS_LOCKDOWN BIT(26) +#define L310_AUX_CTRL_NS_INT_CTRL BIT(27) +#define L310_AUX_CTRL_DATA_PREFETCH BIT(28) +#define L310_AUX_CTRL_INSTR_PREFETCH BIT(29) +#define L310_AUX_CTRL_EARLY_BRESP BIT(30) /* R2P0+ */ + +#define L310_LATENCY_CTRL_SETUP(n) ((n) << 0) +#define L310_LATENCY_CTRL_RD(n) ((n) << 4) +#define L310_LATENCY_CTRL_WR(n) ((n) << 8) + +#define L310_ADDR_FILTER_EN 1 + +#define L310_PREFETCH_CTRL_OFFSET_MASK 0x1f +#define L310_PREFETCH_CTRL_DBL_LINEFILL_INCR BIT(23) +#define L310_PREFETCH_CTRL_PREFETCH_DROP BIT(24) +#define L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP BIT(27) +#define L310_PREFETCH_CTRL_DATA_PREFETCH BIT(28) +#define L310_PREFETCH_CTRL_INSTR_PREFETCH BIT(29) +#define L310_PREFETCH_CTRL_DBL_LINEFILL BIT(30) #define L2X0_CTRL_EN 1 diff -Nur linux-3.15-rc4.orig/arch/arm/include/asm/outercache.h linux-3.15-rc4/arch/arm/include/asm/outercache.h --- linux-3.15-rc4.orig/arch/arm/include/asm/outercache.h 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/include/asm/outercache.h 2014-05-07 17:05:49.488157029 +0200 @@ -21,6 +21,7 @@ #ifndef __ASM_OUTERCACHE_H #define __ASM_OUTERCACHE_H +#include #include struct outer_cache_fns { @@ -28,53 +29,84 @@ void (*clean_range)(unsigned long, unsigned long); void (*flush_range)(unsigned long, unsigned long); void (*flush_all)(void); - void (*inv_all)(void); void (*disable)(void); #ifdef CONFIG_OUTER_CACHE_SYNC void (*sync)(void); #endif - void (*set_debug)(unsigned long); void (*resume)(void); + + /* This is an ARM L2C thing */ + void (*write_sec)(unsigned long, unsigned); }; extern struct outer_cache_fns outer_cache; #ifdef CONFIG_OUTER_CACHE - +/** + * outer_inv_range - invalidate range of outer cache lines + * @start: starting physical address, inclusive + * @end: end physical address, exclusive + */ static inline void outer_inv_range(phys_addr_t start, phys_addr_t end) { if (outer_cache.inv_range) outer_cache.inv_range(start, end); } + +/** + * outer_clean_range - clean dirty outer cache lines + * @start: starting physical address, inclusive + * @end: end physical address, exclusive + */ static inline void outer_clean_range(phys_addr_t start, phys_addr_t end) { if (outer_cache.clean_range) outer_cache.clean_range(start, end); } + +/** + * outer_flush_range - clean and invalidate outer cache lines + * @start: starting physical address, inclusive + * @end: end physical address, exclusive + */ static inline void outer_flush_range(phys_addr_t start, phys_addr_t end) { if 
(outer_cache.flush_range) outer_cache.flush_range(start, end); } +/** + * outer_flush_all - clean and invalidate all cache lines in the outer cache + * + * Note: depending on implementation, this may not be atomic - it must + * only be called with interrupts disabled and no other active outer + * cache masters. + * + * It is intended that this function is only used by implementations + * needing to override the outer_cache.disable() method due to security. + * (Some implementations perform this as a clean followed by an invalidate.) + */ static inline void outer_flush_all(void) { if (outer_cache.flush_all) outer_cache.flush_all(); } -static inline void outer_inv_all(void) -{ - if (outer_cache.inv_all) - outer_cache.inv_all(); -} - -static inline void outer_disable(void) -{ - if (outer_cache.disable) - outer_cache.disable(); -} - +/** + * outer_disable - clean, invalidate and disable the outer cache + * + * Disable the outer cache, ensuring that any data contained in the outer + * cache is pushed out to lower levels of system memory. The note and + * conditions above concerning outer_flush_all() applies here. + */ +extern void outer_disable(void); + +/** + * outer_resume - restore the cache configuration and re-enable outer cache + * + * Restore any configuration that the cache had when previously enabled, + * and re-enable the outer cache. + */ static inline void outer_resume(void) { if (outer_cache.resume) @@ -90,13 +122,18 @@ static inline void outer_flush_range(phys_addr_t start, phys_addr_t end) { } static inline void outer_flush_all(void) { } -static inline void outer_inv_all(void) { } static inline void outer_disable(void) { } static inline void outer_resume(void) { } #endif #ifdef CONFIG_OUTER_CACHE_SYNC +/** + * outer_sync - perform a sync point for outer cache + * + * Ensure that all outer cache operations are complete and any store + * buffers are drained. + */ static inline void outer_sync(void) { if (outer_cache.sync) diff -Nur linux-3.15-rc4.orig/arch/arm/Kconfig linux-3.15-rc4/arch/arm/Kconfig --- linux-3.15-rc4.orig/arch/arm/Kconfig 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/Kconfig 2014-05-07 17:05:49.488157029 +0200 @@ -1230,19 +1230,6 @@ register of the Cortex-A9 which reduces the linefill issuing capabilities of the processor. -config PL310_ERRATA_588369 - bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines" - depends on CACHE_L2X0 - help - The PL310 L2 cache controller implements three types of Clean & - Invalidate maintenance operations: by Physical Address - (offset 0x7F0), by Index/Way (0x7F8) and by Way (0x7FC). - They are architecturally defined to behave as the execution of a - clean operation followed immediately by an invalidate operation, - both performing to the same memory location. This functionality - is not correctly implemented in PL310 as clean lines are not - invalidated as a result of these operations. - config ARM_ERRATA_643719 bool "ARM errata: LoUIS bit field in CLIDR register is incorrect" depends on CPU_V7 && SMP @@ -1265,17 +1252,6 @@ tables. The workaround changes the TLB flushing routines to invalidate entries regardless of the ASID. -config PL310_ERRATA_727915 - bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption" - depends on CACHE_L2X0 - help - PL310 implements the Clean & Invalidate by Way L2 cache maintenance - operation (offset 0x7FC). This operation runs in background so that - PL310 can handle normal accesses while it is in progress. 
Under very - rare circumstances, due to this erratum, write data can be lost when - PL310 treats a cacheable write transaction during a Clean & - Invalidate by Way operation. - config ARM_ERRATA_743622 bool "ARM errata: Faulty hazard checking in the Store Buffer may lead to data corruption" depends on CPU_V7 @@ -1301,21 +1277,6 @@ operation is received by a CPU before the ICIALLUIS has completed, potentially leading to corrupted entries in the cache or TLB. -config PL310_ERRATA_753970 - bool "PL310 errata: cache sync operation may be faulty" - depends on CACHE_PL310 - help - This option enables the workaround for the 753970 PL310 (r3p0) erratum. - - Under some condition the effect of cache sync operation on - the store buffer still remains when the operation completes. - This means that the store buffer is always asked to drain and - this prevents it from merging any further writes. The workaround - is to replace the normal offset of cache sync operation (0x730) - by another offset targeting an unmapped PL310 register 0x740. - This has the same effect as the cache sync operation: store buffer - drain and waiting for all buffers empty. - config ARM_ERRATA_754322 bool "ARM errata: possible faulty MMU translations following an ASID switch" depends on CPU_V7 @@ -1364,18 +1325,6 @@ relevant cache maintenance functions and sets a specific bit in the diagnostic control register of the SCU. -config PL310_ERRATA_769419 - bool "PL310 errata: no automatic Store Buffer drain" - depends on CACHE_L2X0 - help - On revisions of the PL310 prior to r3p2, the Store Buffer does - not automatically drain. This can cause normal, non-cacheable - writes to be retained when the memory system is idle, leading - to suboptimal I/O performance for drivers using coherent DMA. - This option adds a write barrier to the cpu_idle loop so that, - on systems with an outer cache, the store buffer is drained - explicitly. - config ARM_ERRATA_775420 bool "ARM errata: A data cache maintenance operation which aborts, might lead to deadlock" depends on CPU_V7 diff -Nur linux-3.15-rc4.orig/arch/arm/mach-berlin/berlin.c linux-3.15-rc4/arch/arm/mach-berlin/berlin.c --- linux-3.15-rc4.orig/arch/arm/mach-berlin/berlin.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-berlin/berlin.c 2014-05-07 17:05:49.492157054 +0200 @@ -24,7 +24,7 @@ * with DT probing for L2CCs, berlin_init_machine can be removed. 
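
The kerneldoc added to outercache.h earlier in this patch spells out which direction each outer-cache helper works in. As a reminder of how they are used in practice, the usual pattern around a DMA buffer is sketched below; example_outer_maintenance(), its parameters, and the omission of the matching inner (L1) cache maintenance are all simplifications for illustration.

    #include <linux/types.h>
    #include <asm/outercache.h>

    /* Sketch only: outer-cache maintenance around a DMA transfer. */
    static void example_outer_maintenance(phys_addr_t paddr, size_t size,
                                          bool to_device)
    {
            if (to_device)
                    /* CPU wrote the buffer: push dirty lines out to DRAM. */
                    outer_clean_range(paddr, paddr + size);
            else
                    /* Device wrote the buffer: discard stale lines first. */
                    outer_inv_range(paddr, paddr + size);
    }
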
* Note: 88DE3005 (Armada 1500-mini) uses pl310 l2cc */ - l2x0_of_init(0x70c00000, 0xfeffffff); + l2x0_of_init(0x30c00000, 0xfeffffff); of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } diff -Nur linux-3.15-rc4.orig/arch/arm/mach-cns3xxx/core.c linux-3.15-rc4/arch/arm/mach-cns3xxx/core.c --- linux-3.15-rc4.orig/arch/arm/mach-cns3xxx/core.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-cns3xxx/core.c 2014-05-07 17:05:49.492157054 +0200 @@ -272,9 +272,9 @@ * * 1 cycle of latency for setup, read and write accesses */ - val = readl(base + L2X0_TAG_LATENCY_CTRL); + val = readl(base + L310_TAG_LATENCY_CTRL); val &= 0xfffff888; - writel(val, base + L2X0_TAG_LATENCY_CTRL); + writel(val, base + L310_TAG_LATENCY_CTRL); /* * Data RAM Control register @@ -285,12 +285,12 @@ * * 1 cycle of latency for setup, read and write accesses */ - val = readl(base + L2X0_DATA_LATENCY_CTRL); + val = readl(base + L310_DATA_LATENCY_CTRL); val &= 0xfffff888; - writel(val, base + L2X0_DATA_LATENCY_CTRL); + writel(val, base + L310_DATA_LATENCY_CTRL); /* 32 KiB, 8-way, parity disable */ - l2x0_init(base, 0x00540000, 0xfe000fff); + l2x0_init(base, 0x00500000, 0xfe0f0fff); } #endif /* CONFIG_CACHE_L2X0 */ diff -Nur linux-3.15-rc4.orig/arch/arm/mach-exynos/common.h linux-3.15-rc4/arch/arm/mach-exynos/common.h --- linux-3.15-rc4.orig/arch/arm/mach-exynos/common.h 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-exynos/common.h 2014-05-07 17:05:49.492157054 +0200 @@ -55,7 +55,6 @@ NUM_SYS_POWERDOWN, }; -extern unsigned long l2x0_regs_phys; struct exynos_pmu_conf { void __iomem *reg; unsigned int val[NUM_SYS_POWERDOWN]; diff -Nur linux-3.15-rc4.orig/arch/arm/mach-exynos/exynos.c linux-3.15-rc4/arch/arm/mach-exynos/exynos.c --- linux-3.15-rc4.orig/arch/arm/mach-exynos/exynos.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-exynos/exynos.c 2014-05-07 17:05:49.492157054 +0200 @@ -32,9 +32,6 @@ #include "mfc.h" #include "regs-pmu.h" -#define L2_AUX_VAL 0x7C470001 -#define L2_AUX_MASK 0xC200ffff - static struct map_desc exynos4_iodesc[] __initdata = { { .virtual = (unsigned long)S3C_VA_SYS, @@ -321,17 +318,7 @@ static int __init exynos4_l2x0_cache_init(void) { - int ret; - - ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK); - if (ret) - return ret; - - if (IS_ENABLED(CONFIG_S5P_SLEEP)) { - l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs); - clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long)); - } - return 0; + return l2x0_of_init(0x3c400001, 0xc20fffff); } early_initcall(exynos4_l2x0_cache_init); diff -Nur linux-3.15-rc4.orig/arch/arm/mach-exynos/sleep.S linux-3.15-rc4/arch/arm/mach-exynos/sleep.S --- linux-3.15-rc4.orig/arch/arm/mach-exynos/sleep.S 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-exynos/sleep.S 2014-05-07 17:05:49.492157054 +0200 @@ -16,8 +16,6 @@ */ #include -#include -#include #define CPU_MASK 0xff0ffff0 #define CPU_CORTEX_A9 0x410fc090 @@ -53,33 +51,7 @@ and r0, r0, r1 ldr r1, =CPU_CORTEX_A9 cmp r0, r1 - bne skip_l2_resume - adr r0, l2x0_regs_phys - ldr r0, [r0] - cmp r0, #0 - beq skip_l2_resume - ldr r1, [r0, #L2X0_R_PHY_BASE] - ldr r2, [r1, #L2X0_CTRL] - tst r2, #0x1 - bne skip_l2_resume - ldr r2, [r0, #L2X0_R_AUX_CTRL] - str r2, [r1, #L2X0_AUX_CTRL] - ldr r2, [r0, #L2X0_R_TAG_LATENCY] - str r2, [r1, #L2X0_TAG_LATENCY_CTRL] - ldr r2, [r0, #L2X0_R_DATA_LATENCY] - str r2, [r1, #L2X0_DATA_LATENCY_CTRL] - ldr r2, [r0, #L2X0_R_PREFETCH_CTRL] - str r2, [r1, #L2X0_PREFETCH_CTRL] - ldr r2, [r0, #L2X0_R_PWR_CTRL] - 
str r2, [r1, #L2X0_POWER_CTRL] - mov r2, #1 - str r2, [r1, #L2X0_CTRL] -skip_l2_resume: + bleq l2c310_early_resume #endif b cpu_resume ENDPROC(exynos_cpu_resume) -#ifdef CONFIG_CACHE_L2X0 - .globl l2x0_regs_phys -l2x0_regs_phys: - .long 0 -#endif diff -Nur linux-3.15-rc4.orig/arch/arm/mach-highbank/highbank.c linux-3.15-rc4/arch/arm/mach-highbank/highbank.c --- linux-3.15-rc4.orig/arch/arm/mach-highbank/highbank.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-highbank/highbank.c 2014-05-07 17:05:49.492157054 +0200 @@ -51,11 +51,13 @@ } -static void highbank_l2x0_disable(void) +static void highbank_l2c310_write_sec(unsigned long val, unsigned reg) { - outer_flush_all(); - /* Disable PL310 L2 Cache controller */ - highbank_smc1(0x102, 0x0); + if (reg == L2X0_CTRL) + highbank_smc1(0x102, val); + else + WARN_ONCE(1, "Highbank L2C310: ignoring write to reg 0x%x\n", + reg); } static void __init highbank_init_irq(void) @@ -66,11 +68,9 @@ highbank_scu_map_io(); /* Enable PL310 L2 Cache controller */ - if (IS_ENABLED(CONFIG_CACHE_L2X0) && - of_find_compatible_node(NULL, NULL, "arm,pl310-cache")) { - highbank_smc1(0x102, 0x1); - l2x0_of_init(0, ~0UL); - outer_cache.disable = highbank_l2x0_disable; + if (IS_ENABLED(CONFIG_CACHE_L2X0)) { + outer_cache.write_sec = highbank_l2c310_write_sec; + l2x0_of_init(0, ~0); } } diff -Nur linux-3.15-rc4.orig/arch/arm/mach-imx/clk-pllv3.c linux-3.15-rc4/arch/arm/mach-imx/clk-pllv3.c --- linux-3.15-rc4.orig/arch/arm/mach-imx/clk-pllv3.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-imx/clk-pllv3.c 2014-05-07 17:05:51.040166651 +0200 @@ -273,9 +273,10 @@ struct clk_pllv3 *pll = to_clk_pllv3(hw); unsigned long min_rate = parent_rate * 27; unsigned long max_rate = parent_rate * 54; - u32 val, div; + u32 val, newval, div; u32 mfn, mfd = 1000000; s64 temp64; + int ret; if (rate < min_rate || rate > max_rate) return -EINVAL; @@ -287,13 +288,27 @@ mfn = temp64; val = readl_relaxed(pll->base); - val &= ~pll->div_mask; - val |= div; - writel_relaxed(val, pll->base); + + /* set the PLL into bypass mode */ + newval = val | BM_PLL_BYPASS; + writel_relaxed(newval, pll->base); + + /* configure the new frequency */ + newval &= ~pll->div_mask; + newval |= div; + writel_relaxed(newval, pll->base); writel_relaxed(mfn, pll->base + PLL_NUM_OFFSET); - writel_relaxed(mfd, pll->base + PLL_DENOM_OFFSET); + writel(mfd, pll->base + PLL_DENOM_OFFSET); + + ret = clk_pllv3_wait_lock(pll); + if (ret == 0 && val & BM_PLL_POWER) { + /* only if it locked can we switch back to the PLL */ + newval &= ~BM_PLL_BYPASS; + newval |= val & BM_PLL_BYPASS; + writel(newval, pll->base); + } - return clk_pllv3_wait_lock(pll); + return ret; } static const struct clk_ops clk_pllv3_av_ops = { diff -Nur linux-3.15-rc4.orig/arch/arm/mach-imx/mach-vf610.c linux-3.15-rc4/arch/arm/mach-imx/mach-vf610.c --- linux-3.15-rc4.orig/arch/arm/mach-imx/mach-vf610.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-imx/mach-vf610.c 2014-05-07 17:05:51.040166651 +0200 @@ -22,7 +22,7 @@ static void __init vf610_init_irq(void) { - l2x0_of_init(0, ~0UL); + l2x0_of_init(0, ~0); irqchip_init(); } diff -Nur linux-3.15-rc4.orig/arch/arm/mach-imx/suspend-imx6.S linux-3.15-rc4/arch/arm/mach-imx/suspend-imx6.S --- linux-3.15-rc4.orig/arch/arm/mach-imx/suspend-imx6.S 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-imx/suspend-imx6.S 2014-05-07 17:05:51.040166651 +0200 @@ -334,28 +334,10 @@ * turned into relative ones. 
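
The new outer_cache.write_sec hook shown above for highbank (and further down for OMAP4 and ux500) lets a platform route writes to secure-only L2C-310 registers through its firmware once, instead of overriding individual cache methods. A minimal sketch of such a hook; my_firmware_l2x0_write() is a hypothetical stand-in for the platform's SMC/secure-monitor call and example_init_l2() is likewise invented.

    #include <linux/bug.h>
    #include <linux/init.h>
    #include <asm/hardware/cache-l2x0.h>
    #include <asm/outercache.h>

    /* Hypothetical firmware service that performs the privileged write. */
    extern void my_firmware_l2x0_write(unsigned long val, unsigned reg);

    static void example_l2c310_write_sec(unsigned long val, unsigned reg)
    {
            switch (reg) {
            case L2X0_CTRL:
            case L2X0_AUX_CTRL:
            case L310_PREFETCH_CTRL:
            case L310_POWER_CTRL:
                    my_firmware_l2x0_write(val, reg);
                    break;
            default:
                    WARN_ONCE(1, "L2C310: ignoring write to reg 0x%x\n", reg);
                    break;
            }
    }

    static void __init example_init_l2(void)
    {
            outer_cache.write_sec = example_l2c310_write_sec;
            l2x0_of_init(0, ~0);
    }
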
*/ -#ifdef CONFIG_CACHE_L2X0 - .macro pl310_resume - adr r0, l2x0_saved_regs_offset - ldr r2, [r0] - add r2, r2, r0 - ldr r0, [r2, #L2X0_R_PHY_BASE] @ get physical base of l2x0 - ldr r1, [r2, #L2X0_R_AUX_CTRL] @ get aux_ctrl value - str r1, [r0, #L2X0_AUX_CTRL] @ restore aux_ctrl - mov r1, #0x1 - str r1, [r0, #L2X0_CTRL] @ re-enable L2 - .endm - -l2x0_saved_regs_offset: - .word l2x0_saved_regs - . - -#else - .macro pl310_resume - .endm -#endif - ENTRY(v7_cpu_resume) bl v7_invalidate_l1 - pl310_resume +#ifdef CONFIG_CACHE_L2X0 + bl l2c310_early_resume +#endif b cpu_resume ENDPROC(v7_cpu_resume) diff -Nur linux-3.15-rc4.orig/arch/arm/mach-imx/system.c linux-3.15-rc4/arch/arm/mach-imx/system.c --- linux-3.15-rc4.orig/arch/arm/mach-imx/system.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-imx/system.c 2014-05-07 17:05:51.040166651 +0200 @@ -124,7 +124,7 @@ } /* Configure the L2 PREFETCH and POWER registers */ - val = readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL); + val = readl_relaxed(l2x0_base + L310_PREFETCH_CTRL); val |= 0x70800000; /* * The L2 cache controller(PL310) version on the i.MX6D/Q is r3p1-50rel0 @@ -137,14 +137,12 @@ */ if (cpu_is_imx6q()) val &= ~(1 << 30 | 1 << 23); - writel_relaxed(val, l2x0_base + L2X0_PREFETCH_CTRL); - val = L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN; - writel_relaxed(val, l2x0_base + L2X0_POWER_CTRL); + writel_relaxed(val, l2x0_base + L310_PREFETCH_CTRL); iounmap(l2x0_base); of_node_put(np); out: - l2x0_of_init(0, ~0UL); + l2x0_of_init(0, ~0); } #endif diff -Nur linux-3.15-rc4.orig/arch/arm/mach-mvebu/board-v7.c linux-3.15-rc4/arch/arm/mach-mvebu/board-v7.c --- linux-3.15-rc4.orig/arch/arm/mach-mvebu/board-v7.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-mvebu/board-v7.c 2014-05-07 17:05:51.040166651 +0200 @@ -60,7 +60,7 @@ coherency_init(); BUG_ON(mvebu_mbus_dt_init()); #ifdef CONFIG_CACHE_L2X0 - l2x0_of_init(0, ~0UL); + l2x0_of_init(0, ~0); #endif if (of_machine_is_compatible("marvell,armada375")) diff -Nur linux-3.15-rc4.orig/arch/arm/mach-nomadik/cpu-8815.c linux-3.15-rc4/arch/arm/mach-nomadik/cpu-8815.c --- linux-3.15-rc4.orig/arch/arm/mach-nomadik/cpu-8815.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-nomadik/cpu-8815.c 2014-05-07 17:05:51.040166651 +0200 @@ -147,7 +147,7 @@ { #ifdef CONFIG_CACHE_L2X0 /* At full speed latency must be >=2, so 0x249 in low bits */ - l2x0_of_init(0x00730249, 0xfe000fff); + l2x0_of_init(0x00700249, 0xfe0fefff); #endif of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } diff -Nur linux-3.15-rc4.orig/arch/arm/mach-omap2/common.h linux-3.15-rc4/arch/arm/mach-omap2/common.h --- linux-3.15-rc4.orig/arch/arm/mach-omap2/common.h 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-omap2/common.h 2014-05-07 17:05:51.040166651 +0200 @@ -91,6 +91,7 @@ extern void omap3_secure_sync32k_timer_init(void); extern void omap3_gptimer_timer_init(void); extern void omap4_local_timer_init(void); +int omap_l2_cache_init(void); extern void omap5_realtime_timer_init(void); void omap2420_init_early(void); diff -Nur linux-3.15-rc4.orig/arch/arm/mach-omap2/io.c linux-3.15-rc4/arch/arm/mach-omap2/io.c --- linux-3.15-rc4.orig/arch/arm/mach-omap2/io.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-omap2/io.c 2014-05-07 17:05:51.040166651 +0200 @@ -609,6 +609,7 @@ am43xx_clockdomains_init(); am43xx_hwmod_init(); omap_hwmod_init_postsetup(); + omap_l2_cache_init(); omap_clk_soc_init = am43xx_dt_clk_init; } @@ -640,6 
+641,7 @@ omap44xx_clockdomains_init(); omap44xx_hwmod_init(); omap_hwmod_init_postsetup(); + omap_l2_cache_init(); omap_clk_soc_init = omap4xxx_dt_clk_init; } diff -Nur linux-3.15-rc4.orig/arch/arm/mach-omap2/Kconfig linux-3.15-rc4/arch/arm/mach-omap2/Kconfig --- linux-3.15-rc4.orig/arch/arm/mach-omap2/Kconfig 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-omap2/Kconfig 2014-05-07 17:05:51.040166651 +0200 @@ -65,6 +65,7 @@ select ARCH_HAS_OPP select ARM_GIC select MACH_OMAP_GENERIC + select MIGHT_HAVE_CACHE_L2X0 config SOC_DRA7XX bool "TI DRA7XX" diff -Nur linux-3.15-rc4.orig/arch/arm/mach-omap2/omap4-common.c linux-3.15-rc4/arch/arm/mach-omap2/omap4-common.c --- linux-3.15-rc4.orig/arch/arm/mach-omap2/omap4-common.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-omap2/omap4-common.c 2014-05-07 17:05:51.040166651 +0200 @@ -167,75 +167,57 @@ return l2cache_base; } -static void omap4_l2x0_disable(void) +static void omap4_l2c310_write_sec(unsigned long val, unsigned reg) { - outer_flush_all(); - /* Disable PL310 L2 Cache controller */ - omap_smc1(0x102, 0x0); -} + unsigned smc_op; -static void omap4_l2x0_set_debug(unsigned long val) -{ - /* Program PL310 L2 Cache controller debug register */ - omap_smc1(0x100, val); + switch (reg) { + case L2X0_CTRL: + smc_op = OMAP4_MON_L2X0_CTRL_INDEX; + break; + + case L2X0_AUX_CTRL: + smc_op = OMAP4_MON_L2X0_AUXCTRL_INDEX; + break; + + case L2X0_DEBUG_CTRL: + smc_op = OMAP4_MON_L2X0_DBG_CTRL_INDEX; + break; + + case L310_PREFETCH_CTRL: + smc_op = OMAP4_MON_L2X0_PREFETCH_INDEX; + break; + + default: + WARN_ONCE(1, "OMAP L2C310: ignoring write to reg 0x%x\n", reg); + return; + } + + omap_smc1(smc_op, val); } -static int __init omap_l2_cache_init(void) +int __init omap_l2_cache_init(void) { - u32 aux_ctrl = 0; - - /* - * To avoid code running on other OMAPs in - * multi-omap builds - */ - if (!cpu_is_omap44xx()) - return -ENODEV; + u32 aux_ctrl; /* Static mapping, never released */ l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K); if (WARN_ON(!l2cache_base)) return -ENOMEM; - /* - * 16-way associativity, parity disabled - * Way size - 32KB (es1.0) - * Way size - 64KB (es2.0 +) - */ - aux_ctrl = ((1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT) | - (0x1 << 25) | - (0x1 << L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT) | - (0x1 << L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT)); - - if (omap_rev() == OMAP4430_REV_ES1_0) { - aux_ctrl |= 0x2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT; - } else { - aux_ctrl |= ((0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) | - (1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) | - (1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT) | - (1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT) | - (1 << L2X0_AUX_CTRL_EARLY_BRESP_SHIFT)); - } - if (omap_rev() != OMAP4430_REV_ES1_0) - omap_smc1(0x109, aux_ctrl); - - /* Enable PL310 L2 Cache controller */ - omap_smc1(0x102, 0x1); + /* 16-way associativity, parity disabled, way size - 64KB (es2.0 +) */ + aux_ctrl = L2C_AUX_CTRL_SHARED_OVERRIDE | + L310_AUX_CTRL_DATA_PREFETCH | + L310_AUX_CTRL_INSTR_PREFETCH; + outer_cache.write_sec = omap4_l2c310_write_sec; if (of_have_populated_dt()) - l2x0_of_init(aux_ctrl, L2X0_AUX_CTRL_MASK); + l2x0_of_init(aux_ctrl, 0xcf9fffff); else - l2x0_init(l2cache_base, aux_ctrl, L2X0_AUX_CTRL_MASK); - - /* - * Override default outer_cache.disable with a OMAP4 - * specific one - */ - outer_cache.disable = omap4_l2x0_disable; - outer_cache.set_debug = omap4_l2x0_set_debug; + l2x0_init(l2cache_base, aux_ctrl, 0xcf9fffff); return 0; } -omap_early_initcall(omap_l2_cache_init); #endif void __iomem 
*omap4_get_sar_ram_base(void) diff -Nur linux-3.15-rc4.orig/arch/arm/mach-omap2/omap-mpuss-lowpower.c linux-3.15-rc4/arch/arm/mach-omap2/omap-mpuss-lowpower.c --- linux-3.15-rc4.orig/arch/arm/mach-omap2/omap-mpuss-lowpower.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-omap2/omap-mpuss-lowpower.c 2014-05-07 17:05:51.040166651 +0200 @@ -187,19 +187,15 @@ * in every restore MPUSS OFF path. */ #ifdef CONFIG_CACHE_L2X0 -static void save_l2x0_context(void) +static void __init save_l2x0_context(void) { - u32 val; - void __iomem *l2x0_base = omap4_get_l2cache_base(); - if (l2x0_base) { - val = __raw_readl(l2x0_base + L2X0_AUX_CTRL); - __raw_writel(val, sar_base + L2X0_AUXCTRL_OFFSET); - val = __raw_readl(l2x0_base + L2X0_PREFETCH_CTRL); - __raw_writel(val, sar_base + L2X0_PREFETCH_CTRL_OFFSET); - } + __raw_writel(l2x0_saved_regs.aux_ctrl, + sar_base + L2X0_AUXCTRL_OFFSET); + __raw_writel(l2x0_saved_regs.prefetch_ctrl, + sar_base + L2X0_PREFETCH_CTRL_OFFSET); } #else -static void save_l2x0_context(void) +static void __init save_l2x0_context(void) {} #endif diff -Nur linux-3.15-rc4.orig/arch/arm/mach-prima2/l2x0.c linux-3.15-rc4/arch/arm/mach-prima2/l2x0.c --- linux-3.15-rc4.orig/arch/arm/mach-prima2/l2x0.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-prima2/l2x0.c 2014-05-07 17:05:51.040166651 +0200 @@ -8,42 +8,10 @@ #include #include -#include #include -struct l2x0_aux { - u32 val; - u32 mask; -}; - -static const struct l2x0_aux prima2_l2x0_aux __initconst = { - .val = 2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT, - .mask = 0, -}; - -static const struct l2x0_aux marco_l2x0_aux __initconst = { - .val = (2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) | - (1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT), - .mask = L2X0_AUX_CTRL_MASK, -}; - -static const struct of_device_id sirf_l2x0_ids[] __initconst = { - { .compatible = "sirf,prima2-pl310-cache", .data = &prima2_l2x0_aux, }, - { .compatible = "sirf,marco-pl310-cache", .data = &marco_l2x0_aux, }, - {}, -}; - static int __init sirfsoc_l2x0_init(void) { - struct device_node *np; - const struct l2x0_aux *aux; - - np = of_find_matching_node(NULL, sirf_l2x0_ids); - if (np) { - aux = of_match_node(sirf_l2x0_ids, np)->data; - return l2x0_of_init(aux->val, aux->mask); - } - - return 0; + return l2x0_of_init(0, ~0); } early_initcall(sirfsoc_l2x0_init); diff -Nur linux-3.15-rc4.orig/arch/arm/mach-prima2/pm.c linux-3.15-rc4/arch/arm/mach-prima2/pm.c --- linux-3.15-rc4.orig/arch/arm/mach-prima2/pm.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-prima2/pm.c 2014-05-07 17:05:51.044166676 +0200 @@ -71,7 +71,6 @@ case PM_SUSPEND_MEM: sirfsoc_pre_suspend_power_off(); - outer_flush_all(); outer_disable(); /* go zzz */ cpu_suspend(0, sirfsoc_finish_suspend); diff -Nur linux-3.15-rc4.orig/arch/arm/mach-realview/realview_eb.c linux-3.15-rc4/arch/arm/mach-realview/realview_eb.c --- linux-3.15-rc4.orig/arch/arm/mach-realview/realview_eb.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-realview/realview_eb.c 2014-05-07 17:05:51.044166676 +0200 @@ -442,8 +442,13 @@ realview_eb11mp_fixup(); #ifdef CONFIG_CACHE_L2X0 - /* 1MB (128KB/way), 8-way associativity, evmon/parity/share enabled - * Bits: .... ...0 0111 1001 0000 .... .... .... */ + /* + * The PL220 needs to be manually configured as the hardware + * doesn't report the correct sizes. + * 1MB (128KB/way), 8-way associativity, event monitor and + * parity enabled, ignore share bit, no force write allocate + * Bits: .... ...0 0111 1001 0000 .... .... .... 
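
With the named L2C/L310 bit macros, the OMAP4 aux_ctrl value built above becomes easy to audit. The standalone check below (plain C, with the bit positions restated from the cache-l2x0.h hunk earlier in this patch) shows that the platform now forces only shared-attribute override plus data/instruction prefetch on and parity off, while the 0xcf9fffff mask leaves every other bit to the hardware and boot-time value.

    #include <stdio.h>

    #define BIT(n)                          (1u << (n))
    #define L2C_AUX_CTRL_PARITY_ENABLE      BIT(21)
    #define L2C_AUX_CTRL_SHARED_OVERRIDE    BIT(22)
    #define L310_AUX_CTRL_DATA_PREFETCH     BIT(28)
    #define L310_AUX_CTRL_INSTR_PREFETCH    BIT(29)

    int main(void)
    {
            unsigned int aux = L2C_AUX_CTRL_SHARED_OVERRIDE |
                               L310_AUX_CTRL_DATA_PREFETCH |
                               L310_AUX_CTRL_INSTR_PREFETCH;
            unsigned int mask = 0xcf9fffff;

            printf("aux value    = 0x%08x\n", aux);     /* 0x30400000 */
            /* Bits the platform owns: cleared by the mask, then OR'd in. */
            printf("driver-owned = 0x%08x\n", ~mask);   /* 0x30600000: bits 21,22,28,29 */
            return 0;
    }
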
+ */ l2x0_init(__io_address(REALVIEW_EB11MP_L220_BASE), 0x00790000, 0xfe000fff); #endif platform_device_register(&pmu_device); diff -Nur linux-3.15-rc4.orig/arch/arm/mach-realview/realview_pb1176.c linux-3.15-rc4/arch/arm/mach-realview/realview_pb1176.c --- linux-3.15-rc4.orig/arch/arm/mach-realview/realview_pb1176.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-realview/realview_pb1176.c 2014-05-07 17:05:51.044166676 +0200 @@ -355,7 +355,13 @@ int i; #ifdef CONFIG_CACHE_L2X0 - /* 128Kb (16Kb/way) 8-way associativity. evmon/parity/share enabled. */ + /* + * The PL220 needs to be manually configured as the hardware + * doesn't report the correct sizes. + * 128kB (16kB/way), 8-way associativity, event monitor and + * parity enabled, ignore share bit, no force write allocate + * Bits: .... ...0 0111 0011 0000 .... .... .... + */ l2x0_init(__io_address(REALVIEW_PB1176_L220_BASE), 0x00730000, 0xfe000fff); #endif diff -Nur linux-3.15-rc4.orig/arch/arm/mach-realview/realview_pb11mp.c linux-3.15-rc4/arch/arm/mach-realview/realview_pb11mp.c --- linux-3.15-rc4.orig/arch/arm/mach-realview/realview_pb11mp.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-realview/realview_pb11mp.c 2014-05-07 17:05:51.044166676 +0200 @@ -337,8 +337,13 @@ int i; #ifdef CONFIG_CACHE_L2X0 - /* 1MB (128KB/way), 8-way associativity, evmon/parity/share enabled - * Bits: .... ...0 0111 1001 0000 .... .... .... */ + /* + * The PL220 needs to be manually configured as the hardware + * doesn't report the correct sizes. + * 1MB (128KB/way), 8-way associativity, event monitor and + * parity enabled, ignore share bit, no force write allocate + * Bits: .... ...0 0111 1001 0000 .... .... .... + */ l2x0_init(__io_address(REALVIEW_TC11MP_L220_BASE), 0x00790000, 0xfe000fff); #endif diff -Nur linux-3.15-rc4.orig/arch/arm/mach-realview/realview_pbx.c linux-3.15-rc4/arch/arm/mach-realview/realview_pbx.c --- linux-3.15-rc4.orig/arch/arm/mach-realview/realview_pbx.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-realview/realview_pbx.c 2014-05-07 17:05:51.044166676 +0200 @@ -370,8 +370,8 @@ __io_address(REALVIEW_PBX_TILE_L220_BASE); /* set RAM latencies to 1 cycle for eASIC */ - writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL); - writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL); + writel(0, l2x0_base + L310_TAG_LATENCY_CTRL); + writel(0, l2x0_base + L310_DATA_LATENCY_CTRL); /* 16KB way size, 8-way associativity, parity disabled * Bits: .. 0 0 0 0 1 00 1 0 1 001 0 000 0 .... .... .... 
*/ diff -Nur linux-3.15-rc4.orig/arch/arm/mach-rockchip/rockchip.c linux-3.15-rc4/arch/arm/mach-rockchip/rockchip.c --- linux-3.15-rc4.orig/arch/arm/mach-rockchip/rockchip.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-rockchip/rockchip.c 2014-05-07 17:05:51.044166676 +0200 @@ -26,7 +26,7 @@ static void __init rockchip_dt_init(void) { - l2x0_of_init(0, ~0UL); + l2x0_of_init(0, ~0); of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } diff -Nur linux-3.15-rc4.orig/arch/arm/mach-shmobile/board-armadillo800eva.c linux-3.15-rc4/arch/arm/mach-shmobile/board-armadillo800eva.c --- linux-3.15-rc4.orig/arch/arm/mach-shmobile/board-armadillo800eva.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-shmobile/board-armadillo800eva.c 2014-05-07 17:05:51.044166676 +0200 @@ -1271,8 +1271,8 @@ #ifdef CONFIG_CACHE_L2X0 - /* Early BRESP enable, Shared attribute override enable, 32K*8way */ - l2x0_init(IOMEM(0xf0002000), 0x40440000, 0x82000fff); + /* Shared attribute override enable, 32K*8way */ + l2x0_init(IOMEM(0xf0002000), 0x00400000, 0xc20f0fff); #endif i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices)); diff -Nur linux-3.15-rc4.orig/arch/arm/mach-shmobile/board-armadillo800eva-reference.c linux-3.15-rc4/arch/arm/mach-shmobile/board-armadillo800eva-reference.c --- linux-3.15-rc4.orig/arch/arm/mach-shmobile/board-armadillo800eva-reference.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-shmobile/board-armadillo800eva-reference.c 2014-05-07 17:05:51.044166676 +0200 @@ -164,8 +164,8 @@ r8a7740_meram_workaround(); #ifdef CONFIG_CACHE_L2X0 - /* Early BRESP enable, Shared attribute override enable, 32K*8way */ - l2x0_init(IOMEM(0xf0002000), 0x40440000, 0x82000fff); + /* Shared attribute override enable, 32K*8way */ + l2x0_init(IOMEM(0xf0002000), 0x00400000, 0xc20f0fff); #endif r8a7740_add_standard_devices_dt(); diff -Nur linux-3.15-rc4.orig/arch/arm/mach-shmobile/board-kzm9g.c linux-3.15-rc4/arch/arm/mach-shmobile/board-kzm9g.c --- linux-3.15-rc4.orig/arch/arm/mach-shmobile/board-kzm9g.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-shmobile/board-kzm9g.c 2014-05-07 17:05:51.044166676 +0200 @@ -876,8 +876,8 @@ gpio_request_one(223, GPIOF_IN, NULL); /* IRQ8 */ #ifdef CONFIG_CACHE_L2X0 - /* Early BRESP enable, Shared attribute override enable, 64K*8way */ - l2x0_init(IOMEM(0xf0100000), 0x40460000, 0x82000fff); + /* Shared attribute override enable, 64K*8way */ + l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff); #endif i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices)); diff -Nur linux-3.15-rc4.orig/arch/arm/mach-shmobile/board-kzm9g-reference.c linux-3.15-rc4/arch/arm/mach-shmobile/board-kzm9g-reference.c --- linux-3.15-rc4.orig/arch/arm/mach-shmobile/board-kzm9g-reference.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-shmobile/board-kzm9g-reference.c 2014-05-07 17:05:51.044166676 +0200 @@ -36,8 +36,8 @@ sh73a0_add_standard_devices_dt(); #ifdef CONFIG_CACHE_L2X0 - /* Early BRESP enable, Shared attribute override enable, 64K*8way */ - l2x0_init(IOMEM(0xf0100000), 0x40460000, 0x82000fff); + /* Shared attribute override enable, 64K*8way */ + l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff); #endif } diff -Nur linux-3.15-rc4.orig/arch/arm/mach-shmobile/setup-r8a7778.c linux-3.15-rc4/arch/arm/mach-shmobile/setup-r8a7778.c --- linux-3.15-rc4.orig/arch/arm/mach-shmobile/setup-r8a7778.c 2014-05-05 03:14:42.000000000 +0200 +++ 
linux-3.15-rc4/arch/arm/mach-shmobile/setup-r8a7778.c 2014-05-07 17:05:51.044166676 +0200 @@ -298,10 +298,10 @@ void __iomem *base = ioremap_nocache(0xf0100000, 0x1000); if (base) { /* - * Early BRESP enable, Shared attribute override enable, 64K*16way + * Shared attribute override enable, 64K*16way * don't call iounmap(base) */ - l2x0_init(base, 0x40470000, 0x82000fff); + l2x0_init(base, 0x00400000, 0xc20f0fff); } #endif diff -Nur linux-3.15-rc4.orig/arch/arm/mach-shmobile/setup-r8a7779.c linux-3.15-rc4/arch/arm/mach-shmobile/setup-r8a7779.c --- linux-3.15-rc4.orig/arch/arm/mach-shmobile/setup-r8a7779.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-shmobile/setup-r8a7779.c 2014-05-07 17:05:51.048166700 +0200 @@ -700,8 +700,8 @@ void __init r8a7779_add_standard_devices(void) { #ifdef CONFIG_CACHE_L2X0 - /* Early BRESP enable, Shared attribute override enable, 64K*16way */ - l2x0_init(IOMEM(0xf0100000), 0x40470000, 0x82000fff); + /* Shared attribute override enable, 64K*16way */ + l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff); #endif r8a7779_pm_init(); diff -Nur linux-3.15-rc4.orig/arch/arm/mach-socfpga/socfpga.c linux-3.15-rc4/arch/arm/mach-socfpga/socfpga.c --- linux-3.15-rc4.orig/arch/arm/mach-socfpga/socfpga.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-socfpga/socfpga.c 2014-05-07 17:05:51.048166700 +0200 @@ -100,7 +100,7 @@ static void __init socfpga_cyclone5_init(void) { - l2x0_of_init(0, ~0UL); + l2x0_of_init(0, ~0); of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); } diff -Nur linux-3.15-rc4.orig/arch/arm/mach-spear/platsmp.c linux-3.15-rc4/arch/arm/mach-spear/platsmp.c --- linux-3.15-rc4.orig/arch/arm/mach-spear/platsmp.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-spear/platsmp.c 2014-05-07 17:05:51.048166700 +0200 @@ -20,6 +20,18 @@ #include #include "generic.h" +/* + * Write pen_release in a way that is guaranteed to be visible to all + * observers, irrespective of whether they're taking part in coherency + * or not. This is necessary for the hotplug code to work reliably. + */ +static void write_pen_release(int val) +{ + pen_release = val; + smp_wmb(); + sync_cache_w(&pen_release); +} + static DEFINE_SPINLOCK(boot_lock); static void __iomem *scu_base = IOMEM(VA_SCU_BASE); @@ -30,8 +42,7 @@ * let the primary processor know we're out of the * pen, then head off into the C entry point */ - pen_release = -1; - smp_wmb(); + write_pen_release(-1); /* * Synchronise with the boot thread. @@ -58,9 +69,7 @@ * Note that "pen_release" is the hardware CPU ID, whereas * "cpu" is Linux's internal ID. 
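
The shmobile conversions above drop Early BRESP and the explicit associativity/way-size fields from the board-supplied auxiliary value, leaving those to the cache driver and the L2C hardware ID registers. Decoding the old and new words makes the change visible; the standalone check below restates the bit positions from cache-l2x0.h, and decode() is merely an illustration helper.

    #include <stdio.h>

    #define BIT(n)                          (1u << (n))
    #define L2C_AUX_CTRL_WAY_SIZE_SHIFT     17
    #define L2C_AUX_CTRL_WAY_SIZE_MASK      (7u << 17)
    #define L2C_AUX_CTRL_SHARED_OVERRIDE    BIT(22)
    #define L310_AUX_CTRL_ASSOCIATIVITY_16  BIT(16)
    #define L310_AUX_CTRL_EARLY_BRESP       BIT(30)

    static void decode(unsigned int aux)
    {
            printf("0x%08x: early_bresp=%d shared_override=%d assoc16=%d way_size=%u\n",
                   aux,
                   !!(aux & L310_AUX_CTRL_EARLY_BRESP),
                   !!(aux & L2C_AUX_CTRL_SHARED_OVERRIDE),
                   !!(aux & L310_AUX_CTRL_ASSOCIATIVITY_16),
                   (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >> L2C_AUX_CTRL_WAY_SIZE_SHIFT);
    }

    int main(void)
    {
            decode(0x40470000);     /* old r8a7778/r8a7779: BRESP, override, 16-way, 64K ways */
            decode(0x00400000);     /* new value: shared attribute override only */
            return 0;
    }
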
*/ - pen_release = cpu; - flush_cache_all(); - outer_flush_all(); + write_pen_release(cpu); timeout = jiffies + (1 * HZ); while (time_before(jiffies, timeout)) { diff -Nur linux-3.15-rc4.orig/arch/arm/mach-spear/spear13xx.c linux-3.15-rc4/arch/arm/mach-spear/spear13xx.c --- linux-3.15-rc4.orig/arch/arm/mach-spear/spear13xx.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-spear/spear13xx.c 2014-05-07 17:05:51.048166700 +0200 @@ -38,15 +38,15 @@ if (!IS_ENABLED(CONFIG_CACHE_L2X0)) return; - writel_relaxed(0x06, VA_L2CC_BASE + L2X0_PREFETCH_CTRL); + writel_relaxed(0x06, VA_L2CC_BASE + L310_PREFETCH_CTRL); /* * Program following latencies in order to make * SPEAr1340 work at 600 MHz */ - writel_relaxed(0x221, VA_L2CC_BASE + L2X0_TAG_LATENCY_CTRL); - writel_relaxed(0x441, VA_L2CC_BASE + L2X0_DATA_LATENCY_CTRL); - l2x0_init(VA_L2CC_BASE, 0x70A60001, 0xfe00ffff); + writel_relaxed(0x221, VA_L2CC_BASE + L310_TAG_LATENCY_CTRL); + writel_relaxed(0x441, VA_L2CC_BASE + L310_DATA_LATENCY_CTRL); + l2x0_init(VA_L2CC_BASE, 0x30a00001, 0xfe0fffff); } /* diff -Nur linux-3.15-rc4.orig/arch/arm/mach-sti/board-dt.c linux-3.15-rc4/arch/arm/mach-sti/board-dt.c --- linux-3.15-rc4.orig/arch/arm/mach-sti/board-dt.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-sti/board-dt.c 2014-05-07 17:05:51.048166700 +0200 @@ -16,15 +16,9 @@ void __init stih41x_l2x0_init(void) { - u32 way_size = 0x4; - u32 aux_ctrl; - /* may be this can be encoded in macros like BIT*() */ - aux_ctrl = (0x1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) | - (0x1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT) | - (0x1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT) | - (way_size << L2X0_AUX_CTRL_WAY_SIZE_SHIFT); - - l2x0_of_init(aux_ctrl, L2X0_AUX_CTRL_MASK); + l2x0_of_init(L2C_AUX_CTRL_SHARED_OVERRIDE | + L310_AUX_CTRL_DATA_PREFETCH | + L310_AUX_CTRL_INSTR_PREFETCH, 0xc00f0fff); } static void __init stih41x_machine_init(void) diff -Nur linux-3.15-rc4.orig/arch/arm/mach-tegra/pm.h linux-3.15-rc4/arch/arm/mach-tegra/pm.h --- linux-3.15-rc4.orig/arch/arm/mach-tegra/pm.h 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-tegra/pm.h 2014-05-07 17:05:51.048166700 +0200 @@ -35,8 +35,6 @@ void tegra30_lp1_iram_hook(void); void tegra30_sleep_core_init(void); -extern unsigned long l2x0_saved_regs_addr; - void tegra_clear_cpu_in_lp2(void); bool tegra_set_cpu_in_lp2(void); diff -Nur linux-3.15-rc4.orig/arch/arm/mach-tegra/reset-handler.S linux-3.15-rc4/arch/arm/mach-tegra/reset-handler.S --- linux-3.15-rc4.orig/arch/arm/mach-tegra/reset-handler.S 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-tegra/reset-handler.S 2014-05-07 17:05:51.048166700 +0200 @@ -19,7 +19,6 @@ #include #include -#include #include "flowctrl.h" #include "fuse.h" @@ -78,8 +77,10 @@ str r1, [r0] #endif +#ifdef CONFIG_CACHE_L2X0 /* L2 cache resume & re-enable */ - l2_cache_resume r0, r1, r2, l2x0_saved_regs_addr + bl l2c310_early_resume +#endif end_ca9_scu_l2_resume: mov32 r9, 0xc0f cmp r8, r9 @@ -89,12 +90,6 @@ ENDPROC(tegra_resume) #endif -#ifdef CONFIG_CACHE_L2X0 - .globl l2x0_saved_regs_addr -l2x0_saved_regs_addr: - .long 0 -#endif - .align L1_CACHE_SHIFT ENTRY(__tegra_cpu_reset_handler_start) diff -Nur linux-3.15-rc4.orig/arch/arm/mach-tegra/sleep.h linux-3.15-rc4/arch/arm/mach-tegra/sleep.h --- linux-3.15-rc4.orig/arch/arm/mach-tegra/sleep.h 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-tegra/sleep.h 2014-05-07 17:05:51.048166700 +0200 @@ -120,37 +120,6 @@ mov \tmp1, \tmp1, lsr #8 .endm -/* Macro to 
resume & re-enable L2 cache */ -#ifndef L2X0_CTRL_EN -#define L2X0_CTRL_EN 1 -#endif - -#ifdef CONFIG_CACHE_L2X0 -.macro l2_cache_resume, tmp1, tmp2, tmp3, phys_l2x0_saved_regs - W(adr) \tmp1, \phys_l2x0_saved_regs - ldr \tmp1, [\tmp1] - ldr \tmp2, [\tmp1, #L2X0_R_PHY_BASE] - ldr \tmp3, [\tmp2, #L2X0_CTRL] - tst \tmp3, #L2X0_CTRL_EN - bne exit_l2_resume - ldr \tmp3, [\tmp1, #L2X0_R_TAG_LATENCY] - str \tmp3, [\tmp2, #L2X0_TAG_LATENCY_CTRL] - ldr \tmp3, [\tmp1, #L2X0_R_DATA_LATENCY] - str \tmp3, [\tmp2, #L2X0_DATA_LATENCY_CTRL] - ldr \tmp3, [\tmp1, #L2X0_R_PREFETCH_CTRL] - str \tmp3, [\tmp2, #L2X0_PREFETCH_CTRL] - ldr \tmp3, [\tmp1, #L2X0_R_PWR_CTRL] - str \tmp3, [\tmp2, #L2X0_POWER_CTRL] - ldr \tmp3, [\tmp1, #L2X0_R_AUX_CTRL] - str \tmp3, [\tmp2, #L2X0_AUX_CTRL] - mov \tmp3, #L2X0_CTRL_EN - str \tmp3, [\tmp2, #L2X0_CTRL] -exit_l2_resume: -.endm -#else /* CONFIG_CACHE_L2X0 */ -.macro l2_cache_resume, tmp1, tmp2, tmp3, phys_l2x0_saved_regs -.endm -#endif /* CONFIG_CACHE_L2X0 */ #else void tegra_pen_lock(void); void tegra_pen_unlock(void); diff -Nur linux-3.15-rc4.orig/arch/arm/mach-tegra/tegra.c linux-3.15-rc4/arch/arm/mach-tegra/tegra.c --- linux-3.15-rc4.orig/arch/arm/mach-tegra/tegra.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-tegra/tegra.c 2014-05-07 17:05:51.048166700 +0200 @@ -73,27 +73,7 @@ static void __init tegra_init_cache(void) { #ifdef CONFIG_CACHE_L2X0 - static const struct of_device_id pl310_ids[] __initconst = { - { .compatible = "arm,pl310-cache", }, - {} - }; - - struct device_node *np; - int ret; - void __iomem *p = IO_ADDRESS(TEGRA_ARM_PERIF_BASE) + 0x3000; - u32 aux_ctrl, cache_type; - - np = of_find_matching_node(NULL, pl310_ids); - if (!np) - return; - - cache_type = readl(p + L2X0_CACHE_TYPE); - aux_ctrl = (cache_type & 0x700) << (17-8); - aux_ctrl |= 0x7C400001; - - ret = l2x0_of_init(aux_ctrl, 0x8200c3fe); - if (!ret) - l2x0_saved_regs_addr = virt_to_phys(&l2x0_saved_regs); + l2x0_of_init(0x3c400001, 0xc20fc3fe); #endif } diff -Nur linux-3.15-rc4.orig/arch/arm/mach-ux500/cache-l2x0.c linux-3.15-rc4/arch/arm/mach-ux500/cache-l2x0.c --- linux-3.15-rc4.orig/arch/arm/mach-ux500/cache-l2x0.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-ux500/cache-l2x0.c 2014-05-07 17:05:51.048166700 +0200 @@ -35,10 +35,16 @@ return 0; } -static int __init ux500_l2x0_init(void) +static void ux500_l2c310_write_sec(unsigned long val, unsigned reg) { - u32 aux_val = 0x3e000000; + /* + * We can't write to secure registers as we are in non-secure + * mode, until we have some SMI service available. + */ +} +static int __init ux500_l2x0_init(void) +{ if (cpu_is_u8500_family() || cpu_is_ux540_family()) l2x0_base = __io_address(U8500_L2CC_BASE); else @@ -48,28 +54,12 @@ /* Unlock before init */ ux500_l2x0_unlock(); - /* DBx540's L2 has 128KB way size */ - if (cpu_is_ux540_family()) - /* 128KB way size */ - aux_val |= (0x4 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT); - else - /* 64KB way size */ - aux_val |= (0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT); + outer_cache.write_sec = ux500_l2c310_write_sec; - /* 64KB way size, 8 way associativity, force WA */ if (of_have_populated_dt()) - l2x0_of_init(aux_val, 0xc0000fff); + l2x0_of_init(0, ~0); else - l2x0_init(l2x0_base, aux_val, 0xc0000fff); - - /* - * We can't disable l2 as we are in non secure mode, currently - * this seems be called only during kexec path. So let's - * override outer.disable with nasty assignment until we have - * some SMI service available. 
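
The new L310_LATENCY_CTRL_* helpers make raw latency words such as the SPEAr1340 values above (0x221 for tag RAM, 0x441 for data RAM) self-describing, assuming the usual PL310 encoding of "field value + 1" cycles. A quick standalone check; only the register names changed here, the offsets behind L310_TAG_LATENCY_CTRL and L310_DATA_LATENCY_CTRL are the same as the old L2X0_* ones.

    #include <stdio.h>

    #define L310_LATENCY_CTRL_SETUP(n)      ((n) << 0)
    #define L310_LATENCY_CTRL_RD(n)         ((n) << 4)
    #define L310_LATENCY_CTRL_WR(n)         ((n) << 8)

    int main(void)
    {
            /*
             * SPEAr1340 at 600 MHz: tag RAM 3-3-2 and data RAM 5-5-2 cycles
             * (write-read-setup), each field stored as "cycles - 1".
             */
            unsigned int tag  = L310_LATENCY_CTRL_WR(2) | L310_LATENCY_CTRL_RD(2) |
                                L310_LATENCY_CTRL_SETUP(1);
            unsigned int data = L310_LATENCY_CTRL_WR(4) | L310_LATENCY_CTRL_RD(4) |
                                L310_LATENCY_CTRL_SETUP(1);

            printf("tag  latency word = 0x%03x\n", tag);    /* 0x221 */
            printf("data latency word = 0x%03x\n", data);   /* 0x441 */
            return 0;
    }
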
- */ - outer_cache.disable = NULL; - outer_cache.set_debug = NULL; + l2x0_init(l2x0_base, 0, ~0); return 0; } diff -Nur linux-3.15-rc4.orig/arch/arm/mach-vexpress/ct-ca9x4.c linux-3.15-rc4/arch/arm/mach-vexpress/ct-ca9x4.c --- linux-3.15-rc4.orig/arch/arm/mach-vexpress/ct-ca9x4.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-vexpress/ct-ca9x4.c 2014-05-07 17:05:51.048166700 +0200 @@ -45,6 +45,23 @@ iotable_init(ct_ca9x4_io_desc, ARRAY_SIZE(ct_ca9x4_io_desc)); } +static void __init ca9x4_l2_init(void) +{ +#ifdef CONFIG_CACHE_L2X0 + void __iomem *l2x0_base = ioremap(CT_CA9X4_L2CC, SZ_4K); + + if (l2x0_base) { + /* set RAM latencies to 1 cycle for this core tile. */ + writel(0, l2x0_base + L310_TAG_LATENCY_CTRL); + writel(0, l2x0_base + L310_DATA_LATENCY_CTRL); + + l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff); + } else { + pr_err("L2C: unable to map L2 cache controller\n"); + } +#endif +} + #ifdef CONFIG_HAVE_ARM_TWD static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, A9_MPCORE_TWD, IRQ_LOCALTIMER); @@ -63,6 +80,7 @@ gic_init(0, 29, ioremap(A9_MPCORE_GIC_DIST, SZ_4K), ioremap(A9_MPCORE_GIC_CPU, SZ_256)); ca9x4_twd_init(); + ca9x4_l2_init(); } static int ct_ca9x4_clcd_setup(struct clcd_fb *fb) @@ -141,16 +159,6 @@ { int i; -#ifdef CONFIG_CACHE_L2X0 - void __iomem *l2x0_base = ioremap(CT_CA9X4_L2CC, SZ_4K); - - /* set RAM latencies to 1 cycle for this core tile. */ - writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL); - writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL); - - l2x0_init(l2x0_base, 0x00400000, 0xfe0fffff); -#endif - for (i = 0; i < ARRAY_SIZE(ct_ca9x4_amba_devs); i++) amba_device_register(ct_ca9x4_amba_devs[i], &iomem_resource); diff -Nur linux-3.15-rc4.orig/arch/arm/mach-zynq/common.c linux-3.15-rc4/arch/arm/mach-zynq/common.c --- linux-3.15-rc4.orig/arch/arm/mach-zynq/common.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mach-zynq/common.c 2014-05-07 17:05:51.048166700 +0200 @@ -70,7 +70,7 @@ /* * 64KB way size, 8-way associativity, parity disabled */ - l2x0_of_init(0x02060000, 0xF0F0FFFF); + l2x0_of_init(0x02000000, 0xf0ffffff); of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); diff -Nur linux-3.15-rc4.orig/arch/arm/mm/cache-feroceon-l2.c linux-3.15-rc4/arch/arm/mm/cache-feroceon-l2.c --- linux-3.15-rc4.orig/arch/arm/mm/cache-feroceon-l2.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mm/cache-feroceon-l2.c 2014-05-07 17:05:51.052166726 +0200 @@ -350,7 +350,6 @@ outer_cache.inv_range = feroceon_l2_inv_range; outer_cache.clean_range = feroceon_l2_clean_range; outer_cache.flush_range = feroceon_l2_flush_range; - outer_cache.inv_all = l2_inv_all; enable_l2(); diff -Nur linux-3.15-rc4.orig/arch/arm/mm/cache-l2x0.c linux-3.15-rc4/arch/arm/mm/cache-l2x0.c --- linux-3.15-rc4.orig/arch/arm/mm/cache-l2x0.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mm/cache-l2x0.c 2014-05-07 17:05:51.052166726 +0200 @@ -16,18 +16,33 @@ * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include #include #include +#include #include #include #include #include #include +#include +#include #include #include "cache-tauros3.h" #include "cache-aurora-l2.h" +struct l2c_init_data { + const char *type; + unsigned way_size_0; + unsigned num_lock; + void (*of_parse)(const struct device_node *, u32 *, u32 *); + void (*enable)(void __iomem *, u32, unsigned); + void (*fixup)(void __iomem *, u32, struct outer_cache_fns *); + void (*save)(void __iomem 
*); + struct outer_cache_fns outer_cache; +}; + #define CACHE_LINE_SIZE 32 static void __iomem *l2x0_base; @@ -36,96 +51,116 @@ static u32 l2x0_size; static unsigned long sync_reg_offset = L2X0_CACHE_SYNC; -/* Aurora don't have the cache ID register available, so we have to - * pass it though the device tree */ -static u32 cache_id_part_number_from_dt; - struct l2x0_regs l2x0_saved_regs; -struct l2x0_of_data { - void (*setup)(const struct device_node *, u32 *, u32 *); - void (*save)(void); - struct outer_cache_fns outer_cache; -}; - -static bool of_init = false; - -static inline void cache_wait_way(void __iomem *reg, unsigned long mask) +/* + * Common code for all cache controllers. + */ +static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask) { /* wait for cache operation by line or way to complete */ while (readl_relaxed(reg) & mask) cpu_relax(); } -#ifdef CONFIG_CACHE_PL310 -static inline void cache_wait(void __iomem *reg, unsigned long mask) +/* + * By default, we write directly to secure registers. Platforms must + * override this if they are running non-secure. + */ +static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg) { - /* cache operations by line are atomic on PL310 */ + if (val == readl_relaxed(base + reg)) + return; + if (outer_cache.write_sec) + outer_cache.write_sec(val, reg); + else + writel_relaxed(val, base + reg); } -#else -#define cache_wait cache_wait_way -#endif -static inline void cache_sync(void) +/* + * This should only be called when we have a requirement that the + * register be written due to a work-around, as platforms running + * in non-secure mode may not be able to access this register. + */ +static inline void l2c_set_debug(void __iomem *base, unsigned long val) { - void __iomem *base = l2x0_base; - - writel_relaxed(0, base + sync_reg_offset); - cache_wait(base + L2X0_CACHE_SYNC, 1); + l2c_write_sec(val, base, L2X0_DEBUG_CTRL); } -static inline void l2x0_clean_line(unsigned long addr) +static void __l2c_op_way(void __iomem *reg) { - void __iomem *base = l2x0_base; - cache_wait(base + L2X0_CLEAN_LINE_PA, 1); - writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA); + writel_relaxed(l2x0_way_mask, reg); + l2c_wait_mask(reg, l2x0_way_mask); } -static inline void l2x0_inv_line(unsigned long addr) +static inline void l2c_unlock(void __iomem *base, unsigned num) { - void __iomem *base = l2x0_base; - cache_wait(base + L2X0_INV_LINE_PA, 1); - writel_relaxed(addr, base + L2X0_INV_LINE_PA); + unsigned i; + + for (i = 0; i < num; i++) { + writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE + + i * L2X0_LOCKDOWN_STRIDE); + writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE + + i * L2X0_LOCKDOWN_STRIDE); + } } -#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) -static inline void debug_writel(unsigned long val) +/* + * Enable the L2 cache controller. This function must only be + * called when the cache controller is known to be disabled. 
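[Editorial sketch, not part of the patch.] The write_sec hook introduced just above is the override point for platforms running non-secure; the ux500 hunk earlier in this patch installs an empty stub because it has no SMI service yet. A platform with a working secure-monitor interface would wire it up roughly as below. firmware_smc() and MYPLAT_SMC_L2C_WRITE are invented placeholder names, declared here only so the sketch is self-contained; the write_sec signature and the "install before l2x0_of_init()" ordering follow the ux500 change shown above.

/*
 * Editorial sketch, not part of the patch: shape of a platform-specific
 * write_sec override.  firmware_smc() and MYPLAT_SMC_L2C_WRITE are
 * hypothetical placeholders; a real platform would use whatever
 * secure-monitor interface it has (ux500 above simply drops the write
 * until such a service exists).
 */
#include <linux/init.h>
#include <asm/outercache.h>
#include <asm/hardware/cache-l2x0.h>

/* placeholder secure-monitor call, assumed to exist on this platform */
#define MYPLAT_SMC_L2C_WRITE	0x1
extern void firmware_smc(unsigned long cmd, unsigned long reg,
			 unsigned long val);

static void myplat_l2c310_write_sec(unsigned long val, unsigned reg)
{
	/* forward the L2C register write to secure firmware */
	firmware_smc(MYPLAT_SMC_L2C_WRITE, reg, val);
}

static void __init myplat_l2c_init(void)
{
	/* install the hook before l2x0_of_init() so the enable path uses it */
	outer_cache.write_sec = myplat_l2c310_write_sec;
	l2x0_of_init(0, ~0);
}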
+ */ +static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock) { - if (outer_cache.set_debug) - outer_cache.set_debug(val); + unsigned long flags; + + l2c_write_sec(aux, base, L2X0_AUX_CTRL); + + l2c_unlock(base, num_lock); + + local_irq_save(flags); + __l2c_op_way(base + L2X0_INV_WAY); + writel_relaxed(0, base + sync_reg_offset); + l2c_wait_mask(base + sync_reg_offset, 1); + local_irq_restore(flags); + + l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL); } -static void pl310_set_debug(unsigned long val) +static void l2c_disable(void) { - writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL); + void __iomem *base = l2x0_base; + + outer_cache.flush_all(); + l2c_write_sec(0, base, L2X0_CTRL); + dsb(st); } -#else -/* Optimised out for non-errata case */ -static inline void debug_writel(unsigned long val) + +#ifdef CONFIG_CACHE_PL310 +static inline void cache_wait(void __iomem *reg, unsigned long mask) { + /* cache operations by line are atomic on PL310 */ } - -#define pl310_set_debug NULL +#else +#define cache_wait l2c_wait_mask #endif -#ifdef CONFIG_PL310_ERRATA_588369 -static inline void l2x0_flush_line(unsigned long addr) +static inline void cache_sync(void) { void __iomem *base = l2x0_base; - /* Clean by PA followed by Invalidate by PA */ - cache_wait(base + L2X0_CLEAN_LINE_PA, 1); - writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA); - cache_wait(base + L2X0_INV_LINE_PA, 1); - writel_relaxed(addr, base + L2X0_INV_LINE_PA); + writel_relaxed(0, base + sync_reg_offset); + cache_wait(base + L2X0_CACHE_SYNC, 1); } -#else -static inline void l2x0_flush_line(unsigned long addr) +#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915) +static inline void debug_writel(unsigned long val) +{ + l2c_set_debug(l2x0_base, val); +} +#else +/* Optimised out for non-errata case */ +static inline void debug_writel(unsigned long val) { - void __iomem *base = l2x0_base; - cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); - writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA); } #endif @@ -141,8 +176,7 @@ static void __l2x0_flush_all(void) { debug_writel(0x03); - writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY); - cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask); + __l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY); cache_sync(); debug_writel(0x00); } @@ -157,274 +191,882 @@ raw_spin_unlock_irqrestore(&l2x0_lock, flags); } -static void l2x0_clean_all(void) +static void l2x0_disable(void) { unsigned long flags; - /* clean all ways */ raw_spin_lock_irqsave(&l2x0_lock, flags); - writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY); - cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask); - cache_sync(); + __l2x0_flush_all(); + l2c_write_sec(0, l2x0_base, L2X0_CTRL); + dsb(st); raw_spin_unlock_irqrestore(&l2x0_lock, flags); } -static void l2x0_inv_all(void) +static void l2c_save(void __iomem *base) { - unsigned long flags; + l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); +} - /* invalidate all ways */ - raw_spin_lock_irqsave(&l2x0_lock, flags); - /* Invalidating when L2 is enabled is a nono */ - BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN); - writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY); - cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask); - cache_sync(); - raw_spin_unlock_irqrestore(&l2x0_lock, flags); +/* + * L2C-210 specific code. + * + * The L2C-2x0 PA, set/way and sync operations are atomic, but we must + * ensure that no background operation is running. The way operations + * are all background tasks. 
+ * + * While a background operation is in progress, any new operation is + * ignored (unspecified whether this causes an error.) Thankfully, not + * used on SMP. + * + * Never has a different sync register other than L2X0_CACHE_SYNC, but + * we use sync_reg_offset here so we can share some of this with L2C-310. + */ +static void __l2c210_cache_sync(void __iomem *base) +{ + writel_relaxed(0, base + sync_reg_offset); } -static void l2x0_inv_range(unsigned long start, unsigned long end) +static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start, + unsigned long end) +{ + while (start < end) { + writel_relaxed(start, reg); + start += CACHE_LINE_SIZE; + } +} + +static void l2c210_inv_range(unsigned long start, unsigned long end) { void __iomem *base = l2x0_base; - unsigned long flags; - raw_spin_lock_irqsave(&l2x0_lock, flags); if (start & (CACHE_LINE_SIZE - 1)) { start &= ~(CACHE_LINE_SIZE - 1); - debug_writel(0x03); - l2x0_flush_line(start); - debug_writel(0x00); + writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA); start += CACHE_LINE_SIZE; } if (end & (CACHE_LINE_SIZE - 1)) { end &= ~(CACHE_LINE_SIZE - 1); - debug_writel(0x03); - l2x0_flush_line(end); - debug_writel(0x00); + writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA); } + __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end); + __l2c210_cache_sync(base); +} + +static void l2c210_clean_range(unsigned long start, unsigned long end) +{ + void __iomem *base = l2x0_base; + + start &= ~(CACHE_LINE_SIZE - 1); + __l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end); + __l2c210_cache_sync(base); +} + +static void l2c210_flush_range(unsigned long start, unsigned long end) +{ + void __iomem *base = l2x0_base; + + start &= ~(CACHE_LINE_SIZE - 1); + __l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end); + __l2c210_cache_sync(base); +} + +static void l2c210_flush_all(void) +{ + void __iomem *base = l2x0_base; + + BUG_ON(!irqs_disabled()); + + __l2c_op_way(base + L2X0_CLEAN_INV_WAY); + __l2c210_cache_sync(base); +} + +static void l2c210_sync(void) +{ + __l2c210_cache_sync(l2x0_base); +} + +static void l2c210_resume(void) +{ + void __iomem *base = l2x0_base; + + if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) + l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1); +} + +static const struct l2c_init_data l2c210_data __initconst = { + .type = "L2C-210", + .way_size_0 = SZ_8K, + .num_lock = 1, + .enable = l2c_enable, + .save = l2c_save, + .outer_cache = { + .inv_range = l2c210_inv_range, + .clean_range = l2c210_clean_range, + .flush_range = l2c210_flush_range, + .flush_all = l2c210_flush_all, + .disable = l2c_disable, + .sync = l2c210_sync, + .resume = l2c210_resume, + }, +}; + +/* + * L2C-220 specific code. + * + * All operations are background operations: they have to be waited for. + * Conflicting requests generate a slave error (which will cause an + * imprecise abort.) Never uses sync_reg_offset, so we hard-code the + * sync register here. + * + * However, we can re-use the l2c210_resume call. 
+ */ +static inline void __l2c220_cache_sync(void __iomem *base) +{ + writel_relaxed(0, base + L2X0_CACHE_SYNC); + l2c_wait_mask(base + L2X0_CACHE_SYNC, 1); +} + +static void l2c220_op_way(void __iomem *base, unsigned reg) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&l2x0_lock, flags); + __l2c_op_way(base + reg); + __l2c220_cache_sync(base); + raw_spin_unlock_irqrestore(&l2x0_lock, flags); +} + +static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start, + unsigned long end, unsigned long flags) +{ + raw_spinlock_t *lock = &l2x0_lock; + while (start < end) { unsigned long blk_end = start + min(end - start, 4096UL); while (start < blk_end) { - l2x0_inv_line(start); + l2c_wait_mask(reg, 1); + writel_relaxed(start, reg); start += CACHE_LINE_SIZE; } if (blk_end < end) { - raw_spin_unlock_irqrestore(&l2x0_lock, flags); - raw_spin_lock_irqsave(&l2x0_lock, flags); + raw_spin_unlock_irqrestore(lock, flags); + raw_spin_lock_irqsave(lock, flags); } } - cache_wait(base + L2X0_INV_LINE_PA, 1); - cache_sync(); - raw_spin_unlock_irqrestore(&l2x0_lock, flags); + + return flags; } -static void l2x0_clean_range(unsigned long start, unsigned long end) +static void l2c220_inv_range(unsigned long start, unsigned long end) { void __iomem *base = l2x0_base; unsigned long flags; - if ((end - start) >= l2x0_size) { - l2x0_clean_all(); - return; - } - raw_spin_lock_irqsave(&l2x0_lock, flags); - start &= ~(CACHE_LINE_SIZE - 1); - while (start < end) { - unsigned long blk_end = start + min(end - start, 4096UL); - - while (start < blk_end) { - l2x0_clean_line(start); + if ((start | end) & (CACHE_LINE_SIZE - 1)) { + if (start & (CACHE_LINE_SIZE - 1)) { + start &= ~(CACHE_LINE_SIZE - 1); + writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA); start += CACHE_LINE_SIZE; } - if (blk_end < end) { - raw_spin_unlock_irqrestore(&l2x0_lock, flags); - raw_spin_lock_irqsave(&l2x0_lock, flags); + if (end & (CACHE_LINE_SIZE - 1)) { + end &= ~(CACHE_LINE_SIZE - 1); + l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); + writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA); } } - cache_wait(base + L2X0_CLEAN_LINE_PA, 1); - cache_sync(); + + flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA, + start, end, flags); + l2c_wait_mask(base + L2X0_INV_LINE_PA, 1); + __l2c220_cache_sync(base); raw_spin_unlock_irqrestore(&l2x0_lock, flags); } -static void l2x0_flush_range(unsigned long start, unsigned long end) +static void l2c220_clean_range(unsigned long start, unsigned long end) { void __iomem *base = l2x0_base; unsigned long flags; + start &= ~(CACHE_LINE_SIZE - 1); if ((end - start) >= l2x0_size) { - l2x0_flush_all(); + l2c220_op_way(base, L2X0_CLEAN_WAY); return; } raw_spin_lock_irqsave(&l2x0_lock, flags); + flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA, + start, end, flags); + l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); + __l2c220_cache_sync(base); + raw_spin_unlock_irqrestore(&l2x0_lock, flags); +} + +static void l2c220_flush_range(unsigned long start, unsigned long end) +{ + void __iomem *base = l2x0_base; + unsigned long flags; + start &= ~(CACHE_LINE_SIZE - 1); + if ((end - start) >= l2x0_size) { + l2c220_op_way(base, L2X0_CLEAN_INV_WAY); + return; + } + + raw_spin_lock_irqsave(&l2x0_lock, flags); + flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, + start, end, flags); + l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1); + __l2c220_cache_sync(base); + raw_spin_unlock_irqrestore(&l2x0_lock, flags); +} + +static void l2c220_flush_all(void) +{ + l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY); 
+} + +static void l2c220_sync(void) +{ + unsigned long flags; + + raw_spin_lock_irqsave(&l2x0_lock, flags); + __l2c220_cache_sync(l2x0_base); + raw_spin_unlock_irqrestore(&l2x0_lock, flags); +} + +static void l2c220_enable(void __iomem *base, u32 aux, unsigned num_lock) +{ + /* + * Always enable non-secure access to the lockdown registers - + * we write to them as part of the L2C enable sequence so they + * need to be accessible. + */ + aux |= L220_AUX_CTRL_NS_LOCKDOWN; + + l2c_enable(base, aux, num_lock); +} + +static const struct l2c_init_data l2c220_data = { + .type = "L2C-220", + .way_size_0 = SZ_8K, + .num_lock = 1, + .enable = l2c220_enable, + .save = l2c_save, + .outer_cache = { + .inv_range = l2c220_inv_range, + .clean_range = l2c220_clean_range, + .flush_range = l2c220_flush_range, + .flush_all = l2c220_flush_all, + .disable = l2c_disable, + .sync = l2c220_sync, + .resume = l2c210_resume, + }, +}; + +/* + * L2C-310 specific code. + * + * Very similar to L2C-210, the PA, set/way and sync operations are atomic, + * and the way operations are all background tasks. However, issuing an + * operation while a background operation is in progress results in a + * SLVERR response. We can reuse: + * + * __l2c210_cache_sync (using sync_reg_offset) + * l2c210_sync + * l2c210_inv_range (if 588369 is not applicable) + * l2c210_clean_range + * l2c210_flush_range (if 588369 is not applicable) + * l2c210_flush_all (if 727915 is not applicable) + * + * Errata: + * 588369: PL310 R0P0->R1P0, fixed R2P0. + * Affects: all clean+invalidate operations + * clean and invalidate skips the invalidate step, so we need to issue + * separate operations. We also require the above debug workaround + * enclosing this code fragment on affected parts. On unaffected parts, + * we must not use this workaround without the debug register writes + * to avoid exposing a problem similar to 727915. + * + * 727915: PL310 R2P0->R3P0, fixed R3P1. + * Affects: clean+invalidate by way + * clean and invalidate by way runs in the background, and a store can + * hit the line between the clean operation and invalidate operation, + * resulting in the store being lost. + * + * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2. + * Affects: 8x64-bit (double fill) line fetches + * double fill line fetches can fail to cause dirty data to be evicted + * from the cache before the new data overwrites the second line. + * + * 753970: PL310 R3P0, fixed R3P1. + * Affects: sync + * prevents merging writes after the sync operation, until another L2C + * operation is performed (or a number of other conditions.) + * + * 769419: PL310 R0P0->R3P1, fixed R3P2. + * Affects: store buffer + * store buffer is not automatically drained. 
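[Editorial sketch, not part of the patch.] To make the erratum list above concrete: each entry reads as "affected from revision A, fixed in revision F", which l2c310_fixup() further down turns into half-open range tests on the RTL field of L2X0_CACHE_ID. The standalone sketch below only mirrors that logic; the enum values are placeholders for the L310_CACHE_ID_RTL_* codes and, like the real codes which the fixup code already compares relationally, they only need to be ordered by release.

/*
 * Illustrative sketch only, not part of the patch: the erratum windows
 * listed above expressed as half-open revision ranges, mirroring the
 * checks in l2c310_fixup().  The enum values are placeholders for the
 * L310_CACHE_ID_RTL_* codes.
 */
#include <stdbool.h>
#include <stdio.h>

enum { R0P0, R1P0, R2P0, R3P0, R3P1, R3P2 };	/* placeholder codes */

/* "affected from A, fixed in F" means A <= rev < F */
static bool affected(unsigned rev, unsigned first, unsigned fixed)
{
	return rev >= first && rev < fixed;
}

int main(void)
{
	unsigned rev = R3P0;	/* e.g. CACHE_ID & L2X0_CACHE_ID_RTL_MASK */

	printf("588369 applies: %d\n", affected(rev, R0P0, R2P0));
	printf("727915 applies: %d\n", affected(rev, R2P0, R3P1));
	printf("752271 applies: %d\n", affected(rev, R3P0, R3P2));
	printf("753970 applies: %d\n", rev == R3P0);
	return 0;
}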
+ */ +static void l2c310_inv_range_erratum(unsigned long start, unsigned long end) +{ + void __iomem *base = l2x0_base; + + if ((start | end) & (CACHE_LINE_SIZE - 1)) { + unsigned long flags; + + /* Erratum 588369 for both clean+invalidate operations */ + raw_spin_lock_irqsave(&l2x0_lock, flags); + l2c_set_debug(base, 0x03); + + if (start & (CACHE_LINE_SIZE - 1)) { + start &= ~(CACHE_LINE_SIZE - 1); + writel_relaxed(start, base + L2X0_CLEAN_LINE_PA); + writel_relaxed(start, base + L2X0_INV_LINE_PA); + start += CACHE_LINE_SIZE; + } + + if (end & (CACHE_LINE_SIZE - 1)) { + end &= ~(CACHE_LINE_SIZE - 1); + writel_relaxed(end, base + L2X0_CLEAN_LINE_PA); + writel_relaxed(end, base + L2X0_INV_LINE_PA); + } + + l2c_set_debug(base, 0x00); + raw_spin_unlock_irqrestore(&l2x0_lock, flags); + } + + __l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end); + __l2c210_cache_sync(base); +} + +static void l2c310_flush_range_erratum(unsigned long start, unsigned long end) +{ + raw_spinlock_t *lock = &l2x0_lock; + unsigned long flags; + void __iomem *base = l2x0_base; + + raw_spin_lock_irqsave(lock, flags); while (start < end) { unsigned long blk_end = start + min(end - start, 4096UL); - debug_writel(0x03); + l2c_set_debug(base, 0x03); while (start < blk_end) { - l2x0_flush_line(start); + writel_relaxed(start, base + L2X0_CLEAN_LINE_PA); + writel_relaxed(start, base + L2X0_INV_LINE_PA); start += CACHE_LINE_SIZE; } - debug_writel(0x00); + l2c_set_debug(base, 0x00); if (blk_end < end) { - raw_spin_unlock_irqrestore(&l2x0_lock, flags); - raw_spin_lock_irqsave(&l2x0_lock, flags); + raw_spin_unlock_irqrestore(lock, flags); + raw_spin_lock_irqsave(lock, flags); } } - cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1); - cache_sync(); - raw_spin_unlock_irqrestore(&l2x0_lock, flags); + raw_spin_unlock_irqrestore(lock, flags); + __l2c210_cache_sync(base); } -static void l2x0_disable(void) +static void l2c310_flush_all_erratum(void) { + void __iomem *base = l2x0_base; unsigned long flags; raw_spin_lock_irqsave(&l2x0_lock, flags); - __l2x0_flush_all(); - writel_relaxed(0, l2x0_base + L2X0_CTRL); - dsb(st); + l2c_set_debug(base, 0x03); + __l2c_op_way(base + L2X0_CLEAN_INV_WAY); + l2c_set_debug(base, 0x00); + __l2c210_cache_sync(base); raw_spin_unlock_irqrestore(&l2x0_lock, flags); } -static void l2x0_unlock(u32 cache_id) +static void __init l2c310_save(void __iomem *base) { - int lockregs; - int i; + unsigned revision; - switch (cache_id & L2X0_CACHE_ID_PART_MASK) { - case L2X0_CACHE_ID_PART_L310: - lockregs = 8; - break; - case AURORA_CACHE_ID: - lockregs = 4; + l2c_save(base); + + l2x0_saved_regs.tag_latency = readl_relaxed(base + + L310_TAG_LATENCY_CTRL); + l2x0_saved_regs.data_latency = readl_relaxed(base + + L310_DATA_LATENCY_CTRL); + l2x0_saved_regs.filter_end = readl_relaxed(base + + L310_ADDR_FILTER_END); + l2x0_saved_regs.filter_start = readl_relaxed(base + + L310_ADDR_FILTER_START); + + revision = readl_relaxed(base + L2X0_CACHE_ID) & + L2X0_CACHE_ID_RTL_MASK; + + /* From r2p0, there is Prefetch offset/control register */ + if (revision >= L310_CACHE_ID_RTL_R2P0) + l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base + + L310_PREFETCH_CTRL); + + /* From r3p0, there is Power control register */ + if (revision >= L310_CACHE_ID_RTL_R3P0) + l2x0_saved_regs.pwr_ctrl = readl_relaxed(base + + L310_POWER_CTRL); +} + +static void l2c310_resume(void) +{ + void __iomem *base = l2x0_base; + + if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) { + unsigned revision; + + /* restore pl310 setup */ + 
writel_relaxed(l2x0_saved_regs.tag_latency, + base + L310_TAG_LATENCY_CTRL); + writel_relaxed(l2x0_saved_regs.data_latency, + base + L310_DATA_LATENCY_CTRL); + writel_relaxed(l2x0_saved_regs.filter_end, + base + L310_ADDR_FILTER_END); + writel_relaxed(l2x0_saved_regs.filter_start, + base + L310_ADDR_FILTER_START); + + revision = readl_relaxed(base + L2X0_CACHE_ID) & + L2X0_CACHE_ID_RTL_MASK; + + if (revision >= L310_CACHE_ID_RTL_R2P0) + l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base, + L310_PREFETCH_CTRL); + if (revision >= L310_CACHE_ID_RTL_R3P0) + l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base, + L310_POWER_CTRL); + + l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8); + + /* Re-enable full-line-of-zeros for Cortex-A9 */ + if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO) + set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1)); + } +} + +static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data) +{ + switch (act & ~CPU_TASKS_FROZEN) { + case CPU_STARTING: + set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1)); break; - default: - /* L210 and unknown types */ - lockregs = 1; + case CPU_DYING: + set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1))); break; } + return NOTIFY_OK; +} - for (i = 0; i < lockregs; i++) { - writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE + - i * L2X0_LOCKDOWN_STRIDE); - writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE + - i * L2X0_LOCKDOWN_STRIDE); +static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock) +{ + unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK; + bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9; + + if (rev >= L310_CACHE_ID_RTL_R2P0) { + if (cortex_a9) { + aux |= L310_AUX_CTRL_EARLY_BRESP; + pr_info("L2C-310 enabling early BRESP for Cortex-A9\n"); + } else if (aux & L310_AUX_CTRL_EARLY_BRESP) { + pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n"); + aux &= ~L310_AUX_CTRL_EARLY_BRESP; + } + } + + if (cortex_a9) { + u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL); + u32 acr = get_auxcr(); + + pr_debug("Cortex-A9 ACR=0x%08x\n", acr); + + if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO)) + pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n"); + + if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3))) + pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n"); + + if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) { + aux |= L310_AUX_CTRL_FULL_LINE_ZERO; + pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n"); + } + } else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) { + pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n"); + aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP); + } + + if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) { + u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL); + + pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n", + aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "", + aux & L310_AUX_CTRL_DATA_PREFETCH ? 
"D" : "", + 1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK)); + } + + /* r3p0 or later has power control register */ + if (rev >= L310_CACHE_ID_RTL_R3P0) { + u32 power_ctrl; + + l2c_write_sec(L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN, + base, L310_POWER_CTRL); + power_ctrl = readl_relaxed(base + L310_POWER_CTRL); + pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n", + power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis", + power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis"); + } + + /* + * Always enable non-secure access to the lockdown registers - + * we write to them as part of the L2C enable sequence so they + * need to be accessible. + */ + aux |= L310_AUX_CTRL_NS_LOCKDOWN; + + l2c_enable(base, aux, num_lock); + + if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) { + set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1)); + cpu_notifier(l2c310_cpu_enable_flz, 0); } } -void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) +static void __init l2c310_fixup(void __iomem *base, u32 cache_id, + struct outer_cache_fns *fns) { - u32 aux; - u32 cache_id; - u32 way_size = 0; - int ways; - int way_size_shift = L2X0_WAY_SIZE_SHIFT; - const char *type; + unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK; + const char *errata[8]; + unsigned n = 0; - l2x0_base = base; - if (cache_id_part_number_from_dt) - cache_id = cache_id_part_number_from_dt; - else - cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); - aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); + if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) && + revision < L310_CACHE_ID_RTL_R2P0 && + /* For bcm compatibility */ + fns->inv_range == l2c210_inv_range) { + fns->inv_range = l2c310_inv_range_erratum; + fns->flush_range = l2c310_flush_range_erratum; + errata[n++] = "588369"; + } + + if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) && + revision >= L310_CACHE_ID_RTL_R2P0 && + revision < L310_CACHE_ID_RTL_R3P1) { + fns->flush_all = l2c310_flush_all_erratum; + errata[n++] = "727915"; + } + + if (revision >= L310_CACHE_ID_RTL_R3P0 && + revision < L310_CACHE_ID_RTL_R3P2) { + u32 val = readl_relaxed(base + L310_PREFETCH_CTRL); + /* I don't think bit23 is required here... but iMX6 does so */ + if (val & (BIT(30) | BIT(23))) { + val &= ~(BIT(30) | BIT(23)); + l2c_write_sec(val, base, L310_PREFETCH_CTRL); + errata[n++] = "752271"; + } + } + + if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) && + revision == L310_CACHE_ID_RTL_R3P0) { + sync_reg_offset = L2X0_DUMMY_REG; + errata[n++] = "753970"; + } + + if (IS_ENABLED(CONFIG_PL310_ERRATA_769419)) + errata[n++] = "769419"; + + if (n) { + unsigned i; + + pr_info("L2C-310 errat%s", n > 1 ? "a" : "um"); + for (i = 0; i < n; i++) + pr_cont(" %s", errata[i]); + pr_cont(" enabled\n"); + } +} + +static void l2c310_disable(void) +{ + /* + * If full-line-of-zeros is enabled, we must first disable it in the + * Cortex-A9 auxiliary control register before disabling the L2 cache. 
+ */ + if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO) + set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1))); + l2c_disable(); +} + +static const struct l2c_init_data l2c310_init_fns __initconst = { + .type = "L2C-310", + .way_size_0 = SZ_8K, + .num_lock = 8, + .enable = l2c310_enable, + .fixup = l2c310_fixup, + .save = l2c310_save, + .outer_cache = { + .inv_range = l2c210_inv_range, + .clean_range = l2c210_clean_range, + .flush_range = l2c210_flush_range, + .flush_all = l2c210_flush_all, + .disable = l2c310_disable, + .sync = l2c210_sync, + .resume = l2c310_resume, + }, +}; + +static void __init __l2c_init(const struct l2c_init_data *data, + u32 aux_val, u32 aux_mask, u32 cache_id) +{ + struct outer_cache_fns fns; + unsigned way_size_bits, ways; + u32 aux, old_aux; + + /* + * Sanity check the aux values. aux_mask is the bits we preserve + * from reading the hardware register, and aux_val is the bits we + * set. + */ + if (aux_val & aux_mask) + pr_alert("L2C: platform provided aux values permit register corruption.\n"); + + old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); aux &= aux_mask; aux |= aux_val; + if (old_aux != aux) + pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n", + old_aux, aux); + /* Determine the number of ways */ switch (cache_id & L2X0_CACHE_ID_PART_MASK) { case L2X0_CACHE_ID_PART_L310: + if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16)) + pr_warn("L2C: DT/platform tries to modify or specify cache size\n"); if (aux & (1 << 16)) ways = 16; else ways = 8; - type = "L310"; -#ifdef CONFIG_PL310_ERRATA_753970 - /* Unmapped register. */ - sync_reg_offset = L2X0_DUMMY_REG; -#endif - if ((cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0) - outer_cache.set_debug = pl310_set_debug; break; + case L2X0_CACHE_ID_PART_L210: + case L2X0_CACHE_ID_PART_L220: ways = (aux >> 13) & 0xf; - type = "L210"; break; case AURORA_CACHE_ID: - sync_reg_offset = AURORA_SYNC_REG; ways = (aux >> 13) & 0xf; ways = 2 << ((ways + 1) >> 2); - way_size_shift = AURORA_WAY_SIZE_SHIFT; - type = "Aurora"; break; + default: /* Assume unknown chips have 8 ways */ ways = 8; - type = "L2x0 series"; break; } l2x0_way_mask = (1 << ways) - 1; /* - * L2 cache Size = Way size * Number of ways + * way_size_0 is the size that a way_size value of zero would be + * given the calculation: way_size = way_size_0 << way_size_bits. + * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k, + * then way_size_0 would be 8k. + * + * L2 cache size = number of ways * way size. + */ + way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >> + L2C_AUX_CTRL_WAY_SIZE_SHIFT; + l2x0_size = ways * (data->way_size_0 << way_size_bits); + + fns = data->outer_cache; + fns.write_sec = outer_cache.write_sec; + if (data->fixup) + data->fixup(l2x0_base, cache_id, &fns); + + /* + * Check if l2x0 controller is already enabled. If we are booting + * in non-secure mode accessing the below registers will fault. */ - way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17; - way_size = 1 << (way_size + way_size_shift); + if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) + data->enable(l2x0_base, aux, data->num_lock); - l2x0_size = ways * way_size * SZ_1K; + outer_cache = fns; /* - * Check if l2x0 controller is already enabled. - * If you are booting from non-secure mode - * accessing the below registers will fault. 
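[Editorial sketch, not part of the patch.] As a concrete check of the aux merge and the way-size arithmetic in __l2c_init() above, the standalone sketch below reproduces the calculation for the 0x02060000 value the Zynq code used to pass (64 KB ways, 8-way): the way-size field sits at bit 17 (the old code's ">> 17"), L2C-310's way_size_0 is 8 KB, and 8 * (8 KB << 3) gives 512 KB. The 3-bit field width is an assumption standing in for L2C_AUX_CTRL_WAY_SIZE_MASK.

/*
 * Standalone sketch, not part of the patch: the (hw & mask) | val merge
 * and the way-size arithmetic from __l2c_init(), fed with the 0x02060000
 * aux value the Zynq code used to pass (64 KB ways, 8-way).  The 3-bit
 * field at bits 19:17 is an assumption matching the old ">> 17" code.
 */
#include <stdio.h>

#define WAY_SIZE_SHIFT	17
#define WAY_SIZE_MASK	(0x7u << WAY_SIZE_SHIFT)

int main(void)
{
	unsigned int hw_aux = 0x02060000;	/* as read from L2X0_AUX_CTRL */
	unsigned int aux_val = 0, aux_mask = ~0u;	/* platform-provided */
	unsigned int ways = 8, way_size_0 = 8192;	/* SZ_8K for L2C-310 */

	if (aux_val & aux_mask)		/* same sanity check as __l2c_init() */
		printf("aux values permit register corruption\n");

	/* aux_mask selects the hardware bits to keep, aux_val the bits to set */
	unsigned int aux = (hw_aux & aux_mask) | aux_val;

	unsigned int way_size_bits = (aux & WAY_SIZE_MASK) >> WAY_SIZE_SHIFT;
	unsigned int l2_size = ways * (way_size_0 << way_size_bits);

	printf("aux=0x%08x, way_size_bits=%u, size=%u kB\n",
	       aux, way_size_bits, l2_size >> 10);
	return 0;
}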
+ * It is strange to save the register state before initialisation, + * but hey, this is what the DT implementations decided to do. */ - if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { - /* Make sure that I&D is not locked down when starting */ - l2x0_unlock(cache_id); + if (data->save) + data->save(l2x0_base); + + /* Re-read it in case some bits are reserved. */ + aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); + + pr_info("%s cache controller enabled, %d ways, %d kB\n", + data->type, ways, l2x0_size >> 10); + pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n", + data->type, cache_id, aux); +} - /* l2x0 controller is disabled */ - writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL); +void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask) +{ + const struct l2c_init_data *data; + u32 cache_id; - l2x0_inv_all(); + l2x0_base = base; - /* enable L2X0 */ - writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL); + cache_id = readl_relaxed(base + L2X0_CACHE_ID); + + switch (cache_id & L2X0_CACHE_ID_PART_MASK) { + default: + case L2X0_CACHE_ID_PART_L210: + data = &l2c210_data; + break; + + case L2X0_CACHE_ID_PART_L220: + data = &l2c220_data; + break; + + case L2X0_CACHE_ID_PART_L310: + data = &l2c310_init_fns; + break; } - /* Re-read it in case some bits are reserved. */ - aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); + __l2c_init(data, aux_val, aux_mask, cache_id); +} + +#ifdef CONFIG_OF +static int l2_wt_override; + +/* Aurora don't have the cache ID register available, so we have to + * pass it though the device tree */ +static u32 cache_id_part_number_from_dt; + +static void __init l2x0_of_parse(const struct device_node *np, + u32 *aux_val, u32 *aux_mask) +{ + u32 data[2] = { 0, 0 }; + u32 tag = 0; + u32 dirty = 0; + u32 val = 0, mask = 0; + + of_property_read_u32(np, "arm,tag-latency", &tag); + if (tag) { + mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK; + val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT; + } + + of_property_read_u32_array(np, "arm,data-latency", + data, ARRAY_SIZE(data)); + if (data[0] && data[1]) { + mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK | + L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK; + val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) | + ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT); + } + + of_property_read_u32(np, "arm,dirty-latency", &dirty); + if (dirty) { + mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK; + val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT; + } - /* Save the value for resuming. 
*/ - l2x0_saved_regs.aux_ctrl = aux; + *aux_val &= ~mask; + *aux_val |= val; + *aux_mask &= ~mask; +} + +static const struct l2c_init_data of_l2c210_data __initconst = { + .type = "L2C-210", + .way_size_0 = SZ_8K, + .num_lock = 1, + .of_parse = l2x0_of_parse, + .enable = l2c_enable, + .save = l2c_save, + .outer_cache = { + .inv_range = l2c210_inv_range, + .clean_range = l2c210_clean_range, + .flush_range = l2c210_flush_range, + .flush_all = l2c210_flush_all, + .disable = l2c_disable, + .sync = l2c210_sync, + .resume = l2c210_resume, + }, +}; + +static const struct l2c_init_data of_l2c220_data __initconst = { + .type = "L2C-220", + .way_size_0 = SZ_8K, + .num_lock = 1, + .of_parse = l2x0_of_parse, + .enable = l2c220_enable, + .save = l2c_save, + .outer_cache = { + .inv_range = l2c220_inv_range, + .clean_range = l2c220_clean_range, + .flush_range = l2c220_flush_range, + .flush_all = l2c220_flush_all, + .disable = l2c_disable, + .sync = l2c220_sync, + .resume = l2c210_resume, + }, +}; + +static void __init l2c310_of_parse(const struct device_node *np, + u32 *aux_val, u32 *aux_mask) +{ + u32 data[3] = { 0, 0, 0 }; + u32 tag[3] = { 0, 0, 0 }; + u32 filter[2] = { 0, 0 }; + + of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag)); + if (tag[0] && tag[1] && tag[2]) + writel_relaxed( + L310_LATENCY_CTRL_RD(tag[0] - 1) | + L310_LATENCY_CTRL_WR(tag[1] - 1) | + L310_LATENCY_CTRL_SETUP(tag[2] - 1), + l2x0_base + L310_TAG_LATENCY_CTRL); + + of_property_read_u32_array(np, "arm,data-latency", + data, ARRAY_SIZE(data)); + if (data[0] && data[1] && data[2]) + writel_relaxed( + L310_LATENCY_CTRL_RD(data[0] - 1) | + L310_LATENCY_CTRL_WR(data[1] - 1) | + L310_LATENCY_CTRL_SETUP(data[2] - 1), + l2x0_base + L310_DATA_LATENCY_CTRL); - if (!of_init) { - outer_cache.inv_range = l2x0_inv_range; - outer_cache.clean_range = l2x0_clean_range; - outer_cache.flush_range = l2x0_flush_range; - outer_cache.sync = l2x0_cache_sync; - outer_cache.flush_all = l2x0_flush_all; - outer_cache.inv_all = l2x0_inv_all; - outer_cache.disable = l2x0_disable; - } - - pr_info("%s cache controller enabled\n", type); - pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n", - ways, cache_id, aux, l2x0_size >> 10); + of_property_read_u32_array(np, "arm,filter-ranges", + filter, ARRAY_SIZE(filter)); + if (filter[1]) { + writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M), + l2x0_base + L310_ADDR_FILTER_END); + writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN, + l2x0_base + L310_ADDR_FILTER_START); + } } -#ifdef CONFIG_OF -static int l2_wt_override; +static const struct l2c_init_data of_l2c310_data __initconst = { + .type = "L2C-310", + .way_size_0 = SZ_8K, + .num_lock = 8, + .of_parse = l2c310_of_parse, + .enable = l2c310_enable, + .fixup = l2c310_fixup, + .save = l2c310_save, + .outer_cache = { + .inv_range = l2c210_inv_range, + .clean_range = l2c210_clean_range, + .flush_range = l2c210_flush_range, + .flush_all = l2c210_flush_all, + .disable = l2c310_disable, + .sync = l2c210_sync, + .resume = l2c310_resume, + }, +}; /* * Note that the end addresses passed to Linux primitives are @@ -524,6 +1166,100 @@ } } +static void aurora_save(void __iomem *base) +{ + l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL); + l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL); +} + +static void aurora_resume(void) +{ + void __iomem *base = l2x0_base; + + if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) { + writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL); + 
writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL); + } +} + +/* + * For Aurora cache in no outer mode, enable via the CP15 coprocessor + * broadcasting of cache commands to L2. + */ +static void __init aurora_enable_no_outer(void __iomem *base, u32 aux, + unsigned num_lock) +{ + u32 u; + + asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u)); + u |= AURORA_CTRL_FW; /* Set the FW bit */ + asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u)); + + isb(); + + l2c_enable(base, aux, num_lock); +} + +static void __init aurora_fixup(void __iomem *base, u32 cache_id, + struct outer_cache_fns *fns) +{ + sync_reg_offset = AURORA_SYNC_REG; +} + +static void __init aurora_of_parse(const struct device_node *np, + u32 *aux_val, u32 *aux_mask) +{ + u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU; + u32 mask = AURORA_ACR_REPLACEMENT_MASK; + + of_property_read_u32(np, "cache-id-part", + &cache_id_part_number_from_dt); + + /* Determine and save the write policy */ + l2_wt_override = of_property_read_bool(np, "wt-override"); + + if (l2_wt_override) { + val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY; + mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK; + } + + *aux_val &= ~mask; + *aux_val |= val; + *aux_mask &= ~mask; +} + +static const struct l2c_init_data of_aurora_with_outer_data __initconst = { + .type = "Aurora", + .way_size_0 = SZ_4K, + .num_lock = 4, + .of_parse = aurora_of_parse, + .enable = l2c_enable, + .fixup = aurora_fixup, + .save = aurora_save, + .outer_cache = { + .inv_range = aurora_inv_range, + .clean_range = aurora_clean_range, + .flush_range = aurora_flush_range, + .flush_all = l2x0_flush_all, + .disable = l2x0_disable, + .sync = l2x0_cache_sync, + .resume = aurora_resume, + }, +}; + +static const struct l2c_init_data of_aurora_no_outer_data __initconst = { + .type = "Aurora", + .way_size_0 = SZ_4K, + .num_lock = 4, + .of_parse = aurora_of_parse, + .enable = aurora_enable_no_outer, + .fixup = aurora_fixup, + .save = aurora_save, + .outer_cache = { + .resume = aurora_resume, + }, +}; + /* * For certain Broadcom SoCs, depending on the address range, different offsets * need to be added to the address before passing it to L2 for @@ -588,16 +1324,16 @@ /* normal case, no cross section between start and end */ if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { - l2x0_inv_range(new_start, new_end); + l2c210_inv_range(new_start, new_end); return; } /* They cross sections, so it can only be a cross from section * 2 to section 3 */ - l2x0_inv_range(new_start, + l2c210_inv_range(new_start, bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); - l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), + l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), new_end); } @@ -610,26 +1346,21 @@ if (unlikely(end <= start)) return; - if ((end - start) >= l2x0_size) { - l2x0_clean_all(); - return; - } - new_start = bcm_l2_phys_addr(start); new_end = bcm_l2_phys_addr(end); /* normal case, no cross section between start and end */ if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { - l2x0_clean_range(new_start, new_end); + l2c210_clean_range(new_start, new_end); return; } /* They cross sections, so it can only be a cross from section * 2 to section 3 */ - l2x0_clean_range(new_start, + l2c210_clean_range(new_start, bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); - l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), + l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), new_end); } @@ -643,7 +1374,7 @@ return; if ((end - start) >= l2x0_size) { - 
l2x0_flush_all(); + outer_cache.flush_all(); return; } @@ -652,283 +1383,67 @@ /* normal case, no cross section between start and end */ if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) { - l2x0_flush_range(new_start, new_end); + l2c210_flush_range(new_start, new_end); return; } /* They cross sections, so it can only be a cross from section * 2 to section 3 */ - l2x0_flush_range(new_start, + l2c210_flush_range(new_start, bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1)); - l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), + l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR), new_end); } -static void __init l2x0_of_setup(const struct device_node *np, - u32 *aux_val, u32 *aux_mask) -{ - u32 data[2] = { 0, 0 }; - u32 tag = 0; - u32 dirty = 0; - u32 val = 0, mask = 0; - - of_property_read_u32(np, "arm,tag-latency", &tag); - if (tag) { - mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK; - val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT; - } - - of_property_read_u32_array(np, "arm,data-latency", - data, ARRAY_SIZE(data)); - if (data[0] && data[1]) { - mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK | - L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK; - val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) | - ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT); - } - - of_property_read_u32(np, "arm,dirty-latency", &dirty); - if (dirty) { - mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK; - val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT; - } - - *aux_val &= ~mask; - *aux_val |= val; - *aux_mask &= ~mask; -} - -static void __init pl310_of_setup(const struct device_node *np, - u32 *aux_val, u32 *aux_mask) -{ - u32 data[3] = { 0, 0, 0 }; - u32 tag[3] = { 0, 0, 0 }; - u32 filter[2] = { 0, 0 }; - - of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag)); - if (tag[0] && tag[1] && tag[2]) - writel_relaxed( - ((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) | - ((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) | - ((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT), - l2x0_base + L2X0_TAG_LATENCY_CTRL); - - of_property_read_u32_array(np, "arm,data-latency", - data, ARRAY_SIZE(data)); - if (data[0] && data[1] && data[2]) - writel_relaxed( - ((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) | - ((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) | - ((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT), - l2x0_base + L2X0_DATA_LATENCY_CTRL); - - of_property_read_u32_array(np, "arm,filter-ranges", - filter, ARRAY_SIZE(filter)); - if (filter[1]) { - writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M), - l2x0_base + L2X0_ADDR_FILTER_END); - writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN, - l2x0_base + L2X0_ADDR_FILTER_START); - } -} - -static void __init pl310_save(void) -{ - u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) & - L2X0_CACHE_ID_RTL_MASK; - - l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base + - L2X0_TAG_LATENCY_CTRL); - l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base + - L2X0_DATA_LATENCY_CTRL); - l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base + - L2X0_ADDR_FILTER_END); - l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base + - L2X0_ADDR_FILTER_START); - - if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) { - /* - * From r2p0, there is Prefetch offset/control register - */ - l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base + - L2X0_PREFETCH_CTRL); - /* - * From r3p0, there is Power control register - */ - if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0) - l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base + - L2X0_POWER_CTRL); - 
} -} +/* Broadcom L2C-310 start from ARMs R3P2 or later, and require no fixups */ +static const struct l2c_init_data of_bcm_l2x0_data __initconst = { + .type = "BCM-L2C-310", + .way_size_0 = SZ_8K, + .num_lock = 8, + .of_parse = l2c310_of_parse, + .enable = l2c310_enable, + .save = l2c310_save, + .outer_cache = { + .inv_range = bcm_inv_range, + .clean_range = bcm_clean_range, + .flush_range = bcm_flush_range, + .flush_all = l2c210_flush_all, + .disable = l2c310_disable, + .sync = l2c210_sync, + .resume = l2c310_resume, + }, +}; -static void aurora_save(void) +static void __init tauros3_save(void __iomem *base) { - l2x0_saved_regs.ctrl = readl_relaxed(l2x0_base + L2X0_CTRL); - l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); -} + l2c_save(base); -static void __init tauros3_save(void) -{ l2x0_saved_regs.aux2_ctrl = - readl_relaxed(l2x0_base + TAUROS3_AUX2_CTRL); + readl_relaxed(base + TAUROS3_AUX2_CTRL); l2x0_saved_regs.prefetch_ctrl = - readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL); -} - -static void l2x0_resume(void) -{ - if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { - /* restore aux ctrl and enable l2 */ - l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID)); - - writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base + - L2X0_AUX_CTRL); - - l2x0_inv_all(); - - writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL); - } -} - -static void pl310_resume(void) -{ - u32 l2x0_revision; - - if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { - /* restore pl310 setup */ - writel_relaxed(l2x0_saved_regs.tag_latency, - l2x0_base + L2X0_TAG_LATENCY_CTRL); - writel_relaxed(l2x0_saved_regs.data_latency, - l2x0_base + L2X0_DATA_LATENCY_CTRL); - writel_relaxed(l2x0_saved_regs.filter_end, - l2x0_base + L2X0_ADDR_FILTER_END); - writel_relaxed(l2x0_saved_regs.filter_start, - l2x0_base + L2X0_ADDR_FILTER_START); - - l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) & - L2X0_CACHE_ID_RTL_MASK; - - if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) { - writel_relaxed(l2x0_saved_regs.prefetch_ctrl, - l2x0_base + L2X0_PREFETCH_CTRL); - if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0) - writel_relaxed(l2x0_saved_regs.pwr_ctrl, - l2x0_base + L2X0_POWER_CTRL); - } - } - - l2x0_resume(); -} - -static void aurora_resume(void) -{ - if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { - writel_relaxed(l2x0_saved_regs.aux_ctrl, - l2x0_base + L2X0_AUX_CTRL); - writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL); - } + readl_relaxed(base + L310_PREFETCH_CTRL); } static void tauros3_resume(void) { - if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { + void __iomem *base = l2x0_base; + + if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) { writel_relaxed(l2x0_saved_regs.aux2_ctrl, - l2x0_base + TAUROS3_AUX2_CTRL); + base + TAUROS3_AUX2_CTRL); writel_relaxed(l2x0_saved_regs.prefetch_ctrl, - l2x0_base + L2X0_PREFETCH_CTRL); - } - - l2x0_resume(); -} - -static void __init aurora_broadcast_l2_commands(void) -{ - __u32 u; - /* Enable Broadcasting of cache commands to L2*/ - __asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u)); - u |= AURORA_CTRL_FW; /* Set the FW bit */ - __asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u)); - isb(); -} - -static void __init aurora_of_setup(const struct device_node *np, - u32 *aux_val, u32 *aux_mask) -{ - u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU; - u32 mask = AURORA_ACR_REPLACEMENT_MASK; + base + L310_PREFETCH_CTRL); - of_property_read_u32(np, "cache-id-part", - &cache_id_part_number_from_dt); - - /* Determine 
and save the write policy */ - l2_wt_override = of_property_read_bool(np, "wt-override"); - - if (l2_wt_override) { - val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY; - mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK; + l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8); } - - *aux_val &= ~mask; - *aux_val |= val; - *aux_mask &= ~mask; } -static const struct l2x0_of_data pl310_data = { - .setup = pl310_of_setup, - .save = pl310_save, - .outer_cache = { - .resume = pl310_resume, - .inv_range = l2x0_inv_range, - .clean_range = l2x0_clean_range, - .flush_range = l2x0_flush_range, - .sync = l2x0_cache_sync, - .flush_all = l2x0_flush_all, - .inv_all = l2x0_inv_all, - .disable = l2x0_disable, - }, -}; - -static const struct l2x0_of_data l2x0_data = { - .setup = l2x0_of_setup, - .save = NULL, - .outer_cache = { - .resume = l2x0_resume, - .inv_range = l2x0_inv_range, - .clean_range = l2x0_clean_range, - .flush_range = l2x0_flush_range, - .sync = l2x0_cache_sync, - .flush_all = l2x0_flush_all, - .inv_all = l2x0_inv_all, - .disable = l2x0_disable, - }, -}; - -static const struct l2x0_of_data aurora_with_outer_data = { - .setup = aurora_of_setup, - .save = aurora_save, - .outer_cache = { - .resume = aurora_resume, - .inv_range = aurora_inv_range, - .clean_range = aurora_clean_range, - .flush_range = aurora_flush_range, - .sync = l2x0_cache_sync, - .flush_all = l2x0_flush_all, - .inv_all = l2x0_inv_all, - .disable = l2x0_disable, - }, -}; - -static const struct l2x0_of_data aurora_no_outer_data = { - .setup = aurora_of_setup, - .save = aurora_save, - .outer_cache = { - .resume = aurora_resume, - }, -}; - -static const struct l2x0_of_data tauros3_data = { - .setup = NULL, +static const struct l2c_init_data of_tauros3_data __initconst = { + .type = "Tauros3", + .way_size_0 = SZ_8K, + .num_lock = 8, + .enable = l2c_enable, .save = tauros3_save, /* Tauros3 broadcasts L1 cache operations to L2 */ .outer_cache = { @@ -936,43 +1451,26 @@ }, }; -static const struct l2x0_of_data bcm_l2x0_data = { - .setup = pl310_of_setup, - .save = pl310_save, - .outer_cache = { - .resume = pl310_resume, - .inv_range = bcm_inv_range, - .clean_range = bcm_clean_range, - .flush_range = bcm_flush_range, - .sync = l2x0_cache_sync, - .flush_all = l2x0_flush_all, - .inv_all = l2x0_inv_all, - .disable = l2x0_disable, - }, -}; - +#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns } static const struct of_device_id l2x0_ids[] __initconst = { - { .compatible = "arm,l210-cache", .data = (void *)&l2x0_data }, - { .compatible = "arm,l220-cache", .data = (void *)&l2x0_data }, - { .compatible = "arm,pl310-cache", .data = (void *)&pl310_data }, - { .compatible = "bcm,bcm11351-a2-pl310-cache", /* deprecated name */ - .data = (void *)&bcm_l2x0_data}, - { .compatible = "brcm,bcm11351-a2-pl310-cache", - .data = (void *)&bcm_l2x0_data}, - { .compatible = "marvell,aurora-outer-cache", - .data = (void *)&aurora_with_outer_data}, - { .compatible = "marvell,aurora-system-cache", - .data = (void *)&aurora_no_outer_data}, - { .compatible = "marvell,tauros3-cache", - .data = (void *)&tauros3_data }, + L2C_ID("arm,l210-cache", of_l2c210_data), + L2C_ID("arm,l220-cache", of_l2c220_data), + L2C_ID("arm,pl310-cache", of_l2c310_data), + L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data), + L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data), + L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data), + L2C_ID("marvell,tauros3-cache", of_tauros3_data), + /* Deprecated IDs */ + L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data), 
{} }; int __init l2x0_of_init(u32 aux_val, u32 aux_mask) { + const struct l2c_init_data *data; struct device_node *np; - const struct l2x0_of_data *data; struct resource res; + u32 cache_id, old_aux; np = of_find_matching_node(NULL, l2x0_ids); if (!np) @@ -989,23 +1487,29 @@ data = of_match_node(l2x0_ids, np)->data; - /* L2 configuration can only be changed if the cache is disabled */ - if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) { - if (data->setup) - data->setup(np, &aux_val, &aux_mask); - - /* For aurora cache in no outer mode select the - * correct mode using the coprocessor*/ - if (data == &aurora_no_outer_data) - aurora_broadcast_l2_commands(); + old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); + if (old_aux != ((old_aux & aux_mask) | aux_val)) { + pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n", + old_aux, (old_aux & aux_mask) | aux_val); + } else if (aux_mask != ~0U && aux_val != 0) { + pr_alert("L2C: platform provided aux values match the hardware, so have no effect. Please remove them.\n"); } - if (data->save) - data->save(); + /* All L2 caches are unified, so this property should be specified */ + if (!of_property_read_bool(np, "cache-unified")) + pr_err("L2C: device tree omits to specify unified cache\n"); + + /* L2 configuration can only be changed if the cache is disabled */ + if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) + if (data->of_parse) + data->of_parse(np, &aux_val, &aux_mask); + + if (cache_id_part_number_from_dt) + cache_id = cache_id_part_number_from_dt; + else + cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID); - of_init = true; - memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache)); - l2x0_init(l2x0_base, aux_val, aux_mask); + __l2c_init(data, aux_val, aux_mask, cache_id); return 0; } diff -Nur linux-3.15-rc4.orig/arch/arm/mm/Kconfig linux-3.15-rc4/arch/arm/mm/Kconfig --- linux-3.15-rc4.orig/arch/arm/mm/Kconfig 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mm/Kconfig 2014-05-07 17:05:51.052166726 +0200 @@ -897,6 +897,57 @@ This option enables optimisations for the PL310 cache controller. +config PL310_ERRATA_588369 + bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines" + depends on CACHE_L2X0 + help + The PL310 L2 cache controller implements three types of Clean & + Invalidate maintenance operations: by Physical Address + (offset 0x7F0), by Index/Way (0x7F8) and by Way (0x7FC). + They are architecturally defined to behave as the execution of a + clean operation followed immediately by an invalidate operation, + both performing to the same memory location. This functionality + is not correctly implemented in PL310 as clean lines are not + invalidated as a result of these operations. + +config PL310_ERRATA_727915 + bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption" + depends on CACHE_L2X0 + help + PL310 implements the Clean & Invalidate by Way L2 cache maintenance + operation (offset 0x7FC). This operation runs in background so that + PL310 can handle normal accesses while it is in progress. Under very + rare circumstances, due to this erratum, write data can be lost when + PL310 treats a cacheable write transaction during a Clean & + Invalidate by Way operation. + +config PL310_ERRATA_753970 + bool "PL310 errata: cache sync operation may be faulty" + depends on CACHE_PL310 + help + This option enables the workaround for the 753970 PL310 (r3p0) erratum. 
+ + Under some condition the effect of cache sync operation on + the store buffer still remains when the operation completes. + This means that the store buffer is always asked to drain and + this prevents it from merging any further writes. The workaround + is to replace the normal offset of cache sync operation (0x730) + by another offset targeting an unmapped PL310 register 0x740. + This has the same effect as the cache sync operation: store buffer + drain and waiting for all buffers empty. + +config PL310_ERRATA_769419 + bool "PL310 errata: no automatic Store Buffer drain" + depends on CACHE_L2X0 + help + On revisions of the PL310 prior to r3p2, the Store Buffer does + not automatically drain. This can cause normal, non-cacheable + writes to be retained when the memory system is idle, leading + to suboptimal I/O performance for drivers using coherent DMA. + This option adds a write barrier to the cpu_idle loop so that, + on systems with an outer cache, the store buffer is drained + explicitly. + config CACHE_TAUROS2 bool "Enable the Tauros2 L2 cache controller" depends on (ARCH_DOVE || ARCH_MMP || CPU_PJ4) diff -Nur linux-3.15-rc4.orig/arch/arm/mm/l2c-common.c linux-3.15-rc4/arch/arm/mm/l2c-common.c --- linux-3.15-rc4.orig/arch/arm/mm/l2c-common.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-3.15-rc4/arch/arm/mm/l2c-common.c 2014-05-07 17:05:51.052166726 +0200 @@ -0,0 +1,20 @@ +/* + * Copyright (C) 2010 ARM Ltd. + * Written by Catalin Marinas + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + */ +#include +#include +#include + +void outer_disable(void) +{ + WARN_ON(!irqs_disabled()); + WARN_ON(num_online_cpus() > 1); + + if (outer_cache.disable) + outer_cache.disable(); +} diff -Nur linux-3.15-rc4.orig/arch/arm/mm/l2c-l2x0-resume.S linux-3.15-rc4/arch/arm/mm/l2c-l2x0-resume.S --- linux-3.15-rc4.orig/arch/arm/mm/l2c-l2x0-resume.S 1970-01-01 01:00:00.000000000 +0100 +++ linux-3.15-rc4/arch/arm/mm/l2c-l2x0-resume.S 2014-05-07 17:05:51.052166726 +0200 @@ -0,0 +1,58 @@ +/* + * L2C-310 early resume code. This can be used by platforms to restore + * the settings of their L2 cache controller before restoring the + * processor state. + * + * This code can only be used to if you are running in the secure world. 
+ */ +#include +#include + + .text + +ENTRY(l2c310_early_resume) + adr r0, 1f + ldr r2, [r0] + add r0, r2, r0 + + ldmia r0, {r1, r2, r3, r4, r5, r6, r7, r8} + @ r1 = phys address of L2C-310 controller + @ r2 = aux_ctrl + @ r3 = tag_latency + @ r4 = data_latency + @ r5 = filter_start + @ r6 = filter_end + @ r7 = prefetch_ctrl + @ r8 = pwr_ctrl + + @ Check that the address has been initialised + teq r1, #0 + moveq pc, lr + + @ The prefetch and power control registers are revision dependent + @ and can be written whether or not the L2 cache is enabled + ldr r0, [r1, #L2X0_CACHE_ID] + and r0, r0, #L2X0_CACHE_ID_RTL_MASK + cmp r0, #L310_CACHE_ID_RTL_R2P0 + strcs r7, [r1, #L310_PREFETCH_CTRL] + cmp r0, #L310_CACHE_ID_RTL_R3P0 + strcs r8, [r1, #L310_POWER_CTRL] + + @ Don't setup the L2 cache if it is already enabled + ldr r0, [r1, #L2X0_CTRL] + tst r0, #L2X0_CTRL_EN + movne pc, lr + + str r3, [r1, #L310_TAG_LATENCY_CTRL] + str r4, [r1, #L310_DATA_LATENCY_CTRL] + str r6, [r1, #L310_ADDR_FILTER_END] + str r5, [r1, #L310_ADDR_FILTER_START] + + str r2, [r1, #L2X0_AUX_CTRL] + mov r9, #L2X0_CTRL_EN + str r9, [r1, #L2X0_CTRL] + mov pc, lr +ENDPROC(l2c310_early_resume) + + .align +1: .long l2x0_saved_regs - . diff -Nur linux-3.15-rc4.orig/arch/arm/mm/Makefile linux-3.15-rc4/arch/arm/mm/Makefile --- linux-3.15-rc4.orig/arch/arm/mm/Makefile 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/mm/Makefile 2014-05-07 17:05:51.052166726 +0200 @@ -95,7 +95,8 @@ AFLAGS_proc-v6.o :=-Wa,-march=armv6 AFLAGS_proc-v7.o :=-Wa,-march=armv7-a +obj-$(CONFIG_OUTER_CACHE) += l2c-common.o obj-$(CONFIG_CACHE_FEROCEON_L2) += cache-feroceon-l2.o -obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o +obj-$(CONFIG_CACHE_L2X0) += cache-l2x0.o l2c-l2x0-resume.o obj-$(CONFIG_CACHE_XSC3L2) += cache-xsc3l2.o obj-$(CONFIG_CACHE_TAUROS2) += cache-tauros2.o diff -Nur linux-3.15-rc4.orig/arch/arm/plat-samsung/s5p-sleep.S linux-3.15-rc4/arch/arm/plat-samsung/s5p-sleep.S --- linux-3.15-rc4.orig/arch/arm/plat-samsung/s5p-sleep.S 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/arch/arm/plat-samsung/s5p-sleep.S 2014-05-07 17:05:51.056166749 +0200 @@ -22,7 +22,6 @@ */ #include -#include .data .align diff -Nur linux-3.15-rc4.orig/Documentation/devicetree/bindings/leds/leds-pwm.txt linux-3.15-rc4/Documentation/devicetree/bindings/leds/leds-pwm.txt --- linux-3.15-rc4.orig/Documentation/devicetree/bindings/leds/leds-pwm.txt 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/Documentation/devicetree/bindings/leds/leds-pwm.txt 2014-05-07 17:05:51.088166948 +0200 @@ -13,6 +13,8 @@ For the pwms and pwm-names property please refer to: Documentation/devicetree/bindings/pwm/pwm.txt - max-brightness : Maximum brightness possible for the LED +- active-low : (optional) For PWMs where the LED is wired to supply + rather than ground. - label : (optional) see Documentation/devicetree/bindings/leds/common.txt - linux,default-trigger : (optional) diff -Nur linux-3.15-rc4.orig/Documentation/devicetree/bindings/mmc/mmc.txt linux-3.15-rc4/Documentation/devicetree/bindings/mmc/mmc.txt --- linux-3.15-rc4.orig/Documentation/devicetree/bindings/mmc/mmc.txt 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/Documentation/devicetree/bindings/mmc/mmc.txt 2014-05-07 17:05:51.088166948 +0200 @@ -5,6 +5,8 @@ Interpreted by the OF core: - reg: Registers location and length. - interrupts: Interrupts used by the MMC controller. +- clocks: Clocks needed for the host controller, if any. +- clock-names: Goes with clocks above. 
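
For readers of the clocks/clock-names addition above, a brief illustration of the consumer side may help: the host controller driver looks the clock up by the name given in clock-names through the common clock framework. This is a sketch only, not part of the patch, and the clock name "biu" is an invented example rather than something the binding defines.

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

/* Hypothetical probe helper: fetch and enable one named clock that the
 * controller node describes via "clocks"/"clock-names". */
static int example_mmc_enable_bus_clock(struct platform_device *pdev)
{
        struct clk *clk;

        clk = devm_clk_get(&pdev->dev, "biu");  /* must match clock-names */
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        /* Disable/unprepare on remove is omitted for brevity. */
        return clk_prepare_enable(clk);
}

A controller needing several clocks simply repeats the lookup for each entry listed in clock-names.
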
Card detection: If no property below is supplied, host native card detect is used. @@ -39,6 +41,15 @@ - mmc-hs200-1_8v: eMMC HS200 mode(1.8V I/O) is supported - mmc-hs200-1_2v: eMMC HS200 mode(1.2V I/O) is supported +Card power and reset control: +The following properties can be specified for cases where the MMC +peripheral needs additional reset, regulator and clock lines. It is for +example common for WiFi/BT adapters to have these separate from the main +MMC bus: + - card-reset-gpios: Specify GPIOs for card reset (reset active low) + - card-external-vcc-supply: Regulator to drive (independent) card VCC + - clock with name "card_ext_clock": External clock provided to the card + *NOTE* on CD and WP polarity. To use common for all SD/MMC host controllers line polarity properties, we have to fix the meaning of the "normal" and "inverted" line levels. We choose to follow the SDHCI standard, which specifies both those diff -Nur linux-3.15-rc4.orig/Documentation/devicetree/bindings/staging/imx-drm/fsl-imx-drm.txt linux-3.15-rc4/Documentation/devicetree/bindings/staging/imx-drm/fsl-imx-drm.txt --- linux-3.15-rc4.orig/Documentation/devicetree/bindings/staging/imx-drm/fsl-imx-drm.txt 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/Documentation/devicetree/bindings/staging/imx-drm/fsl-imx-drm.txt 2014-05-07 17:05:51.088166948 +0200 @@ -60,7 +60,8 @@ - compatible: Should be "fsl,imx-parallel-display" Optional properties: - interface_pix_fmt: How this display is connected to the - display interface. Currently supported types: "rgb24", "rgb565", "bgr666" + display interface. Currently supported types: "rgb24", "rgb565", "bgr666", + "rgb666" - edid: verbatim EDID data block describing attached display. - ddc: phandle describing the i2c bus handling the display data channel diff -Nur linux-3.15-rc4.orig/Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml linux-3.15-rc4/Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml --- linux-3.15-rc4.orig/Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/Documentation/DocBook/media/v4l/pixfmt-packed-rgb.xml 2014-05-07 17:05:51.088166948 +0200 @@ -279,6 +279,45 @@ + + V4L2_PIX_FMT_RGB666 + 'RGBH' + + r5 + r4 + r3 + r2 + r1 + r0 + g5 + g4 + + g3 + g2 + g1 + g0 + b5 + b4 + b3 + b2 + + b1 + b0 + + + + + + + + + + + + + + + V4L2_PIX_FMT_BGR24 'BGR3' diff -Nur linux-3.15-rc4.orig/drivers/ata/ahci_imx.c linux-3.15-rc4/drivers/ata/ahci_imx.c --- linux-3.15-rc4.orig/drivers/ata/ahci_imx.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/ata/ahci_imx.c 2014-05-07 17:05:51.088166948 +0200 @@ -46,6 +46,7 @@ struct regmap *gpr; bool no_device; bool first_time; + u32 phy_params; }; static int ahci_imx_hotplug; @@ -90,14 +91,7 @@ IMX6Q_GPR13_SATA_TX_LVL_MASK | IMX6Q_GPR13_SATA_MPLL_CLK_EN | IMX6Q_GPR13_SATA_TX_EDGE_RATE, - IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB | - IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M | - IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F | - IMX6Q_GPR13_SATA_SPD_MODE_3P0G | - IMX6Q_GPR13_SATA_MPLL_SS_EN | - IMX6Q_GPR13_SATA_TX_ATTEN_9_16 | - IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB | - IMX6Q_GPR13_SATA_TX_LVL_1_025_V); + imxpriv->phy_params); regmap_update_bits(imxpriv->gpr, IOMUXC_GPR13, IMX6Q_GPR13_SATA_MPLL_CLK_EN, IMX6Q_GPR13_SATA_MPLL_CLK_EN); @@ -160,6 +154,10 @@ writel(reg_val | PORT_PHY_CTL_PDDQ_LOC, mmio + PORT_PHY_CTL); imx_sata_disable(hpriv); imxpriv->no_device = true; + + dev_info(ap->dev, "no device found, disabling link.\n"); + dev_info(ap->dev, "pass " MODULE_PARAM_PREFIX + ".hotplug=1 to 
enable hotplug\n"); } static int ahci_imx_softreset(struct ata_link *link, unsigned int *class, @@ -200,6 +198,165 @@ }; MODULE_DEVICE_TABLE(of, imx_ahci_of_match); +struct reg_value { + u32 of_value; + u32 reg_value; +}; + +struct reg_property { + const char *name; + const struct reg_value *values; + size_t num_values; + u32 def_value; + u32 set_value; +}; + +static const struct reg_value gpr13_tx_level[] = { + { 937, IMX6Q_GPR13_SATA_TX_LVL_0_937_V }, + { 947, IMX6Q_GPR13_SATA_TX_LVL_0_947_V }, + { 957, IMX6Q_GPR13_SATA_TX_LVL_0_957_V }, + { 966, IMX6Q_GPR13_SATA_TX_LVL_0_966_V }, + { 976, IMX6Q_GPR13_SATA_TX_LVL_0_976_V }, + { 986, IMX6Q_GPR13_SATA_TX_LVL_0_986_V }, + { 996, IMX6Q_GPR13_SATA_TX_LVL_0_996_V }, + { 1005, IMX6Q_GPR13_SATA_TX_LVL_1_005_V }, + { 1015, IMX6Q_GPR13_SATA_TX_LVL_1_015_V }, + { 1025, IMX6Q_GPR13_SATA_TX_LVL_1_025_V }, + { 1035, IMX6Q_GPR13_SATA_TX_LVL_1_035_V }, + { 1045, IMX6Q_GPR13_SATA_TX_LVL_1_045_V }, + { 1054, IMX6Q_GPR13_SATA_TX_LVL_1_054_V }, + { 1064, IMX6Q_GPR13_SATA_TX_LVL_1_064_V }, + { 1074, IMX6Q_GPR13_SATA_TX_LVL_1_074_V }, + { 1084, IMX6Q_GPR13_SATA_TX_LVL_1_084_V }, + { 1094, IMX6Q_GPR13_SATA_TX_LVL_1_094_V }, + { 1104, IMX6Q_GPR13_SATA_TX_LVL_1_104_V }, + { 1113, IMX6Q_GPR13_SATA_TX_LVL_1_113_V }, + { 1123, IMX6Q_GPR13_SATA_TX_LVL_1_123_V }, + { 1133, IMX6Q_GPR13_SATA_TX_LVL_1_133_V }, + { 1143, IMX6Q_GPR13_SATA_TX_LVL_1_143_V }, + { 1152, IMX6Q_GPR13_SATA_TX_LVL_1_152_V }, + { 1162, IMX6Q_GPR13_SATA_TX_LVL_1_162_V }, + { 1172, IMX6Q_GPR13_SATA_TX_LVL_1_172_V }, + { 1182, IMX6Q_GPR13_SATA_TX_LVL_1_182_V }, + { 1191, IMX6Q_GPR13_SATA_TX_LVL_1_191_V }, + { 1201, IMX6Q_GPR13_SATA_TX_LVL_1_201_V }, + { 1211, IMX6Q_GPR13_SATA_TX_LVL_1_211_V }, + { 1221, IMX6Q_GPR13_SATA_TX_LVL_1_221_V }, + { 1230, IMX6Q_GPR13_SATA_TX_LVL_1_230_V }, + { 1240, IMX6Q_GPR13_SATA_TX_LVL_1_240_V } +}; + +static const struct reg_value gpr13_tx_boost[] = { + { 0, IMX6Q_GPR13_SATA_TX_BOOST_0_00_DB }, + { 370, IMX6Q_GPR13_SATA_TX_BOOST_0_37_DB }, + { 740, IMX6Q_GPR13_SATA_TX_BOOST_0_74_DB }, + { 111, IMX6Q_GPR13_SATA_TX_BOOST_1_11_DB }, + { 148, IMX6Q_GPR13_SATA_TX_BOOST_1_48_DB }, + { 185, IMX6Q_GPR13_SATA_TX_BOOST_1_85_DB }, + { 222, IMX6Q_GPR13_SATA_TX_BOOST_2_22_DB }, + { 259, IMX6Q_GPR13_SATA_TX_BOOST_2_59_DB }, + { 296, IMX6Q_GPR13_SATA_TX_BOOST_2_96_DB }, + { 333, IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB }, + { 370, IMX6Q_GPR13_SATA_TX_BOOST_3_70_DB }, + { 407, IMX6Q_GPR13_SATA_TX_BOOST_4_07_DB }, + { 444, IMX6Q_GPR13_SATA_TX_BOOST_4_44_DB }, + { 481, IMX6Q_GPR13_SATA_TX_BOOST_4_81_DB }, + { 528, IMX6Q_GPR13_SATA_TX_BOOST_5_28_DB }, + { 575, IMX6Q_GPR13_SATA_TX_BOOST_5_75_DB } +}; + +static const struct reg_value gpr13_tx_atten[] = { + { 8, IMX6Q_GPR13_SATA_TX_ATTEN_8_16 }, + { 9, IMX6Q_GPR13_SATA_TX_ATTEN_9_16 }, + { 10, IMX6Q_GPR13_SATA_TX_ATTEN_10_16 }, + { 12, IMX6Q_GPR13_SATA_TX_ATTEN_12_16 }, + { 14, IMX6Q_GPR13_SATA_TX_ATTEN_14_16 }, + { 16, IMX6Q_GPR13_SATA_TX_ATTEN_16_16 }, +}; + +static const struct reg_value gpr13_rx_eq[] = { + { 500, IMX6Q_GPR13_SATA_RX_EQ_VAL_0_5_DB }, + { 1000, IMX6Q_GPR13_SATA_RX_EQ_VAL_1_0_DB }, + { 1500, IMX6Q_GPR13_SATA_RX_EQ_VAL_1_5_DB }, + { 2000, IMX6Q_GPR13_SATA_RX_EQ_VAL_2_0_DB }, + { 2500, IMX6Q_GPR13_SATA_RX_EQ_VAL_2_5_DB }, + { 3000, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB }, + { 3500, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_5_DB }, + { 4000, IMX6Q_GPR13_SATA_RX_EQ_VAL_4_0_DB }, +}; + +static const struct reg_property gpr13_props[] = { + { + .name = "fsl,transmit-level-mV", + .values = gpr13_tx_level, + .num_values = ARRAY_SIZE(gpr13_tx_level), + 
.def_value = IMX6Q_GPR13_SATA_TX_LVL_1_025_V, + }, { + .name = "fsl,transmit-boost-mdB", + .values = gpr13_tx_boost, + .num_values = ARRAY_SIZE(gpr13_tx_boost), + .def_value = IMX6Q_GPR13_SATA_TX_BOOST_3_33_DB, + }, { + .name = "fsl,transmit-atten-16ths", + .values = gpr13_tx_atten, + .num_values = ARRAY_SIZE(gpr13_tx_atten), + .def_value = IMX6Q_GPR13_SATA_TX_ATTEN_9_16, + }, { + .name = "fsl,receive-eq-mdB", + .values = gpr13_rx_eq, + .num_values = ARRAY_SIZE(gpr13_rx_eq), + .def_value = IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB, + }, { + .name = "fsl,no-spread-spectrum", + .def_value = IMX6Q_GPR13_SATA_MPLL_SS_EN, + .set_value = 0, + }, +}; + +static u32 imx_ahci_parse_props(struct device *dev, + const struct reg_property *prop, size_t num) +{ + struct device_node *np = dev->of_node; + u32 reg_value = 0; + int i, j; + + for (i = 0; i < num; i++, prop++) { + u32 of_val; + + if (prop->num_values == 0) { + if (of_property_read_bool(np, prop->name)) + reg_value |= prop->set_value; + else + reg_value |= prop->def_value; + continue; + } + + if (of_property_read_u32(np, prop->name, &of_val)) { + dev_info(dev, "%s not specified, using %08x\n", + prop->name, prop->def_value); + reg_value |= prop->def_value; + continue; + } + + for (j = 0; j < prop->num_values; j++) { + if (prop->values[j].of_value == of_val) { + dev_info(dev, "%s value %u, using %08x\n", + prop->name, of_val, prop->values[j].reg_value); + reg_value |= prop->values[j].reg_value; + break; + } + } + + if (j == prop->num_values) { + dev_err(dev, "DT property %s is not a valid value\n", + prop->name); + reg_value |= prop->def_value; + } + } + + return reg_value; +} + static int imx_ahci_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -227,6 +384,8 @@ } if (imxpriv->type == AHCI_IMX6Q) { + u32 reg_value; + imxpriv->gpr = syscon_regmap_lookup_by_compatible( "fsl,imx6q-iomuxc-gpr"); if (IS_ERR(imxpriv->gpr)) { @@ -234,6 +393,15 @@ "failed to find fsl,imx6q-iomux-gpr regmap\n"); return PTR_ERR(imxpriv->gpr); } + + reg_value = imx_ahci_parse_props(dev, gpr13_props, + ARRAY_SIZE(gpr13_props)); + + imxpriv->phy_params = + IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M | + IMX6Q_GPR13_SATA_RX_DPLL_MODE_2P_4F | + IMX6Q_GPR13_SATA_SPD_MODE_3P0G | + reg_value; } hpriv = ahci_platform_get_resources(pdev); diff -Nur linux-3.15-rc4.orig/drivers/cec/cec-dev.c linux-3.15-rc4/drivers/cec/cec-dev.c --- linux-3.15-rc4.orig/drivers/cec/cec-dev.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-3.15-rc4/drivers/cec/cec-dev.c 2014-05-07 17:05:51.088166948 +0200 @@ -0,0 +1,384 @@ +/* + * HDMI Consumer Electronics Control + * + * This provides the user API for communication with HDMI CEC complaint + * devices in kernel drivers, and is based upon the protocol developed + * by Freescale for their i.MX SoCs. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ */ +#include +#include +#include +#include +#include +#include +#include + +struct cec_event { + struct cec_user_event usr; + struct list_head node; +}; + +static struct class *cec_class; +static int cec_major; + +static void cec_dev_send_message(struct cec_dev *cec_dev, u8 *msg, + size_t count) +{ + unsigned long flags; + + spin_lock_irqsave(&cec_dev->lock, flags); + cec_dev->retries = 5; + cec_dev->write_busy = 1; + cec_dev->send_message(cec_dev, msg, count); + spin_unlock_irqrestore(&cec_dev->lock, flags); +} + +void cec_dev_event(struct cec_dev *cec_dev, int type, u8 *msg, size_t len) +{ + struct cec_event *event; + unsigned long flags; + + event = kzalloc(sizeof(*event), GFP_ATOMIC); + if (event) { + event->usr.event_type = type; + event->usr.msg_len = len; + if (msg) + memcpy(event->usr.msg, msg, len); + + spin_lock_irqsave(&cec_dev->lock, flags); + list_add_tail(&event->node, &cec_dev->events); + spin_unlock_irqrestore(&cec_dev->lock, flags); + wake_up(&cec_dev->waitq); + } +} +EXPORT_SYMBOL_GPL(cec_dev_event); + +static int cec_dev_lock_write(struct cec_dev *cec_dev, struct file *file) + __acquires(cec_dev->mutex) +{ + int ret; + + do { + if (file->f_flags & O_NONBLOCK) { + if (cec_dev->write_busy) + return -EAGAIN; + } else { + ret = wait_event_interruptible(cec_dev->waitq, + !cec_dev->write_busy); + if (ret) + break; + } + + ret = mutex_lock_interruptible(&cec_dev->mutex); + if (ret) + break; + + if (!cec_dev->write_busy) + break; + + mutex_unlock(&cec_dev->mutex); + } while (1); + + return ret; +} + +static ssize_t cec_dev_read(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + struct cec_dev *cec_dev = file->private_data; + ssize_t ret; + + if (count > sizeof(struct cec_user_event)) + count = sizeof(struct cec_user_event); + + if (!access_ok(VERIFY_WRITE, buf, count)) + return -EFAULT; + + do { + struct cec_event *event = NULL; + unsigned long flags; + + spin_lock_irqsave(&cec_dev->lock, flags); + if (!list_empty(&cec_dev->events)) { + event = list_first_entry(&cec_dev->events, + struct cec_event, node); + list_del(&event->node); + } + spin_unlock_irqrestore(&cec_dev->lock, flags); + + if (event) { + ret = __copy_to_user(buf, &event->usr, count) ? 
+ -EFAULT : count; + kfree(event); + break; + } + + if (file->f_flags & O_NONBLOCK) { + ret = -EAGAIN; + break; + } + + ret = wait_event_interruptible(cec_dev->waitq, + !list_empty(&cec_dev->events)); + if (ret) + break; + } while (1); + + return ret; +} + +static ssize_t cec_dev_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct cec_dev *cec_dev = file->private_data; + u8 msg[MAX_MESSAGE_LEN]; + int ret; + + if (count > sizeof(msg)) + return -E2BIG; + + if (copy_from_user(msg, buf, count)) + return -EFAULT; + + ret = cec_dev_lock_write(cec_dev, file); + if (ret) + return ret; + + cec_dev_send_message(cec_dev, msg, count); + + mutex_unlock(&cec_dev->mutex); + + return count; +} + +static long cec_dev_ioctl(struct file *file, u_int cmd, unsigned long arg) +{ + struct cec_dev *cec_dev = file->private_data; + int ret; + + switch (cmd) { + case HDMICEC_IOC_O_SETLOGICALADDRESS: + case HDMICEC_IOC_SETLOGICALADDRESS: + if (arg > 15) { + ret = -EINVAL; + break; + } + + ret = cec_dev_lock_write(cec_dev, file); + if (ret == 0) { + unsigned char msg[1]; + + cec_dev->addresses = BIT(arg); + cec_dev->set_address(cec_dev, cec_dev->addresses); + + /* + * Send a ping message with the source and destination + * set to our address; the result indicates whether + * unit has chosen our address simultaneously. + */ + msg[0] = arg << 4 | arg; + cec_dev_send_message(cec_dev, msg, sizeof(msg)); + mutex_unlock(&cec_dev->mutex); + } + break; + + case HDMICEC_IOC_STARTDEVICE: + ret = mutex_lock_interruptible(&cec_dev->mutex); + if (ret == 0) { + cec_dev->addresses = BIT(15); + cec_dev->set_address(cec_dev, cec_dev->addresses); + mutex_unlock(&cec_dev->mutex); + } + break; + + case HDMICEC_IOC_STOPDEVICE: + ret = 0; + break; + + case HDMICEC_IOC_GETPHYADDRESS: + ret = put_user(cec_dev->physical, (u16 __user *)arg); + ret = -ENOIOCTLCMD; + break; + + default: + ret = -ENOIOCTLCMD; + break; + } + + return ret; +} + +static unsigned cec_dev_poll(struct file *file, poll_table *wait) +{ + struct cec_dev *cec_dev = file->private_data; + unsigned mask = 0; + + poll_wait(file, &cec_dev->waitq, wait); + + if (cec_dev->write_busy == 0) + mask |= POLLOUT | POLLWRNORM; + if (!list_empty(&cec_dev->events)) + mask |= POLLIN | POLLRDNORM; + + return mask; +} + +static int cec_dev_release(struct inode *inode, struct file *file) +{ + struct cec_dev *cec_dev = file->private_data; + + mutex_lock(&cec_dev->mutex); + if (cec_dev->users >= 1) + cec_dev->users -= 1; + if (cec_dev->users == 0) { + /* + * Wait for any write to complete before shutting down. + * A message should complete in a maximum of 2.75ms * + * 160 bits + 4.7ms, or 444.7ms. Let's call that 500ms. + * If we time out, shutdown anyway. 
+ */ + wait_event_timeout(cec_dev->waitq, !cec_dev->write_busy, + msecs_to_jiffies(500)); + + cec_dev->release(cec_dev); + + while (!list_empty(&cec_dev->events)) { + struct cec_event *event; + + event = list_first_entry(&cec_dev->events, + struct cec_event, node); + list_del(&event->node); + kfree(event); + } + } + mutex_unlock(&cec_dev->mutex); + return 0; +} + +static int cec_dev_open(struct inode *inode, struct file *file) +{ + struct cec_dev *cec_dev = container_of(inode->i_cdev, struct cec_dev, + cdev); + int ret = 0; + + nonseekable_open(inode, file); + + file->private_data = cec_dev; + + ret = mutex_lock_interruptible(&cec_dev->mutex); + if (ret) + return ret; + + if (cec_dev->users++ == 0) { + cec_dev->addresses = BIT(15); + + ret = cec_dev->open(cec_dev); + if (ret < 0) + cec_dev->users = 0; + } + mutex_unlock(&cec_dev->mutex); + + return ret; +} + +static const struct file_operations hdmi_cec_fops = { + .owner = THIS_MODULE, + .read = cec_dev_read, + .write = cec_dev_write, + .open = cec_dev_open, + .unlocked_ioctl = cec_dev_ioctl, + .release = cec_dev_release, + .poll = cec_dev_poll, +}; + +void cec_dev_init(struct cec_dev *cec_dev, struct module *module) +{ + cec_dev->devn = MKDEV(cec_major, 0); + + INIT_LIST_HEAD(&cec_dev->events); + init_waitqueue_head(&cec_dev->waitq); + spin_lock_init(&cec_dev->lock); + mutex_init(&cec_dev->mutex); + + cec_dev->addresses = BIT(15); + + cdev_init(&cec_dev->cdev, &hdmi_cec_fops); + cec_dev->cdev.owner = module; +} +EXPORT_SYMBOL_GPL(cec_dev_init); + +int cec_dev_add(struct cec_dev *cec_dev, struct device *dev, const char *name) +{ + struct device *cd; + int ret; + + ret = cdev_add(&cec_dev->cdev, cec_dev->devn, 1); + if (ret < 0) + goto err_cdev; + + cd = device_create(cec_class, dev, cec_dev->devn, NULL, name); + if (IS_ERR(cd)) { + ret = PTR_ERR(cd); + dev_err(dev, "can't create device: %d\n", ret); + goto err_dev; + } + + return 0; + + err_dev: + cdev_del(&cec_dev->cdev); + err_cdev: + return ret; +} +EXPORT_SYMBOL_GPL(cec_dev_add); + +void cec_dev_remove(struct cec_dev *cec_dev) +{ + device_destroy(cec_class, cec_dev->devn); + cdev_del(&cec_dev->cdev); +} +EXPORT_SYMBOL_GPL(cec_dev_remove); + +static int cec_init(void) +{ + dev_t dev; + int ret; + + cec_class = class_create(THIS_MODULE, "hdmi-cec"); + if (IS_ERR(cec_class)) { + ret = PTR_ERR(cec_class); + pr_err("cec: can't create cec class: %d\n", ret); + goto err_class; + } + + ret = alloc_chrdev_region(&dev, 0, 1, "hdmi-cec"); + if (ret) { + pr_err("cec: can't create character devices: %d\n", ret); + goto err_chrdev; + } + + cec_major = MAJOR(dev); + + return 0; + + err_chrdev: + class_destroy(cec_class); + err_class: + return ret; +} +subsys_initcall(cec_init); + +static void cec_exit(void) +{ + unregister_chrdev_region(MKDEV(cec_major, 0), 1); + class_destroy(cec_class); +} +module_exit(cec_exit); + +MODULE_AUTHOR("Russell King "); +MODULE_DESCRIPTION("Generic HDMI CEC driver"); +MODULE_LICENSE("GPL"); diff -Nur linux-3.15-rc4.orig/drivers/cec/Kconfig linux-3.15-rc4/drivers/cec/Kconfig --- linux-3.15-rc4.orig/drivers/cec/Kconfig 1970-01-01 01:00:00.000000000 +0100 +++ linux-3.15-rc4/drivers/cec/Kconfig 2014-05-07 17:05:51.088166948 +0200 @@ -0,0 +1,14 @@ +# +# Consumer Electroncs Control support +# + +menu "Consumer Electronics Control devices" + +config CEC + bool + +config HDMI_CEC_CORE + tristate + select CEC + +endmenu diff -Nur linux-3.15-rc4.orig/drivers/cec/Makefile linux-3.15-rc4/drivers/cec/Makefile --- linux-3.15-rc4.orig/drivers/cec/Makefile 1970-01-01 01:00:00.000000000 
+0100 +++ linux-3.15-rc4/drivers/cec/Makefile 2014-05-07 17:05:51.088166948 +0200 @@ -0,0 +1 @@ +obj-$(CONFIG_HDMI_CEC_CORE) += cec-dev.o diff -Nur linux-3.15-rc4.orig/drivers/gpu/drm/drm_crtc_helper.c linux-3.15-rc4/drivers/gpu/drm/drm_crtc_helper.c --- linux-3.15-rc4.orig/drivers/gpu/drm/drm_crtc_helper.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/gpu/drm/drm_crtc_helper.c 2014-05-07 17:05:51.088166948 +0200 @@ -140,16 +140,10 @@ static void __drm_helper_disable_unused_functions(struct drm_device *dev) { struct drm_encoder *encoder; - struct drm_connector *connector; struct drm_crtc *crtc; drm_warn_on_modeset_not_all_locked(dev); - list_for_each_entry(connector, &dev->mode_config.connector_list, head) { - if (!connector->encoder) - continue; - } - list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) { if (!drm_helper_encoder_in_use(encoder)) { drm_encoder_disable(encoder); diff -Nur linux-3.15-rc4.orig/drivers/Kconfig linux-3.15-rc4/drivers/Kconfig --- linux-3.15-rc4.orig/drivers/Kconfig 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/Kconfig 2014-05-07 17:05:51.088166948 +0200 @@ -174,4 +174,6 @@ source "drivers/mcb/Kconfig" +source "drivers/cec/Kconfig" + endmenu diff -Nur linux-3.15-rc4.orig/drivers/leds/leds-pwm.c linux-3.15-rc4/drivers/leds/leds-pwm.c --- linux-3.15-rc4.orig/drivers/leds/leds-pwm.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/leds/leds-pwm.c 2014-05-07 17:05:51.092166973 +0200 @@ -69,6 +69,10 @@ duty *= brightness; do_div(duty, max); + + if (led_dat->active_low) + duty = led_dat->period - duty; + led_dat->duty = duty; if (led_dat->can_sleep) @@ -92,55 +96,75 @@ } } -static int led_pwm_create_of(struct platform_device *pdev, - struct led_pwm_priv *priv) +static int led_pwm_add(struct device *dev, struct led_pwm_priv *priv, + struct led_pwm *led, struct device_node *child) { - struct device_node *child; + struct led_pwm_data *led_data = &priv->leds[priv->num_leds]; int ret; - for_each_child_of_node(pdev->dev.of_node, child) { - struct led_pwm_data *led_dat = &priv->leds[priv->num_leds]; + led_data->active_low = led->active_low; + led_data->period = led->pwm_period_ns; + led_data->cdev.name = led->name; + led_data->cdev.default_trigger = led->default_trigger; + led_data->cdev.brightness_set = led_pwm_set; + led_data->cdev.brightness = LED_OFF; + led_data->cdev.max_brightness = led->max_brightness; + led_data->cdev.flags = LED_CORE_SUSPENDRESUME; + + if (child) + led_data->pwm = devm_of_pwm_get(dev, child, NULL); + else + led_data->pwm = devm_pwm_get(dev, led->name); + if (IS_ERR(led_data->pwm)) { + ret = PTR_ERR(led_data->pwm); + dev_err(dev, "unable to request PWM for %s: %d\n", + led->name, ret); + return ret; + } - led_dat->cdev.name = of_get_property(child, "label", - NULL) ? 
: child->name; + if (child) + led_data->period = pwm_get_period(led_data->pwm); - led_dat->pwm = devm_of_pwm_get(&pdev->dev, child, NULL); - if (IS_ERR(led_dat->pwm)) { - dev_err(&pdev->dev, "unable to request PWM for %s\n", - led_dat->cdev.name); - ret = PTR_ERR(led_dat->pwm); - goto err; - } - /* Get the period from PWM core when n*/ - led_dat->period = pwm_get_period(led_dat->pwm); + led_data->can_sleep = pwm_can_sleep(led_data->pwm); + if (led_data->can_sleep) + INIT_WORK(&led_data->work, led_pwm_work); - led_dat->cdev.default_trigger = of_get_property(child, + ret = led_classdev_register(dev, &led_data->cdev); + if (ret == 0) { + priv->num_leds++; + } else { + dev_err(dev, "failed to register PWM led for %s: %d\n", + led->name, ret); + } + + return ret; +} + +static int led_pwm_create_of(struct device *dev, struct led_pwm_priv *priv) +{ + struct device_node *child; + struct led_pwm led; + int ret = 0; + + memset(&led, 0, sizeof(led)); + + for_each_child_of_node(dev->of_node, child) { + led.name = of_get_property(child, "label", NULL) ? : + child->name; + + led.default_trigger = of_get_property(child, "linux,default-trigger", NULL); + led.active_low = of_property_read_bool(child, "active-low"); of_property_read_u32(child, "max-brightness", - &led_dat->cdev.max_brightness); + &led.max_brightness); - led_dat->cdev.brightness_set = led_pwm_set; - led_dat->cdev.brightness = LED_OFF; - led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; - - led_dat->can_sleep = pwm_can_sleep(led_dat->pwm); - if (led_dat->can_sleep) - INIT_WORK(&led_dat->work, led_pwm_work); - - ret = led_classdev_register(&pdev->dev, &led_dat->cdev); - if (ret < 0) { - dev_err(&pdev->dev, "failed to register for %s\n", - led_dat->cdev.name); + ret = led_pwm_add(dev, priv, &led, child); + if (ret) { of_node_put(child); - goto err; + break; } - priv->num_leds++; } - return 0; -err: - led_pwm_cleanup(priv); - return ret; } @@ -166,51 +190,23 @@ if (pdata) { for (i = 0; i < count; i++) { - struct led_pwm *cur_led = &pdata->leds[i]; - struct led_pwm_data *led_dat = &priv->leds[i]; - - led_dat->pwm = devm_pwm_get(&pdev->dev, cur_led->name); - if (IS_ERR(led_dat->pwm)) { - ret = PTR_ERR(led_dat->pwm); - dev_err(&pdev->dev, - "unable to request PWM for %s\n", - cur_led->name); - goto err; - } - - led_dat->cdev.name = cur_led->name; - led_dat->cdev.default_trigger = cur_led->default_trigger; - led_dat->active_low = cur_led->active_low; - led_dat->period = cur_led->pwm_period_ns; - led_dat->cdev.brightness_set = led_pwm_set; - led_dat->cdev.brightness = LED_OFF; - led_dat->cdev.max_brightness = cur_led->max_brightness; - led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; - - led_dat->can_sleep = pwm_can_sleep(led_dat->pwm); - if (led_dat->can_sleep) - INIT_WORK(&led_dat->work, led_pwm_work); - - ret = led_classdev_register(&pdev->dev, &led_dat->cdev); - if (ret < 0) - goto err; + ret = led_pwm_add(&pdev->dev, priv, &pdata->leds[i], + NULL); + if (ret) + break; } - priv->num_leds = count; } else { - ret = led_pwm_create_of(pdev, priv); - if (ret) - return ret; + ret = led_pwm_create_of(&pdev->dev, priv); + } + + if (ret) { + led_pwm_cleanup(priv); + return ret; } platform_set_drvdata(pdev, priv); return 0; - -err: - priv->num_leds = i; - led_pwm_cleanup(priv); - - return ret; } static int led_pwm_remove(struct platform_device *pdev) diff -Nur linux-3.15-rc4.orig/drivers/Makefile linux-3.15-rc4/drivers/Makefile --- linux-3.15-rc4.orig/drivers/Makefile 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/Makefile 2014-05-07 
17:05:51.092166973 +0200 @@ -157,3 +157,4 @@ obj-$(CONFIG_FMC) += fmc/ obj-$(CONFIG_POWERCAP) += powercap/ obj-$(CONFIG_MCB) += mcb/ +obj-$(CONFIG_CEC) += cec/ diff -Nur linux-3.15-rc4.orig/drivers/mmc/core/core.c linux-3.15-rc4/drivers/mmc/core/core.c --- linux-3.15-rc4.orig/drivers/mmc/core/core.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/core/core.c 2014-05-07 17:05:51.092166973 +0200 @@ -13,11 +13,13 @@ #include #include #include +#include #include #include #include #include #include +#include #include #include #include @@ -1504,6 +1506,43 @@ mmc_host_clk_release(host); } +static void mmc_card_power_up(struct mmc_host *host) +{ + int i; + struct gpio_desc **gds = host->card_reset_gpios; + + for (i = 0; i < ARRAY_SIZE(host->card_reset_gpios); i++) { + if (gds[i]) { + dev_dbg(host->parent, "Asserting reset line %d", i); + gpiod_set_value(gds[i], 1); + } + } + + if (host->card_regulator) { + dev_dbg(host->parent, "Enabling external regulator"); + if (regulator_enable(host->card_regulator)) + dev_err(host->parent, "Failed to enable external regulator"); + } + + if (host->card_clk) { + dev_dbg(host->parent, "Enabling external clock"); + clk_prepare_enable(host->card_clk); + } + + /* 2ms delay to let clocks and power settle */ + mmc_delay(20); + + for (i = 0; i < ARRAY_SIZE(host->card_reset_gpios); i++) { + if (gds[i]) { + dev_dbg(host->parent, "Deasserting reset line %d", i); + gpiod_set_value(gds[i], 0); + } + } + + /* 2ms delay to after reset release */ + mmc_delay(20); +} + /* * Apply power to the MMC stack. This is a two-stage process. * First, we enable power to the card without the clock running. @@ -1520,6 +1559,9 @@ if (host->ios.power_mode == MMC_POWER_ON) return; + /* Power up the card/module first, if needed */ + mmc_card_power_up(host); + mmc_host_clk_hold(host); host->ios.vdd = fls(ocr) - 1; diff -Nur linux-3.15-rc4.orig/drivers/mmc/core/host.c linux-3.15-rc4/drivers/mmc/core/host.c --- linux-3.15-rc4.orig/drivers/mmc/core/host.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/core/host.c 2014-05-07 17:05:51.092166973 +0200 @@ -12,14 +12,18 @@ * MMC host class device management */ +#include +#include #include #include +#include #include #include #include #include #include #include +#include #include #include @@ -457,6 +461,66 @@ EXPORT_SYMBOL(mmc_of_parse); +static int mmc_of_parse_child(struct mmc_host *host) +{ + struct device_node *np; + struct clk *clk; + int i; + + if (!host->parent || !host->parent->of_node) + return 0; + + np = host->parent->of_node; + + host->card_regulator = regulator_get(host->parent, "card-external-vcc"); + if (IS_ERR(host->card_regulator)) { + if (PTR_ERR(host->card_regulator) == -EPROBE_DEFER) + return PTR_ERR(host->card_regulator); + host->card_regulator = NULL; + } + + /* Parse card power/reset/clock control */ + if (of_find_property(np, "card-reset-gpios", NULL)) { + struct gpio_desc *gpd; + int level = 0; + + /* + * If the regulator is enabled, then we can hold the + * card in reset with an active high resets. Otherwise, + * hold the resets low. 
+ */ + if (host->card_regulator && regulator_is_enabled(host->card_regulator)) + level = 1; + + for (i = 0; i < ARRAY_SIZE(host->card_reset_gpios); i++) { + gpd = devm_gpiod_get_index(host->parent, "card-reset", i); + if (IS_ERR(gpd)) { + if (PTR_ERR(gpd) == -EPROBE_DEFER) + return PTR_ERR(gpd); + break; + } + gpiod_direction_output(gpd, gpiod_is_active_low(gpd) | level); + host->card_reset_gpios[i] = gpd; + } + + gpd = devm_gpiod_get_index(host->parent, "card-reset", ARRAY_SIZE(host->card_reset_gpios)); + if (!IS_ERR(gpd)) { + dev_warn(host->parent, "More reset gpios than we can handle"); + gpiod_put(gpd); + } + } + + clk = of_clk_get_by_name(np, "card_ext_clock"); + if (IS_ERR(clk)) { + if (PTR_ERR(clk) == -EPROBE_DEFER) + return PTR_ERR(clk); + clk = NULL; + } + host->card_clk = clk; + + return 0; +} + /** * mmc_alloc_host - initialise the per-host structure. * @extra: sizeof private data structure @@ -536,6 +600,10 @@ { int err; + err = mmc_of_parse_child(host); + if (err) + return err; + WARN_ON((host->caps & MMC_CAP_SDIO_IRQ) && !host->ops->enable_sdio_irq); diff -Nur linux-3.15-rc4.orig/drivers/mmc/core/sdio_irq.c linux-3.15-rc4/drivers/mmc/core/sdio_irq.c --- linux-3.15-rc4.orig/drivers/mmc/core/sdio_irq.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/core/sdio_irq.c 2014-05-07 17:05:51.092166973 +0200 @@ -90,6 +90,15 @@ return ret; } +void sdio_run_irqs(struct mmc_host *host) +{ + mmc_claim_host(host); + host->sdio_irq_pending = true; + process_sdio_pending_irqs(host); + mmc_release_host(host); +} +EXPORT_SYMBOL_GPL(sdio_run_irqs); + static int sdio_irq_thread(void *_host) { struct mmc_host *host = _host; @@ -189,14 +198,20 @@ WARN_ON(!host->claimed); if (!host->sdio_irqs++) { - atomic_set(&host->sdio_irq_thread_abort, 0); - host->sdio_irq_thread = - kthread_run(sdio_irq_thread, host, "ksdioirqd/%s", - mmc_hostname(host)); - if (IS_ERR(host->sdio_irq_thread)) { - int err = PTR_ERR(host->sdio_irq_thread); - host->sdio_irqs--; - return err; + if (!(host->caps2 & MMC_CAP2_SDIO_NOTHREAD)) { + atomic_set(&host->sdio_irq_thread_abort, 0); + host->sdio_irq_thread = + kthread_run(sdio_irq_thread, host, + "ksdioirqd/%s", mmc_hostname(host)); + if (IS_ERR(host->sdio_irq_thread)) { + int err = PTR_ERR(host->sdio_irq_thread); + host->sdio_irqs--; + return err; + } + } else { + mmc_host_clk_hold(host); + host->ops->enable_sdio_irq(host, 1); + mmc_host_clk_release(host); } } @@ -211,8 +226,14 @@ BUG_ON(host->sdio_irqs < 1); if (!--host->sdio_irqs) { - atomic_set(&host->sdio_irq_thread_abort, 1); - kthread_stop(host->sdio_irq_thread); + if (!(host->caps2 & MMC_CAP2_SDIO_NOTHREAD)) { + atomic_set(&host->sdio_irq_thread_abort, 1); + kthread_stop(host->sdio_irq_thread); + } else { + mmc_host_clk_hold(host); + host->ops->enable_sdio_irq(host, 0); + mmc_host_clk_release(host); + } } return 0; diff -Nur linux-3.15-rc4.orig/drivers/mmc/host/dw_mmc.c linux-3.15-rc4/drivers/mmc/host/dw_mmc.c --- linux-3.15-rc4.orig/drivers/mmc/host/dw_mmc.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/host/dw_mmc.c 2014-05-07 17:05:51.092166973 +0200 @@ -2140,6 +2140,8 @@ if (!mmc) return -ENOMEM; + mmc_of_parse(mmc); + slot = mmc_priv(mmc); slot->id = id; slot->mmc = mmc; diff -Nur linux-3.15-rc4.orig/drivers/mmc/host/Kconfig linux-3.15-rc4/drivers/mmc/host/Kconfig --- linux-3.15-rc4.orig/drivers/mmc/host/Kconfig 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/host/Kconfig 2014-05-07 17:05:51.092166973 +0200 @@ -25,8 +25,7 @@ If unsure, say N. 
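
To make the MMC_CAP2_SDIO_NOTHREAD path added to sdio_irq.c above a little more concrete: instead of relying on the ksdioirqd thread, a host driver can dispatch card interrupts itself through the newly exported sdio_run_irqs() helper. The sketch below is illustrative only; the handler name and the way the mmc_host pointer is passed are invented for the example.

#include <linux/interrupt.h>
#include <linux/mmc/host.h>

/* Threaded interrupt handler of a hypothetical host driver: run any
 * pending SDIO function handlers in process context. */
static irqreturn_t example_host_sdio_thread(int irq, void *data)
{
        struct mmc_host *mmc = data;

        sdio_run_irqs(mmc);
        return IRQ_HANDLED;
}

The driver would additionally set mmc->caps2 |= MMC_CAP2_SDIO_NOTHREAD in its probe routine, so that the SDIO IRQ claim/release paths shown above take the new branch and merely toggle the controller's SDIO interrupt enable rather than starting a kernel thread.
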
config MMC_SDHCI - tristate "Secure Digital Host Controller Interface support" - depends on HAS_DMA + tristate help This selects the generic Secure Digital Host Controller Interface. It is used by manufacturers such as Texas Instruments(R), Ricoh(R) @@ -59,7 +58,8 @@ config MMC_SDHCI_PCI tristate "SDHCI support on PCI bus" - depends on MMC_SDHCI && PCI + depends on PCI && HAS_DMA + select MMC_SDHCI help This selects the PCI Secure Digital Host Controller Interface. Most controllers found today are PCI devices. @@ -83,7 +83,8 @@ config MMC_SDHCI_ACPI tristate "SDHCI support for ACPI enumerated SDHCI controllers" - depends on MMC_SDHCI && ACPI + depends on ACPI && HAS_DMA + select MMC_SDHCI help This selects support for ACPI enumerated SDHCI controllers, identified by ACPI Compatibility ID PNP0D40 or specific @@ -94,8 +95,8 @@ If unsure, say N. config MMC_SDHCI_PLTFM - tristate "SDHCI platform and OF driver helper" - depends on MMC_SDHCI + tristate + select MMC_SDHCI help This selects the common helper functions support for Secure Digital Host Controller Interface based platform and OF drivers. @@ -106,8 +107,8 @@ config MMC_SDHCI_OF_ARASAN tristate "SDHCI OF support for the Arasan SDHCI controllers" - depends on MMC_SDHCI_PLTFM - depends on OF + depends on OF && HAS_DMA + select MMC_SDHCI_PLTFM help This selects the Arasan Secure Digital Host Controller Interface (SDHCI). This hardware is found e.g. in Xilinx' Zynq SoC. @@ -118,9 +119,9 @@ config MMC_SDHCI_OF_ESDHC tristate "SDHCI OF support for the Freescale eSDHC controller" - depends on MMC_SDHCI_PLTFM - depends on PPC_OF + depends on PPC_OF && HAS_DMA select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER + select MMC_SDHCI_PLTFM help This selects the Freescale eSDHC controller support. @@ -130,9 +131,9 @@ config MMC_SDHCI_OF_HLWD tristate "SDHCI OF support for the Nintendo Wii SDHCI controllers" - depends on MMC_SDHCI_PLTFM - depends on PPC_OF + depends on PPC_OF && HAS_DMA select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER + select MMC_SDHCI_PLTFM help This selects the Secure Digital Host Controller Interface (SDHCI) found in the "Hollywood" chipset of the Nintendo Wii video game @@ -144,8 +145,8 @@ config MMC_SDHCI_CNS3XXX tristate "SDHCI support on the Cavium Networks CNS3xxx SoC" - depends on ARCH_CNS3XXX - depends on MMC_SDHCI_PLTFM + depends on ARCH_CNS3XXX && HAS_DMA + select MMC_SDHCI_PLTFM help This selects the SDHCI support for CNS3xxx System-on-Chip devices. @@ -155,9 +156,9 @@ config MMC_SDHCI_ESDHC_IMX tristate "SDHCI support for the Freescale eSDHC/uSDHC i.MX controller" - depends on ARCH_MXC - depends on MMC_SDHCI_PLTFM + depends on ARCH_MXC && HAS_DMA select MMC_SDHCI_IO_ACCESSORS + select MMC_SDHCI_PLTFM help This selects the Freescale eSDHC/uSDHC controller support found on i.MX25, i.MX35 i.MX5x and i.MX6x. @@ -168,9 +169,9 @@ config MMC_SDHCI_DOVE tristate "SDHCI support on Marvell's Dove SoC" - depends on ARCH_DOVE - depends on MMC_SDHCI_PLTFM + depends on ARCH_DOVE && HAS_DMA select MMC_SDHCI_IO_ACCESSORS + select MMC_SDHCI_PLTFM help This selects the Secure Digital Host Controller Interface in Marvell's Dove SoC. @@ -181,9 +182,9 @@ config MMC_SDHCI_TEGRA tristate "SDHCI platform support for the Tegra SD/MMC Controller" - depends on ARCH_TEGRA - depends on MMC_SDHCI_PLTFM + depends on ARCH_TEGRA && HAS_DMA select MMC_SDHCI_IO_ACCESSORS + select MMC_SDHCI_PLTFM help This selects the Tegra SD/MMC controller. If you have a Tegra platform with SD or MMC devices, say Y or M here. 
@@ -192,7 +193,8 @@ config MMC_SDHCI_S3C tristate "SDHCI support on Samsung S3C SoC" - depends on MMC_SDHCI && PLAT_SAMSUNG + depends on PLAT_SAMSUNG && HAS_DMA + select MMC_SDHCI help This selects the Secure Digital Host Controller Interface (SDHCI) often referrered to as the HSMMC block in some of the Samsung S3C @@ -204,8 +206,8 @@ config MMC_SDHCI_SIRF tristate "SDHCI support on CSR SiRFprimaII and SiRFmarco SoCs" - depends on ARCH_SIRF - depends on MMC_SDHCI_PLTFM + depends on ARCH_SIRF && HAS_DMA + select MMC_SDHCI_PLTFM help This selects the SDHCI support for SiRF System-on-Chip devices. @@ -215,8 +217,7 @@ config MMC_SDHCI_PXAV3 tristate "Marvell MMP2 SD Host Controller support (PXAV3)" - depends on CLKDEV_LOOKUP - select MMC_SDHCI + depends on CLKDEV_LOOKUP && HAS_DMA select MMC_SDHCI_PLTFM default CPU_MMP2 help @@ -228,8 +229,7 @@ config MMC_SDHCI_PXAV2 tristate "Marvell PXA9XX SD Host Controller support (PXAV2)" - depends on CLKDEV_LOOKUP - select MMC_SDHCI + depends on CLKDEV_LOOKUP && HAS_DMA select MMC_SDHCI_PLTFM default CPU_PXA910 help @@ -241,7 +241,8 @@ config MMC_SDHCI_SPEAR tristate "SDHCI support on ST SPEAr platform" - depends on MMC_SDHCI && PLAT_SPEAR + depends on PLAT_SPEAR && HAS_DMA + select MMC_SDHCI help This selects the Secure Digital Host Controller Interface (SDHCI) often referrered to as the HSMMC block in some of the ST SPEAR range @@ -263,7 +264,7 @@ config MMC_SDHCI_BCM_KONA tristate "SDHCI support on Broadcom KONA platform" - depends on ARCH_BCM_MOBILE + depends on ARCH_BCM_MOBILE && HAS_DMA select MMC_SDHCI_PLTFM help This selects the Broadcom Kona Secure Digital Host Controller @@ -274,9 +275,9 @@ config MMC_SDHCI_BCM2835 tristate "SDHCI platform support for the BCM2835 SD/MMC Controller" - depends on ARCH_BCM2835 - depends on MMC_SDHCI_PLTFM + depends on ARCH_BCM2835 && HAS_DMA select MMC_SDHCI_IO_ACCESSORS + select MMC_SDHCI_PLTFM help This selects the BCM2835 SD/MMC controller. If you have a BCM2835 platform with SD or MMC devices, say Y or M here. 
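
The sdhci-acpi.c, sdhci-bcm2835.c and sdhci-bcm-kona.c hunks that follow all apply the same pattern: the core now invokes host->ops->set_clock, ->set_bus_width, ->reset and ->set_uhs_signaling unconditionally, so each glue driver points these at the newly exported library implementations unless it provides its own. As a sketch only, with invented "example" identifiers, a minimal platform glue driver ends up with an ops table like this:

#include "sdhci-pltfm.h"

/* Minimal ops: defer entirely to the exported SDHCI library helpers. */
static const struct sdhci_ops example_sdhci_ops = {
        .set_clock         = sdhci_set_clock,
        .set_bus_width     = sdhci_set_bus_width,
        .reset             = sdhci_reset,
        .set_uhs_signaling = sdhci_set_uhs_signaling,
};

static const struct sdhci_pltfm_data example_sdhci_pdata = {
        .ops = &example_sdhci_ops,
};

Drivers with controller-specific requirements (for instance the clock handling and register accessors in sdhci-bcm2835.c) keep their own callbacks and fall back to the library helpers only where the generic behaviour suffices.
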
diff -Nur linux-3.15-rc4.orig/drivers/mmc/host/sdhci-acpi.c linux-3.15-rc4/drivers/mmc/host/sdhci-acpi.c --- linux-3.15-rc4.orig/drivers/mmc/host/sdhci-acpi.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/host/sdhci-acpi.c 2014-05-07 17:05:51.092166973 +0200 @@ -102,11 +102,19 @@ } static const struct sdhci_ops sdhci_acpi_ops_dflt = { + .set_clock = sdhci_set_clock, .enable_dma = sdhci_acpi_enable_dma, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, }; static const struct sdhci_ops sdhci_acpi_ops_int = { + .set_clock = sdhci_set_clock, .enable_dma = sdhci_acpi_enable_dma, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, .hw_reset = sdhci_acpi_int_hw_reset, }; diff -Nur linux-3.15-rc4.orig/drivers/mmc/host/sdhci-bcm2835.c linux-3.15-rc4/drivers/mmc/host/sdhci-bcm2835.c --- linux-3.15-rc4.orig/drivers/mmc/host/sdhci-bcm2835.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/host/sdhci-bcm2835.c 2014-05-07 17:05:51.092166973 +0200 @@ -131,8 +131,12 @@ .read_l = bcm2835_sdhci_readl, .read_w = bcm2835_sdhci_readw, .read_b = bcm2835_sdhci_readb, + .set_clock = sdhci_set_clock, .get_max_clock = sdhci_pltfm_clk_get_max_clock, .get_min_clock = bcm2835_sdhci_get_min_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, }; static const struct sdhci_pltfm_data bcm2835_sdhci_pdata = { diff -Nur linux-3.15-rc4.orig/drivers/mmc/host/sdhci-bcm-kona.c linux-3.15-rc4/drivers/mmc/host/sdhci-bcm-kona.c --- linux-3.15-rc4.orig/drivers/mmc/host/sdhci-bcm-kona.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/host/sdhci-bcm-kona.c 2014-05-07 17:05:51.092166973 +0200 @@ -206,9 +206,13 @@ } static struct sdhci_ops sdhci_bcm_kona_ops = { + .set_clock = sdhci_set_clock, .get_max_clock = sdhci_bcm_kona_get_max_clk, .get_timeout_clock = sdhci_bcm_kona_get_timeout_clock, .platform_send_init_74_clocks = sdhci_bcm_kona_init_74_clocks, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, .card_event = sdhci_bcm_kona_card_event, }; diff -Nur linux-3.15-rc4.orig/drivers/mmc/host/sdhci.c linux-3.15-rc4/drivers/mmc/host/sdhci.c --- linux-3.15-rc4.orig/drivers/mmc/host/sdhci.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/host/sdhci.c 2014-05-07 17:05:51.096166998 +0200 @@ -44,6 +44,8 @@ #define MAX_TUNING_LOOP 40 +#define ADMA_SIZE ((128 * 2 + 1) * 4) + static unsigned int debug_quirks = 0; static unsigned int debug_quirks2; @@ -131,43 +133,26 @@ * * \*****************************************************************************/ -static void sdhci_clear_set_irqs(struct sdhci_host *host, u32 clear, u32 set) -{ - u32 ier; - - ier = sdhci_readl(host, SDHCI_INT_ENABLE); - ier &= ~clear; - ier |= set; - sdhci_writel(host, ier, SDHCI_INT_ENABLE); - sdhci_writel(host, ier, SDHCI_SIGNAL_ENABLE); -} - -static void sdhci_unmask_irqs(struct sdhci_host *host, u32 irqs) -{ - sdhci_clear_set_irqs(host, 0, irqs); -} - -static void sdhci_mask_irqs(struct sdhci_host *host, u32 irqs) -{ - sdhci_clear_set_irqs(host, irqs, 0); -} - static void sdhci_set_card_detection(struct sdhci_host *host, bool enable) { - u32 present, irqs; + u32 present; if ((host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) || (host->mmc->caps & MMC_CAP_NONREMOVABLE)) return; - present = sdhci_readl(host, SDHCI_PRESENT_STATE) & - SDHCI_CARD_PRESENT; 
- irqs = present ? SDHCI_INT_CARD_REMOVE : SDHCI_INT_CARD_INSERT; + if (enable) { + present = sdhci_readl(host, SDHCI_PRESENT_STATE) & + SDHCI_CARD_PRESENT; - if (enable) - sdhci_unmask_irqs(host, irqs); - else - sdhci_mask_irqs(host, irqs); + host->ier |= present ? SDHCI_INT_CARD_REMOVE : + SDHCI_INT_CARD_INSERT; + } else { + host->ier &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); + } + + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); } static void sdhci_enable_card_detection(struct sdhci_host *host) @@ -180,22 +165,9 @@ sdhci_set_card_detection(host, false); } -static void sdhci_reset(struct sdhci_host *host, u8 mask) +void sdhci_reset(struct sdhci_host *host, u8 mask) { unsigned long timeout; - u32 uninitialized_var(ier); - - if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { - if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & - SDHCI_CARD_PRESENT)) - return; - } - - if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) - ier = sdhci_readl(host, SDHCI_INT_ENABLE); - - if (host->ops->platform_reset_enter) - host->ops->platform_reset_enter(host, mask); sdhci_writeb(host, mask, SDHCI_SOFTWARE_RESET); @@ -220,16 +192,27 @@ timeout--; mdelay(1); } +} +EXPORT_SYMBOL_GPL(sdhci_reset); + +static void sdhci_do_reset(struct sdhci_host *host, u8 mask) +{ + if (host->quirks & SDHCI_QUIRK_NO_CARD_NO_RESET) { + if (!(sdhci_readl(host, SDHCI_PRESENT_STATE) & + SDHCI_CARD_PRESENT)) + return; + } - if (host->ops->platform_reset_exit) - host->ops->platform_reset_exit(host, mask); + host->ops->reset(host, mask); - if (host->quirks & SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) - sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, ier); + if (mask & SDHCI_RESET_ALL) { + if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { + if (host->ops->enable_dma) + host->ops->enable_dma(host); + } - if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { - if ((host->ops->enable_dma) && (mask & SDHCI_RESET_ALL)) - host->ops->enable_dma(host); + /* Resetting the controller clears many */ + host->preset_enabled = false; } } @@ -238,15 +221,18 @@ static void sdhci_init(struct sdhci_host *host, int soft) { if (soft) - sdhci_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA); + sdhci_do_reset(host, SDHCI_RESET_CMD|SDHCI_RESET_DATA); else - sdhci_reset(host, SDHCI_RESET_ALL); + sdhci_do_reset(host, SDHCI_RESET_ALL); + + host->ier = SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | + SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | + SDHCI_INT_INDEX | SDHCI_INT_END_BIT | SDHCI_INT_CRC | + SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END | + SDHCI_INT_RESPONSE; - sdhci_clear_set_irqs(host, SDHCI_INT_ALL_MASK, - SDHCI_INT_BUS_POWER | SDHCI_INT_DATA_END_BIT | - SDHCI_INT_DATA_CRC | SDHCI_INT_DATA_TIMEOUT | SDHCI_INT_INDEX | - SDHCI_INT_END_BIT | SDHCI_INT_CRC | SDHCI_INT_TIMEOUT | - SDHCI_INT_DATA_END | SDHCI_INT_RESPONSE); + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); if (soft) { /* force clock reconfiguration */ @@ -502,11 +488,6 @@ else direction = DMA_TO_DEVICE; - /* - * The ADMA descriptor table is mapped further down as we - * need to fill it with data first. - */ - host->align_addr = dma_map_single(mmc_dev(host->mmc), host->align_buffer, 128 * 4, direction); if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr)) @@ -567,7 +548,7 @@ * If this triggers then we have a calculation bug * somewhere. 
:/ */ - WARN_ON((desc - host->adma_desc) > (128 * 2 + 1) * 4); + WARN_ON((desc - host->adma_desc) > ADMA_SIZE); } if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { @@ -595,17 +576,8 @@ host->align_addr, 128 * 4, direction); } - host->adma_addr = dma_map_single(mmc_dev(host->mmc), - host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE); - if (dma_mapping_error(mmc_dev(host->mmc), host->adma_addr)) - goto unmap_entries; - BUG_ON(host->adma_addr & 0x3); - return 0; -unmap_entries: - dma_unmap_sg(mmc_dev(host->mmc), data->sg, - data->sg_len, direction); unmap_align: dma_unmap_single(mmc_dev(host->mmc), host->align_addr, 128 * 4, direction); @@ -623,19 +595,25 @@ u8 *align; char *buffer; unsigned long flags; + bool has_unaligned; if (data->flags & MMC_DATA_READ) direction = DMA_FROM_DEVICE; else direction = DMA_TO_DEVICE; - dma_unmap_single(mmc_dev(host->mmc), host->adma_addr, - (128 * 2 + 1) * 4, DMA_TO_DEVICE); - dma_unmap_single(mmc_dev(host->mmc), host->align_addr, 128 * 4, direction); - if (data->flags & MMC_DATA_READ) { + /* Do a quick scan of the SG list for any unaligned mappings */ + has_unaligned = false; + for_each_sg(data->sg, sg, host->sg_count, i) + if (sg_dma_address(sg) & 3) { + has_unaligned = true; + break; + } + + if (has_unaligned && data->flags & MMC_DATA_READ) { dma_sync_sg_for_cpu(mmc_dev(host->mmc), data->sg, data->sg_len, direction); @@ -721,9 +699,12 @@ u32 dma_irqs = SDHCI_INT_DMA_END | SDHCI_INT_ADMA_ERROR; if (host->flags & SDHCI_REQ_USE_DMA) - sdhci_clear_set_irqs(host, pio_irqs, dma_irqs); + host->ier = (host->ier & ~pio_irqs) | dma_irqs; else - sdhci_clear_set_irqs(host, dma_irqs, pio_irqs); + host->ier = (host->ier & ~dma_irqs) | pio_irqs; + + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); } static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) @@ -976,8 +957,8 @@ * upon error conditions. 
*/ if (data->error) { - sdhci_reset(host, SDHCI_RESET_CMD); - sdhci_reset(host, SDHCI_RESET_DATA); + sdhci_do_reset(host, SDHCI_RESET_CMD); + sdhci_do_reset(host, SDHCI_RESET_DATA); } sdhci_send_command(host, data->stop); @@ -1107,24 +1088,23 @@ static u16 sdhci_get_preset_value(struct sdhci_host *host) { - u16 ctrl, preset = 0; + u16 preset = 0; - ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); - - switch (ctrl & SDHCI_CTRL_UHS_MASK) { - case SDHCI_CTRL_UHS_SDR12: + switch (host->timing) { + case MMC_TIMING_UHS_SDR12: preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR12); break; - case SDHCI_CTRL_UHS_SDR25: + case MMC_TIMING_UHS_SDR25: preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR25); break; - case SDHCI_CTRL_UHS_SDR50: + case MMC_TIMING_UHS_SDR50: preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR50); break; - case SDHCI_CTRL_UHS_SDR104: + case MMC_TIMING_UHS_SDR104: + case MMC_TIMING_MMC_HS200: preset = sdhci_readw(host, SDHCI_PRESET_FOR_SDR104); break; - case SDHCI_CTRL_UHS_DDR50: + case MMC_TIMING_UHS_DDR50: preset = sdhci_readw(host, SDHCI_PRESET_FOR_DDR50); break; default: @@ -1136,32 +1116,22 @@ return preset; } -static void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) +void sdhci_set_clock(struct sdhci_host *host, unsigned int clock) { int div = 0; /* Initialized for compiler warning */ int real_div = div, clk_mul = 1; u16 clk = 0; unsigned long timeout; - if (clock && clock == host->clock) - return; - host->mmc->actual_clock = 0; - if (host->ops->set_clock) { - host->ops->set_clock(host, clock); - if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) - return; - } - sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); if (clock == 0) - goto out; + return; if (host->version >= SDHCI_SPEC_300) { - if (sdhci_readw(host, SDHCI_HOST_CONTROL2) & - SDHCI_CTRL_PRESET_VAL_ENABLE) { + if (host->preset_enabled) { u16 pre_val; clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); @@ -1247,26 +1217,16 @@ clk |= SDHCI_CLOCK_CARD_EN; sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); - -out: - host->clock = clock; -} - -static inline void sdhci_update_clock(struct sdhci_host *host) -{ - unsigned int clock; - - clock = host->clock; - host->clock = 0; - sdhci_set_clock(host, clock); } +EXPORT_SYMBOL_GPL(sdhci_set_clock); -static int sdhci_set_power(struct sdhci_host *host, unsigned short power) +static void sdhci_set_power(struct sdhci_host *host, unsigned char mode, + unsigned short vdd) { u8 pwr = 0; - if (power != (unsigned short)-1) { - switch (1 << power) { + if (mode != MMC_POWER_OFF) { + switch (1 << vdd) { case MMC_VDD_165_195: pwr = SDHCI_POWER_180; break; @@ -1284,7 +1244,7 @@ } if (host->pwr == pwr) - return -1; + return; host->pwr = pwr; @@ -1292,38 +1252,43 @@ sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) sdhci_runtime_pm_bus_off(host); - return 0; - } - - /* - * Spec says that we should clear the power reg before setting - * a new value. Some controllers don't seem to like this though. - */ - if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) - sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); + vdd = 0; + } else { + /* + * Spec says that we should clear the power reg before setting + * a new value. Some controllers don't seem to like this though. + */ + if (!(host->quirks & SDHCI_QUIRK_SINGLE_POWER_WRITE)) + sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); - /* - * At least the Marvell CaFe chip gets confused if we set the voltage - * and set turn on power at the same time, so set the voltage first. 
- */ - if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) - sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); + /* + * At least the Marvell CaFe chip gets confused if we set the + * voltage and set turn on power at the same time, so set the + * voltage first. + */ + if (host->quirks & SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER) + sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); - pwr |= SDHCI_POWER_ON; + pwr |= SDHCI_POWER_ON; - sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); + sdhci_writeb(host, pwr, SDHCI_POWER_CONTROL); - if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) - sdhci_runtime_pm_bus_on(host); + if (host->quirks2 & SDHCI_QUIRK2_CARD_ON_NEEDS_BUS_ON) + sdhci_runtime_pm_bus_on(host); - /* - * Some controllers need an extra 10ms delay of 10ms before they - * can apply clock after applying power - */ - if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) - mdelay(10); + /* + * Some controllers need an extra 10ms delay of 10ms before + * they can apply clock after applying power + */ + if (host->quirks & SDHCI_QUIRK_DELAY_AFTER_POWER) + mdelay(10); + } - return power; + if (host->vmmc) { + spin_unlock_irq(&host->lock); + mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd); + spin_lock_irq(&host->lock); + } } /*****************************************************************************\ @@ -1427,10 +1392,52 @@ spin_unlock_irqrestore(&host->lock, flags); } +void sdhci_set_bus_width(struct sdhci_host *host, int width) +{ + u8 ctrl; + + ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); + if (width == MMC_BUS_WIDTH_8) { + ctrl &= ~SDHCI_CTRL_4BITBUS; + if (host->version >= SDHCI_SPEC_300) + ctrl |= SDHCI_CTRL_8BITBUS; + } else { + if (host->version >= SDHCI_SPEC_300) + ctrl &= ~SDHCI_CTRL_8BITBUS; + if (width == MMC_BUS_WIDTH_4) + ctrl |= SDHCI_CTRL_4BITBUS; + else + ctrl &= ~SDHCI_CTRL_4BITBUS; + } + sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); +} +EXPORT_SYMBOL_GPL(sdhci_set_bus_width); + +void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing) +{ + u16 ctrl_2; + + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); + /* Select Bus Speed Mode for host */ + ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; + if ((timing == MMC_TIMING_MMC_HS200) || + (timing == MMC_TIMING_UHS_SDR104)) + ctrl_2 |= SDHCI_CTRL_UHS_SDR104; + else if (timing == MMC_TIMING_UHS_SDR12) + ctrl_2 |= SDHCI_CTRL_UHS_SDR12; + else if (timing == MMC_TIMING_UHS_SDR25) + ctrl_2 |= SDHCI_CTRL_UHS_SDR25; + else if (timing == MMC_TIMING_UHS_SDR50) + ctrl_2 |= SDHCI_CTRL_UHS_SDR50; + else if (timing == MMC_TIMING_UHS_DDR50) + ctrl_2 |= SDHCI_CTRL_UHS_DDR50; + sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); +} +EXPORT_SYMBOL_GPL(sdhci_set_uhs_signaling); + static void sdhci_do_set_ios(struct sdhci_host *host, struct mmc_ios *ios) { unsigned long flags; - int vdd_bit = -1; u8 ctrl; spin_lock_irqsave(&host->lock, flags); @@ -1456,45 +1463,17 @@ !(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN)) sdhci_enable_preset_value(host, false); - sdhci_set_clock(host, ios->clock); - - if (ios->power_mode == MMC_POWER_OFF) - vdd_bit = sdhci_set_power(host, -1); - else - vdd_bit = sdhci_set_power(host, ios->vdd); - - if (host->vmmc && vdd_bit != -1) { - spin_unlock_irqrestore(&host->lock, flags); - mmc_regulator_set_ocr(host->mmc, host->vmmc, vdd_bit); - spin_lock_irqsave(&host->lock, flags); + if (!ios->clock || ios->clock != host->clock) { + host->ops->set_clock(host, ios->clock); + host->clock = ios->clock; } + sdhci_set_power(host, ios->power_mode, ios->vdd); + if (host->ops->platform_send_init_74_clocks) host->ops->platform_send_init_74_clocks(host, 
ios->power_mode); - /* - * If your platform has 8-bit width support but is not a v3 controller, - * or if it requires special setup code, you should implement that in - * platform_bus_width(). - */ - if (host->ops->platform_bus_width) { - host->ops->platform_bus_width(host, ios->bus_width); - } else { - ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); - if (ios->bus_width == MMC_BUS_WIDTH_8) { - ctrl &= ~SDHCI_CTRL_4BITBUS; - if (host->version >= SDHCI_SPEC_300) - ctrl |= SDHCI_CTRL_8BITBUS; - } else { - if (host->version >= SDHCI_SPEC_300) - ctrl &= ~SDHCI_CTRL_8BITBUS; - if (ios->bus_width == MMC_BUS_WIDTH_4) - ctrl |= SDHCI_CTRL_4BITBUS; - else - ctrl &= ~SDHCI_CTRL_4BITBUS; - } - sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); - } + host->ops->set_bus_width(host, ios->bus_width); ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); @@ -1516,13 +1495,13 @@ (ios->timing == MMC_TIMING_UHS_SDR25)) ctrl |= SDHCI_CTRL_HISPD; - ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); - if (!(ctrl_2 & SDHCI_CTRL_PRESET_VAL_ENABLE)) { + if (!host->preset_enabled) { sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); /* * We only need to set Driver Strength if the * preset value enable is not set. */ + ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); ctrl_2 &= ~SDHCI_CTRL_DRV_TYPE_MASK; if (ios->drv_type == MMC_SET_DRIVER_TYPE_A) ctrl_2 |= SDHCI_CTRL_DRV_TYPE_A; @@ -1546,34 +1525,11 @@ sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); /* Re-enable SD Clock */ - sdhci_update_clock(host); + host->ops->set_clock(host, host->clock); } - - /* Reset SD Clock Enable */ - clk = sdhci_readw(host, SDHCI_CLOCK_CONTROL); - clk &= ~SDHCI_CLOCK_CARD_EN; - sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); - - if (host->ops->set_uhs_signaling) - host->ops->set_uhs_signaling(host, ios->timing); - else { - ctrl_2 = sdhci_readw(host, SDHCI_HOST_CONTROL2); - /* Select Bus Speed Mode for host */ - ctrl_2 &= ~SDHCI_CTRL_UHS_MASK; - if ((ios->timing == MMC_TIMING_MMC_HS200) || - (ios->timing == MMC_TIMING_UHS_SDR104)) - ctrl_2 |= SDHCI_CTRL_UHS_SDR104; - else if (ios->timing == MMC_TIMING_UHS_SDR12) - ctrl_2 |= SDHCI_CTRL_UHS_SDR12; - else if (ios->timing == MMC_TIMING_UHS_SDR25) - ctrl_2 |= SDHCI_CTRL_UHS_SDR25; - else if (ios->timing == MMC_TIMING_UHS_SDR50) - ctrl_2 |= SDHCI_CTRL_UHS_SDR50; - else if (ios->timing == MMC_TIMING_UHS_DDR50) - ctrl_2 |= SDHCI_CTRL_UHS_DDR50; - sdhci_writew(host, ctrl_2, SDHCI_HOST_CONTROL2); - } + host->ops->set_uhs_signaling(host, ios->timing); + host->timing = ios->timing; if (!(host->quirks2 & SDHCI_QUIRK2_PRESET_VALUE_BROKEN) && ((ios->timing == MMC_TIMING_UHS_SDR12) || @@ -1588,9 +1544,6 @@ ios->drv_type = (preset & SDHCI_PRESET_DRV_MASK) >> SDHCI_PRESET_DRV_SHIFT; } - - /* Re-enable SD Clock */ - sdhci_update_clock(host); } else sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); @@ -1600,7 +1553,7 @@ * it on each ios seems to solve the problem. 
*/ if(host->quirks & SDHCI_QUIRK_RESET_CMD_DATA_ON_IOS) - sdhci_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); + sdhci_do_reset(host, SDHCI_RESET_CMD | SDHCI_RESET_DATA); mmiowb(); spin_unlock_irqrestore(&host->lock, flags); @@ -1709,24 +1662,16 @@ static void sdhci_enable_sdio_irq_nolock(struct sdhci_host *host, int enable) { - if (host->flags & SDHCI_DEVICE_DEAD) - goto out; - - if (enable) - host->flags |= SDHCI_SDIO_IRQ_ENABLED; - else - host->flags &= ~SDHCI_SDIO_IRQ_ENABLED; - - /* SDIO IRQ will be enabled as appropriate in runtime resume */ - if (host->runtime_suspended) - goto out; + if (!(host->flags & SDHCI_DEVICE_DEAD)) { + if (enable) + host->ier |= SDHCI_INT_CARD_INT; + else + host->ier &= ~SDHCI_INT_CARD_INT; - if (enable) - sdhci_unmask_irqs(host, SDHCI_INT_CARD_INT); - else - sdhci_mask_irqs(host, SDHCI_INT_CARD_INT); -out: - mmiowb(); + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); + mmiowb(); + } } static void sdhci_enable_sdio_irq(struct mmc_host *mmc, int enable) @@ -1734,9 +1679,18 @@ struct sdhci_host *host = mmc_priv(mmc); unsigned long flags; + sdhci_runtime_pm_get(host); + spin_lock_irqsave(&host->lock, flags); + if (enable) + host->flags |= SDHCI_SDIO_IRQ_ENABLED; + else + host->flags &= ~SDHCI_SDIO_IRQ_ENABLED; + sdhci_enable_sdio_irq_nolock(host, enable); spin_unlock_irqrestore(&host->lock, flags); + + sdhci_runtime_pm_put(host); } static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, @@ -1798,9 +1752,6 @@ ctrl |= SDHCI_CTRL_VDD_180; sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); - /* Wait for 5ms */ - usleep_range(5000, 5500); - /* 1.8V regulator output should be stable within 5 ms */ ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); if (ctrl & SDHCI_CTRL_VDD_180) @@ -1855,22 +1806,16 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) { - struct sdhci_host *host; + struct sdhci_host *host = mmc_priv(mmc); u16 ctrl; - u32 ier; int tuning_loop_counter = MAX_TUNING_LOOP; unsigned long timeout; int err = 0; - bool requires_tuning_nonuhs = false; unsigned long flags; - host = mmc_priv(mmc); - sdhci_runtime_pm_get(host); spin_lock_irqsave(&host->lock, flags); - ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); - /* * The Host Controller needs tuning only in case of SDR104 mode * and for SDR50 mode when Use Tuning for SDR50 is set in the @@ -1878,15 +1823,18 @@ * If the Host Controller supports the HS200 mode then the * tuning function has to be executed. */ - if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR50) && - (host->flags & SDHCI_SDR50_NEEDS_TUNING || - host->flags & SDHCI_SDR104_NEEDS_TUNING)) - requires_tuning_nonuhs = true; - - if (((ctrl & SDHCI_CTRL_UHS_MASK) == SDHCI_CTRL_UHS_SDR104) || - requires_tuning_nonuhs) - ctrl |= SDHCI_CTRL_EXEC_TUNING; - else { + switch (host->timing) { + case MMC_TIMING_MMC_HS200: + case MMC_TIMING_UHS_SDR104: + break; + + case MMC_TIMING_UHS_SDR50: + if (host->flags & SDHCI_SDR50_NEEDS_TUNING || + host->flags & SDHCI_SDR104_NEEDS_TUNING) + break; + /* FALLTHROUGH */ + + default: spin_unlock_irqrestore(&host->lock, flags); sdhci_runtime_pm_put(host); return 0; @@ -1899,6 +1847,8 @@ return err; } + ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + ctrl |= SDHCI_CTRL_EXEC_TUNING; sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); /* @@ -1911,8 +1861,8 @@ * to make sure we don't hit a controller bug, we _only_ * enable Buffer Read Ready interrupt here. 
*/ - ier = sdhci_readl(host, SDHCI_INT_ENABLE); - sdhci_clear_set_irqs(host, ier, SDHCI_INT_DATA_AVAIL); + sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_INT_ENABLE); + sdhci_writel(host, SDHCI_INT_DATA_AVAIL, SDHCI_SIGNAL_ENABLE); /* * Issue CMD19 repeatedly till Execute Tuning is set to 0 or the number @@ -2044,7 +1994,8 @@ if (err && (host->flags & SDHCI_USING_RETUNING_TIMER)) err = 0; - sdhci_clear_set_irqs(host, SDHCI_INT_DATA_AVAIL, ier); + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); spin_unlock_irqrestore(&host->lock, flags); sdhci_runtime_pm_put(host); @@ -2054,26 +2005,30 @@ static void sdhci_enable_preset_value(struct sdhci_host *host, bool enable) { - u16 ctrl; - /* Host Controller v3.00 defines preset value registers */ if (host->version < SDHCI_SPEC_300) return; - ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); - /* * We only enable or disable Preset Value if they are not already * enabled or disabled respectively. Otherwise, we bail out. */ - if (enable && !(ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) { - ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; - sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); - host->flags |= SDHCI_PV_ENABLED; - } else if (!enable && (ctrl & SDHCI_CTRL_PRESET_VAL_ENABLE)) { - ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; + if (host->preset_enabled != enable) { + u16 ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); + + if (enable) + ctrl |= SDHCI_CTRL_PRESET_VAL_ENABLE; + else + ctrl &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; + sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); - host->flags &= ~SDHCI_PV_ENABLED; + + if (enable) + host->flags |= SDHCI_PV_ENABLED; + else + host->flags &= ~SDHCI_PV_ENABLED; + + host->preset_enabled = enable; } } @@ -2095,8 +2050,8 @@ pr_err("%s: Resetting controller.\n", mmc_hostname(host->mmc)); - sdhci_reset(host, SDHCI_RESET_CMD); - sdhci_reset(host, SDHCI_RESET_DATA); + sdhci_do_reset(host, SDHCI_RESET_CMD); + sdhci_do_reset(host, SDHCI_RESET_DATA); host->mrq->cmd->error = -ENOMEDIUM; tasklet_schedule(&host->finish_tasklet); @@ -2124,15 +2079,6 @@ * * \*****************************************************************************/ -static void sdhci_tasklet_card(unsigned long param) -{ - struct sdhci_host *host = (struct sdhci_host*)param; - - sdhci_card_event(host->mmc); - - mmc_detect_change(host->mmc, msecs_to_jiffies(200)); -} - static void sdhci_tasklet_finish(unsigned long param) { struct sdhci_host *host; @@ -2169,12 +2115,12 @@ /* Some controllers need this kick or reset won't work here */ if (host->quirks & SDHCI_QUIRK_CLOCK_BEFORE_RESET) /* This is to force an update */ - sdhci_update_clock(host); + host->ops->set_clock(host, host->clock); /* Spec says we should do both at the same time, but Ricoh controllers do not like that. 
*/ - sdhci_reset(host, SDHCI_RESET_CMD); - sdhci_reset(host, SDHCI_RESET_DATA); + sdhci_do_reset(host, SDHCI_RESET_CMD); + sdhci_do_reset(host, SDHCI_RESET_DATA); } host->mrq = NULL; @@ -2424,101 +2370,94 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id) { - irqreturn_t result; + irqreturn_t result = IRQ_NONE; struct sdhci_host *host = dev_id; - u32 intmask, unexpected = 0; - int cardint = 0, max_loops = 16; + u32 intmask, mask, unexpected = 0; + int max_loops = 16; spin_lock(&host->lock); - if (host->runtime_suspended) { + if (host->runtime_suspended && !sdhci_sdio_irq_enabled(host)) { spin_unlock(&host->lock); return IRQ_NONE; } intmask = sdhci_readl(host, SDHCI_INT_STATUS); - if (!intmask || intmask == 0xffffffff) { result = IRQ_NONE; goto out; } -again: - DBG("*** %s got interrupt: 0x%08x\n", - mmc_hostname(host->mmc), intmask); - - if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { - u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & - SDHCI_CARD_PRESENT; - - /* - * There is a observation on i.mx esdhc. INSERT bit will be - * immediately set again when it gets cleared, if a card is - * inserted. We have to mask the irq to prevent interrupt - * storm which will freeze the system. And the REMOVE gets - * the same situation. - * - * More testing are needed here to ensure it works for other - * platforms though. - */ - sdhci_mask_irqs(host, present ? SDHCI_INT_CARD_INSERT : - SDHCI_INT_CARD_REMOVE); - sdhci_unmask_irqs(host, present ? SDHCI_INT_CARD_REMOVE : - SDHCI_INT_CARD_INSERT); - - sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | - SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); - intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE); - tasklet_schedule(&host->card_tasklet); - } - - if (intmask & SDHCI_INT_CMD_MASK) { - sdhci_writel(host, intmask & SDHCI_INT_CMD_MASK, - SDHCI_INT_STATUS); - sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK); - } + do { + /* Clear selected interrupts. */ + mask = intmask & (SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | + SDHCI_INT_BUS_POWER); + sdhci_writel(host, mask, SDHCI_INT_STATUS); + + DBG("*** %s got interrupt: 0x%08x\n", + mmc_hostname(host->mmc), intmask); + + if (intmask & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { + u32 present = sdhci_readl(host, SDHCI_PRESENT_STATE) & + SDHCI_CARD_PRESENT; - if (intmask & SDHCI_INT_DATA_MASK) { - sdhci_writel(host, intmask & SDHCI_INT_DATA_MASK, - SDHCI_INT_STATUS); - sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); - } + /* + * There is a observation on i.mx esdhc. INSERT + * bit will be immediately set again when it gets + * cleared, if a card is inserted. We have to mask + * the irq to prevent interrupt storm which will + * freeze the system. And the REMOVE gets the + * same situation. + * + * More testing are needed here to ensure it works + * for other platforms though. + */ + host->ier &= ~(SDHCI_INT_CARD_INSERT | + SDHCI_INT_CARD_REMOVE); + host->ier |= present ? 
SDHCI_INT_CARD_REMOVE : + SDHCI_INT_CARD_INSERT; + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); - intmask &= ~(SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK); + sdhci_writel(host, intmask & (SDHCI_INT_CARD_INSERT | + SDHCI_INT_CARD_REMOVE), SDHCI_INT_STATUS); - intmask &= ~SDHCI_INT_ERROR; + host->thread_isr |= intmask & (SDHCI_INT_CARD_INSERT | + SDHCI_INT_CARD_REMOVE); + result = IRQ_WAKE_THREAD; + } - if (intmask & SDHCI_INT_BUS_POWER) { - pr_err("%s: Card is consuming too much power!\n", - mmc_hostname(host->mmc)); - sdhci_writel(host, SDHCI_INT_BUS_POWER, SDHCI_INT_STATUS); - } + if (intmask & SDHCI_INT_CMD_MASK) + sdhci_cmd_irq(host, intmask & SDHCI_INT_CMD_MASK); - intmask &= ~SDHCI_INT_BUS_POWER; + if (intmask & SDHCI_INT_DATA_MASK) + sdhci_data_irq(host, intmask & SDHCI_INT_DATA_MASK); - if (intmask & SDHCI_INT_CARD_INT) - cardint = 1; + if (intmask & SDHCI_INT_BUS_POWER) + pr_err("%s: Card is consuming too much power!\n", + mmc_hostname(host->mmc)); - intmask &= ~SDHCI_INT_CARD_INT; + if (intmask & SDHCI_INT_CARD_INT) { + sdhci_enable_sdio_irq_nolock(host, false); + host->thread_isr |= SDHCI_INT_CARD_INT; + result = IRQ_WAKE_THREAD; + } - if (intmask) { - unexpected |= intmask; - sdhci_writel(host, intmask, SDHCI_INT_STATUS); - } + intmask &= ~(SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE | + SDHCI_INT_CMD_MASK | SDHCI_INT_DATA_MASK | + SDHCI_INT_ERROR | SDHCI_INT_BUS_POWER | + SDHCI_INT_CARD_INT); - result = IRQ_HANDLED; + if (intmask) { + unexpected |= intmask; + sdhci_writel(host, intmask, SDHCI_INT_STATUS); + } - intmask = sdhci_readl(host, SDHCI_INT_STATUS); + if (result == IRQ_NONE) + result = IRQ_HANDLED; - /* - * If we know we'll call the driver to signal SDIO IRQ, disregard - * further indications of Card Interrupt in the status to avoid a - * needless loop. - */ - if (cardint) - intmask &= ~SDHCI_INT_CARD_INT; - if (intmask && --max_loops) - goto again; + intmask = sdhci_readl(host, SDHCI_INT_STATUS); + } while (intmask && --max_loops); out: spin_unlock(&host->lock); @@ -2527,15 +2466,38 @@ mmc_hostname(host->mmc), unexpected); sdhci_dumpregs(host); } - /* - * We have to delay this as it calls back into the driver. - */ - if (cardint) - mmc_signal_sdio_irq(host->mmc); return result; } +static irqreturn_t sdhci_thread_irq(int irq, void *dev_id) +{ + struct sdhci_host *host = dev_id; + unsigned long flags; + u32 isr; + + spin_lock_irqsave(&host->lock, flags); + isr = host->thread_isr; + host->thread_isr = 0; + spin_unlock_irqrestore(&host->lock, flags); + + if (isr & (SDHCI_INT_CARD_INSERT | SDHCI_INT_CARD_REMOVE)) { + sdhci_card_event(host->mmc); + mmc_detect_change(host->mmc, msecs_to_jiffies(200)); + } + + if (isr & SDHCI_INT_CARD_INT) { + sdio_run_irqs(host->mmc); + + spin_lock_irqsave(&host->lock, flags); + if (host->flags & SDHCI_SDIO_IRQ_ENABLED) + sdhci_enable_sdio_irq_nolock(host, true); + spin_unlock_irqrestore(&host->lock, flags); + } + + return isr ? 
IRQ_HANDLED : IRQ_NONE; +} + /*****************************************************************************\ * * * Suspend/resume * @@ -2572,9 +2534,6 @@ int sdhci_suspend_host(struct sdhci_host *host) { - if (host->ops->platform_suspend) - host->ops->platform_suspend(host); - sdhci_disable_card_detection(host); /* Disable tuning since we are suspending */ @@ -2584,7 +2543,9 @@ } if (!device_may_wakeup(mmc_dev(host->mmc))) { - sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK); + host->ier = 0; + sdhci_writel(host, 0, SDHCI_INT_ENABLE); + sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); free_irq(host->irq, host); } else { sdhci_enable_irq_wakeups(host); @@ -2605,8 +2566,9 @@ } if (!device_may_wakeup(mmc_dev(host->mmc))) { - ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, - mmc_hostname(host->mmc), host); + ret = request_threaded_irq(host->irq, sdhci_irq, + sdhci_thread_irq, IRQF_SHARED, + mmc_hostname(host->mmc), host); if (ret) return ret; } else { @@ -2628,9 +2590,6 @@ sdhci_enable_card_detection(host); - if (host->ops->platform_resume) - host->ops->platform_resume(host); - /* Set the re-tuning expiration flag */ if (host->flags & SDHCI_USING_RETUNING_TIMER) host->flags |= SDHCI_NEEDS_RETUNING; @@ -2682,10 +2641,12 @@ } spin_lock_irqsave(&host->lock, flags); - sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK); + host->ier &= SDHCI_INT_CARD_INT; + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); spin_unlock_irqrestore(&host->lock, flags); - synchronize_irq(host->irq); + synchronize_hardirq(host->irq); spin_lock_irqsave(&host->lock, flags); host->runtime_suspended = true; @@ -2729,7 +2690,7 @@ host->runtime_suspended = false; /* Enable SDIO IRQ */ - if ((host->flags & SDHCI_SDIO_IRQ_ENABLED)) + if (host->flags & SDHCI_SDIO_IRQ_ENABLED) sdhci_enable_sdio_irq_nolock(host, true); /* Enable Card Detection */ @@ -2788,7 +2749,7 @@ if (debug_quirks2) host->quirks2 = debug_quirks2; - sdhci_reset(host, SDHCI_RESET_ALL); + sdhci_do_reset(host, SDHCI_RESET_ALL); host->version = sdhci_readw(host, SDHCI_HOST_VERSION); host->version = (host->version & SDHCI_SPEC_VER_MASK) @@ -2848,15 +2809,29 @@ * (128) and potentially one alignment transfer for * each of those entries. */ - host->adma_desc = kmalloc((128 * 2 + 1) * 4, GFP_KERNEL); + host->adma_desc = dma_alloc_coherent(mmc_dev(host->mmc), + ADMA_SIZE, &host->adma_addr, + GFP_KERNEL); host->align_buffer = kmalloc(128 * 4, GFP_KERNEL); if (!host->adma_desc || !host->align_buffer) { - kfree(host->adma_desc); + dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE, + host->adma_desc, host->adma_addr); kfree(host->align_buffer); pr_warning("%s: Unable to allocate ADMA " "buffers. Falling back to standard DMA.\n", mmc_hostname(mmc)); host->flags &= ~SDHCI_USE_ADMA; + host->adma_desc = NULL; + host->align_buffer = NULL; + } else if (host->adma_addr & 3) { + pr_warning("%s: unable to allocate aligned ADMA descriptor\n", + mmc_hostname(mmc)); + host->flags &= ~SDHCI_USE_ADMA; + dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE, + host->adma_desc, host->adma_addr); + kfree(host->align_buffer); + host->adma_desc = NULL; + host->align_buffer = NULL; } } @@ -2941,6 +2916,7 @@ mmc->max_busy_timeout = (1 << 27) / host->timeout_clk; mmc->caps |= MMC_CAP_SDIO_IRQ | MMC_CAP_ERASE | MMC_CAP_CMD23; + mmc->caps2 |= MMC_CAP2_SDIO_NOTHREAD; if (host->quirks & SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12) host->flags |= SDHCI_AUTO_CMD12; @@ -3212,8 +3188,6 @@ /* * Init tasklets. 
*/ - tasklet_init(&host->card_tasklet, - sdhci_tasklet_card, (unsigned long)host); tasklet_init(&host->finish_tasklet, sdhci_tasklet_finish, (unsigned long)host); @@ -3230,8 +3204,8 @@ sdhci_init(host, 0); - ret = request_irq(host->irq, sdhci_irq, IRQF_SHARED, - mmc_hostname(mmc), host); + ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq, + IRQF_SHARED, mmc_hostname(mmc), host); if (ret) { pr_err("%s: Failed to request IRQ %d: %d\n", mmc_hostname(mmc), host->irq, ret); @@ -3273,12 +3247,12 @@ #ifdef SDHCI_USE_LEDS_CLASS reset: - sdhci_reset(host, SDHCI_RESET_ALL); - sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK); + sdhci_do_reset(host, SDHCI_RESET_ALL); + sdhci_writel(host, 0, SDHCI_INT_ENABLE); + sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); free_irq(host->irq, host); #endif untasklet: - tasklet_kill(&host->card_tasklet); tasklet_kill(&host->finish_tasklet); return ret; @@ -3315,14 +3289,14 @@ #endif if (!dead) - sdhci_reset(host, SDHCI_RESET_ALL); + sdhci_do_reset(host, SDHCI_RESET_ALL); - sdhci_mask_irqs(host, SDHCI_INT_ALL_MASK); + sdhci_writel(host, 0, SDHCI_INT_ENABLE); + sdhci_writel(host, 0, SDHCI_SIGNAL_ENABLE); free_irq(host->irq, host); del_timer_sync(&host->timer); - tasklet_kill(&host->card_tasklet); tasklet_kill(&host->finish_tasklet); if (host->vmmc) { @@ -3335,7 +3309,9 @@ regulator_put(host->vqmmc); } - kfree(host->adma_desc); + if (host->adma_desc) + dma_free_coherent(mmc_dev(host->mmc), ADMA_SIZE, + host->adma_desc, host->adma_addr); kfree(host->align_buffer); host->adma_desc = NULL; diff -Nur linux-3.15-rc4.orig/drivers/mmc/host/sdhci-cns3xxx.c linux-3.15-rc4/drivers/mmc/host/sdhci-cns3xxx.c --- linux-3.15-rc4.orig/drivers/mmc/host/sdhci-cns3xxx.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/host/sdhci-cns3xxx.c 2014-05-07 17:05:51.096166998 +0200 @@ -30,13 +30,12 @@ u16 clk; unsigned long timeout; - if (clock == host->clock) - return; + host->mmc->actual_clock = 0; sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); if (clock == 0) - goto out; + return; while (host->max_clk / div > clock) { /* @@ -75,13 +74,14 @@ clk |= SDHCI_CLOCK_CARD_EN; sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); -out: - host->clock = clock; } static const struct sdhci_ops sdhci_cns3xxx_ops = { .get_max_clock = sdhci_cns3xxx_get_max_clk, .set_clock = sdhci_cns3xxx_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, }; static const struct sdhci_pltfm_data sdhci_cns3xxx_pdata = { @@ -90,8 +90,7 @@ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | SDHCI_QUIRK_INVERTED_WRITE_PROTECT | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN | - SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | - SDHCI_QUIRK_NONSTANDARD_CLOCK, + SDHCI_QUIRK_BROKEN_TIMEOUT_VAL, }; static int sdhci_cns3xxx_probe(struct platform_device *pdev) diff -Nur linux-3.15-rc4.orig/drivers/mmc/host/sdhci-dove.c linux-3.15-rc4/drivers/mmc/host/sdhci-dove.c --- linux-3.15-rc4.orig/drivers/mmc/host/sdhci-dove.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/host/sdhci-dove.c 2014-05-07 17:05:51.096166998 +0200 @@ -86,6 +86,10 @@ static const struct sdhci_ops sdhci_dove_ops = { .read_w = sdhci_dove_readw, .read_l = sdhci_dove_readl, + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, }; static const struct sdhci_pltfm_data sdhci_dove_pdata = { diff -Nur linux-3.15-rc4.orig/drivers/mmc/host/sdhci-esdhc.h linux-3.15-rc4/drivers/mmc/host/sdhci-esdhc.h --- 
linux-3.15-rc4.orig/drivers/mmc/host/sdhci-esdhc.h 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/host/sdhci-esdhc.h 2014-05-07 17:05:51.096166998 +0200 @@ -20,10 +20,8 @@ #define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \ SDHCI_QUIRK_NO_BUSY_IRQ | \ - SDHCI_QUIRK_NONSTANDARD_CLOCK | \ SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ - SDHCI_QUIRK_PIO_NEEDS_DELAY | \ - SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET) + SDHCI_QUIRK_PIO_NEEDS_DELAY) #define ESDHC_SYSTEM_CONTROL 0x2c #define ESDHC_CLOCK_MASK 0x0000fff0 diff -Nur linux-3.15-rc4.orig/drivers/mmc/host/sdhci-esdhc-imx.c linux-3.15-rc4/drivers/mmc/host/sdhci-esdhc-imx.c --- linux-3.15-rc4.orig/drivers/mmc/host/sdhci-esdhc-imx.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/host/sdhci-esdhc-imx.c 2014-05-07 17:05:51.096166998 +0200 @@ -160,7 +160,6 @@ MULTIBLK_IN_PROCESS, /* exact multiblock cmd in process */ WAIT_FOR_INT, /* sent CMD12, waiting for response INT */ } multiblock_status; - u32 uhs_mode; u32 is_ddr; }; @@ -382,7 +381,6 @@ if (val & ESDHC_MIX_CTRL_SMPCLK_SEL) ret |= SDHCI_CTRL_TUNED_CLK; - ret |= (imx_data->uhs_mode & SDHCI_CTRL_UHS_MASK); ret &= ~SDHCI_CTRL_PRESET_VAL_ENABLE; return ret; @@ -429,7 +427,6 @@ else new_val &= ~ESDHC_VENDOR_SPEC_VSELECT; writel(new_val, host->ioaddr + ESDHC_VENDOR_SPEC); - imx_data->uhs_mode = val & SDHCI_CTRL_UHS_MASK; if (imx_data->socdata->flags & ESDHC_FLAG_MAN_TUNING) { new_val = readl(host->ioaddr + ESDHC_MIX_CTRL); if (val & SDHCI_CTRL_TUNED_CLK) @@ -600,12 +597,14 @@ u32 temp, val; if (clock == 0) { + host->mmc->actual_clock = 0; + if (esdhc_is_usdhc(imx_data)) { val = readl(host->ioaddr + ESDHC_VENDOR_SPEC); writel(val & ~ESDHC_VENDOR_SPEC_FRC_SDCLK_ON, host->ioaddr + ESDHC_VENDOR_SPEC); } - goto out; + return; } if (esdhc_is_usdhc(imx_data) && !imx_data->is_ddr) @@ -645,8 +644,6 @@ } mdelay(1); -out: - host->clock = clock; } static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host) @@ -668,7 +665,7 @@ return -ENOSYS; } -static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width) +static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) { u32 ctrl; @@ -686,8 +683,6 @@ esdhc_clrset_le(host, ESDHC_CTRL_BUSWIDTH_MASK, ctrl, SDHCI_HOST_CONTROL); - - return 0; } static void esdhc_prepare_tuning(struct sdhci_host *host, u32 val) @@ -697,6 +692,7 @@ /* FIXME: delay a bit for card to be ready for next tuning due to errors */ mdelay(1); + /* This is balanced by the runtime put in sdhci_tasklet_finish */ pm_runtime_get_sync(host->mmc->parent); reg = readl(host->ioaddr + ESDHC_MIX_CTRL); reg |= ESDHC_MIX_CTRL_EXE_TUNE | ESDHC_MIX_CTRL_SMPCLK_SEL | @@ -713,13 +709,12 @@ complete(&mrq->completion); } -static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode) +static int esdhc_send_tuning_cmd(struct sdhci_host *host, u32 opcode, + struct scatterlist *sg) { struct mmc_command cmd = {0}; struct mmc_request mrq = {NULL}; struct mmc_data data = {0}; - struct scatterlist sg; - char tuning_pattern[ESDHC_TUNING_BLOCK_PATTERN_LEN]; cmd.opcode = opcode; cmd.arg = 0; @@ -728,11 +723,9 @@ data.blksz = ESDHC_TUNING_BLOCK_PATTERN_LEN; data.blocks = 1; data.flags = MMC_DATA_READ; - data.sg = &sg; + data.sg = sg; data.sg_len = 1; - sg_init_one(&sg, tuning_pattern, sizeof(tuning_pattern)); - mrq.cmd = &cmd; mrq.cmd->mrq = &mrq; mrq.data = &data; @@ -742,14 +735,12 @@ mrq.done = esdhc_request_done; init_completion(&(mrq.completion)); - disable_irq(host->irq); - spin_lock(&host->lock); + spin_lock_irq(&host->lock); host->mrq = &mrq; 
sdhci_send_command(host, mrq.cmd); - spin_unlock(&host->lock); - enable_irq(host->irq); + spin_unlock_irq(&host->lock); wait_for_completion(&mrq.completion); @@ -772,13 +763,21 @@ static int esdhc_executing_tuning(struct sdhci_host *host, u32 opcode) { + struct scatterlist sg; + char *tuning_pattern; int min, max, avg, ret; + tuning_pattern = kmalloc(ESDHC_TUNING_BLOCK_PATTERN_LEN, GFP_KERNEL); + if (!tuning_pattern) + return -ENOMEM; + + sg_init_one(&sg, tuning_pattern, ESDHC_TUNING_BLOCK_PATTERN_LEN); + /* find the mininum delay first which can pass tuning */ min = ESDHC_TUNE_CTRL_MIN; while (min < ESDHC_TUNE_CTRL_MAX) { esdhc_prepare_tuning(host, min); - if (!esdhc_send_tuning_cmd(host, opcode)) + if (!esdhc_send_tuning_cmd(host, opcode, &sg)) break; min += ESDHC_TUNE_CTRL_STEP; } @@ -787,7 +786,7 @@ max = min + ESDHC_TUNE_CTRL_STEP; while (max < ESDHC_TUNE_CTRL_MAX) { esdhc_prepare_tuning(host, max); - if (esdhc_send_tuning_cmd(host, opcode)) { + if (esdhc_send_tuning_cmd(host, opcode, &sg)) { max -= ESDHC_TUNE_CTRL_STEP; break; } @@ -797,9 +796,11 @@ /* use average delay to get the best timing */ avg = (min + max) / 2; esdhc_prepare_tuning(host, avg); - ret = esdhc_send_tuning_cmd(host, opcode); + ret = esdhc_send_tuning_cmd(host, opcode, &sg); esdhc_post_tuning(host); + kfree(tuning_pattern); + dev_dbg(mmc_dev(host->mmc), "tunning %s at 0x%x ret %d\n", ret ? "failed" : "passed", avg, ret); @@ -837,28 +838,20 @@ return pinctrl_select_state(imx_data->pinctrl, pinctrl); } -static int esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) +static void esdhc_set_uhs_signaling(struct sdhci_host *host, unsigned timing) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = pltfm_host->priv; struct esdhc_platform_data *boarddata = &imx_data->boarddata; - switch (uhs) { + switch (timing) { case MMC_TIMING_UHS_SDR12: - imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR12; - break; case MMC_TIMING_UHS_SDR25: - imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR25; - break; case MMC_TIMING_UHS_SDR50: - imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR50; - break; case MMC_TIMING_UHS_SDR104: case MMC_TIMING_MMC_HS200: - imx_data->uhs_mode = SDHCI_CTRL_UHS_SDR104; break; case MMC_TIMING_UHS_DDR50: - imx_data->uhs_mode = SDHCI_CTRL_UHS_DDR50; writel(readl(host->ioaddr + ESDHC_MIX_CTRL) | ESDHC_MIX_CTRL_DDREN, host->ioaddr + ESDHC_MIX_CTRL); @@ -875,7 +868,15 @@ break; } - return esdhc_change_pinstate(host, uhs); + esdhc_change_pinstate(host, timing); +} + +static void esdhc_reset(struct sdhci_host *host, u8 mask) +{ + sdhci_reset(host, mask); + + sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); + sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); } static struct sdhci_ops sdhci_esdhc_ops = { @@ -888,8 +889,9 @@ .get_max_clock = esdhc_pltfm_get_max_clock, .get_min_clock = esdhc_pltfm_get_min_clock, .get_ro = esdhc_pltfm_get_ro, - .platform_bus_width = esdhc_pltfm_bus_width, + .set_bus_width = esdhc_pltfm_set_bus_width, .set_uhs_signaling = esdhc_set_uhs_signaling, + .reset = esdhc_reset, }; static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { @@ -1170,8 +1172,10 @@ ret = sdhci_runtime_suspend_host(host); - clk_disable_unprepare(imx_data->clk_per); - clk_disable_unprepare(imx_data->clk_ipg); + if (!sdhci_sdio_irq_enabled(host)) { + clk_disable_unprepare(imx_data->clk_per); + clk_disable_unprepare(imx_data->clk_ipg); + } clk_disable_unprepare(imx_data->clk_ahb); return ret; @@ -1183,8 +1187,10 @@ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data 
*imx_data = pltfm_host->priv; - clk_prepare_enable(imx_data->clk_per); - clk_prepare_enable(imx_data->clk_ipg); + if (!sdhci_sdio_irq_enabled(host)) { + clk_prepare_enable(imx_data->clk_per); + clk_prepare_enable(imx_data->clk_ipg); + } clk_prepare_enable(imx_data->clk_ahb); return sdhci_runtime_resume_host(host); diff -Nur linux-3.15-rc4.orig/drivers/mmc/host/sdhci.h linux-3.15-rc4/drivers/mmc/host/sdhci.h --- linux-3.15-rc4.orig/drivers/mmc/host/sdhci.h 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/host/sdhci.h 2014-05-07 17:05:51.096166998 +0200 @@ -281,18 +281,14 @@ unsigned int (*get_max_clock)(struct sdhci_host *host); unsigned int (*get_min_clock)(struct sdhci_host *host); unsigned int (*get_timeout_clock)(struct sdhci_host *host); - int (*platform_bus_width)(struct sdhci_host *host, - int width); + void (*set_bus_width)(struct sdhci_host *host, int width); void (*platform_send_init_74_clocks)(struct sdhci_host *host, u8 power_mode); unsigned int (*get_ro)(struct sdhci_host *host); - void (*platform_reset_enter)(struct sdhci_host *host, u8 mask); - void (*platform_reset_exit)(struct sdhci_host *host, u8 mask); + void (*reset)(struct sdhci_host *host, u8 mask); int (*platform_execute_tuning)(struct sdhci_host *host, u32 opcode); - int (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs); + void (*set_uhs_signaling)(struct sdhci_host *host, unsigned int uhs); void (*hw_reset)(struct sdhci_host *host); - void (*platform_suspend)(struct sdhci_host *host); - void (*platform_resume)(struct sdhci_host *host); void (*adma_workaround)(struct sdhci_host *host, u32 intmask); void (*platform_init)(struct sdhci_host *host); void (*card_event)(struct sdhci_host *host); @@ -397,6 +393,16 @@ extern void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd); +static inline bool sdhci_sdio_irq_enabled(struct sdhci_host *host) +{ + return !!(host->flags & SDHCI_SDIO_IRQ_ENABLED); +} + +void sdhci_set_clock(struct sdhci_host *host, unsigned int clock); +void sdhci_set_bus_width(struct sdhci_host *host, int width); +void sdhci_reset(struct sdhci_host *host, u8 mask); +void sdhci_set_uhs_signaling(struct sdhci_host *host, unsigned timing); + #ifdef CONFIG_PM extern int sdhci_suspend_host(struct sdhci_host *host); extern int sdhci_resume_host(struct sdhci_host *host); diff -Nur linux-3.15-rc4.orig/drivers/mmc/host/sdhci-of-arasan.c linux-3.15-rc4/drivers/mmc/host/sdhci-of-arasan.c --- linux-3.15-rc4.orig/drivers/mmc/host/sdhci-of-arasan.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/host/sdhci-of-arasan.c 2014-05-07 17:05:51.096166998 +0200 @@ -52,8 +52,12 @@ } static struct sdhci_ops sdhci_arasan_ops = { + .set_clock = sdhci_set_clock, .get_max_clock = sdhci_pltfm_clk_get_max_clock, .get_timeout_clock = sdhci_arasan_get_timeout_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, }; static struct sdhci_pltfm_data sdhci_arasan_pdata = { diff -Nur linux-3.15-rc4.orig/drivers/mmc/host/sdhci-of-esdhc.c linux-3.15-rc4/drivers/mmc/host/sdhci-of-esdhc.c --- linux-3.15-rc4.orig/drivers/mmc/host/sdhci-of-esdhc.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/host/sdhci-of-esdhc.c 2014-05-07 17:05:51.096166998 +0200 @@ -199,13 +199,14 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) { - int pre_div = 2; int div = 1; u32 temp; + host->mmc->actual_clock = 0; + if (clock == 0) - goto out; + return; /* Workaround to reduce the clock 
frequency for p1010 esdhc */ if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) { @@ -238,24 +239,8 @@ | (pre_div << ESDHC_PREDIV_SHIFT)); sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); mdelay(1); -out: - host->clock = clock; } -#ifdef CONFIG_PM -static u32 esdhc_proctl; -static void esdhc_of_suspend(struct sdhci_host *host) -{ - esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL); -} - -static void esdhc_of_resume(struct sdhci_host *host) -{ - esdhc_of_enable_dma(host); - sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL); -} -#endif - static void esdhc_of_platform_init(struct sdhci_host *host) { u32 vvn; @@ -269,7 +254,7 @@ host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; } -static int esdhc_pltfm_bus_width(struct sdhci_host *host, int width) +static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) { u32 ctrl; @@ -289,8 +274,6 @@ clrsetbits_be32(host->ioaddr + SDHCI_HOST_CONTROL, ESDHC_CTRL_BUSWIDTH_MASK, ctrl); - - return 0; } static const struct sdhci_ops sdhci_esdhc_ops = { @@ -305,13 +288,46 @@ .get_max_clock = esdhc_of_get_max_clock, .get_min_clock = esdhc_of_get_min_clock, .platform_init = esdhc_of_platform_init, -#ifdef CONFIG_PM - .platform_suspend = esdhc_of_suspend, - .platform_resume = esdhc_of_resume, -#endif .adma_workaround = esdhci_of_adma_workaround, - .platform_bus_width = esdhc_pltfm_bus_width, + .set_bus_width = esdhc_pltfm_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, +}; + +#ifdef CONFIG_PM + +static u32 esdhc_proctl; +static int esdhc_of_suspend(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + + esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL); + + return sdhci_suspend_host(host); +} + +static int esdhc_of_resume(struct device *dev) +{ + struct sdhci_host *host = dev_get_drvdata(dev); + int ret = sdhci_resume_host(host); + + if (ret == 0) { + /* Isn't this already done by sdhci_resume_host() ?
--rmk */ + esdhc_of_enable_dma(host); + sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL); + } + + return ret; +} + +static const struct dev_pm_ops esdhc_pmops = { + .suspend = esdhc_of_suspend, + .resume = esdhc_of_resume, }; +#define ESDHC_PMOPS (&esdhc_pmops) +#else +#define ESDHC_PMOPS NULL +#endif static const struct sdhci_pltfm_data sdhci_esdhc_pdata = { /* @@ -374,7 +390,7 @@ .name = "sdhci-esdhc", .owner = THIS_MODULE, .of_match_table = sdhci_esdhc_of_match, - .pm = SDHCI_PLTFM_PMOPS, + .pm = ESDHC_PMOPS, }, .probe = sdhci_esdhc_probe, .remove = sdhci_esdhc_remove, diff -Nur linux-3.15-rc4.orig/drivers/mmc/host/sdhci-of-hlwd.c linux-3.15-rc4/drivers/mmc/host/sdhci-of-hlwd.c --- linux-3.15-rc4.orig/drivers/mmc/host/sdhci-of-hlwd.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/host/sdhci-of-hlwd.c 2014-05-07 17:05:51.100167022 +0200 @@ -58,6 +58,10 @@ .write_l = sdhci_hlwd_writel, .write_w = sdhci_hlwd_writew, .write_b = sdhci_hlwd_writeb, + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, }; static const struct sdhci_pltfm_data sdhci_hlwd_pdata = { diff -Nur linux-3.15-rc4.orig/drivers/mmc/host/sdhci-pci.c linux-3.15-rc4/drivers/mmc/host/sdhci-pci.c --- linux-3.15-rc4.orig/drivers/mmc/host/sdhci-pci.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/host/sdhci-pci.c 2014-05-07 17:05:51.100167022 +0200 @@ -1031,7 +1031,7 @@ return 0; } -static int sdhci_pci_bus_width(struct sdhci_host *host, int width) +static void sdhci_pci_set_bus_width(struct sdhci_host *host, int width) { u8 ctrl; @@ -1052,8 +1052,6 @@ } sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); - - return 0; } static void sdhci_pci_gpio_hw_reset(struct sdhci_host *host) @@ -1080,8 +1078,11 @@ } static const struct sdhci_ops sdhci_pci_ops = { + .set_clock = sdhci_set_clock, .enable_dma = sdhci_pci_enable_dma, - .platform_bus_width = sdhci_pci_bus_width, + .set_bus_width = sdhci_pci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, .hw_reset = sdhci_pci_hw_reset, }; diff -Nur linux-3.15-rc4.orig/drivers/mmc/host/sdhci-pltfm.c linux-3.15-rc4/drivers/mmc/host/sdhci-pltfm.c --- linux-3.15-rc4.orig/drivers/mmc/host/sdhci-pltfm.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/host/sdhci-pltfm.c 2014-05-07 17:05:51.100167022 +0200 @@ -45,6 +45,10 @@ EXPORT_SYMBOL_GPL(sdhci_pltfm_clk_get_max_clock); static const struct sdhci_ops sdhci_pltfm_ops = { + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, }; #ifdef CONFIG_OF diff -Nur linux-3.15-rc4.orig/drivers/mmc/host/sdhci-pxav2.c linux-3.15-rc4/drivers/mmc/host/sdhci-pxav2.c --- linux-3.15-rc4.orig/drivers/mmc/host/sdhci-pxav2.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/host/sdhci-pxav2.c 2014-05-07 17:05:51.100167022 +0200 @@ -51,11 +51,13 @@ #define MMC_CARD 0x1000 #define MMC_WIDTH 0x0100 -static void pxav2_set_private_registers(struct sdhci_host *host, u8 mask) +static void pxav2_reset(struct sdhci_host *host, u8 mask) { struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc)); struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; + sdhci_reset(host, mask); + if (mask == SDHCI_RESET_ALL) { u16 tmp = 0; @@ -88,7 +90,7 @@ } } -static int pxav2_mmc_set_width(struct sdhci_host *host, int width) +static void pxav2_mmc_set_bus_width(struct sdhci_host
*host, int width) { u8 ctrl; u16 tmp; @@ -107,14 +109,14 @@ } writew(tmp, host->ioaddr + SD_CE_ATA_2); writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL); - - return 0; } static const struct sdhci_ops pxav2_sdhci_ops = { + .set_clock = sdhci_set_clock, .get_max_clock = sdhci_pltfm_clk_get_max_clock, - .platform_reset_exit = pxav2_set_private_registers, - .platform_bus_width = pxav2_mmc_set_width, + .set_bus_width = pxav2_mmc_set_bus_width, + .reset = pxav2_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, }; #ifdef CONFIG_OF diff -Nur linux-3.15-rc4.orig/drivers/mmc/host/sdhci-pxav3.c linux-3.15-rc4/drivers/mmc/host/sdhci-pxav3.c --- linux-3.15-rc4.orig/drivers/mmc/host/sdhci-pxav3.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/host/sdhci-pxav3.c 2014-05-07 17:05:51.100167022 +0200 @@ -112,11 +112,13 @@ return 0; } -static void pxav3_set_private_registers(struct sdhci_host *host, u8 mask) +static void pxav3_reset(struct sdhci_host *host, u8 mask) { struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc)); struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; + sdhci_reset(host, mask); + if (mask == SDHCI_RESET_ALL) { /* * tune timing of read data/command when crc error happen @@ -184,7 +186,7 @@ pxa->power_mode = power_mode; } -static int pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) +static void pxav3_set_uhs_signaling(struct sdhci_host *host, unsigned int uhs) { u16 ctrl_2; @@ -218,15 +220,15 @@ dev_dbg(mmc_dev(host->mmc), "%s uhs = %d, ctrl_2 = %04X\n", __func__, uhs, ctrl_2); - - return 0; } static const struct sdhci_ops pxav3_sdhci_ops = { - .platform_reset_exit = pxav3_set_private_registers, + .set_clock = sdhci_set_clock, .set_uhs_signaling = pxav3_set_uhs_signaling, .platform_send_init_74_clocks = pxav3_gen_init_74_clocks, .get_max_clock = sdhci_pltfm_clk_get_max_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = pxav3_reset, }; static struct sdhci_pltfm_data sdhci_pxav3_pdata = { diff -Nur linux-3.15-rc4.orig/drivers/mmc/host/sdhci-s3c.c linux-3.15-rc4/drivers/mmc/host/sdhci-s3c.c --- linux-3.15-rc4.orig/drivers/mmc/host/sdhci-s3c.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/host/sdhci-s3c.c 2014-05-07 17:05:51.100167022 +0200 @@ -58,6 +58,8 @@ struct clk *clk_io; struct clk *clk_bus[MAX_BUS_CLK]; unsigned long clk_rates[MAX_BUS_CLK]; + + bool no_divider; }; /** @@ -70,6 +72,7 @@ */ struct sdhci_s3c_drv_data { unsigned int sdhci_quirks; + bool no_divider; }; static inline struct sdhci_s3c *to_s3c(struct sdhci_host *host) @@ -119,7 +122,7 @@ * If controller uses a non-standard clock division, find the best clock * speed possible with selected clock source and skip the division. */ - if (ourhost->host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) { + if (ourhost->no_divider) { rate = clk_round_rate(clksrc, wanted); return wanted - rate; } @@ -161,9 +164,13 @@ int src; u32 ctrl; + host->mmc->actual_clock = 0; + /* don't bother if the clock is going off.
*/ - if (clock == 0) + if (clock == 0) { + sdhci_set_clock(host, clock); return; + } for (src = 0; src < MAX_BUS_CLK; src++) { delta = sdhci_s3c_consider_clock(ourhost, src, clock); @@ -215,6 +222,8 @@ if (clock < 25 * 1000000) ctrl |= (S3C_SDHCI_CTRL3_FCSEL3 | S3C_SDHCI_CTRL3_FCSEL2); writel(ctrl, host->ioaddr + S3C_SDHCI_CONTROL3); + + sdhci_set_clock(host, clock); } /** @@ -295,10 +304,11 @@ unsigned long timeout; u16 clk = 0; + host->mmc->actual_clock = 0; + /* If the clock is going off, set to 0 at clock control register */ if (clock == 0) { sdhci_writew(host, 0, SDHCI_CLOCK_CONTROL); - host->clock = clock; return; } @@ -306,8 +316,6 @@ clk_set_rate(ourhost->clk_bus[ourhost->cur_clk], clock); - host->clock = clock; - clk = SDHCI_CLOCK_INT_EN; sdhci_writew(host, clk, SDHCI_CLOCK_CONTROL); @@ -329,14 +337,14 @@ } /** - * sdhci_s3c_platform_bus_width - support 8bit buswidth + * sdhci_s3c_set_bus_width - support 8bit buswidth * @host: The SDHCI host being queried * @width: MMC_BUS_WIDTH_ macro for the bus width being requested * * We have 8-bit width support but is not a v3 controller. * So we add platform_bus_width() and support 8bit width. */ -static int sdhci_s3c_platform_bus_width(struct sdhci_host *host, int width) +static void sdhci_s3c_set_bus_width(struct sdhci_host *host, int width) { u8 ctrl; @@ -358,15 +366,15 @@ } sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); - - return 0; } static struct sdhci_ops sdhci_s3c_ops = { .get_max_clock = sdhci_s3c_get_max_clk, .set_clock = sdhci_s3c_set_clock, .get_min_clock = sdhci_s3c_get_min_clock, - .platform_bus_width = sdhci_s3c_platform_bus_width, + .set_bus_width = sdhci_s3c_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, }; static void sdhci_s3c_notify_change(struct platform_device *dev, int state) @@ -606,8 +614,10 @@ /* Setup quirks for the controller */ host->quirks |= SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC; host->quirks |= SDHCI_QUIRK_NO_HISPD_BIT; - if (drv_data) + if (drv_data) { host->quirks |= drv_data->sdhci_quirks; + sc->no_divider = drv_data->no_divider; + } #ifndef CONFIG_MMC_SDHCI_S3C_DMA @@ -656,7 +666,7 @@ * If controller does not have internal clock divider, * we can use overriding functions instead of default. 
*/ - if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) { + if (sc->no_divider) { sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock; sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock; sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock; @@ -797,7 +807,7 @@ #if defined(CONFIG_CPU_EXYNOS4210) || defined(CONFIG_SOC_EXYNOS4212) static struct sdhci_s3c_drv_data exynos4_sdhci_drv_data = { - .sdhci_quirks = SDHCI_QUIRK_NONSTANDARD_CLOCK, + .no_divider = true, }; #define EXYNOS4_SDHCI_DRV_DATA ((kernel_ulong_t)&exynos4_sdhci_drv_data) #else diff -Nur linux-3.15-rc4.orig/drivers/mmc/host/sdhci-sirf.c linux-3.15-rc4/drivers/mmc/host/sdhci-sirf.c --- linux-3.15-rc4.orig/drivers/mmc/host/sdhci-sirf.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/host/sdhci-sirf.c 2014-05-07 17:05:51.100167022 +0200 @@ -28,7 +28,11 @@ } static struct sdhci_ops sdhci_sirf_ops = { + .set_clock = sdhci_set_clock, .get_max_clock = sdhci_sirf_get_max_clk, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, }; static struct sdhci_pltfm_data sdhci_sirf_pdata = { diff -Nur linux-3.15-rc4.orig/drivers/mmc/host/sdhci-spear.c linux-3.15-rc4/drivers/mmc/host/sdhci-spear.c --- linux-3.15-rc4.orig/drivers/mmc/host/sdhci-spear.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/host/sdhci-spear.c 2014-05-07 17:05:51.100167022 +0200 @@ -38,7 +38,10 @@ /* sdhci ops */ static const struct sdhci_ops sdhci_pltfm_ops = { - /* Nothing to do for now. */ + .set_clock = sdhci_set_clock, + .set_bus_width = sdhci_set_bus_width, + .reset = sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, }; #ifdef CONFIG_OF diff -Nur linux-3.15-rc4.orig/drivers/mmc/host/sdhci-tegra.c linux-3.15-rc4/drivers/mmc/host/sdhci-tegra.c --- linux-3.15-rc4.orig/drivers/mmc/host/sdhci-tegra.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/mmc/host/sdhci-tegra.c 2014-05-07 17:05:51.100167022 +0200 @@ -48,19 +48,6 @@ int power_gpio; }; -static u32 tegra_sdhci_readl(struct sdhci_host *host, int reg) -{ - u32 val; - - if (unlikely(reg == SDHCI_PRESENT_STATE)) { - /* Use wp_gpio here instead? 
*/ - val = readl(host->ioaddr + reg); - return val | SDHCI_WRITE_PROTECT; - } - - return readl(host->ioaddr + reg); -} - static u16 tegra_sdhci_readw(struct sdhci_host *host, int reg) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); @@ -108,12 +95,14 @@ return mmc_gpio_get_ro(host->mmc); } -static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask) +static void tegra_sdhci_reset(struct sdhci_host *host, u8 mask) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_tegra *tegra_host = pltfm_host->priv; const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data; + sdhci_reset(host, mask); + if (!(mask & SDHCI_RESET_ALL)) return; @@ -127,7 +116,7 @@ } } -static int tegra_sdhci_buswidth(struct sdhci_host *host, int bus_width) +static void tegra_sdhci_set_bus_width(struct sdhci_host *host, int bus_width) { u32 ctrl; @@ -144,16 +133,16 @@ ctrl &= ~SDHCI_CTRL_4BITBUS; } sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); - return 0; } static const struct sdhci_ops tegra_sdhci_ops = { .get_ro = tegra_sdhci_get_ro, - .read_l = tegra_sdhci_readl, .read_w = tegra_sdhci_readw, .write_l = tegra_sdhci_writel, - .platform_bus_width = tegra_sdhci_buswidth, - .platform_reset_exit = tegra_sdhci_reset_exit, + .set_clock = sdhci_set_clock, + .set_bus_width = tegra_sdhci_set_bus_width, + .reset = tegra_sdhci_reset, + .set_uhs_signaling = sdhci_set_uhs_signaling, }; static const struct sdhci_pltfm_data sdhci_tegra20_pdata = { diff -Nur linux-3.15-rc4.orig/drivers/net/ethernet/freescale/fec.h linux-3.15-rc4/drivers/net/ethernet/freescale/fec.h --- linux-3.15-rc4.orig/drivers/net/ethernet/freescale/fec.h 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/net/ethernet/freescale/fec.h 2014-05-07 17:05:51.100167022 +0200 @@ -14,6 +14,7 @@ /****************************************************************************/ #include +#include #include #include @@ -170,6 +171,11 @@ unsigned short res0[4]; }; +union bufdesc_u { + struct bufdesc bd; + struct bufdesc_ex ebd; +}; + /* * The following definitions courtesy of commproc.h, which where * Copyright (c) 1997 Dan Malek (dmalek@jlc.net). @@ -202,6 +208,7 @@ #define BD_ENET_RX_OV ((ushort)0x0002) #define BD_ENET_RX_CL ((ushort)0x0001) #define BD_ENET_RX_STATS ((ushort)0x013f) /* All status bits */ +#define BD_ENET_RX_ERROR ((ushort)0x003f) /* Enhanced buffer descriptor control/status used by Ethernet receive */ #define BD_ENET_RX_VLAN 0x00000004 @@ -224,10 +231,17 @@ #define BD_ENET_TX_STATS ((ushort)0x03ff) /* All status bits */ /*enhanced buffer descriptor control/status used by Ethernet transmit*/ -#define BD_ENET_TX_INT 0x40000000 -#define BD_ENET_TX_TS 0x20000000 -#define BD_ENET_TX_PINS 0x10000000 -#define BD_ENET_TX_IINS 0x08000000 +#define BD_ENET_TX_INT BIT(30) +#define BD_ENET_TX_TS BIT(29) +#define BD_ENET_TX_PINS BIT(28) +#define BD_ENET_TX_IINS BIT(27) +#define BD_ENET_TX_TXE BIT(15) +#define BD_ENET_TX_UE BIT(13) +#define BD_ENET_TX_EE BIT(12) +#define BD_ENET_TX_FE BIT(11) +#define BD_ENET_TX_LCE BIT(10) +#define BD_ENET_TX_OE BIT(9) +#define BD_ENET_TX_TSE BIT(8) /* This device has up to three irqs on some platforms */ @@ -240,28 +254,20 @@ * the skbuffer directly. 
*/ -#define FEC_ENET_RX_PAGES 8 +#define FEC_ENET_RX_PAGES 64 #define FEC_ENET_RX_FRSIZE 2048 #define FEC_ENET_RX_FRPPG (PAGE_SIZE / FEC_ENET_RX_FRSIZE) #define RX_RING_SIZE (FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES) #define FEC_ENET_TX_FRSIZE 2048 #define FEC_ENET_TX_FRPPG (PAGE_SIZE / FEC_ENET_TX_FRSIZE) -#define TX_RING_SIZE 16 /* Must be power of two */ -#define TX_RING_MOD_MASK 15 /* for this to work */ +#define TX_RING_SIZE 128 /* Must be power of two */ #define BD_ENET_RX_INT 0x00800000 #define BD_ENET_RX_PTP ((ushort)0x0400) #define BD_ENET_RX_ICE 0x00000020 #define BD_ENET_RX_PCR 0x00000010 -#define FLAG_RX_CSUM_ENABLED (BD_ENET_RX_ICE | BD_ENET_RX_PCR) #define FLAG_RX_CSUM_ERROR (BD_ENET_RX_ICE | BD_ENET_RX_PCR) -struct fec_enet_delayed_work { - struct delayed_work delay_work; - bool timeout; - bool trig_tx; -}; - /* The FEC buffer descriptors track the ring buffers. The rx_bd_base and * tx_bd_base always point to the base of the buffer descriptors. The * cur_rx and cur_tx point to the currently available buffer. @@ -281,27 +287,33 @@ struct clk *clk_enet_out; struct clk *clk_ptp; + unsigned char tx_page_map[TX_RING_SIZE]; /* The saved address of a sent-in-place packet/buffer, for skfree(). */ unsigned char *tx_bounce[TX_RING_SIZE]; struct sk_buff *tx_skbuff[TX_RING_SIZE]; struct sk_buff *rx_skbuff[RX_RING_SIZE]; /* CPM dual port RAM relative addresses */ - dma_addr_t bd_dma; + dma_addr_t rx_bd_dma; + dma_addr_t tx_bd_dma; /* Address of Rx and Tx buffers */ - struct bufdesc *rx_bd_base; - struct bufdesc *tx_bd_base; + union bufdesc_u *rx_bd_base; + union bufdesc_u *tx_bd_base; /* The next free ring entry */ - struct bufdesc *cur_rx, *cur_tx; - /* The ring entries to be free()ed */ - struct bufdesc *dirty_tx; + unsigned short tx_next; + unsigned short tx_dirty; + unsigned short tx_min; + unsigned short rx_next; unsigned short tx_ring_size; unsigned short rx_ring_size; + unsigned char flags; + + struct mutex mutex; + struct platform_device *pdev; - int opened; int dev_id; /* Phylib and MDIO interface */ @@ -315,11 +327,12 @@ int speed; struct completion mdio_done; int irq[FEC_IRQ_NUM]; - int bufdesc_ex; - int pause_flag; + unsigned short pause_flag; + unsigned short pause_mode; struct napi_struct napi; - int csum_flags; + + struct work_struct tx_timeout_work; struct ptp_clock *ptp_clock; struct ptp_clock_info ptp_caps; @@ -333,8 +346,8 @@ int hwts_rx_en; int hwts_tx_en; struct timer_list time_keep; - struct fec_enet_delayed_work delay_work; struct regulator *reg_phy; + unsigned long quirks; }; void fec_ptp_init(struct platform_device *pdev); diff -Nur linux-3.15-rc4.orig/drivers/net/ethernet/freescale/fec_main.c linux-3.15-rc4/drivers/net/ethernet/freescale/fec_main.c --- linux-3.15-rc4.orig/drivers/net/ethernet/freescale/fec_main.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/net/ethernet/freescale/fec_main.c 2014-05-07 17:05:51.104167047 +0200 @@ -33,12 +33,6 @@ #include #include #include -#include -#include -#include -#include -#include -#include #include #include #include @@ -91,16 +85,8 @@ #define FEC_QUIRK_HAS_CSUM (1 << 5) /* Controller has hardware vlan support */ #define FEC_QUIRK_HAS_VLAN (1 << 6) -/* ENET IP errata ERR006358 - * - * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously - * detected as not set during a prior frame transmission, then the - * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs - * were added to the ring and the ENET_TDAR[TDAR] bit is set. 
This results in - * frames not being transmitted until there is a 0-to-1 transition on - * ENET_TDAR[TDAR]. - */ -#define FEC_QUIRK_ERR006358 (1 << 7) +/* Controller has ability to offset rx packets */ +#define FEC_QUIRK_RX_SHIFT16 (1 << 8) static struct platform_device_id fec_devtype[] = { { @@ -120,7 +106,7 @@ .name = "imx6q-fec", .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM | - FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358, + FEC_QUIRK_HAS_VLAN | FEC_QUIRK_RX_SHIFT16, }, { .name = "mvf600-fec", .driver_data = FEC_QUIRK_ENET_MAC, @@ -172,9 +158,15 @@ #endif #endif /* CONFIG_M5272 */ -#if (((RX_RING_SIZE + TX_RING_SIZE) * 32) > PAGE_SIZE) -#error "FEC: descriptor ring size constants too large" +#if RX_RING_SIZE * 32 > PAGE_SIZE +#error "FEC: receive descriptor ring size too large" #endif +#if TX_RING_SIZE * 32 > PAGE_SIZE +#error "FEC: transmit descriptor ring size too large" +#endif + +/* Minimum TX ring size when using NETIF_F_SG */ +#define TX_RING_SIZE_MIN_SG (2 * (MAX_SKB_FRAGS + 1)) /* Interrupt events/masks. */ #define FEC_ENET_HBERR ((uint)0x80000000) /* Heartbeat error */ @@ -200,6 +192,7 @@ /* FEC receive acceleration */ #define FEC_RACC_IPDIS (1 << 1) #define FEC_RACC_PRODIS (1 << 2) +#define FEC_RACC_SHIFT16 BIT(7) #define FEC_RACC_OPTIONS (FEC_RACC_IPDIS | FEC_RACC_PRODIS) /* @@ -228,62 +221,60 @@ /* Transmitter timeout */ #define TX_TIMEOUT (2 * HZ) -#define FEC_PAUSE_FLAG_AUTONEG 0x1 -#define FEC_PAUSE_FLAG_ENABLE 0x2 +/* pause mode/flag */ +#define FEC_PAUSE_FLAG_AUTONEG BIT(0) +#define FEC_PAUSE_FLAG_RX BIT(1) +#define FEC_PAUSE_FLAG_TX BIT(2) + +/* flags */ +#define FEC_FLAG_BUFDESC_EX BIT(0) +#define FEC_FLAG_RX_CSUM BIT(1) +#define FEC_FLAG_RX_VLAN BIT(2) static int mii_cnt; -static inline -struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp, struct fec_enet_private *fep) +static unsigned copybreak = 200; +module_param(copybreak, uint, 0644); +MODULE_PARM_DESC(copybreak, + "Maximum size of packet that is copied to a new buffer on receive"); + +static bool fec_enet_rx_zerocopy(struct fec_enet_private *fep, unsigned pktlen) { - struct bufdesc *new_bd = bdp + 1; - struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp + 1; - struct bufdesc_ex *ex_base; - struct bufdesc *base; - int ring_size; - - if (bdp >= fep->tx_bd_base) { - base = fep->tx_bd_base; - ring_size = fep->tx_ring_size; - ex_base = (struct bufdesc_ex *)fep->tx_bd_base; - } else { - base = fep->rx_bd_base; - ring_size = fep->rx_ring_size; - ex_base = (struct bufdesc_ex *)fep->rx_bd_base; - } +#ifndef CONFIG_M5272 + if (fep->quirks & FEC_QUIRK_RX_SHIFT16 && pktlen >= copybreak) + return true; +#endif + return false; +} - if (fep->bufdesc_ex) - return (struct bufdesc *)((ex_new_bd >= (ex_base + ring_size)) ? - ex_base : ex_new_bd); +static union bufdesc_u * +fec_enet_tx_get(unsigned index, struct fec_enet_private *fep) +{ + union bufdesc_u *base = fep->tx_bd_base; + union bufdesc_u *bdp; + + if (fep->flags & FEC_FLAG_BUFDESC_EX) + bdp = (union bufdesc_u *)(&base->ebd + index); else - return (new_bd >= (base + ring_size)) ? 
- base : new_bd; + bdp = (union bufdesc_u *)(&base->bd + index); + + return bdp; } -static inline -struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp, struct fec_enet_private *fep) +static union bufdesc_u * +fec_enet_rx_get(unsigned index, struct fec_enet_private *fep) { - struct bufdesc *new_bd = bdp - 1; - struct bufdesc_ex *ex_new_bd = (struct bufdesc_ex *)bdp - 1; - struct bufdesc_ex *ex_base; - struct bufdesc *base; - int ring_size; - - if (bdp >= fep->tx_bd_base) { - base = fep->tx_bd_base; - ring_size = fep->tx_ring_size; - ex_base = (struct bufdesc_ex *)fep->tx_bd_base; - } else { - base = fep->rx_bd_base; - ring_size = fep->rx_ring_size; - ex_base = (struct bufdesc_ex *)fep->rx_bd_base; - } + union bufdesc_u *base = fep->rx_bd_base; + union bufdesc_u *bdp; + + index &= fep->rx_ring_size - 1; - if (fep->bufdesc_ex) - return (struct bufdesc *)((ex_new_bd < ex_base) ? - (ex_new_bd + ring_size) : ex_new_bd); + if (fep->flags & FEC_FLAG_BUFDESC_EX) + bdp = (union bufdesc_u *)(&base->ebd + index); else - return (new_bd < base) ? (new_bd + ring_size) : new_bd; + bdp = (union bufdesc_u *)(&base->bd + index); + + return bdp; } static void *swap_buffer(void *bufaddr, int len) @@ -297,13 +288,47 @@ return bufaddr; } +static void fec_dump(struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + union bufdesc_u *bdp; + unsigned index = 0; + + netdev_info(ndev, "TX ring dump\n"); + pr_info("Nr SC addr len SKB\n"); + + for (index = 0; index < fep->tx_ring_size; index++) { + bdp = fec_enet_tx_get(index, fep); + + pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p", + index, + index == fep->tx_next ? 'S' : ' ', + index == fep->tx_dirty ? 'H' : ' ', + bdp->bd.cbd_sc, bdp->bd.cbd_bufaddr, + bdp->bd.cbd_datlen, + fep->tx_skbuff[index]); + if (fep->flags & FEC_FLAG_BUFDESC_EX) + pr_cont(" %08lx", bdp->ebd.cbd_esc); + pr_cont("\n"); + } +} + static int fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev) { + int csum_start; + /* Only run for packets requiring a checksum. */ if (skb->ip_summed != CHECKSUM_PARTIAL) return 0; + csum_start = skb_checksum_start_offset(skb); + if (csum_start + skb->csum_offset > skb_headlen(skb)) { + netdev_err(ndev, "checksum outside skb head: headlen %u start %u offset %u\n", + skb_headlen(skb), csum_start, skb->csum_offset); + return -1; + } + if (unlikely(skb_cow_head(skb, 0))) return -1; @@ -312,23 +337,56 @@ return 0; } +static void +fec_enet_tx_unmap(unsigned index, union bufdesc_u *bdp, struct fec_enet_private *fep) +{ + dma_addr_t addr = bdp->bd.cbd_bufaddr; + unsigned length = bdp->bd.cbd_datlen; + + bdp->bd.cbd_bufaddr = 0; + + if (fep->tx_page_map[index]) + dma_unmap_page(&fep->pdev->dev, addr, length, DMA_TO_DEVICE); + else + dma_unmap_single(&fep->pdev->dev, addr, length, DMA_TO_DEVICE); +} + +static void +fec_enet_tx_unmap_range(unsigned index, unsigned last, struct fec_enet_private *fep) +{ + union bufdesc_u *bdp; + + do { + if (last == 0) + last = fep->tx_ring_size; + last--; + + bdp = fec_enet_tx_get(last, fep); + fec_enet_tx_unmap(last, bdp, fep); + } while (index != last); +} + +static unsigned ring_free(unsigned ins, unsigned rem, unsigned size) +{ + int num = rem - ins; + return num < 0 ? 
num + size : num; +} + static netdev_tx_t fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); - struct bufdesc *bdp, *bdp_pre; + union bufdesc_u *bdp; void *bufaddr; unsigned short status; - unsigned int index; + unsigned index, last, length, cbd_esc; + int f, nr_frags = skb_shinfo(skb)->nr_frags; + dma_addr_t addr; /* Fill in a Tx ring entry */ - bdp = fep->cur_tx; - - status = bdp->cbd_sc; + index = fep->tx_next; - if (status & BD_ENET_TX_READY) { + if (ring_free(index, fep->tx_dirty, fep->tx_ring_size) < 1 + nr_frags) { /* Ooops. All transmit buffers are full. Bail out. * This should not happen, since ndev->tbusy should be set. */ @@ -342,26 +400,17 @@ return NETDEV_TX_OK; } - /* Clear all of the status flags */ - status &= ~BD_ENET_TX_STATS; - /* Set buffer length and buffer pointer */ bufaddr = skb->data; - bdp->cbd_datlen = skb->len; + length = skb_headlen(skb); /* * On some FEC implementations data must be aligned on * 4-byte boundaries. Use bounce buffers to copy data * and get it aligned. Ugh. */ - if (fep->bufdesc_ex) - index = (struct bufdesc_ex *)bdp - - (struct bufdesc_ex *)fep->tx_bd_base; - else - index = bdp - fep->tx_bd_base; - if (((unsigned long) bufaddr) & FEC_ALIGNMENT) { - memcpy(fep->tx_bounce[index], skb->data, skb->len); + memcpy(fep->tx_bounce[index], skb->data, length); bufaddr = fep->tx_bounce[index]; } @@ -370,75 +419,127 @@ * the system that it's running on. As the result, driver has to * swap every frame going to and coming from the controller. */ - if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) - swap_buffer(bufaddr, skb->len); + if (fep->quirks & FEC_QUIRK_SWAP_FRAME) + swap_buffer(bufaddr, length); - /* Save skb pointer */ - fep->tx_skbuff[index] = skb; + /* Push the data cache so the CPM does not get stale memory data. */ + addr = dma_map_single(&fep->pdev->dev, bufaddr, length, DMA_TO_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, addr)) + goto release; + + bdp = fec_enet_tx_get(index, fep); + bdp->bd.cbd_datlen = length; + bdp->bd.cbd_bufaddr = addr; - /* Push the data cache so the CPM does not get stale memory - * data. 
- */ - bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr, - skb->len, DMA_TO_DEVICE); - if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { - bdp->cbd_bufaddr = 0; - fep->tx_skbuff[index] = NULL; - dev_kfree_skb_any(skb); - if (net_ratelimit()) - netdev_err(ndev, "Tx DMA memory map failed\n"); - return NETDEV_TX_OK; - } + fep->tx_page_map[index] = 0; - if (fep->bufdesc_ex) { - - struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; - ebdp->cbd_bdu = 0; + cbd_esc = BD_ENET_TX_INT; + if (fep->flags & FEC_FLAG_BUFDESC_EX) { if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && fep->hwts_tx_en)) { - ebdp->cbd_esc = (BD_ENET_TX_TS | BD_ENET_TX_INT); + cbd_esc |= BD_ENET_TX_TS; skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; } else { - ebdp->cbd_esc = BD_ENET_TX_INT; - /* Enable protocol checksum flags * We do not bother with the IP Checksum bits as they * are done by the kernel */ if (skb->ip_summed == CHECKSUM_PARTIAL) - ebdp->cbd_esc |= BD_ENET_TX_PINS; + cbd_esc |= BD_ENET_TX_PINS; + } + bdp->ebd.cbd_bdu = 0; + bdp->ebd.cbd_esc = cbd_esc; + } + + for (last = index, f = 0; f < nr_frags; f++) { + const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[f]; + + if (++last >= fep->tx_ring_size) + last = 0; + + length = skb_frag_size(frag); + + /* If the alignment is unsuitable, we need to bounce. */ + if (frag->page_offset & FEC_ALIGNMENT) { + unsigned char *bounce = fep->tx_bounce[last]; + + /* FIXME: highdma? */ + memcpy(bounce, skb_frag_address(frag), length); + + addr = dma_map_single(&fep->pdev->dev, bounce, + length, DMA_TO_DEVICE); + fep->tx_page_map[last] = 0; + } else { + addr = skb_frag_dma_map(&fep->pdev->dev, frag, 0, + length, DMA_TO_DEVICE); + fep->tx_page_map[last] = 1; + } + + if (dma_mapping_error(&fep->pdev->dev, addr)) + goto release_frags; + + bdp = fec_enet_tx_get(last, fep); + bdp->bd.cbd_datlen = length; + bdp->bd.cbd_bufaddr = addr; + if (fep->flags & FEC_FLAG_BUFDESC_EX) { + bdp->ebd.cbd_esc = cbd_esc; + bdp->ebd.cbd_bdu = 0; } } + /* Save skb pointer */ + fep->tx_skbuff[last] = skb; + + /* + * We need the preceding stores to the descriptor to complete + * before updating the status field, which hands it over to the + * hardware. The corresponding rmb() is "in the hardware". + */ + wmb(); + /* Send it on its way. Tell FEC it's ready, interrupt when done, * it's the last BD of the frame, and to put the CRC on the end. */ - status |= (BD_ENET_TX_READY | BD_ENET_TX_INTR - | BD_ENET_TX_LAST | BD_ENET_TX_TC); - bdp->cbd_sc = status; - - bdp_pre = fec_enet_get_prevdesc(bdp, fep); - if ((id_entry->driver_data & FEC_QUIRK_ERR006358) && - !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) { - fep->delay_work.trig_tx = true; - schedule_delayed_work(&(fep->delay_work.delay_work), - msecs_to_jiffies(1)); + status = bdp->bd.cbd_sc & BD_ENET_TX_WRAP; + bdp->bd.cbd_sc = status | BD_ENET_TX_READY | BD_ENET_TX_INTR | + BD_ENET_TX_LAST | BD_ENET_TX_TC; + + /* Now walk backwards setting the TX_READY on each fragment */ + for (f = nr_frags - 1; f >= 0; f--) { + unsigned i = index + f; + + if (i >= fep->tx_ring_size) + i -= fep->tx_ring_size; + + bdp = fec_enet_tx_get(i, fep); + status = bdp->bd.cbd_sc & BD_ENET_TX_WRAP; + bdp->bd.cbd_sc = status | BD_ENET_TX_READY | BD_ENET_TX_INTR; } - /* If this was the last BD in the ring, start at the beginning again. 
*/ - bdp = fec_enet_get_nextdesc(bdp, fep); - skb_tx_timestamp(skb); + netdev_sent_queue(ndev, skb->len); + + if (++last >= fep->tx_ring_size) + last = 0; - fep->cur_tx = bdp; + fep->tx_next = last; - if (fep->cur_tx == fep->dirty_tx) + if (ring_free(last, fep->tx_dirty, fep->tx_ring_size) < fep->tx_min) netif_stop_queue(ndev); /* Trigger transmission start */ - writel(0, fep->hwp + FEC_X_DES_ACTIVE); + if (readl(fep->hwp + FEC_X_DES_ACTIVE) == 0) + writel(0, fep->hwp + FEC_X_DES_ACTIVE); return NETDEV_TX_OK; + + release_frags: + fec_enet_tx_unmap_range(index, last, fep); + release: + dev_kfree_skb_any(skb); + if (net_ratelimit()) + netdev_err(ndev, "Tx DMA memory map failed\n"); + return NETDEV_TX_OK; } /* Init RX & TX buffer descriptors @@ -446,71 +547,60 @@ static void fec_enet_bd_init(struct net_device *dev) { struct fec_enet_private *fep = netdev_priv(dev); - struct bufdesc *bdp; + union bufdesc_u *bdp; unsigned int i; /* Initialize the receive buffer descriptors. */ - bdp = fep->rx_bd_base; for (i = 0; i < fep->rx_ring_size; i++) { + bdp = fec_enet_rx_get(i, fep); /* Initialize the BD for every fragment in the page. */ - if (bdp->cbd_bufaddr) - bdp->cbd_sc = BD_ENET_RX_EMPTY; + if (bdp->bd.cbd_bufaddr) + bdp->bd.cbd_sc = BD_ENET_RX_EMPTY; else - bdp->cbd_sc = 0; - bdp = fec_enet_get_nextdesc(bdp, fep); - } + bdp->bd.cbd_sc = 0; - /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep); - bdp->cbd_sc |= BD_SC_WRAP; + if (i == fep->rx_ring_size - 1) + bdp->bd.cbd_sc |= BD_SC_WRAP; + } - fep->cur_rx = fep->rx_bd_base; + fep->rx_next = 0; /* ...and the same for transmit */ - bdp = fep->tx_bd_base; - fep->cur_tx = bdp; for (i = 0; i < fep->tx_ring_size; i++) { + bdp = fec_enet_tx_get(i, fep); /* Initialize the BD for every fragment in the page. */ - bdp->cbd_sc = 0; - if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) { + if (i == fep->tx_ring_size - 1) + bdp->bd.cbd_sc = BD_SC_WRAP; + else + bdp->bd.cbd_sc = 0; + if (bdp->bd.cbd_bufaddr) + fec_enet_tx_unmap(i, bdp, fep); + if (fep->tx_skbuff[i]) { dev_kfree_skb_any(fep->tx_skbuff[i]); fep->tx_skbuff[i] = NULL; } - bdp->cbd_bufaddr = 0; - bdp = fec_enet_get_nextdesc(bdp, fep); } - /* Set the last buffer to wrap */ - bdp = fec_enet_get_prevdesc(bdp, fep); - bdp->cbd_sc |= BD_SC_WRAP; - fep->dirty_tx = bdp; + fep->tx_next = 0; + fep->tx_dirty = fep->tx_ring_size - 1; } -/* This function is called to start or restart the FEC during a link - * change. This only happens when switching between half and full - * duplex. +/* + * This function is called to start or restart the FEC during a link + * change, transmit timeout, or to reconfigure the FEC. The network + * packet processing for this device must be stopped before this call. */ static void -fec_restart(struct net_device *ndev, int duplex) +fec_restart(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); - int i; u32 val; u32 temp_mac[2]; u32 rcntl = OPT_FRAME_SIZE | 0x04; u32 ecntl = 0x2; /* ETHEREN */ - if (netif_running(ndev)) { - netif_device_detach(ndev); - napi_disable(&fep->napi); - netif_stop_queue(ndev); - netif_tx_lock_bh(ndev); - } - /* Whack a reset. We should wait for this. */ writel(1, fep->hwp + FEC_ECNTRL); udelay(10); @@ -519,7 +609,7 @@ * enet-mac reset will reset mac address registers too, * so need to reconfigure it. 
*/ - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { + if (fep->quirks & FEC_QUIRK_ENET_MAC) { memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN); writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW); writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH); @@ -531,27 +621,16 @@ /* Set maximum receive buffer size. */ writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); - fec_enet_bd_init(ndev); + if (fep->rx_bd_base) + fec_enet_bd_init(ndev); + netdev_reset_queue(ndev); /* Set receive and transmit descriptor base. */ - writel(fep->bd_dma, fep->hwp + FEC_R_DES_START); - if (fep->bufdesc_ex) - writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc_ex) - * fep->rx_ring_size, fep->hwp + FEC_X_DES_START); - else - writel((unsigned long)fep->bd_dma + sizeof(struct bufdesc) - * fep->rx_ring_size, fep->hwp + FEC_X_DES_START); - - - for (i = 0; i <= TX_RING_MOD_MASK; i++) { - if (fep->tx_skbuff[i]) { - dev_kfree_skb_any(fep->tx_skbuff[i]); - fep->tx_skbuff[i] = NULL; - } - } + writel(fep->rx_bd_dma, fep->hwp + FEC_R_DES_START); + writel(fep->tx_bd_dma, fep->hwp + FEC_X_DES_START); /* Enable MII mode */ - if (duplex) { + if (fep->full_duplex == DUPLEX_FULL) { /* FD enable */ writel(0x04, fep->hwp + FEC_X_CNTRL); } else { @@ -560,15 +639,15 @@ writel(0x0, fep->hwp + FEC_X_CNTRL); } - fep->full_duplex = duplex; - /* Set MII speed */ writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); #if !defined(CONFIG_M5272) /* set RX checksum */ val = readl(fep->hwp + FEC_RACC); - if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) + if (fep->quirks & FEC_QUIRK_RX_SHIFT16) + val |= FEC_RACC_SHIFT16; + if (fep->flags & FEC_FLAG_RX_CSUM) val |= FEC_RACC_OPTIONS; else val &= ~FEC_RACC_OPTIONS; @@ -579,9 +658,9 @@ * The phy interface and speed need to get configured * differently on enet-mac. */ - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { + if (fep->quirks & FEC_QUIRK_ENET_MAC) { /* Enable flow control and length check */ - rcntl |= 0x40000000 | 0x00000020; + rcntl |= 0x40000000; /* RGMII, RMII or MII */ if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII) @@ -602,7 +681,7 @@ } } else { #ifdef FEC_MIIGSK_ENR - if (id_entry->driver_data & FEC_QUIRK_USE_GASKET) { + if (fep->quirks & FEC_QUIRK_USE_GASKET) { u32 cfgr; /* disable the gasket and wait */ writel(0, fep->hwp + FEC_MIIGSK_ENR); @@ -627,22 +706,24 @@ } #if !defined(CONFIG_M5272) - /* enable pause frame*/ - if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) || - ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) && - fep->phy_dev && fep->phy_dev->pause)) { - rcntl |= FEC_ENET_FCE; - - /* set FIFO threshold parameter to reduce overrun */ - writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM); - writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL); - writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM); - writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL); + if (fep->full_duplex == DUPLEX_FULL) { + /* + * Configure pause modes according to the current status. + * Must only be enabled for full duplex links. 
+ */ + if (fep->pause_mode & FEC_PAUSE_FLAG_RX) + rcntl |= FEC_ENET_FCE; - /* OPD */ - writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD); - } else { - rcntl &= ~FEC_ENET_FCE; + if (fep->pause_mode & FEC_PAUSE_FLAG_TX) { + /* set FIFO threshold parameter to reduce overrun */ + writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM); + writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL); + writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM); + writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL); + + /* OPD */ + writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD); + } } #endif /* !defined(CONFIG_M5272) */ @@ -655,14 +736,14 @@ writel(0, fep->hwp + FEC_HASH_TABLE_LOW); #endif - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { + if (fep->quirks & FEC_QUIRK_ENET_MAC) { /* enable ENET endian swap */ ecntl |= (1 << 8); /* enable ENET store and forward mode */ writel(1 << 8, fep->hwp + FEC_X_WMRK); } - if (fep->bufdesc_ex) + if (fep->flags & FEC_FLAG_BUFDESC_EX) ecntl |= (1 << 4); #ifndef CONFIG_M5272 @@ -674,26 +755,17 @@ writel(ecntl, fep->hwp + FEC_ECNTRL); writel(0, fep->hwp + FEC_R_DES_ACTIVE); - if (fep->bufdesc_ex) + if (fep->flags & FEC_FLAG_BUFDESC_EX) fec_ptp_start_cyclecounter(ndev); /* Enable interrupts we wish to service */ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); - - if (netif_running(ndev)) { - netif_tx_unlock_bh(ndev); - netif_wake_queue(ndev); - napi_enable(&fep->napi); - netif_device_attach(ndev); - } } static void fec_stop(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8); /* We cannot expect a graceful transmit stop without link !!! */ @@ -711,7 +783,7 @@ writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK); /* We have to keep ENET enabled to have MII interrupt stay working */ - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { + if (fep->quirks & FEC_QUIRK_ENET_MAC) { writel(2, fep->hwp + FEC_ECNTRL); writel(rmii_mode, fep->hwp + FEC_R_CNTRL); } @@ -723,127 +795,312 @@ { struct fec_enet_private *fep = netdev_priv(ndev); + fec_dump(ndev); + ndev->stats.tx_errors++; - fep->delay_work.timeout = true; - schedule_delayed_work(&(fep->delay_work.delay_work), 0); + schedule_work(&fep->tx_timeout_work); } -static void fec_enet_work(struct work_struct *work) +static void fec_enet_timeout_work(struct work_struct *work) { struct fec_enet_private *fep = - container_of(work, - struct fec_enet_private, - delay_work.delay_work.work); - - if (fep->delay_work.timeout) { - fep->delay_work.timeout = false; - fec_restart(fep->netdev, fep->full_duplex); - netif_wake_queue(fep->netdev); - } + container_of(work, struct fec_enet_private, tx_timeout_work); + struct net_device *ndev = fep->netdev; - if (fep->delay_work.trig_tx) { - fep->delay_work.trig_tx = false; - writel(0, fep->hwp + FEC_X_DES_ACTIVE); + rtnl_lock(); + if (netif_device_present(ndev) || netif_running(ndev)) { + mutex_lock(&fep->mutex); + napi_disable(&fep->napi); + netif_tx_lock_bh(ndev); + fec_restart(ndev); + netif_wake_queue(ndev); + netif_tx_unlock_bh(ndev); + napi_enable(&fep->napi); + mutex_unlock(&fep->mutex); } + rtnl_unlock(); } static void +fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts, + struct skb_shared_hwtstamps *hwtstamps) +{ + unsigned long flags; + u64 ns; + + spin_lock_irqsave(&fep->tmreg_lock, flags); + ns = timecounter_cyc2time(&fep->tc, ts); + spin_unlock_irqrestore(&fep->tmreg_lock, flags); + + memset(hwtstamps, 0, sizeof(*hwtstamps)); + 
hwtstamps->hwtstamp = ns_to_ktime(ns); +} + +static void noinline fec_enet_tx(struct net_device *ndev) { - struct fec_enet_private *fep; - struct bufdesc *bdp; - unsigned short status; + struct fec_enet_private *fep = netdev_priv(ndev); + union bufdesc_u *bdp; struct sk_buff *skb; - int index = 0; - - fep = netdev_priv(ndev); - bdp = fep->dirty_tx; + unsigned index = fep->tx_dirty; + unsigned pkts_compl, bytes_compl; - /* get next bdp of dirty_tx */ - bdp = fec_enet_get_nextdesc(bdp, fep); + pkts_compl = bytes_compl = 0; + do { + unsigned status, cbd_esc; - while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) { + if (++index >= fep->tx_ring_size) + index = 0; /* current queue is empty */ - if (bdp == fep->cur_tx) + if (index == fep->tx_next) break; - if (fep->bufdesc_ex) - index = (struct bufdesc_ex *)bdp - - (struct bufdesc_ex *)fep->tx_bd_base; - else - index = bdp - fep->tx_bd_base; + bdp = fec_enet_tx_get(index, fep); + + status = bdp->bd.cbd_sc; + if (status & BD_ENET_TX_READY) + break; + + fec_enet_tx_unmap(index, bdp, fep); skb = fep->tx_skbuff[index]; - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len, - DMA_TO_DEVICE); - bdp->cbd_bufaddr = 0; + fep->tx_skbuff[index] = NULL; /* Check for errors. */ - if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | - BD_ENET_TX_RL | BD_ENET_TX_UN | - BD_ENET_TX_CSL)) { - ndev->stats.tx_errors++; - if (status & BD_ENET_TX_HB) /* No heartbeat */ - ndev->stats.tx_heartbeat_errors++; - if (status & BD_ENET_TX_LC) /* Late collision */ - ndev->stats.tx_window_errors++; - if (status & BD_ENET_TX_RL) /* Retrans limit */ - ndev->stats.tx_aborted_errors++; - if (status & BD_ENET_TX_UN) /* Underrun */ - ndev->stats.tx_fifo_errors++; - if (status & BD_ENET_TX_CSL) /* Carrier lost */ - ndev->stats.tx_carrier_errors++; + if (fep->flags & FEC_FLAG_BUFDESC_EX) { + cbd_esc = bdp->ebd.cbd_esc; + if (cbd_esc & BD_ENET_TX_TXE) { + ndev->stats.tx_errors++; + if (cbd_esc & BD_ENET_TX_EE) { /* excess collision */ + ndev->stats.collisions += 16; + ndev->stats.tx_aborted_errors++; + } + if (cbd_esc & BD_ENET_TX_LCE) /* late collision error */ + ndev->stats.tx_window_errors++; + if (cbd_esc & (BD_ENET_TX_UE | BD_ENET_TX_FE | BD_ENET_TX_OE)) + ndev->stats.tx_fifo_errors++; + goto next; + } } else { - ndev->stats.tx_packets++; - ndev->stats.tx_bytes += bdp->cbd_datlen; + if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC | + BD_ENET_TX_RL | BD_ENET_TX_UN | + BD_ENET_TX_CSL)) { + ndev->stats.tx_errors++; + if (status & BD_ENET_TX_HB) /* No heartbeat */ + ndev->stats.tx_heartbeat_errors++; + if (status & BD_ENET_TX_LC) /* Late collision */ + ndev->stats.tx_window_errors++; + if (status & BD_ENET_TX_RL) /* Retrans limit */ + ndev->stats.tx_aborted_errors++; + if (status & BD_ENET_TX_UN) /* Underrun */ + ndev->stats.tx_fifo_errors++; + if (status & BD_ENET_TX_CSL) /* Carrier lost */ + ndev->stats.tx_carrier_errors++; + goto next; + } } - if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) && - fep->bufdesc_ex) { - struct skb_shared_hwtstamps shhwtstamps; - unsigned long flags; - struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; - - memset(&shhwtstamps, 0, sizeof(shhwtstamps)); - spin_lock_irqsave(&fep->tmreg_lock, flags); - shhwtstamps.hwtstamp = ns_to_ktime( - timecounter_cyc2time(&fep->tc, ebdp->ts)); - spin_unlock_irqrestore(&fep->tmreg_lock, flags); - skb_tstamp_tx(skb, &shhwtstamps); + if (skb) { + ndev->stats.tx_packets++; + ndev->stats.tx_bytes += skb->len; } - if (status & BD_ENET_TX_READY) - netdev_err(ndev, "HEY! 
Enet xmit interrupt and TX_READY\n"); - /* Deferred means some collisions occurred during transmit, * but we eventually sent the packet OK. */ if (status & BD_ENET_TX_DEF) ndev->stats.collisions++; + next: + if (skb) { + if (fep->flags & FEC_FLAG_BUFDESC_EX && + unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) { + struct skb_shared_hwtstamps shhwtstamps; - /* Free the sk buffer associated with this last transmit */ - dev_kfree_skb_any(skb); - fep->tx_skbuff[index] = NULL; + fec_enet_hwtstamp(fep, bdp->ebd.ts, &shhwtstamps); + skb_tstamp_tx(skb, &shhwtstamps); + } - fep->dirty_tx = bdp; + pkts_compl++; + bytes_compl += skb->len; - /* Update pointer to next buffer descriptor to be transmitted */ - bdp = fec_enet_get_nextdesc(bdp, fep); + /* Free the sk buffer associated with this last transmit */ + dev_kfree_skb_any(skb); + } - /* Since we have freed up a buffer, the ring is no longer full - */ - if (fep->dirty_tx != fep->cur_tx) { - if (netif_queue_stopped(ndev)) - netif_wake_queue(ndev); + fep->tx_dirty = index; + } while (1); + + netdev_completed_queue(ndev, pkts_compl, bytes_compl); + + /* ERR006538: Keep the transmitter going */ + if (index != fep->tx_next && readl(fep->hwp + FEC_X_DES_ACTIVE) == 0) + writel(0, fep->hwp + FEC_X_DES_ACTIVE); + + if (netif_queue_stopped(ndev) && + ring_free(fep->tx_next, fep->tx_dirty, fep->tx_ring_size) >= + fep->tx_min) + netif_wake_queue(ndev); +} + + +static void +fec_enet_receive(struct sk_buff *skb, union bufdesc_u *bdp, struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + skb->protocol = eth_type_trans(skb, ndev); + + /* Get receive timestamp from the skb */ + if (fep->hwts_rx_en && fep->flags & FEC_FLAG_BUFDESC_EX) + fec_enet_hwtstamp(fep, bdp->ebd.ts, skb_hwtstamps(skb)); + + if (fep->flags & FEC_FLAG_RX_CSUM) { + if (!(bdp->ebd.cbd_esc & FLAG_RX_CSUM_ERROR)) { + /* don't check it */ + skb->ip_summed = CHECKSUM_UNNECESSARY; + } else { + skb_checksum_none_assert(skb); } } - return; + + napi_gro_receive(&fep->napi, skb); +} + +static void +fec_enet_receive_copy(unsigned pkt_len, unsigned index, union bufdesc_u *bdp, struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct sk_buff *skb; + unsigned char *data; + bool vlan_packet_rcvd = false; + + /* + * Detect the presence of the VLAN tag, and adjust + * the packet length appropriately. + */ + if (fep->flags & FEC_FLAG_RX_VLAN && + bdp->ebd.cbd_esc & BD_ENET_RX_VLAN) { + pkt_len -= VLAN_HLEN; + vlan_packet_rcvd = true; + } + + /* This does 16 byte alignment, exactly what we need. */ + skb = netdev_alloc_skb(ndev, pkt_len + NET_IP_ALIGN); + if (unlikely(!skb)) { + ndev->stats.rx_dropped++; + return; + } + + dma_sync_single_for_cpu(&fep->pdev->dev, bdp->bd.cbd_bufaddr, + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); + + data = fep->rx_skbuff[index]->data; + +#ifndef CONFIG_M5272 + /* + * If we have enabled this feature, we need to discard + * the two bytes at the beginning of the packet before + * copying it. + */ + if (fep->quirks & FEC_QUIRK_RX_SHIFT16) { + pkt_len -= 2; + data += 2; + } +#endif + + if (fep->quirks & FEC_QUIRK_SWAP_FRAME) + swap_buffer(data, pkt_len); + + skb_reserve(skb, NET_IP_ALIGN); + skb_put(skb, pkt_len); /* Make room */ + + /* If this is a VLAN packet remove the VLAN Tag */ + if (vlan_packet_rcvd) { + struct vlan_hdr *vlan = (struct vlan_hdr *)(data + ETH_HLEN); + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + ntohs(vlan->h_vlan_TCI)); + + /* Extract the frame data without the VLAN header. 
*/ + skb_copy_to_linear_data(skb, data, 2 * ETH_ALEN); + skb_copy_to_linear_data_offset(skb, 2 * ETH_ALEN, + data + 2 * ETH_ALEN + VLAN_HLEN, + pkt_len - 2 * ETH_ALEN); + } else { + skb_copy_to_linear_data(skb, data, pkt_len); + } + + dma_sync_single_for_device(&fep->pdev->dev, bdp->bd.cbd_bufaddr, + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); + + fec_enet_receive(skb, bdp, ndev); } +static void +fec_enet_receive_nocopy(unsigned pkt_len, unsigned index, union bufdesc_u *bdp, + struct net_device *ndev) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + struct sk_buff *skb, *skb_new; + unsigned char *data; + dma_addr_t addr; + + skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); + if (!skb_new) { + ndev->stats.rx_dropped++; + return; + } + + addr = dma_map_single(&fep->pdev->dev, skb_new->data, + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); + if (dma_mapping_error(&fep->pdev->dev, addr)) { + dev_kfree_skb(skb_new); + ndev->stats.rx_dropped++; + return; + } -/* During a receive, the cur_rx points to the current incoming buffer. + /* We have the new skb, so proceed to deal with the received data. */ + dma_unmap_single(&fep->pdev->dev, bdp->bd.cbd_bufaddr, + FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); + + skb = fep->rx_skbuff[index]; + + /* Now subsitute in the new skb */ + fep->rx_skbuff[index] = skb_new; + bdp->bd.cbd_bufaddr = addr; + + /* + * Update the skb length according to the raw packet length. + * Then remove the two bytes of additional padding. + */ + skb_put(skb, pkt_len); + data = skb_pull_inline(skb, 2); + + if (fep->quirks & FEC_QUIRK_SWAP_FRAME) + swap_buffer(data, skb->len); + + /* + * Now juggle things for the VLAN tag - if the hardware + * flags this as present, we need to read the tag, and + * then shuffle the ethernet addresses up. + */ + if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX && + bdp->ebd.cbd_esc & BD_ENET_RX_VLAN) { + struct vlan_hdr *vlan = (struct vlan_hdr *)(data + ETH_HLEN); + + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), + ntohs(vlan->h_vlan_TCI)); + + memmove(data + VLAN_HLEN, data, 2 * ETH_ALEN); + skb_pull_inline(skb, VLAN_HLEN); + } + + fec_enet_receive(skb, bdp, ndev); +} + +/* During a receive, the rx_next points to the current incoming buffer. * When we update through the ring, if the next incoming buffer has * not been given to the system, we just set the empty indicator, * effectively tossing the packet. @@ -852,18 +1109,9 @@ fec_enet_rx(struct net_device *ndev, int budget) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); - struct bufdesc *bdp; - unsigned short status; - struct sk_buff *skb; ushort pkt_len; - __u8 *data; int pkt_received = 0; - struct bufdesc_ex *ebdp = NULL; - bool vlan_packet_rcvd = false; - u16 vlan_tag; - int index = 0; + unsigned index = fep->rx_next; #ifdef CONFIG_M532x flush_cache_all(); @@ -872,12 +1120,17 @@ /* First, grab all of the stats for the incoming packet. * These get messed up if we get called due to a busy condition. 
*/ - bdp = fep->cur_rx; + do { + union bufdesc_u *bdp = fec_enet_rx_get(index, fep); + unsigned status; - while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) { + status = bdp->bd.cbd_sc; + if (status & BD_ENET_RX_EMPTY) + break; if (pkt_received >= budget) break; + pkt_received++; /* Since we have allocated space to hold a complete frame, @@ -886,155 +1139,81 @@ if ((status & BD_ENET_RX_LAST) == 0) netdev_err(ndev, "rcv is not +last\n"); - if (!fep->opened) - goto rx_processing_done; + writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT); /* Check for errors. */ - if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | - BD_ENET_RX_CR | BD_ENET_RX_OV)) { + if (status & BD_ENET_RX_ERROR) { ndev->stats.rx_errors++; - if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) { - /* Frame too long or too short. */ - ndev->stats.rx_length_errors++; - } - if (status & BD_ENET_RX_NO) /* Frame alignment */ - ndev->stats.rx_frame_errors++; - if (status & BD_ENET_RX_CR) /* CRC Error */ - ndev->stats.rx_crc_errors++; - if (status & BD_ENET_RX_OV) /* FIFO overrun */ - ndev->stats.rx_fifo_errors++; - } - /* Report late collisions as a frame error. - * On this error, the BD is closed, but we don't know what we - * have in the buffer. So, just drop this frame on the floor. - */ - if (status & BD_ENET_RX_CL) { - ndev->stats.rx_errors++; - ndev->stats.rx_frame_errors++; + /* + * Report late collisions as a frame error. On this + * error, the BD is closed, but we don't know what we + * have in the buffer. So, just drop this frame on + * the floor. + */ + if (status & BD_ENET_RX_CL) { + ndev->stats.rx_frame_errors++; + } else { + if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) + /* Frame too long or too short. */ + ndev->stats.rx_length_errors++; + if (status & BD_ENET_RX_NO) /* Frame alignment */ + ndev->stats.rx_frame_errors++; + if (status & BD_ENET_RX_CR) /* CRC Error */ + ndev->stats.rx_crc_errors++; + if (status & BD_ENET_RX_OV) /* FIFO overrun */ + ndev->stats.rx_fifo_errors++; + } goto rx_processing_done; } /* Process the incoming frame. */ ndev->stats.rx_packets++; - pkt_len = bdp->cbd_datlen; - ndev->stats.rx_bytes += pkt_len; - - if (fep->bufdesc_ex) - index = (struct bufdesc_ex *)bdp - - (struct bufdesc_ex *)fep->rx_bd_base; - else - index = bdp - fep->rx_bd_base; - data = fep->rx_skbuff[index]->data; - dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, - FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); - - if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) - swap_buffer(data, pkt_len); - /* Extract the enhanced buffer descriptor */ - ebdp = NULL; - if (fep->bufdesc_ex) - ebdp = (struct bufdesc_ex *)bdp; - - /* If this is a VLAN packet remove the VLAN Tag */ - vlan_packet_rcvd = false; - if ((ndev->features & NETIF_F_HW_VLAN_CTAG_RX) && - fep->bufdesc_ex && (ebdp->cbd_esc & BD_ENET_RX_VLAN)) { - /* Push and remove the vlan tag */ - struct vlan_hdr *vlan_header = - (struct vlan_hdr *) (data + ETH_HLEN); - vlan_tag = ntohs(vlan_header->h_vlan_TCI); - pkt_len -= VLAN_HLEN; - - vlan_packet_rcvd = true; - } - - /* This does 16 byte alignment, exactly what we need. - * The packet length includes FCS, but we don't want to - * include that when passing upstream as it messes up - * bridging applications. + /* + * The packet length includes FCS, but we don't want + * to include that when passing upstream as it messes + * up bridging applications. 
*/ - skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN); + pkt_len = bdp->bd.cbd_datlen - 4; + ndev->stats.rx_bytes += pkt_len; - if (unlikely(!skb)) { - ndev->stats.rx_dropped++; + if (fec_enet_rx_zerocopy(fep, pkt_len)) { + fec_enet_receive_nocopy(pkt_len, index, bdp, ndev); } else { - int payload_offset = (2 * ETH_ALEN); - skb_reserve(skb, NET_IP_ALIGN); - skb_put(skb, pkt_len - 4); /* Make room */ - - /* Extract the frame data without the VLAN header. */ - skb_copy_to_linear_data(skb, data, (2 * ETH_ALEN)); - if (vlan_packet_rcvd) - payload_offset = (2 * ETH_ALEN) + VLAN_HLEN; - skb_copy_to_linear_data_offset(skb, (2 * ETH_ALEN), - data + payload_offset, - pkt_len - 4 - (2 * ETH_ALEN)); - - skb->protocol = eth_type_trans(skb, ndev); - - /* Get receive timestamp from the skb */ - if (fep->hwts_rx_en && fep->bufdesc_ex) { - struct skb_shared_hwtstamps *shhwtstamps = - skb_hwtstamps(skb); - unsigned long flags; - - memset(shhwtstamps, 0, sizeof(*shhwtstamps)); - - spin_lock_irqsave(&fep->tmreg_lock, flags); - shhwtstamps->hwtstamp = ns_to_ktime( - timecounter_cyc2time(&fep->tc, ebdp->ts)); - spin_unlock_irqrestore(&fep->tmreg_lock, flags); - } - - if (fep->bufdesc_ex && - (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) { - if (!(ebdp->cbd_esc & FLAG_RX_CSUM_ERROR)) { - /* don't check it */ - skb->ip_summed = CHECKSUM_UNNECESSARY; - } else { - skb_checksum_none_assert(skb); - } - } - - /* Handle received VLAN packets */ - if (vlan_packet_rcvd) - __vlan_hwaccel_put_tag(skb, - htons(ETH_P_8021Q), - vlan_tag); - - napi_gro_receive(&fep->napi, skb); + fec_enet_receive_copy(pkt_len, index, bdp, ndev); } - dma_sync_single_for_device(&fep->pdev->dev, bdp->cbd_bufaddr, - FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); rx_processing_done: + if (fep->flags & FEC_FLAG_BUFDESC_EX) { + bdp->ebd.cbd_esc = BD_ENET_RX_INT; + bdp->ebd.cbd_prot = 0; + bdp->ebd.cbd_bdu = 0; + } + + /* + * Ensure that the previous writes have completed before + * the status update becomes visible. + */ + wmb(); + /* Clear the status flags for this buffer */ status &= ~BD_ENET_RX_STATS; /* Mark the buffer empty */ status |= BD_ENET_RX_EMPTY; - bdp->cbd_sc = status; - - if (fep->bufdesc_ex) { - struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; - - ebdp->cbd_esc = BD_ENET_RX_INT; - ebdp->cbd_prot = 0; - ebdp->cbd_bdu = 0; - } - - /* Update BD pointer to next entry */ - bdp = fec_enet_get_nextdesc(bdp, fep); + bdp->bd.cbd_sc = status; /* Doing this here will keep the FEC running while we process * incoming frames. On a heavily loaded network, we should be * able to keep up at the expense of system resources. 
*/ writel(0, fep->hwp + FEC_R_DES_ACTIVE); - } - fep->cur_rx = bdp; + + if (++index >= fep->rx_ring_size) + index = 0; + } while (1); + fep->rx_next = index; return pkt_received; } @@ -1044,29 +1223,25 @@ { struct net_device *ndev = dev_id; struct fec_enet_private *fep = netdev_priv(ndev); + const unsigned napi_mask = FEC_ENET_RXF | FEC_ENET_TXF; uint int_events; irqreturn_t ret = IRQ_NONE; - do { - int_events = readl(fep->hwp + FEC_IEVENT); - writel(int_events, fep->hwp + FEC_IEVENT); + int_events = readl(fep->hwp + FEC_IEVENT); + writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT); - if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) { - ret = IRQ_HANDLED; + if (int_events & napi_mask) { + ret = IRQ_HANDLED; - /* Disable the RX interrupt */ - if (napi_schedule_prep(&fep->napi)) { - writel(FEC_RX_DISABLED_IMASK, - fep->hwp + FEC_IMASK); - __napi_schedule(&fep->napi); - } - } + /* Disable the NAPI interrupts */ + writel(FEC_ENET_MII, fep->hwp + FEC_IMASK); + napi_schedule(&fep->napi); + } - if (int_events & FEC_ENET_MII) { - ret = IRQ_HANDLED; - complete(&fep->mdio_done); - } - } while (int_events); + if (int_events & FEC_ENET_MII) { + ret = IRQ_HANDLED; + complete(&fep->mdio_done); + } return ret; } @@ -1074,8 +1249,16 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget) { struct net_device *ndev = napi->dev; - int pkts = fec_enet_rx(ndev, budget); struct fec_enet_private *fep = netdev_priv(ndev); + int pkts; + + /* + * Clear any pending transmit or receive interrupts before + * processing the rings to avoid racing with the hardware. + */ + writel(FEC_ENET_RXF | FEC_ENET_TXF, fep->hwp + FEC_IEVENT); + + pkts = fec_enet_rx(ndev, budget); fec_enet_tx(ndev); @@ -1173,26 +1356,78 @@ return; } - if (phy_dev->link) { + /* + * If the netdev is down, or is going down, we're not interested + * in link state events, so just mark our idea of the link as down + * and ignore the event. 
+ */ + if (!netif_running(ndev) || !netif_device_present(ndev)) { + fep->link = 0; + } else if (phy_dev->link) { if (!fep->link) { fep->link = phy_dev->link; status_change = 1; } - if (fep->full_duplex != phy_dev->duplex) + if (fep->full_duplex != phy_dev->duplex) { + fep->full_duplex = phy_dev->duplex; status_change = 1; + } if (phy_dev->speed != fep->speed) { fep->speed = phy_dev->speed; status_change = 1; } + if (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) { + u32 lcl_adv = phy_dev->advertising; + u32 rmt_adv = phy_dev->lp_advertising; + unsigned mode = 0; + + if (lcl_adv & rmt_adv & ADVERTISED_Pause) { + /* + * Local Device Link Partner + * Pause AsymDir Pause AsymDir Result + * 1 X 1 X TX+RX + */ + mode = FEC_PAUSE_FLAG_TX | FEC_PAUSE_FLAG_RX; + } else if (lcl_adv & rmt_adv & ADVERTISED_Asym_Pause) { + /* + * 0 1 1 1 RX + * 1 1 0 1 TX + */ + if (rmt_adv & ADVERTISED_Pause) + mode = FEC_PAUSE_FLAG_RX; + else + mode = FEC_PAUSE_FLAG_TX; + } + + if (mode != fep->pause_mode) { + fep->pause_mode = mode; + status_change = 1; + } + } + /* if any of the above changed restart the FEC */ - if (status_change) - fec_restart(ndev, phy_dev->duplex); + if (status_change) { + mutex_lock(&fep->mutex); + napi_disable(&fep->napi); + netif_tx_lock_bh(ndev); + fec_restart(ndev); + netif_wake_queue(ndev); + netif_tx_unlock_bh(ndev); + napi_enable(&fep->napi); + mutex_unlock(&fep->mutex); + } } else { if (fep->link) { + mutex_lock(&fep->mutex); + napi_disable(&fep->napi); + netif_tx_lock_bh(ndev); fec_stop(ndev); + netif_tx_unlock_bh(ndev); + napi_enable(&fep->napi); + mutex_unlock(&fep->mutex); fep->link = phy_dev->link; status_change = 1; } @@ -1202,23 +1437,35 @@ phy_print_status(phy_dev); } -static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +static unsigned long fec_enet_mdio_op(struct fec_enet_private *fep, + unsigned data) { - struct fec_enet_private *fep = bus->priv; unsigned long time_left; fep->mii_timeout = 0; init_completion(&fep->mdio_done); - /* start a read op */ - writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | - FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | - FEC_MMFR_TA, fep->hwp + FEC_MII_DATA); + mutex_lock(&fep->mutex); + + /* start operation */ + writel(data, fep->hwp + FEC_MII_DATA); /* wait for end of transfer */ time_left = wait_for_completion_timeout(&fep->mdio_done, usecs_to_jiffies(FEC_MII_TIMEOUT)); - if (time_left == 0) { + + mutex_unlock(&fep->mutex); + + return time_left; +} + +static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) +{ + struct fec_enet_private *fep = bus->priv; + + if (fec_enet_mdio_op(fep, FEC_MMFR_ST | FEC_MMFR_OP_READ | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | + FEC_MMFR_TA) == 0) { fep->mii_timeout = 1; netdev_err(fep->netdev, "MDIO read timeout\n"); return -ETIMEDOUT; @@ -1232,21 +1479,10 @@ u16 value) { struct fec_enet_private *fep = bus->priv; - unsigned long time_left; - - fep->mii_timeout = 0; - init_completion(&fep->mdio_done); - - /* start a write op */ - writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE | - FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | - FEC_MMFR_TA | FEC_MMFR_DATA(value), - fep->hwp + FEC_MII_DATA); - /* wait for end of transfer */ - time_left = wait_for_completion_timeout(&fep->mdio_done, - usecs_to_jiffies(FEC_MII_TIMEOUT)); - if (time_left == 0) { + if (fec_enet_mdio_op(fep, FEC_MMFR_ST | FEC_MMFR_OP_WRITE | + FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | + FEC_MMFR_TA | FEC_MMFR_DATA(value)) == 0) { fep->mii_timeout = 1; netdev_err(fep->netdev, "MDIO write timeout\n"); return -ETIMEDOUT; @@ -1255,11 
+1491,37 @@ return 0; } +static void fec_enet_phy_config(struct net_device *ndev) +{ +#ifndef CONFIG_M5272 + struct fec_enet_private *fep = netdev_priv(ndev); + struct phy_device *phy = fep->phy_dev; + unsigned pause = 0; + + /* + * Pause advertisment logic is weird. We don't advertise the raw + * "can tx" and "can rx" modes, but instead it is whether we support + * symmetric flow or asymmetric flow. + * + * Symmetric flow means we can only support both transmit and receive + * flow control frames together. Asymmetric flow means we can + * independently control each. Note that there is no bit encoding + * for "I can only receive flow control frames." + */ + if (fep->pause_flag & FEC_PAUSE_FLAG_RX) + pause |= ADVERTISED_Asym_Pause | ADVERTISED_Pause; + if (fep->pause_flag & FEC_PAUSE_FLAG_TX) + pause |= ADVERTISED_Asym_Pause; + + pause &= phy->supported; + pause |= phy->advertising & ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); + phy->advertising = pause; +#endif +} + static int fec_enet_mii_probe(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); struct phy_device *phy_dev = NULL; char mdio_bus_id[MII_BUS_ID_SIZE]; char phy_name[MII_BUS_ID_SIZE + 3]; @@ -1297,10 +1559,11 @@ } /* mask with MAC supported features */ - if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) { + if (fep->quirks & FEC_QUIRK_HAS_GBIT) { phy_dev->supported &= PHY_GBIT_FEATURES; + phy_dev->supported &= ~SUPPORTED_1000baseT_Half; #if !defined(CONFIG_M5272) - phy_dev->supported |= SUPPORTED_Pause; + phy_dev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause; #endif } else @@ -1312,6 +1575,8 @@ fep->link = 0; fep->full_duplex = 0; + fec_enet_phy_config(ndev); + netdev_info(ndev, "Freescale FEC PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", fep->phy_dev->drv->name, dev_name(&fep->phy_dev->dev), fep->phy_dev->irq); @@ -1324,8 +1589,6 @@ static struct mii_bus *fec0_mii_bus; struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); int err = -ENXIO, i; /* @@ -1344,7 +1607,7 @@ * mdio interface in board design, and need to be configured by * fec0 mii_bus. */ - if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) { + if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) { /* fec1 uses fec0 mii_bus */ if (mii_cnt && fec0_mii_bus) { fep->mii_bus = fec0_mii_bus; @@ -1365,7 +1628,7 @@ * document. 
*/ fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ahb), 5000000); - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) + if (fep->quirks & FEC_QUIRK_ENET_MAC) fep->phy_speed--; fep->phy_speed <<= 1; writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); @@ -1399,7 +1662,7 @@ mii_cnt++; /* save fec0 mii_bus */ - if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) + if (fep->quirks & FEC_QUIRK_ENET_MAC) fec0_mii_bus = fep->mii_bus; return 0; @@ -1461,7 +1724,7 @@ { struct fec_enet_private *fep = netdev_priv(ndev); - if (fep->bufdesc_ex) { + if (fep->flags & FEC_FLAG_BUFDESC_EX) { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | @@ -1485,6 +1748,51 @@ } } +static void fec_enet_get_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + ring->rx_max_pending = RX_RING_SIZE; + ring->tx_max_pending = TX_RING_SIZE; + ring->rx_pending = fep->rx_ring_size; + ring->tx_pending = fep->tx_ring_size; +} + +static int fec_enet_set_ringparam(struct net_device *ndev, + struct ethtool_ringparam *ring) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + unsigned rx, tx, tx_min; + + tx_min = ndev->features & NETIF_F_SG ? TX_RING_SIZE_MIN_SG : 16; + + rx = clamp_t(u32, ring->rx_pending, 16, RX_RING_SIZE); + tx = clamp_t(u32, ring->tx_pending, tx_min, TX_RING_SIZE); + + if (tx == fep->tx_ring_size && rx == fep->rx_ring_size) + return 0; + + /* Setting the ring size while the interface is down is easy */ + if (!netif_running(ndev)) { + fep->tx_ring_size = tx; + fep->rx_ring_size = rx; + } else { + return -EINVAL; + + napi_disable(&fep->napi); + netif_tx_lock_bh(ndev); + fec_stop(ndev); + /* reallocate ring */ + fec_restart(ndev); + netif_wake_queue(ndev); + netif_tx_unlock_bh(ndev); + napi_enable(&fep->napi); + } + + return 0; +} + #if !defined(CONFIG_M5272) static void fec_enet_get_pauseparam(struct net_device *ndev, @@ -1493,42 +1801,81 @@ struct fec_enet_private *fep = netdev_priv(ndev); pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0; - pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0; - pause->rx_pause = pause->tx_pause; + pause->rx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_RX) != 0; + pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_TX) != 0; } static int fec_enet_set_pauseparam(struct net_device *ndev, struct ethtool_pauseparam *pause) { struct fec_enet_private *fep = netdev_priv(ndev); + unsigned pause_flag, changed; + struct phy_device *phy = fep->phy_dev; - if (pause->tx_pause != pause->rx_pause) { - netdev_info(ndev, - "hardware only support enable/disable both tx and rx"); + if (!phy) + return -ENODEV; + if (!(phy->supported & SUPPORTED_Pause)) + return -EINVAL; + if (!(phy->supported & SUPPORTED_Asym_Pause) && + pause->rx_pause != pause->tx_pause) return -EINVAL; - } - fep->pause_flag = 0; + pause_flag = 0; + if (pause->autoneg) + pause_flag |= FEC_PAUSE_FLAG_AUTONEG; + if (pause->rx_pause) + pause_flag |= FEC_PAUSE_FLAG_RX; + if (pause->tx_pause) + pause_flag |= FEC_PAUSE_FLAG_TX; + + changed = fep->pause_flag ^ pause_flag; + fep->pause_flag = pause_flag; + + /* configure the phy advertisment according to our new options */ + fec_enet_phy_config(ndev); + + if (changed) { + if (pause_flag & FEC_PAUSE_FLAG_AUTONEG) { + if (netif_running(ndev)) + phy_start_aneg(fep->phy_dev); + } else { + int adv, old_adv; - /* tx pause must be same as rx pause */ - fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0; - fep->pause_flag |= pause->autoneg ? 
FEC_PAUSE_FLAG_AUTONEG : 0; - - if (pause->rx_pause || pause->autoneg) { - fep->phy_dev->supported |= ADVERTISED_Pause; - fep->phy_dev->advertising |= ADVERTISED_Pause; - } else { - fep->phy_dev->supported &= ~ADVERTISED_Pause; - fep->phy_dev->advertising &= ~ADVERTISED_Pause; - } + /* + * Even if we are not in autonegotiate mode, we + * still update the phy with our capabilities so + * our link parter can make the appropriate + * decision. PHYLIB provides no way to do this. + */ + adv = phy_read(phy, MII_ADVERTISE); + if (adv >= 0) { + old_adv = adv; + adv &= ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); + if (phy->advertising & ADVERTISED_Pause) + adv |= ADVERTISE_PAUSE_CAP; + if (phy->advertising & ADVERTISED_Asym_Pause) + adv |= ADVERTISE_PAUSE_ASYM; - if (pause->autoneg) { - if (netif_running(ndev)) - fec_stop(ndev); - phy_start_aneg(fep->phy_dev); + if (old_adv != adv) + phy_write(phy, MII_ADVERTISE, adv); + } + + /* Forced pause mode */ + fep->pause_mode = fep->pause_flag; + + if (netif_running(ndev)) { + mutex_lock(&fep->mutex); + napi_disable(&fep->napi); + netif_tx_lock_bh(ndev); + fec_stop(ndev); + fec_restart(ndev); + netif_wake_queue(ndev); + netif_tx_unlock_bh(ndev); + napi_enable(&fep->napi); + mutex_unlock(&fep->mutex); + } + } } - if (netif_running(ndev)) - fec_restart(ndev, 0); return 0; } @@ -1645,21 +1992,21 @@ } static const struct ethtool_ops fec_enet_ethtool_ops = { -#if !defined(CONFIG_M5272) - .get_pauseparam = fec_enet_get_pauseparam, - .set_pauseparam = fec_enet_set_pauseparam, -#endif .get_settings = fec_enet_get_settings, .set_settings = fec_enet_set_settings, .get_drvinfo = fec_enet_get_drvinfo, - .get_link = ethtool_op_get_link, - .get_ts_info = fec_enet_get_ts_info, .nway_reset = fec_enet_nway_reset, + .get_link = ethtool_op_get_link, + .get_ringparam = fec_enet_get_ringparam, + .set_ringparam = fec_enet_set_ringparam, #ifndef CONFIG_M5272 - .get_ethtool_stats = fec_enet_get_ethtool_stats, + .get_pauseparam = fec_enet_get_pauseparam, + .set_pauseparam = fec_enet_set_pauseparam, .get_strings = fec_enet_get_strings, + .get_ethtool_stats = fec_enet_get_ethtool_stats, .get_sset_count = fec_enet_get_sset_count, #endif + .get_ts_info = fec_enet_get_ts_info, }; static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd) @@ -1673,7 +2020,7 @@ if (!phydev) return -ENODEV; - if (fep->bufdesc_ex) { + if (fep->flags & FEC_FLAG_BUFDESC_EX) { if (cmd == SIOCSHWTSTAMP) return fec_ptp_set(ndev, rq); if (cmd == SIOCGHWTSTAMP) @@ -1688,23 +2035,33 @@ struct fec_enet_private *fep = netdev_priv(ndev); unsigned int i; struct sk_buff *skb; - struct bufdesc *bdp; + union bufdesc_u *bdp; - bdp = fep->rx_bd_base; for (i = 0; i < fep->rx_ring_size; i++) { - skb = fep->rx_skbuff[i]; + bdp = fec_enet_rx_get(i, fep); - if (bdp->cbd_bufaddr) - dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, + skb = fep->rx_skbuff[i]; + fep->rx_skbuff[i] = NULL; + if (skb) { + dma_unmap_single(&fep->pdev->dev, bdp->bd.cbd_bufaddr, FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); - if (skb) dev_kfree_skb(skb); - bdp = fec_enet_get_nextdesc(bdp, fep); + } } - bdp = fep->tx_bd_base; - for (i = 0; i < fep->tx_ring_size; i++) + for (i = 0; i < fep->tx_ring_size; i++) { + bdp = fec_enet_tx_get(i, fep); + if (bdp->bd.cbd_bufaddr) + fec_enet_tx_unmap(i, bdp, fep); kfree(fep->tx_bounce[i]); + fep->tx_bounce[i] = NULL; + skb = fep->tx_skbuff[i]; + fep->tx_skbuff[i] = NULL; + if (skb) + dev_kfree_skb(skb); + } + + dma_free_coherent(NULL, PAGE_SIZE, fep->rx_bd_base, fep->rx_bd_dma); } static int 
fec_enet_alloc_buffers(struct net_device *ndev) @@ -1712,59 +2069,82 @@ struct fec_enet_private *fep = netdev_priv(ndev); unsigned int i; struct sk_buff *skb; - struct bufdesc *bdp; + union bufdesc_u *bdp; + union bufdesc_u *rx_cbd_cpu, *tx_cbd_cpu; + dma_addr_t rx_cbd_dma, tx_cbd_dma; + + /* Allocate memory for buffer descriptors. */ + rx_cbd_cpu = dma_alloc_coherent(NULL, PAGE_SIZE, &rx_cbd_dma, + GFP_KERNEL); + tx_cbd_cpu = dma_alloc_coherent(NULL, PAGE_SIZE, &tx_cbd_dma, + GFP_KERNEL); + if (!rx_cbd_cpu || !tx_cbd_cpu) { + if (rx_cbd_cpu) + dma_free_coherent(NULL, PAGE_SIZE, rx_cbd_cpu, rx_cbd_dma); + if (tx_cbd_cpu) + dma_free_coherent(NULL, PAGE_SIZE, tx_cbd_cpu, tx_cbd_dma); + return -ENOMEM; + } + + memset(rx_cbd_cpu, 0, PAGE_SIZE); + memset(tx_cbd_cpu, 0, PAGE_SIZE); + + /* Set receive and transmit descriptor base. */ + fep->rx_bd_base = rx_cbd_cpu; + fep->rx_bd_dma = rx_cbd_dma; + fep->tx_bd_base = tx_cbd_cpu; + fep->tx_bd_dma = tx_cbd_dma; - bdp = fep->rx_bd_base; for (i = 0; i < fep->rx_ring_size; i++) { + dma_addr_t addr; + skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); - if (!skb) { - fec_enet_free_buffers(ndev); - return -ENOMEM; - } - fep->rx_skbuff[i] = skb; + if (!skb) + goto err_alloc; - bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data, + addr = dma_map_single(&fep->pdev->dev, skb->data, FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE); - if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) { - fec_enet_free_buffers(ndev); + if (dma_mapping_error(&fep->pdev->dev, addr)) { + dev_kfree_skb(skb); if (net_ratelimit()) netdev_err(ndev, "Rx DMA memory map failed\n"); - return -ENOMEM; + goto err_alloc; } - bdp->cbd_sc = BD_ENET_RX_EMPTY; - if (fep->bufdesc_ex) { - struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; - ebdp->cbd_esc = BD_ENET_RX_INT; - } + fep->rx_skbuff[i] = skb; + bdp = fec_enet_rx_get(i, fep); + bdp->bd.cbd_bufaddr = addr; + bdp->bd.cbd_sc = BD_ENET_RX_EMPTY; + /* Set the last buffer to wrap. */ + if (i == fep->rx_ring_size - 1) + bdp->bd.cbd_sc |= BD_SC_WRAP; - bdp = fec_enet_get_nextdesc(bdp, fep); + if (fep->flags & FEC_FLAG_BUFDESC_EX) + bdp->ebd.cbd_esc = BD_ENET_RX_INT; } - /* Set the last buffer to wrap. */ - bdp = fec_enet_get_prevdesc(bdp, fep); - bdp->cbd_sc |= BD_SC_WRAP; - - bdp = fep->tx_bd_base; for (i = 0; i < fep->tx_ring_size; i++) { + bdp = fec_enet_tx_get(i, fep); fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL); + if (!fep->tx_bounce[i]) + goto err_alloc; - bdp->cbd_sc = 0; - bdp->cbd_bufaddr = 0; - - if (fep->bufdesc_ex) { - struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp; - ebdp->cbd_esc = BD_ENET_TX_INT; - } + /* Set the last buffer to wrap. */ + if (i == fep->tx_ring_size - 1) + bdp->bd.cbd_sc = BD_SC_WRAP; + else + bdp->bd.cbd_sc = 0; + bdp->bd.cbd_bufaddr = 0; - bdp = fec_enet_get_nextdesc(bdp, fep); + if (fep->flags & FEC_FLAG_BUFDESC_EX) + bdp->ebd.cbd_esc = BD_ENET_TX_INT; } - /* Set the last buffer to wrap. */ - bdp = fec_enet_get_prevdesc(bdp, fep); - bdp->cbd_sc |= BD_SC_WRAP; - return 0; + + err_alloc: + fec_enet_free_buffers(ndev); + return -ENOMEM; } static int @@ -1788,10 +2168,12 @@ return ret; } + mutex_lock(&fep->mutex); + fec_restart(ndev); + mutex_unlock(&fep->mutex); napi_enable(&fep->napi); phy_start(fep->phy_dev); netif_start_queue(ndev); - fep->opened = 1; return 0; } @@ -1800,17 +2182,19 @@ { struct fec_enet_private *fep = netdev_priv(ndev); - /* Don't know what to do yet. 
*/ - napi_disable(&fep->napi); - fep->opened = 0; - netif_stop_queue(ndev); - fec_stop(ndev); + phy_stop(fep->phy_dev); - if (fep->phy_dev) { - phy_stop(fep->phy_dev); - phy_disconnect(fep->phy_dev); + if (netif_device_present(ndev)) { + napi_disable(&fep->napi); + netif_tx_disable(ndev); + mutex_lock(&fep->mutex); + fec_stop(ndev); + mutex_unlock(&fep->mutex); } + phy_disconnect(fep->phy_dev); + fep->phy_dev = NULL; + fec_enet_free_buffers(ndev); return 0; @@ -1935,28 +2319,67 @@ } #endif +static netdev_features_t fec_fix_features(struct net_device *ndev, + netdev_features_t features) +{ + struct fec_enet_private *fep = netdev_priv(ndev); + + /* + * NETIF_F_SG requires a minimum transmit ring size. If we + * have less than this size, we can't support this feature. + */ + if (fep->tx_ring_size < TX_RING_SIZE_MIN_SG) + features &= ~NETIF_F_SG; + + return features; +} + +#define FEATURES_NEED_QUIESCE (NETIF_F_RXCSUM | NETIF_F_SG) + static int fec_set_features(struct net_device *netdev, netdev_features_t features) { struct fec_enet_private *fep = netdev_priv(netdev); netdev_features_t changed = features ^ netdev->features; + /* Quiesce the device if necessary */ + if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) { + mutex_lock(&fep->mutex); + napi_disable(&fep->napi); + netif_tx_lock_bh(netdev); + fec_stop(netdev); + } + netdev->features = features; /* Receive checksum has been changed */ if (changed & NETIF_F_RXCSUM) { if (features & NETIF_F_RXCSUM) - fep->csum_flags |= FLAG_RX_CSUM_ENABLED; + fep->flags |= FEC_FLAG_RX_CSUM; else - fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED; + fep->flags &= ~FEC_FLAG_RX_CSUM; + } - if (netif_running(netdev)) { - fec_stop(netdev); - fec_restart(netdev, fep->phy_dev->duplex); - netif_wake_queue(netdev); - } else { - fec_restart(netdev, fep->phy_dev->duplex); - } + if (changed & NETIF_F_HW_VLAN_CTAG_RX) { + if (features & NETIF_F_HW_VLAN_CTAG_RX) + fep->flags |= FEC_FLAG_RX_VLAN; + else + fep->flags &= ~FEC_FLAG_RX_VLAN; + } + + /* Set the appropriate minimum transmit ring free threshold */ + if (features & NETIF_F_SG) + fep->tx_min = MAX_SKB_FRAGS + 1; + else + fep->tx_min = 1; + + /* Resume the device after updates */ + if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) { + fec_restart(netdev); + netif_wake_queue(netdev); + netif_tx_unlock_bh(netdev); + napi_enable(&fep->napi); + mutex_unlock(&fep->mutex); } return 0; @@ -1975,27 +2398,13 @@ #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = fec_poll_controller, #endif + .ndo_fix_features = fec_fix_features, .ndo_set_features = fec_set_features, }; - /* - * XXX: We need to clean up on failure exits here. - * - */ -static int fec_enet_init(struct net_device *ndev) +static void fec_enet_init(struct net_device *ndev) { struct fec_enet_private *fep = netdev_priv(ndev); - const struct platform_device_id *id_entry = - platform_get_device_id(fep->pdev); - struct bufdesc *cbd_base; - - /* Allocate memory for buffer descriptors. */ - cbd_base = dma_alloc_coherent(NULL, PAGE_SIZE, &fep->bd_dma, - GFP_KERNEL); - if (!cbd_base) - return -ENOMEM; - - memset(cbd_base, 0, PAGE_SIZE); fep->netdev = ndev; @@ -2008,13 +2417,8 @@ fep->tx_ring_size = TX_RING_SIZE; fep->rx_ring_size = RX_RING_SIZE; - /* Set receive and transmit descriptor base. 
*/ - fep->rx_bd_base = cbd_base; - if (fep->bufdesc_ex) - fep->tx_bd_base = (struct bufdesc *) - (((struct bufdesc_ex *)cbd_base) + fep->rx_ring_size); - else - fep->tx_bd_base = cbd_base + fep->rx_ring_size; + fep->rx_bd_base = fep->tx_bd_base = NULL; + fep->rx_bd_dma = fep->tx_bd_dma = 0; /* The FEC Ethernet specific entries in the device structure */ ndev->watchdog_timeo = TX_TIMEOUT; @@ -2024,24 +2428,37 @@ writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK); netif_napi_add(ndev, &fep->napi, fec_enet_rx_napi, NAPI_POLL_WEIGHT); - if (id_entry->driver_data & FEC_QUIRK_HAS_VLAN) { - /* enable hw VLAN support */ - ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; - ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; - } - - if (id_entry->driver_data & FEC_QUIRK_HAS_CSUM) { - /* enable hw accelerator */ - ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM - | NETIF_F_RXCSUM); - ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM - | NETIF_F_RXCSUM); - fep->csum_flags |= FLAG_RX_CSUM_ENABLED; + if (fep->flags & FEC_FLAG_BUFDESC_EX) { + /* Features which require the enhanced buffer descriptors */ + if (fep->quirks & FEC_QUIRK_HAS_VLAN) { + /* enable hw VLAN support */ + ndev->features |= NETIF_F_HW_VLAN_CTAG_RX; + ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; + fep->flags |= FEC_FLAG_RX_VLAN; + } + + if (fep->quirks & FEC_QUIRK_HAS_CSUM) { + /* enable hw accelerator */ + ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM + | NETIF_F_RXCSUM); + ndev->hw_features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM + | NETIF_F_RXCSUM); + fep->flags |= FEC_FLAG_RX_CSUM; + } } - fec_restart(ndev, 0); + if (!(fep->quirks & FEC_QUIRK_SWAP_FRAME)) { + /* don't enable SG if we need to swap frames */ + ndev->features |= NETIF_F_SG; + ndev->hw_features |= NETIF_F_SG; + } - return 0; + if (ndev->features & NETIF_F_SG) + fep->tx_min = MAX_SKB_FRAGS + 1; + else + fep->tx_min = 1; + + fec_restart(ndev); } #ifdef CONFIG_OF @@ -2107,11 +2524,16 @@ /* setup board info structure */ fep = netdev_priv(ndev); + mutex_init(&fep->mutex); + + if (pdev->id_entry) + fep->quirks = pdev->id_entry->driver_data; #if !defined(CONFIG_M5272) /* default enable pause frame auto negotiation */ - if (pdev->id_entry && - (pdev->id_entry->driver_data & FEC_QUIRK_HAS_GBIT)) - fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG; + if (fep->quirks & FEC_QUIRK_HAS_GBIT) + fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG | + FEC_PAUSE_FLAG_TX | + FEC_PAUSE_FLAG_RX; #endif r = platform_get_resource(pdev, IORESOURCE_MEM, 0); @@ -2124,7 +2546,9 @@ fep->pdev = pdev; fep->dev_id = dev_id++; - fep->bufdesc_ex = 0; + fep->flags = 0; + if (pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX) + fep->flags |= FEC_FLAG_BUFDESC_EX; platform_set_drvdata(pdev, ndev); @@ -2157,11 +2581,9 @@ fep->clk_enet_out = NULL; fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp"); - fep->bufdesc_ex = - pdev->id_entry->driver_data & FEC_QUIRK_HAS_BUFDESC_EX; if (IS_ERR(fep->clk_ptp)) { fep->clk_ptp = NULL; - fep->bufdesc_ex = 0; + fep->flags &= ~FEC_FLAG_BUFDESC_EX; } ret = clk_prepare_enable(fep->clk_ahb); @@ -2198,12 +2620,10 @@ fec_reset_phy(pdev); - if (fep->bufdesc_ex) + if (fep->flags & FEC_FLAG_BUFDESC_EX) fec_ptp_init(pdev); - ret = fec_enet_init(ndev); - if (ret) - goto failed_init; + fec_enet_init(ndev); for (i = 0; i < FEC_IRQ_NUM; i++) { irq = platform_get_irq(pdev, i); @@ -2230,17 +2650,16 @@ if (ret) goto failed_register; - if (fep->bufdesc_ex && fep->ptp_clock) + if (fep->flags & FEC_FLAG_BUFDESC_EX && fep->ptp_clock) netdev_info(ndev, "registered PHC device %d\n", fep->dev_id); 
- INIT_DELAYED_WORK(&(fep->delay_work.delay_work), fec_enet_work); + INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); return 0; failed_register: fec_enet_mii_remove(fep); failed_mii_init: failed_irq: -failed_init: if (fep->reg_phy) regulator_disable(fep->reg_phy); failed_regulator: @@ -2266,7 +2685,7 @@ struct net_device *ndev = platform_get_drvdata(pdev); struct fec_enet_private *fep = netdev_priv(ndev); - cancel_delayed_work_sync(&(fep->delay_work.delay_work)); + cancel_work_sync(&fep->tx_timeout_work); unregister_netdev(ndev); fec_enet_mii_remove(fep); del_timer_sync(&fep->time_keep); @@ -2292,10 +2711,19 @@ struct net_device *ndev = dev_get_drvdata(dev); struct fec_enet_private *fep = netdev_priv(ndev); + rtnl_lock(); if (netif_running(ndev)) { - fec_stop(ndev); + phy_stop(fep->phy_dev); + napi_disable(&fep->napi); + netif_tx_lock_bh(ndev); netif_device_detach(ndev); + netif_tx_unlock_bh(ndev); + mutex_lock(&fep->mutex); + fec_stop(ndev); + mutex_unlock(&fep->mutex); } + rtnl_unlock(); + if (fep->clk_ptp) clk_disable_unprepare(fep->clk_ptp); if (fep->clk_enet_out) @@ -2342,10 +2770,18 @@ goto failed_clk_ptp; } + rtnl_lock(); if (netif_running(ndev)) { - fec_restart(ndev, fep->full_duplex); + mutex_lock(&fep->mutex); + fec_restart(ndev); + mutex_unlock(&fep->mutex); + netif_tx_lock_bh(ndev); netif_device_attach(ndev); + netif_tx_unlock_bh(ndev); + napi_enable(&fep->napi); + phy_start(fep->phy_dev); } + rtnl_unlock(); return 0; diff -Nur linux-3.15-rc4.orig/drivers/regulator/anatop-regulator.c linux-3.15-rc4/drivers/regulator/anatop-regulator.c --- linux-3.15-rc4.orig/drivers/regulator/anatop-regulator.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/regulator/anatop-regulator.c 2014-05-07 17:05:51.104167047 +0200 @@ -267,6 +267,7 @@ config.driver_data = sreg; config.of_node = pdev->dev.of_node; config.regmap = sreg->anatop; + config.ena_gpio = -EINVAL; /* Only core regulators have the ramp up delay configuration. 
*/ if (sreg->control_reg && sreg->delay_bit_width) { diff -Nur linux-3.15-rc4.orig/drivers/regulator/core.c linux-3.15-rc4/drivers/regulator/core.c --- linux-3.15-rc4.orig/drivers/regulator/core.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/regulator/core.c 2014-05-07 17:05:51.104167047 +0200 @@ -3459,7 +3459,7 @@ dev_set_drvdata(&rdev->dev, rdev); - if (config->ena_gpio && gpio_is_valid(config->ena_gpio)) { + if (gpio_is_valid(config->ena_gpio)) { ret = regulator_ena_gpio_request(rdev, config); if (ret != 0) { rdev_err(rdev, "Failed to request enable GPIO%d: %d\n", diff -Nur linux-3.15-rc4.orig/drivers/regulator/dummy.c linux-3.15-rc4/drivers/regulator/dummy.c --- linux-3.15-rc4.orig/drivers/regulator/dummy.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/regulator/dummy.c 2014-05-07 17:05:51.104167047 +0200 @@ -48,6 +48,7 @@ config.dev = &pdev->dev; config.init_data = &dummy_initdata; + config.ena_gpio = -EINVAL; dummy_regulator_rdev = regulator_register(&dummy_desc, &config); if (IS_ERR(dummy_regulator_rdev)) { diff -Nur linux-3.15-rc4.orig/drivers/regulator/fixed.c linux-3.15-rc4/drivers/regulator/fixed.c --- linux-3.15-rc4.orig/drivers/regulator/fixed.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/regulator/fixed.c 2014-05-07 17:05:51.104167047 +0200 @@ -161,9 +161,7 @@ drvdata->desc.n_voltages = 1; drvdata->desc.fixed_uV = config->microvolts; - - if (config->gpio >= 0) - cfg.ena_gpio = config->gpio; + cfg.ena_gpio = config->gpio; cfg.ena_gpio_invert = !config->enable_high; if (config->enabled_at_boot) { if (config->enable_high) diff -Nur linux-3.15-rc4.orig/drivers/staging/imx-drm/drm-ddc-connector.c linux-3.15-rc4/drivers/staging/imx-drm/drm-ddc-connector.c --- linux-3.15-rc4.orig/drivers/staging/imx-drm/drm-ddc-connector.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-3.15-rc4/drivers/staging/imx-drm/drm-ddc-connector.c 2014-05-07 17:05:51.104167047 +0200 @@ -0,0 +1,92 @@ +#include +#include +#include +#include +#include + +#include "drm-ddc-connector.h" + +static enum drm_connector_status +drm_ddc_connector_detect(struct drm_connector *connector, bool force) +{ + struct drm_ddc_connector *ddc_conn = to_ddc_conn(connector); + + return ddc_conn->detect ? 
ddc_conn->detect(connector, force) : + connector_status_connected; +} + +int drm_ddc_connector_get_modes(struct drm_connector *connector) +{ + struct drm_ddc_connector *ddc_conn = to_ddc_conn(connector); + struct edid *edid; + int ret = 0; + + if (!ddc_conn->ddc) + return 0; + + edid = drm_get_edid(connector, ddc_conn->ddc); + if (edid) { + drm_mode_connector_update_edid_property(connector, edid); + ret = drm_add_edid_modes(connector, edid); + /* Store the ELD */ + drm_edid_to_eld(connector, edid); + kfree(edid); + } + + return ret; +} +EXPORT_SYMBOL_GPL(drm_ddc_connector_get_modes); + +static void drm_ddc_connector_destroy(struct drm_connector *connector) +{ + struct drm_ddc_connector *ddc_conn = to_ddc_conn(connector); + + drm_sysfs_connector_remove(connector); + drm_connector_cleanup(connector); + if (ddc_conn->ddc) + i2c_put_adapter(ddc_conn->ddc); +} + +static const struct drm_connector_funcs drm_ddc_connector_funcs = { + .dpms = drm_helper_connector_dpms, + .fill_modes = drm_helper_probe_single_connector_modes, + .detect = drm_ddc_connector_detect, + .destroy = drm_ddc_connector_destroy, +}; + +int drm_ddc_connector_add(struct drm_device *drm, + struct drm_ddc_connector *ddc_conn, int connector_type) +{ + drm_connector_init(drm, &ddc_conn->connector, &drm_ddc_connector_funcs, + connector_type); + return 0; +} +EXPORT_SYMBOL_GPL(drm_ddc_connector_add); + +struct drm_ddc_connector *drm_ddc_connector_create(struct drm_device *drm, + struct device_node *np, void *private) +{ + struct drm_ddc_connector *ddc_conn; + struct device_node *ddc_node; + + ddc_conn = devm_kzalloc(drm->dev, sizeof(*ddc_conn), GFP_KERNEL); + if (!ddc_conn) + return ERR_PTR(-ENOMEM); + + ddc_conn->private = private; + + ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0); + if (ddc_node) { + ddc_conn->ddc = of_find_i2c_adapter_by_node(ddc_node); + of_node_put(ddc_node); + if (!ddc_conn->ddc) + return ERR_PTR(-EPROBE_DEFER); + } + + return ddc_conn; +} +EXPORT_SYMBOL_GPL(drm_ddc_connector_create); + +MODULE_AUTHOR("Russell King "); +MODULE_DESCRIPTION("Generic DRM DDC connector module"); +MODULE_LICENSE("GPL v2"); diff -Nur linux-3.15-rc4.orig/drivers/staging/imx-drm/drm-ddc-connector.h linux-3.15-rc4/drivers/staging/imx-drm/drm-ddc-connector.h --- linux-3.15-rc4.orig/drivers/staging/imx-drm/drm-ddc-connector.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-3.15-rc4/drivers/staging/imx-drm/drm-ddc-connector.h 2014-05-07 17:05:51.104167047 +0200 @@ -0,0 +1,26 @@ +#ifndef DRM_DDC_CONNECTOR_H +#define DRM_DDC_CONNECTOR_H + +struct drm_ddc_connector { + struct i2c_adapter *ddc; + struct drm_connector connector; + enum drm_connector_status (*detect)(struct drm_connector *, bool); + void *private; +}; + +#define to_ddc_conn(c) container_of(c, struct drm_ddc_connector, connector) + +int drm_ddc_connector_get_modes(struct drm_connector *connector); +int drm_ddc_connector_add(struct drm_device *drm, + struct drm_ddc_connector *ddc_conn, int connector_type); +struct drm_ddc_connector *drm_ddc_connector_create(struct drm_device *drm, + struct device_node *np, void *private); + +static inline void *drm_ddc_private(struct drm_connector *connector) +{ + struct drm_ddc_connector *ddc_conn = to_ddc_conn(connector); + + return ddc_conn->private; +} + +#endif diff -Nur linux-3.15-rc4.orig/drivers/staging/imx-drm/dw-hdmi-audio.c linux-3.15-rc4/drivers/staging/imx-drm/dw-hdmi-audio.c --- linux-3.15-rc4.orig/drivers/staging/imx-drm/dw-hdmi-audio.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-3.15-rc4/drivers/staging/imx-drm/dw-hdmi-audio.c 
2014-05-07 17:05:51.108167072 +0200 @@ -0,0 +1,654 @@ +/* + * DesignWare HDMI audio driver + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * Written and tested against the (alleged) DW HDMI Tx found in iMX6S. + */ +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include "dw-hdmi-audio.h" + +#define DRIVER_NAME "dw-hdmi-audio" + +/* Provide some bits rather than bit offsets */ +enum { + HDMI_AHB_DMA_CONF0_SW_FIFO_RST = BIT(7), + HDMI_AHB_DMA_CONF0_EN_HLOCK = BIT(3), + HDMI_AHB_DMA_START_START = BIT(0), + HDMI_AHB_DMA_STOP_STOP = BIT(0), + HDMI_IH_MUTE_AHBDMAAUD_STAT0_ERROR = BIT(5), + HDMI_IH_MUTE_AHBDMAAUD_STAT0_LOST = BIT(4), + HDMI_IH_MUTE_AHBDMAAUD_STAT0_RETRY = BIT(3), + HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE = BIT(2), + HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFFULL = BIT(1), + HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFEMPTY = BIT(0), + HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL = + HDMI_IH_MUTE_AHBDMAAUD_STAT0_ERROR | + HDMI_IH_MUTE_AHBDMAAUD_STAT0_LOST | + HDMI_IH_MUTE_AHBDMAAUD_STAT0_RETRY | + HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE | + HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFFULL | + HDMI_IH_MUTE_AHBDMAAUD_STAT0_BUFFEMPTY, + HDMI_IH_AHBDMAAUD_STAT0_ERROR = BIT(5), + HDMI_IH_AHBDMAAUD_STAT0_LOST = BIT(4), + HDMI_IH_AHBDMAAUD_STAT0_RETRY = BIT(3), + HDMI_IH_AHBDMAAUD_STAT0_DONE = BIT(2), + HDMI_IH_AHBDMAAUD_STAT0_BUFFFULL = BIT(1), + HDMI_IH_AHBDMAAUD_STAT0_BUFFEMPTY = BIT(0), + HDMI_IH_AHBDMAAUD_STAT0_ALL = + HDMI_IH_AHBDMAAUD_STAT0_ERROR | + HDMI_IH_AHBDMAAUD_STAT0_LOST | + HDMI_IH_AHBDMAAUD_STAT0_RETRY | + HDMI_IH_AHBDMAAUD_STAT0_DONE | + HDMI_IH_AHBDMAAUD_STAT0_BUFFFULL | + HDMI_IH_AHBDMAAUD_STAT0_BUFFEMPTY, + HDMI_AHB_DMA_CONF0_INCR16 = 2 << 1, + HDMI_AHB_DMA_CONF0_INCR8 = 1 << 1, + HDMI_AHB_DMA_CONF0_INCR4 = 0, + HDMI_AHB_DMA_CONF0_BURST_MODE = BIT(0), + HDMI_AHB_DMA_MASK_DONE = BIT(7), + HDMI_REVISION_ID = 0x0001, + HDMI_IH_AHBDMAAUD_STAT0 = 0x0109, + HDMI_IH_MUTE_AHBDMAAUD_STAT0 = 0x0189, + HDMI_AUD_N1 = 0x3200, + HDMI_AUD_CTS1 = 0x3203, + HDMI_AHB_DMA_CONF0 = 0x3600, + HDMI_AHB_DMA_START = 0x3601, + HDMI_AHB_DMA_STOP = 0x3602, + HDMI_AHB_DMA_THRSLD = 0x3603, + HDMI_AHB_DMA_STRADDR0 = 0x3604, + HDMI_AHB_DMA_STPADDR0 = 0x3608, + HDMI_AHB_DMA_STAT = 0x3612, + HDMI_AHB_DMA_STAT_FULL = BIT(1), + HDMI_AHB_DMA_MASK = 0x3614, + HDMI_AHB_DMA_POL = 0x3615, + HDMI_AHB_DMA_CONF1 = 0x3616, + HDMI_AHB_DMA_BUFFPOL = 0x361a, +}; + +struct snd_dw_hdmi { + struct snd_card *card; + struct snd_pcm *pcm; + struct dw_hdmi_audio_data data; + struct snd_pcm_substream *substream; + void (*reformat)(struct snd_dw_hdmi *, size_t, size_t); + void *buf_src; + void *buf_dst; + dma_addr_t buf_addr; + unsigned buf_offset; + unsigned buf_period; + unsigned buf_size; + unsigned channels; + uint8_t revision; + uint8_t iec_offset; + uint8_t cs[192][8]; +}; + +static void dw_hdmi_writel(unsigned long val, void __iomem *ptr) +{ + writeb_relaxed(val, ptr); + writeb_relaxed(val >> 8, ptr + 1); + writeb_relaxed(val >> 16, ptr + 2); + writeb_relaxed(val >> 24, ptr + 3); +} + +/* + * Convert to hardware format: The userspace buffer contains IEC958 samples, + * with the PCUV bits in bits 31..28 and audio samples in bits 27..4. We + * need these to be in bits 27..24, with the IEC B bit in bit 28, and audio + * samples in 23..0. 
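As an illustrative aside, not part of the diff (the driver comment continues right after it): the repacking this comment describes is easiest to check on a single subframe. A minimal stand-alone C model of the same bit manipulation that dw_hdmi_reformat_iec958() applies per 32-bit sample, assuming the layout stated above, with a hypothetical sample value:

#include <stdint.h>
#include <stdio.h>

/* One IEC958 subframe as delivered by userspace: preamble in bits 3..0
 * (8 = block start), audio word in bits 27..4, P/C/U/V flags in bits 31..28. */
static uint32_t repack_subframe(uint32_t sample)
{
        /* Preamble bit 3 (block start) becomes the hardware's IEC B bit, bit 28. */
        uint32_t b = (sample & 8) << (28 - 3);

        /* Dropping the four preamble bits moves the audio word to 23..0 and
         * the P/C/U/V flags to 27..24, which is the layout the AHB DMA wants. */
        return (sample >> 4) | b;
}

int main(void)
{
        /* Hypothetical subframe: V flag set, audio word 0x123456, block-start preamble. */
        uint32_t in = (1u << 28) | (0x123456u << 4) | 0x8u;

        printf("0x%08x -> 0x%08x\n", in, repack_subframe(in));
        /* Prints 0x11234568 -> 0x11123456: audio now in 23..0, flags in 27..24,
         * B bit in 28. */
        return 0;
}

The in-kernel version runs over a whole period at a time and writes into the shadow DMA buffer instead of returning a value, but the per-sample transformation is the same.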
+ * + * Default preamble in bits 3..0: 8 = block start, 4 = even 2 = odd + * + * Ideally, we could do with having the data properly formatted in userspace. + */ +static void dw_hdmi_reformat_iec958(struct snd_dw_hdmi *dw, + size_t offset, size_t bytes) +{ + uint32_t *src = dw->buf_src + offset; + uint32_t *dst = dw->buf_dst + offset; + uint32_t *end = dw->buf_src + offset + bytes; + + do { + uint32_t b, sample = *src++; + + b = (sample & 8) << (28 - 3); + + sample >>= 4; + + *dst++ = sample | b; + } while (src < end); +} + +static uint32_t parity(uint32_t sample) +{ + sample ^= sample >> 16; + sample ^= sample >> 8; + sample ^= sample >> 4; + sample ^= sample >> 2; + sample ^= sample >> 1; + return (sample & 1) << 27; +} + +static void dw_hdmi_reformat_s24(struct snd_dw_hdmi *dw, + size_t offset, size_t bytes) +{ + uint32_t *src = dw->buf_src + offset; + uint32_t *dst = dw->buf_dst + offset; + uint32_t *end = dw->buf_src + offset + bytes; + + do { + unsigned i; + uint8_t *cs; + + cs = dw->cs[dw->iec_offset++]; + if (dw->iec_offset >= 192) + dw->iec_offset = 0; + + i = dw->channels; + do { + uint32_t sample = *src++; + + sample &= ~0xff000000; + sample |= *cs++ << 24; + sample |= parity(sample & ~0xf8000000); + + *dst++ = sample; + } while (--i); + } while (src < end); +} + +static void dw_hdmi_create_cs(struct snd_dw_hdmi *dw, + struct snd_pcm_runtime *runtime) +{ + uint8_t cs[4]; + unsigned ch, i, j; + + cs[0] = IEC958_AES0_CON_NOT_COPYRIGHT | IEC958_AES0_CON_EMPHASIS_NONE; + cs[1] = IEC958_AES1_CON_GENERAL; + cs[2] = IEC958_AES2_CON_SOURCE_UNSPEC; + cs[3] = IEC958_AES3_CON_CLOCK_1000PPM; + + switch (runtime->rate) { + case 32000: + cs[3] |= IEC958_AES3_CON_FS_32000; + break; + case 44100: + cs[3] |= IEC958_AES3_CON_FS_44100; + break; + case 48000: + cs[3] |= IEC958_AES3_CON_FS_48000; + break; + case 88200: + cs[3] |= IEC958_AES3_CON_FS_88200; + break; + case 96000: + cs[3] |= IEC958_AES3_CON_FS_96000; + break; + case 176400: + cs[3] |= IEC958_AES3_CON_FS_176400; + break; + case 192000: + cs[3] |= IEC958_AES3_CON_FS_192000; + break; + } + + memset(dw->cs, 0, sizeof(dw->cs)); + + for (ch = 0; ch < 8; ch++) { + cs[2] &= ~IEC958_AES2_CON_CHANNEL; + cs[2] |= (ch + 1) << 4; + + for (i = 0; i < ARRAY_SIZE(cs); i++) { + unsigned c = cs[i]; + + for (j = 0; j < 8; j++, c >>= 1) + dw->cs[i * 8 + j][ch] = (c & 1) << 2; + } + } + dw->cs[0][0] |= BIT(4); +} + +static void dw_hdmi_start_dma(struct snd_dw_hdmi *dw) +{ + void __iomem *base = dw->data.base; + unsigned offset = dw->buf_offset; + unsigned period = dw->buf_period; + u32 start, stop; + + dw->reformat(dw, offset, period); + + /* Clear all irqs before enabling irqs and starting DMA */ + writeb_relaxed(HDMI_IH_AHBDMAAUD_STAT0_ALL, + base + HDMI_IH_AHBDMAAUD_STAT0); + + start = dw->buf_addr + offset; + stop = start + period - 1; + + /* Setup the hardware start/stop addresses */ + dw_hdmi_writel(start, base + HDMI_AHB_DMA_STRADDR0); + dw_hdmi_writel(stop, base + HDMI_AHB_DMA_STPADDR0); + + writeb_relaxed((u8)~HDMI_AHB_DMA_MASK_DONE, base + HDMI_AHB_DMA_MASK); + writeb(HDMI_AHB_DMA_START_START, base + HDMI_AHB_DMA_START); + + offset += period; + if (offset >= dw->buf_size) + offset = 0; + dw->buf_offset = offset; +} + +static void dw_hdmi_stop_dma(struct snd_dw_hdmi *dw) +{ + dw->substream = NULL; + + /* Disable interrupts before disabling DMA */ + writeb_relaxed(~0, dw->data.base + HDMI_AHB_DMA_MASK); + writeb_relaxed(HDMI_AHB_DMA_STOP_STOP, dw->data.base + HDMI_AHB_DMA_STOP); +} + +static irqreturn_t snd_dw_hdmi_irq(int irq, void *data) +{ + 
struct snd_dw_hdmi *dw = data; + struct snd_pcm_substream *substream; + unsigned stat; + + stat = readb_relaxed(dw->data.base + HDMI_IH_AHBDMAAUD_STAT0); + if (!stat) + return IRQ_NONE; + + writeb_relaxed(stat, dw->data.base + HDMI_IH_AHBDMAAUD_STAT0); + + substream = dw->substream; + if (stat & HDMI_IH_AHBDMAAUD_STAT0_DONE && substream) { + snd_pcm_period_elapsed(substream); + if (dw->substream) + dw_hdmi_start_dma(dw); + } + + return IRQ_HANDLED; +} + +static struct snd_pcm_hardware dw_hdmi_hw = { + .info = SNDRV_PCM_INFO_INTERLEAVED | + SNDRV_PCM_INFO_BLOCK_TRANSFER | + SNDRV_PCM_INFO_MMAP | + SNDRV_PCM_INFO_MMAP_VALID, + .formats = SNDRV_PCM_FMTBIT_IEC958_SUBFRAME_LE | + SNDRV_PCM_FMTBIT_S24_LE, + .rates = SNDRV_PCM_RATE_32000 | + SNDRV_PCM_RATE_44100 | + SNDRV_PCM_RATE_48000 | + SNDRV_PCM_RATE_88200 | + SNDRV_PCM_RATE_96000 | + SNDRV_PCM_RATE_176400 | + SNDRV_PCM_RATE_192000, + .channels_min = 2, + .channels_max = 8, + .buffer_bytes_max = 64 * 1024, + .period_bytes_min = 256, + .period_bytes_max = 8192, /* ERR004323: must limit to 8k */ + .periods_min = 2, + .periods_max = 16, + .fifo_size = 0, +}; + +static unsigned rates_mask[] = { + SNDRV_PCM_RATE_32000, + SNDRV_PCM_RATE_44100, + SNDRV_PCM_RATE_48000, + SNDRV_PCM_RATE_88200, + SNDRV_PCM_RATE_96000, + SNDRV_PCM_RATE_176400, + SNDRV_PCM_RATE_192000, +}; + +static void dw_hdmi_parse_eld(struct snd_dw_hdmi *dw, + struct snd_pcm_runtime *runtime) +{ + u8 *sad, *eld = dw->data.eld; + unsigned eld_ver, mnl, sad_count, rates, rate_mask, i; + unsigned max_channels; + + eld_ver = eld[0] >> 3; + if (eld_ver != 2 && eld_ver != 31) + return; + + mnl = eld[4] & 0x1f; + if (mnl > 16) + return; + + sad_count = eld[5] >> 4; + sad = eld + 20 + mnl; + + /* Start from the basic audio settings */ + max_channels = 2; + rates = 7; + while (sad_count > 0) { + switch (sad[0] & 0x78) { + case 0x08: /* PCM */ + max_channels = max(max_channels, (sad[0] & 7) + 1u); + rates |= sad[1]; + break; + } + sad += 3; + sad_count -= 1; + } + + for (rate_mask = i = 0; i < ARRAY_SIZE(rates_mask); i++) + if (rates & 1 << i) + rate_mask |= rates_mask[i]; + + runtime->hw.rates &= rate_mask; + runtime->hw.channels_max = min(runtime->hw.channels_max, max_channels); +} + +static int dw_hdmi_open(struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct snd_dw_hdmi *dw = substream->private_data; + void __iomem *base = dw->data.base; + int ret; + + /* Clear FIFO */ + writeb_relaxed(HDMI_AHB_DMA_CONF0_SW_FIFO_RST, + base + HDMI_AHB_DMA_CONF0); + + /* Configure interrupt polarities */ + writeb_relaxed(~0, base + HDMI_AHB_DMA_POL); + writeb_relaxed(~0, base + HDMI_AHB_DMA_BUFFPOL); + + /* Keep interrupts masked, and clear any pending */ + writeb_relaxed(~0, base + HDMI_AHB_DMA_MASK); + writeb_relaxed(~0, base + HDMI_IH_AHBDMAAUD_STAT0); + + ret = request_irq(dw->data.irq, snd_dw_hdmi_irq, IRQF_SHARED, + "dw-hdmi-audio", dw); + if (ret) + return ret; + + /* Un-mute done interrupt */ + writeb_relaxed(HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL & + ~HDMI_IH_MUTE_AHBDMAAUD_STAT0_DONE, + base + HDMI_IH_MUTE_AHBDMAAUD_STAT0); + + runtime->hw = dw_hdmi_hw; + dw_hdmi_parse_eld(dw, runtime); + snd_pcm_limit_hw_rates(runtime); + + return 0; +} + +static int dw_hdmi_close(struct snd_pcm_substream *substream) +{ + struct snd_dw_hdmi *dw = substream->private_data; + + /* Mute all interrupts */ + writeb_relaxed(HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL, + dw->data.base + HDMI_IH_MUTE_AHBDMAAUD_STAT0); + + free_irq(dw->data.irq, dw); + + return 0; +} + +static int 
dw_hdmi_hw_free(struct snd_pcm_substream *substream) +{ + return snd_pcm_lib_free_vmalloc_buffer(substream); +} + +static int dw_hdmi_hw_params(struct snd_pcm_substream *substream, + struct snd_pcm_hw_params *params) +{ + return snd_pcm_lib_alloc_vmalloc_buffer(substream, + params_buffer_bytes(params)); +} + +static int dw_hdmi_prepare(struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct snd_dw_hdmi *dw = substream->private_data; + uint8_t threshold, conf0, conf1; + + /* Setup as per 3.0.5 FSL 4.1.0 BSP */ + switch (dw->revision) { + case 0x0a: + conf0 = HDMI_AHB_DMA_CONF0_BURST_MODE | + HDMI_AHB_DMA_CONF0_INCR4; + if (runtime->channels == 2) + threshold = 126; + else + threshold = 124; + break; + case 0x1a: + conf0 = HDMI_AHB_DMA_CONF0_BURST_MODE | + HDMI_AHB_DMA_CONF0_INCR8; + threshold = 128; + break; + default: + /* NOTREACHED */ + return -EINVAL; + } + + dw->data.set_sample_rate(dw->data.hdmi, runtime->rate); + + /* Minimum number of bytes in the fifo. */ + runtime->hw.fifo_size = threshold * 32; + + conf0 |= HDMI_AHB_DMA_CONF0_EN_HLOCK; + conf1 = (1 << runtime->channels) - 1; + + writeb_relaxed(threshold, dw->data.base + HDMI_AHB_DMA_THRSLD); + writeb_relaxed(conf0, dw->data.base + HDMI_AHB_DMA_CONF0); + writeb_relaxed(conf1, dw->data.base + HDMI_AHB_DMA_CONF1); + + switch (runtime->format) { + case SNDRV_PCM_FORMAT_IEC958_SUBFRAME_LE: + dw->reformat = dw_hdmi_reformat_iec958; + break; + case SNDRV_PCM_FORMAT_S24_LE: + dw_hdmi_create_cs(dw, runtime); + dw->reformat = dw_hdmi_reformat_s24; + break; + } + dw->iec_offset = 0; + dw->channels = runtime->channels; + dw->buf_src = runtime->dma_area; + dw->buf_dst = substream->dma_buffer.area; + dw->buf_addr = substream->dma_buffer.addr; + dw->buf_period = snd_pcm_lib_period_bytes(substream); + dw->buf_size = snd_pcm_lib_buffer_bytes(substream); + + return 0; +} + +static int dw_hdmi_trigger(struct snd_pcm_substream *substream, int cmd) +{ + struct snd_dw_hdmi *dw = substream->private_data; + void __iomem *base = dw->data.base; + unsigned n[3], cts[3]; + int ret = 0, i; + bool err005174; + + switch (cmd) { + case SNDRV_PCM_TRIGGER_START: + err005174 = dw->revision == 0x0a; + if (err005174) { + for (i = 2; i >= 1; i--) { + n[i] = readb_relaxed(base + HDMI_AUD_N1 + i); + cts[i] = readb_relaxed(base + HDMI_AUD_CTS1 + i); + writeb_relaxed(0, base + HDMI_AUD_N1 + i); + writeb_relaxed(0, base + HDMI_AUD_CTS1 + i); + } + } + + dw->buf_offset = 0; + dw->substream = substream; + dw_hdmi_start_dma(dw); + + if (err005174) { + for (i = 2; i >= 1; i--) + writeb_relaxed(cts[i], base + HDMI_AUD_CTS1 + i); + for (i = 2; i >= 1; i--) + writeb_relaxed(n[i], base + HDMI_AUD_N1 + i); + } + + substream->runtime->delay = substream->runtime->period_size; + break; + + case SNDRV_PCM_TRIGGER_STOP: + dw_hdmi_stop_dma(dw); + break; + + default: + ret = -EINVAL; + break; + } + + return ret; +} + +static snd_pcm_uframes_t dw_hdmi_pointer(struct snd_pcm_substream *substream) +{ + struct snd_pcm_runtime *runtime = substream->runtime; + struct snd_dw_hdmi *dw = substream->private_data; + + return bytes_to_frames(runtime, dw->buf_offset); +} + +static struct snd_pcm_ops snd_dw_hdmi_ops = { + .open = dw_hdmi_open, + .close = dw_hdmi_close, + .ioctl = snd_pcm_lib_ioctl, + .hw_params = dw_hdmi_hw_params, + .hw_free = dw_hdmi_hw_free, + .prepare = dw_hdmi_prepare, + .trigger = dw_hdmi_trigger, + .pointer = dw_hdmi_pointer, + .page = snd_pcm_lib_get_vmalloc_page, +}; + +static int snd_dw_hdmi_probe(struct platform_device *pdev) 
+{ + const struct dw_hdmi_audio_data *data = pdev->dev.platform_data; + struct device *dev = pdev->dev.parent; + struct snd_dw_hdmi *dw; + struct snd_card *card; + struct snd_pcm *pcm; + unsigned revision; + int ret; + + writeb_relaxed(HDMI_IH_MUTE_AHBDMAAUD_STAT0_ALL, + data->base + HDMI_IH_MUTE_AHBDMAAUD_STAT0); + revision = readb_relaxed(data->base + HDMI_REVISION_ID); + if (revision != 0x0a && revision != 0x1a) { + dev_err(dev, "dw-hdmi-audio: unknown revision 0x%02x\n", + revision); + return -ENXIO; + } + + ret = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1, + THIS_MODULE, sizeof(struct snd_dw_hdmi), &card); + if (ret < 0) + return ret; + + snd_card_set_dev(card, dev); + + strlcpy(card->driver, DRIVER_NAME, sizeof(card->driver)); + strlcpy(card->shortname, "DW-HDMI", sizeof(card->shortname)); + snprintf(card->longname, sizeof(card->longname), + "%s rev 0x%02x, irq %d", card->shortname, revision, + data->irq); + + dw = card->private_data; + dw->card = card; + dw->data = *data; + dw->revision = revision; + + ret = snd_pcm_new(card, "DW HDMI", 0, 1, 0, &pcm); + if (ret < 0) + goto err; + + dw->pcm = pcm; + pcm->private_data = dw; + strlcpy(pcm->name, DRIVER_NAME, sizeof(pcm->name)); + snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_dw_hdmi_ops); + + snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, + dev, 64 * 1024, 64 * 1024); + + ret = snd_card_register(card); + if (ret < 0) + goto err; + + platform_set_drvdata(pdev, dw); + + return 0; + +err: + snd_card_free(card); + return ret; +} + +static int snd_dw_hdmi_remove(struct platform_device *pdev) +{ + struct snd_dw_hdmi *dw = platform_get_drvdata(pdev); + + snd_card_free(dw->card); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int snd_dw_hdmi_suspend(struct device *dev) +{ + struct snd_dw_hdmi *dw = dev_get_drvdata(dev); + + snd_power_change_state(dw->card, SNDRV_CTL_POWER_D3cold); + snd_pcm_suspend_all(dw->pcm); + + return 0; +} + +static int snd_dw_hdmi_resume(struct device *dev) +{ + struct snd_dw_hdmi *dw = dev_get_drvdata(dev); + + snd_power_change_state(dw->card, SNDRV_CTL_POWER_D0); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(snd_dw_hdmi_pm, snd_dw_hdmi_suspend, + snd_dw_hdmi_resume); +#define PM_OPS &snd_dw_hdmi_pm +#else +#define PM_OPS NULL +#endif + +static struct platform_driver snd_dw_hdmi_driver = { + .probe = snd_dw_hdmi_probe, + .remove = snd_dw_hdmi_remove, + .driver = { + .name = "dw-hdmi-audio", + .owner = THIS_MODULE, + .pm = PM_OPS, + }, +}; + +module_platform_driver(snd_dw_hdmi_driver); + +MODULE_AUTHOR("Russell King "); +MODULE_LICENSE("GPL"); diff -Nur linux-3.15-rc4.orig/drivers/staging/imx-drm/dw-hdmi-audio.h linux-3.15-rc4/drivers/staging/imx-drm/dw-hdmi-audio.h --- linux-3.15-rc4.orig/drivers/staging/imx-drm/dw-hdmi-audio.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-3.15-rc4/drivers/staging/imx-drm/dw-hdmi-audio.h 2014-05-07 17:05:51.108167072 +0200 @@ -0,0 +1,15 @@ +#ifndef DW_HDMI_AUDIO_H +#define DW_HDMI_AUDIO_H + +struct imx_hdmi; + +struct dw_hdmi_audio_data { + phys_addr_t phys; + void __iomem *base; + int irq; + struct imx_hdmi *hdmi; + u8 *eld; + void (*set_sample_rate)(struct imx_hdmi *, unsigned); +}; + +#endif diff -Nur linux-3.15-rc4.orig/drivers/staging/imx-drm/dw-hdmi-cec.c linux-3.15-rc4/drivers/staging/imx-drm/dw-hdmi-cec.c --- linux-3.15-rc4.orig/drivers/staging/imx-drm/dw-hdmi-cec.c 1970-01-01 01:00:00.000000000 +0100 +++ linux-3.15-rc4/drivers/staging/imx-drm/dw-hdmi-cec.c 2014-05-07 17:05:51.108167072 +0200 @@ -0,0 +1,205 @@ +/* 
http://git.freescale.com/git/cgit.cgi/imx/linux-2.6-imx.git/tree/drivers/mxc/hdmi-cec/mxc_hdmi-cec.c?h=imx_3.0.35_4.1.0 */ +#include +#include +#include +#include +#include +#include +#include + +#include "imx-hdmi.h" +#include "dw-hdmi-cec.h" + +#define DEV_NAME "mxc_hdmi_cec" + +enum { + CEC_STAT_DONE = BIT(0), + CEC_STAT_EOM = BIT(1), + CEC_STAT_NACK = BIT(2), + CEC_STAT_ARBLOST = BIT(3), + CEC_STAT_ERROR_INIT = BIT(4), + CEC_STAT_ERROR_FOLL = BIT(5), + CEC_STAT_WAKEUP = BIT(6), + + CEC_CTRL_START = BIT(0), + CEC_CTRL_NORMAL = 1 << 1, +}; + +struct dw_hdmi_cec { + struct cec_dev cec; + + struct device *dev; + void __iomem *base; + const struct dw_hdmi_cec_ops *ops; + void *ops_data; + int irq; +}; + +static void dw_hdmi_set_address(struct cec_dev *cec_dev, unsigned addresses) +{ + struct dw_hdmi_cec *cec = container_of(cec_dev, struct dw_hdmi_cec, cec); + + writeb(addresses & 255, cec->base + HDMI_CEC_ADDR_L); + writeb(addresses >> 8, cec->base + HDMI_CEC_ADDR_H); +} + +static void dw_hdmi_send_message(struct cec_dev *cec_dev, u8 *msg, + size_t count) +{ + struct dw_hdmi_cec *cec = container_of(cec_dev, struct dw_hdmi_cec, cec); + unsigned i; + + for (i = 0; i < count; i++) + writeb(msg[i], cec->base + HDMI_CEC_TX_DATA0 + i); + + writeb(count, cec->base + HDMI_CEC_TX_CNT); + writeb(CEC_CTRL_NORMAL | CEC_CTRL_START, cec->base + HDMI_CEC_CTRL); +} + +static irqreturn_t dw_hdmi_cec_irq(int irq, void *data) +{ + struct dw_hdmi_cec *cec = data; + struct cec_dev *cec_dev = &cec->cec; + unsigned stat = readb(cec->base + HDMI_IH_CEC_STAT0); + + if (stat == 0) + return IRQ_NONE; + + writeb(stat, cec->base + HDMI_IH_CEC_STAT0); + + if (stat & CEC_STAT_ERROR_INIT) { + if (cec->cec.retries) { + unsigned v = readb(cec->base + HDMI_CEC_CTRL); + writeb(v | CEC_CTRL_START, cec->base + HDMI_CEC_CTRL); + cec->cec.retries -= 1; + } else { + cec->cec.write_busy = 0; + cec_dev_event(cec_dev, MESSAGE_TYPE_SEND_ERROR, NULL, 0); + } + } else if (stat & (CEC_STAT_DONE | CEC_STAT_NACK)) + cec_dev_send_complete(cec_dev, stat & CEC_STAT_DONE); + + if (stat & CEC_STAT_EOM) { + unsigned len, i; + u8 msg[MAX_MESSAGE_LEN]; + + len = readb(cec->base + HDMI_CEC_RX_CNT); + if (len > sizeof(msg)) + len = sizeof(msg); + + for (i = 0; i < len; i++) + msg[i] = readb(cec->base + HDMI_CEC_RX_DATA0 + i); + + writeb(0, cec->base + HDMI_CEC_LOCK); + + cec_dev_receive(cec_dev, msg, len); + } + + return IRQ_HANDLED; +} +EXPORT_SYMBOL(dw_hdmi_cec_irq); + +static void dw_hdmi_cec_release(struct cec_dev *cec_dev) +{ + struct dw_hdmi_cec *cec = container_of(cec_dev, struct dw_hdmi_cec, cec); + + writeb(~0, cec->base + HDMI_CEC_MASK); + writeb(~0, cec->base + HDMI_IH_MUTE_CEC_STAT0); + writeb(0, cec->base + HDMI_CEC_POLARITY); + + free_irq(cec->irq, cec); + + cec->ops->disable(cec->ops_data); +} + +static int dw_hdmi_cec_open(struct cec_dev *cec_dev) +{ + struct dw_hdmi_cec *cec = container_of(cec_dev, struct dw_hdmi_cec, cec); + unsigned irqs; + int ret; + + writeb(0, cec->base + HDMI_CEC_CTRL); + writeb(~0, cec->base + HDMI_IH_CEC_STAT0); + writeb(0, cec->base + HDMI_CEC_LOCK); + + ret = request_irq(cec->irq, dw_hdmi_cec_irq, IRQF_SHARED, + DEV_NAME, cec); + if (ret < 0) + return ret; + + dw_hdmi_set_address(cec_dev, cec_dev->addresses); + + cec->ops->enable(cec->ops_data); + + irqs = CEC_STAT_ERROR_INIT | CEC_STAT_NACK | CEC_STAT_EOM | + CEC_STAT_DONE; + writeb(irqs, cec->base + HDMI_CEC_POLARITY); + writeb(~irqs, cec->base + HDMI_CEC_MASK); + writeb(~irqs, cec->base + HDMI_IH_MUTE_CEC_STAT0); + + return 0; +} + +static int 
dw_hdmi_cec_probe(struct platform_device *pdev) +{ + struct dw_hdmi_cec_data *data = dev_get_platdata(&pdev->dev); + struct dw_hdmi_cec *cec; + + if (!data) + return -ENXIO; + + cec = devm_kzalloc(&pdev->dev, sizeof(*cec), GFP_KERNEL); + if (!cec) + return -ENOMEM; + + cec->dev = &pdev->dev; + cec->base = data->base; + cec->irq = data->irq; + cec->ops = data->ops; + cec->ops_data = data->ops_data; + cec->cec.open = dw_hdmi_cec_open; + cec->cec.release = dw_hdmi_cec_release; + cec->cec.send_message = dw_hdmi_send_message; + cec->cec.set_address = dw_hdmi_set_address; + + cec_dev_init(&cec->cec, THIS_MODULE); + + /* FIXME: soft-reset the CEC interface */ + + dw_hdmi_set_address(&cec->cec, cec->cec.addresses); + writeb(0, cec->base + HDMI_CEC_TX_CNT); + writeb(~0, cec->base + HDMI_CEC_MASK); + writeb(~0, cec->base + HDMI_IH_MUTE_CEC_STAT0); + writeb(0, cec->base + HDMI_CEC_POLARITY); + + /* + * Our device is just a convenience - we want to link to the real + * hardware device here, so that userspace can see the association + * between the HDMI hardware and its associated CEC chardev. + */ + return cec_dev_add(&cec->cec, cec->dev->parent, DEV_NAME); +} + +static int dw_hdmi_cec_remove(struct platform_device *pdev) +{ + struct dw_hdmi_cec *cec = platform_get_drvdata(pdev); + + cec_dev_remove(&cec->cec); + + return 0; +} + +static struct platform_driver dw_hdmi_cec_driver = { + .probe = dw_hdmi_cec_probe, + .remove = dw_hdmi_cec_remove, + .driver = { + .name = "dw-hdmi-cec", + .owner = THIS_MODULE, + }, +}; +module_platform_driver(dw_hdmi_cec_driver); + +MODULE_AUTHOR("Russell King "); +MODULE_DESCRIPTION("Synopsis Designware HDMI CEC driver for i.MX"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS(PLATFORM_MODULE_PREFIX "dw-hdmi-cec"); diff -Nur linux-3.15-rc4.orig/drivers/staging/imx-drm/dw-hdmi-cec.h linux-3.15-rc4/drivers/staging/imx-drm/dw-hdmi-cec.h --- linux-3.15-rc4.orig/drivers/staging/imx-drm/dw-hdmi-cec.h 1970-01-01 01:00:00.000000000 +0100 +++ linux-3.15-rc4/drivers/staging/imx-drm/dw-hdmi-cec.h 2014-05-07 17:05:51.108167072 +0200 @@ -0,0 +1,16 @@ +#ifndef DW_HDMI_CEC_H +#define DW_HDMI_CEC_H + +struct dw_hdmi_cec_ops { + void (*enable)(void *); + void (*disable)(void *); +}; + +struct dw_hdmi_cec_data { + void __iomem *base; + int irq; + const struct dw_hdmi_cec_ops *ops; + void *ops_data; +}; + +#endif diff -Nur linux-3.15-rc4.orig/drivers/staging/imx-drm/imx-drm-core.c linux-3.15-rc4/drivers/staging/imx-drm/imx-drm-core.c --- linux-3.15-rc4.orig/drivers/staging/imx-drm/imx-drm-core.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/staging/imx-drm/imx-drm-core.c 2014-05-07 17:05:51.108167072 +0200 @@ -517,7 +517,7 @@ of_node_put(port); if (port == imx_crtc->port) { ret = of_graph_parse_endpoint(ep, &endpoint); - return ret ? ret : endpoint.id; + return ret ? 
ret : endpoint.port; } } while (ep); @@ -675,6 +675,11 @@ if (!remote || !of_device_is_available(remote)) { of_node_put(remote); continue; + } else if (!of_device_is_available(remote->parent)) { + dev_warn(&pdev->dev, "parent device of %s is not available\n", + remote->full_name); + of_node_put(remote); + continue; } ret = imx_drm_add_component(&pdev->dev, remote); diff -Nur linux-3.15-rc4.orig/drivers/staging/imx-drm/imx-hdmi.c linux-3.15-rc4/drivers/staging/imx-drm/imx-hdmi.c --- linux-3.15-rc4.orig/drivers/staging/imx-drm/imx-hdmi.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/staging/imx-drm/imx-hdmi.c 2014-05-07 17:05:51.112167097 +0200 @@ -28,6 +28,9 @@ #include #include +#include "drm-ddc-connector.h" +#include "dw-hdmi-audio.h" +#include "dw-hdmi-cec.h" #include "ipu-v3/imx-ipu-v3.h" #include "imx-hdmi.h" #include "imx-drm.h" @@ -112,27 +115,27 @@ }; struct imx_hdmi { - struct drm_connector connector; + struct drm_ddc_connector *ddc_conn; struct drm_encoder encoder; + struct platform_device *audio; + struct platform_device *cec; enum imx_hdmi_devtype dev_type; struct device *dev; struct clk *isfr_clk; struct clk *iahb_clk; - enum drm_connector_status connector_status; - struct hdmi_data_info hdmi_data; int vic; u8 edid[HDMI_EDID_LEN]; + u8 mc_clkdis; bool cable_plugin; bool phy_enabled; struct drm_display_mode previous_mode; struct regmap *regmap; - struct i2c_adapter *ddc; void __iomem *regs; unsigned int sample_rate; @@ -362,6 +365,12 @@ hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock); } +static void imx_hdmi_set_sample_rate(struct imx_hdmi *hdmi, unsigned rate) +{ + hdmi->sample_rate = rate; + hdmi_set_clk_regenerator(hdmi, hdmi->hdmi_data.video_mode.mpixelclock); +} + /* * this submodule is responsible for the video data synchronization. 
* for example, for RGB 4:4:4 input, the data map is defined as @@ -1148,8 +1157,6 @@ /* HDMI Initialization Step B.4 */ static void imx_hdmi_enable_video_path(struct imx_hdmi *hdmi) { - u8 clkdis; - /* control period minimum duration */ hdmi_writeb(hdmi, 12, HDMI_FC_CTRLDUR); hdmi_writeb(hdmi, 32, HDMI_FC_EXCTRLDUR); @@ -1161,23 +1168,28 @@ hdmi_writeb(hdmi, 0x21, HDMI_FC_CH2PREAM); /* Enable pixel clock and tmds data path */ - clkdis = 0x7F; - clkdis &= ~HDMI_MC_CLKDIS_PIXELCLK_DISABLE; - hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS); + hdmi->mc_clkdis |= HDMI_MC_CLKDIS_HDCPCLK_DISABLE | + HDMI_MC_CLKDIS_CSCCLK_DISABLE | + HDMI_MC_CLKDIS_AUDCLK_DISABLE | + HDMI_MC_CLKDIS_PREPCLK_DISABLE | + HDMI_MC_CLKDIS_TMDSCLK_DISABLE; + hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_PIXELCLK_DISABLE; + hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); - clkdis &= ~HDMI_MC_CLKDIS_TMDSCLK_DISABLE; - hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS); + hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_TMDSCLK_DISABLE; + hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); /* Enable csc path */ if (is_color_space_conversion(hdmi)) { - clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE; - hdmi_writeb(hdmi, clkdis, HDMI_MC_CLKDIS); + hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_CSCCLK_DISABLE; + hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); } } static void hdmi_enable_audio_clk(struct imx_hdmi *hdmi) { - hdmi_modb(hdmi, 0, HDMI_MC_CLKDIS_AUDCLK_DISABLE, HDMI_MC_CLKDIS); + hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_AUDCLK_DISABLE; + hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); } /* Workaround to clear the overflow condition */ @@ -1380,41 +1392,16 @@ static enum drm_connector_status imx_hdmi_connector_detect(struct drm_connector *connector, bool force) { - struct imx_hdmi *hdmi = container_of(connector, struct imx_hdmi, - connector); - return hdmi->connector_status; -} - -static int imx_hdmi_connector_get_modes(struct drm_connector *connector) -{ - struct imx_hdmi *hdmi = container_of(connector, struct imx_hdmi, - connector); - struct edid *edid; - int ret; + struct imx_hdmi *hdmi = drm_ddc_private(connector); - if (!hdmi->ddc) - return 0; - - edid = drm_get_edid(connector, hdmi->ddc); - if (edid) { - dev_dbg(hdmi->dev, "got edid: width[%d] x height[%d]\n", - edid->width_cm, edid->height_cm); - - drm_mode_connector_update_edid_property(connector, edid); - ret = drm_add_edid_modes(connector, edid); - kfree(edid); - } else { - dev_dbg(hdmi->dev, "failed to get edid\n"); - } - - return 0; + return hdmi_readb(hdmi, HDMI_PHY_STAT0) & HDMI_PHY_HPD ? 
+ connector_status_connected : connector_status_disconnected; } static struct drm_encoder *imx_hdmi_connector_best_encoder(struct drm_connector *connector) { - struct imx_hdmi *hdmi = container_of(connector, struct imx_hdmi, - connector); + struct imx_hdmi *hdmi = drm_ddc_private(connector); return &hdmi->encoder; } @@ -1483,15 +1470,8 @@ .disable = imx_hdmi_encoder_disable, }; -static struct drm_connector_funcs imx_hdmi_connector_funcs = { - .dpms = drm_helper_connector_dpms, - .fill_modes = drm_helper_probe_single_connector_modes, - .detect = imx_hdmi_connector_detect, - .destroy = imx_drm_connector_destroy, -}; - static struct drm_connector_helper_funcs imx_hdmi_connector_helper_funcs = { - .get_modes = imx_hdmi_connector_get_modes, + .get_modes = drm_ddc_connector_get_modes, .mode_valid = imx_drm_connector_mode_valid, .best_encoder = imx_hdmi_connector_best_encoder, }; @@ -1524,7 +1504,6 @@ hdmi_modb(hdmi, 0, HDMI_PHY_HPD, HDMI_PHY_POL0); - hdmi->connector_status = connector_status_connected; imx_hdmi_poweron(hdmi); } else { dev_dbg(hdmi->dev, "EVENT=plugout\n"); @@ -1532,10 +1511,9 @@ hdmi_modb(hdmi, HDMI_PHY_HPD, HDMI_PHY_HPD, HDMI_PHY_POL0); - hdmi->connector_status = connector_status_disconnected; imx_hdmi_poweroff(hdmi); } - drm_helper_hpd_irq_event(hdmi->connector.dev); + drm_helper_hpd_irq_event(hdmi->ddc_conn->connector.dev); } hdmi_writeb(hdmi, intr_stat, HDMI_IH_PHY_STAT0); @@ -1553,24 +1531,42 @@ if (ret) return ret; - hdmi->connector.polled = DRM_CONNECTOR_POLL_HPD; + hdmi->ddc_conn->connector.polled = DRM_CONNECTOR_POLL_HPD; drm_encoder_helper_add(&hdmi->encoder, &imx_hdmi_encoder_helper_funcs); drm_encoder_init(drm, &hdmi->encoder, &imx_hdmi_encoder_funcs, DRM_MODE_ENCODER_TMDS); - drm_connector_helper_add(&hdmi->connector, + drm_connector_helper_add(&hdmi->ddc_conn->connector, &imx_hdmi_connector_helper_funcs); - drm_connector_init(drm, &hdmi->connector, &imx_hdmi_connector_funcs, - DRM_MODE_CONNECTOR_HDMIA); - - hdmi->connector.encoder = &hdmi->encoder; + drm_ddc_connector_add(drm, hdmi->ddc_conn, DRM_MODE_CONNECTOR_HDMIA); - drm_mode_connector_attach_encoder(&hdmi->connector, &hdmi->encoder); + drm_mode_connector_attach_encoder(&hdmi->ddc_conn->connector, &hdmi->encoder); return 0; } +static void imx_hdmi_cec_enable(void *data) +{ + struct imx_hdmi *hdmi = data; + + hdmi->mc_clkdis &= ~HDMI_MC_CLKDIS_CECCLK_DISABLE; + hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); +} + +static void imx_hdmi_cec_disable(void *data) +{ + struct imx_hdmi *hdmi = data; + + hdmi->mc_clkdis |= HDMI_MC_CLKDIS_CECCLK_DISABLE; + hdmi_writeb(hdmi, hdmi->mc_clkdis, HDMI_MC_CLKDIS); +} + +static const struct dw_hdmi_cec_ops imx_hdmi_cec_ops = { + .enable = imx_hdmi_cec_enable, + .disable = imx_hdmi_cec_disable, +}; + static struct platform_device_id imx_hdmi_devtype[] = { { .name = "imx6q-hdmi", @@ -1592,11 +1588,13 @@ static int imx_hdmi_bind(struct device *dev, struct device *master, void *data) { struct platform_device *pdev = to_platform_device(dev); + struct platform_device_info pdevinfo; const struct of_device_id *of_id = of_match_device(imx_hdmi_dt_ids, dev); struct drm_device *drm = data; struct device_node *np = dev->of_node; - struct device_node *ddc_node; + struct dw_hdmi_audio_data audio; + struct dw_hdmi_cec_data cec; struct imx_hdmi *hdmi; struct resource *iores; int ret, irq; @@ -1605,27 +1603,22 @@ if (!hdmi) return -ENOMEM; + hdmi->ddc_conn = drm_ddc_connector_create(drm, np, hdmi); + if (IS_ERR(hdmi->ddc_conn)) + return PTR_ERR(hdmi->ddc_conn); + + hdmi->ddc_conn->detect = 
imx_hdmi_connector_detect; + hdmi->dev = dev; - hdmi->connector_status = connector_status_disconnected; hdmi->sample_rate = 48000; hdmi->ratio = 100; + hdmi->mc_clkdis = 0x7f; if (of_id) { const struct platform_device_id *device_id = of_id->data; hdmi->dev_type = device_id->driver_data; } - ddc_node = of_parse_phandle(np, "ddc-i2c-bus", 0); - if (ddc_node) { - hdmi->ddc = of_find_i2c_adapter_by_node(ddc_node); - if (!hdmi->ddc) - dev_dbg(hdmi->dev, "failed to read ddc node\n"); - - of_node_put(ddc_node); - } else { - dev_dbg(hdmi->dev, "no ddc property found\n"); - } - irq = platform_get_irq(pdev, 0); if (irq < 0) return -EINVAL; @@ -1711,6 +1704,35 @@ /* Unmute interrupts */ hdmi_writeb(hdmi, ~HDMI_IH_PHY_STAT0_HPD, HDMI_IH_MUTE_PHY_STAT0); + memset(&pdevinfo, 0, sizeof(pdevinfo)); + pdevinfo.parent = dev; + pdevinfo.id = PLATFORM_DEVID_AUTO; + + audio.phys = iores->start; + audio.base = hdmi->regs; + audio.irq = irq; + audio.hdmi = hdmi; + audio.eld = hdmi->ddc_conn->connector.eld; + audio.set_sample_rate = imx_hdmi_set_sample_rate; + + pdevinfo.name = "dw-hdmi-audio"; + pdevinfo.data = &audio; + pdevinfo.size_data = sizeof(audio); + pdevinfo.dma_mask = DMA_BIT_MASK(32); + hdmi->audio = platform_device_register_full(&pdevinfo); + + cec.base = hdmi->regs; + cec.irq = irq; + cec.ops = &imx_hdmi_cec_ops; + cec.ops_data = hdmi; + + pdevinfo.name = "dw-hdmi-cec"; + pdevinfo.data = &cec; + pdevinfo.size_data = sizeof(cec); + pdevinfo.dma_mask = 0; + + hdmi->cec = platform_device_register_full(&pdevinfo); + dev_set_drvdata(dev, hdmi); return 0; @@ -1728,15 +1750,19 @@ { struct imx_hdmi *hdmi = dev_get_drvdata(dev); + if (!IS_ERR(hdmi->audio)) + platform_device_unregister(hdmi->audio); + if (!IS_ERR(hdmi->cec)) + platform_device_unregister(hdmi->cec); + /* Disable all interrupts */ hdmi_writeb(hdmi, ~0, HDMI_IH_MUTE_PHY_STAT0); - hdmi->connector.funcs->destroy(&hdmi->connector); + hdmi->ddc_conn->connector.funcs->destroy(&hdmi->ddc_conn->connector); hdmi->encoder.funcs->destroy(&hdmi->encoder); clk_disable_unprepare(hdmi->iahb_clk); clk_disable_unprepare(hdmi->isfr_clk); - i2c_put_adapter(hdmi->ddc); } static const struct component_ops hdmi_ops = { diff -Nur linux-3.15-rc4.orig/drivers/staging/imx-drm/imx-ldb.c linux-3.15-rc4/drivers/staging/imx-drm/imx-ldb.c --- linux-3.15-rc4.orig/drivers/staging/imx-drm/imx-ldb.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/staging/imx-drm/imx-ldb.c 2014-05-07 17:05:51.112167097 +0200 @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -60,6 +61,7 @@ struct imx_ldb *ldb; struct drm_connector connector; struct drm_encoder encoder; + struct drm_panel *panel; struct device_node *child; int chno; void *edid; @@ -96,6 +98,13 @@ struct imx_ldb_channel *imx_ldb_ch = con_to_imx_ldb_ch(connector); int num_modes = 0; + if (imx_ldb_ch->panel && imx_ldb_ch->panel->funcs && + imx_ldb_ch->panel->funcs->get_modes) { + num_modes = imx_ldb_ch->panel->funcs->get_modes(imx_ldb_ch->panel); + if (num_modes > 0) + return num_modes; + } + if (imx_ldb_ch->edid) { drm_mode_connector_update_edid_property(connector, imx_ldb_ch->edid); @@ -243,6 +252,8 @@ } regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl); + + drm_panel_enable(imx_ldb_ch->panel); } static void imx_ldb_encoder_mode_set(struct drm_encoder *encoder, @@ -294,6 +305,8 @@ (ldb->ldb_ctrl & LDB_CH1_MODE_EN_MASK) == 0) return; + drm_panel_disable(imx_ldb_ch->panel); + if (imx_ldb_ch == &ldb->channel[0]) ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK; else if (imx_ldb_ch == 
&ldb->channel[1]) @@ -379,6 +392,9 @@ drm_connector_init(drm, &imx_ldb_ch->connector, &imx_ldb_connector_funcs, DRM_MODE_CONNECTOR_LVDS); + if (imx_ldb_ch->panel) + drm_panel_attach(imx_ldb_ch->panel, &imx_ldb_ch->connector); + drm_mode_connector_attach_encoder(&imx_ldb_ch->connector, &imx_ldb_ch->encoder); @@ -493,6 +509,7 @@ for_each_child_of_node(np, child) { struct imx_ldb_channel *channel; + struct device_node *panel_node; ret = of_property_read_u32(child, "reg", &i); if (ret || i < 0 || i > 1) @@ -556,6 +573,10 @@ return -EINVAL; } + panel_node = of_parse_phandle(child, "fsl,panel", 0); + if (panel_node) + channel->panel = of_drm_find_panel(panel_node); + ret = imx_ldb_register(drm, channel); if (ret) return ret; diff -Nur linux-3.15-rc4.orig/drivers/staging/imx-drm/imx-tve.c linux-3.15-rc4/drivers/staging/imx-drm/imx-tve.c --- linux-3.15-rc4.orig/drivers/staging/imx-drm/imx-tve.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/staging/imx-drm/imx-tve.c 2014-05-07 17:05:51.112167097 +0200 @@ -22,7 +22,6 @@ #include #include #include -#include #include #include #include @@ -31,6 +30,7 @@ #include #include +#include "drm-ddc-connector.h" #include "ipu-v3/imx-ipu-v3.h" #include "imx-drm.h" @@ -111,7 +111,7 @@ }; struct imx_tve { - struct drm_connector connector; + struct drm_ddc_connector *ddc_conn; struct drm_encoder encoder; struct device *dev; spinlock_t lock; /* register lock */ @@ -120,7 +120,6 @@ struct regmap *regmap; struct regulator *dac_reg; - struct i2c_adapter *ddc; struct clk *clk; struct clk *di_sel_clk; struct clk_hw clk_hw_di; @@ -219,35 +218,10 @@ return 0; } -static enum drm_connector_status imx_tve_connector_detect( - struct drm_connector *connector, bool force) -{ - return connector_status_connected; -} - -static int imx_tve_connector_get_modes(struct drm_connector *connector) -{ - struct imx_tve *tve = con_to_tve(connector); - struct edid *edid; - int ret = 0; - - if (!tve->ddc) - return 0; - - edid = drm_get_edid(connector, tve->ddc); - if (edid) { - drm_mode_connector_update_edid_property(connector, edid); - ret = drm_add_edid_modes(connector, edid); - kfree(edid); - } - - return ret; -} - static int imx_tve_connector_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode) { - struct imx_tve *tve = con_to_tve(connector); + struct imx_tve *tve = to_ddc_conn(connector)->private; unsigned long rate; int ret; @@ -274,7 +248,7 @@ static struct drm_encoder *imx_tve_connector_best_encoder( struct drm_connector *connector) { - struct imx_tve *tve = con_to_tve(connector); + struct imx_tve *tve = drm_ddc_private(connector); return &tve->encoder; } @@ -362,15 +336,8 @@ tve_disable(tve); } -static struct drm_connector_funcs imx_tve_connector_funcs = { - .dpms = drm_helper_connector_dpms, - .fill_modes = drm_helper_probe_single_connector_modes, - .detect = imx_tve_connector_detect, - .destroy = imx_drm_connector_destroy, -}; - static struct drm_connector_helper_funcs imx_tve_connector_helper_funcs = { - .get_modes = imx_tve_connector_get_modes, + .get_modes = drm_ddc_connector_get_modes, .best_encoder = imx_tve_connector_best_encoder, .mode_valid = imx_tve_connector_mode_valid, }; @@ -513,12 +480,11 @@ drm_encoder_init(drm, &tve->encoder, &imx_tve_encoder_funcs, encoder_type); - drm_connector_helper_add(&tve->connector, + drm_connector_helper_add(&tve->ddc_conn->connector, &imx_tve_connector_helper_funcs); - drm_connector_init(drm, &tve->connector, &imx_tve_connector_funcs, - DRM_MODE_CONNECTOR_VGA); + drm_ddc_connector_add(drm, tve->ddc_conn, 
DRM_MODE_CONNECTOR_VGA); - drm_mode_connector_attach_encoder(&tve->connector, &tve->encoder); + drm_mode_connector_attach_encoder(&tve->ddc_conn->connector, &tve->encoder); return 0; } @@ -567,7 +533,6 @@ struct platform_device *pdev = to_platform_device(dev); struct drm_device *drm = data; struct device_node *np = dev->of_node; - struct device_node *ddc_node; struct imx_tve *tve; struct resource *res; void __iomem *base; @@ -579,15 +544,13 @@ if (!tve) return -ENOMEM; + tve->ddc_conn = drm_ddc_connector_create(drm, np, tve); + if (IS_ERR(tve->ddc_conn)) + return PTR_ERR(tve->ddc_conn); + tve->dev = dev; spin_lock_init(&tve->lock); - ddc_node = of_parse_phandle(np, "i2c-ddc-bus", 0); - if (ddc_node) { - tve->ddc = of_find_i2c_adapter_by_node(ddc_node); - of_node_put(ddc_node); - } - tve->mode = of_get_tve_mode(np); if (tve->mode != TVE_MODE_VGA) { dev_err(dev, "only VGA mode supported, currently\n"); @@ -694,7 +657,7 @@ { struct imx_tve *tve = dev_get_drvdata(dev); - tve->connector.funcs->destroy(&tve->connector); + tve->ddc_conn->connector.funcs->destroy(&tve->ddc_conn->connector); tve->encoder.funcs->destroy(&tve->encoder); if (!IS_ERR(tve->dac_reg)) diff -Nur linux-3.15-rc4.orig/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h linux-3.15-rc4/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h --- linux-3.15-rc4.orig/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/staging/imx-drm/ipu-v3/imx-ipu-v3.h 2014-05-07 17:05:51.112167097 +0200 @@ -76,6 +76,7 @@ IPU_IRQ_EOS = 192, }; +int ipu_map_irq(struct ipu_soc *ipu, int irq); int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel, enum ipu_channel_irq irq); @@ -114,8 +115,10 @@ void ipu_dc_put(struct ipu_dc *dc); int ipu_dc_init_sync(struct ipu_dc *dc, struct ipu_di *di, bool interlaced, u32 pixel_fmt, u32 width); +void ipu_dc_enable(struct ipu_soc *ipu); void ipu_dc_enable_channel(struct ipu_dc *dc); void ipu_dc_disable_channel(struct ipu_dc *dc); +void ipu_dc_disable(struct ipu_soc *ipu); /* * IPU Display Interface (di) functions @@ -152,8 +155,10 @@ struct ipu_dp *ipu_dp_get(struct ipu_soc *ipu, unsigned int flow); void ipu_dp_put(struct ipu_dp *); +int ipu_dp_enable(struct ipu_soc *ipu); int ipu_dp_enable_channel(struct ipu_dp *dp); void ipu_dp_disable_channel(struct ipu_dp *dp); +void ipu_dp_disable(struct ipu_soc *ipu); int ipu_dp_setup_channel(struct ipu_dp *dp, enum ipu_color_space in, enum ipu_color_space out); int ipu_dp_set_window_pos(struct ipu_dp *, u16 x_pos, u16 y_pos); diff -Nur linux-3.15-rc4.orig/drivers/staging/imx-drm/ipu-v3/ipu-common.c linux-3.15-rc4/drivers/staging/imx-drm/ipu-v3/ipu-common.c --- linux-3.15-rc4.orig/drivers/staging/imx-drm/ipu-v3/ipu-common.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/staging/imx-drm/ipu-v3/ipu-common.c 2014-05-07 17:05:51.128167196 +0200 @@ -697,6 +697,12 @@ } EXPORT_SYMBOL_GPL(ipu_idmac_enable_channel); +bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno) +{ + return (ipu_idmac_read(ipu, IDMAC_CHA_BUSY(chno)) & idma_mask(chno)); +} +EXPORT_SYMBOL_GPL(ipu_idmac_channel_busy); + int ipu_idmac_wait_busy(struct ipuv3_channel *channel, int ms) { struct ipu_soc *ipu = channel->ipu; @@ -714,6 +720,22 @@ } EXPORT_SYMBOL_GPL(ipu_idmac_wait_busy); +int ipu_wait_interrupt(struct ipu_soc *ipu, int irq, int ms) +{ + unsigned long timeout; + + timeout = jiffies + msecs_to_jiffies(ms); + ipu_cm_write(ipu, BIT(irq % 32), IPU_INT_STAT(irq / 32)); + while (!(ipu_cm_read(ipu, IPU_INT_STAT(irq / 32) & 
BIT(irq % 32)))) { + if (time_after(jiffies, timeout)) + return -ETIMEDOUT; + cpu_relax(); + } + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_wait_interrupt); + int ipu_idmac_disable_channel(struct ipuv3_channel *channel) { struct ipu_soc *ipu = channel->ipu; @@ -933,15 +955,22 @@ chained_irq_exit(chip, desc); } -int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel, - enum ipu_channel_irq irq_type) +int ipu_map_irq(struct ipu_soc *ipu, int irq) { - int irq = irq_linear_revmap(ipu->domain, irq_type + channel->num); + int virq; - if (!irq) - irq = irq_create_mapping(ipu->domain, irq_type + channel->num); + virq = irq_linear_revmap(ipu->domain, irq); + if (!virq) + virq = irq_create_mapping(ipu->domain, irq); - return irq; + return virq; +} +EXPORT_SYMBOL_GPL(ipu_map_irq); + +int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel, + enum ipu_channel_irq irq_type) +{ + return ipu_map_irq(ipu, irq_type + channel->num); } EXPORT_SYMBOL_GPL(ipu_idmac_channel_irq); diff -Nur linux-3.15-rc4.orig/drivers/staging/imx-drm/ipu-v3/ipu-dc.c linux-3.15-rc4/drivers/staging/imx-drm/ipu-v3/ipu-dc.c --- linux-3.15-rc4.orig/drivers/staging/imx-drm/ipu-v3/ipu-dc.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/staging/imx-drm/ipu-v3/ipu-dc.c 2014-05-07 17:05:51.136167246 +0200 @@ -18,6 +18,7 @@ #include #include #include +#include #include #include "../imx-drm.h" @@ -92,6 +93,7 @@ IPU_DC_MAP_GBR24, /* TVEv2 */ IPU_DC_MAP_BGR666, IPU_DC_MAP_BGR24, + IPU_DC_MAP_RGB666, }; struct ipu_dc { @@ -110,6 +112,9 @@ struct device *dev; struct ipu_dc channels[IPU_DC_NUM_CHANNELS]; struct mutex mutex; + struct completion comp; + int dc_irq; + int dp_irq; }; static void dc_link_event(struct ipu_dc *dc, int event, int addr, int priority) @@ -155,6 +160,8 @@ return IPU_DC_MAP_BGR666; case V4L2_PIX_FMT_BGR24: return IPU_DC_MAP_BGR24; + case V4L2_PIX_FMT_RGB666: + return IPU_DC_MAP_RGB666; default: return -EINVAL; } @@ -220,12 +227,16 @@ writel(0x0, dc->base + DC_WR_CH_ADDR); writel(width, priv->dc_reg + DC_DISP_CONF2(dc->di)); - ipu_module_enable(priv->ipu, IPU_CONF_DC_EN); - return 0; } EXPORT_SYMBOL_GPL(ipu_dc_init_sync); +void ipu_dc_enable(struct ipu_soc *ipu) +{ + ipu_module_enable(ipu, IPU_CONF_DC_EN); +} +EXPORT_SYMBOL_GPL(ipu_dc_enable); + void ipu_dc_enable_channel(struct ipu_dc *dc) { int di; @@ -239,41 +250,55 @@ } EXPORT_SYMBOL_GPL(ipu_dc_enable_channel); +static irqreturn_t dc_irq_handler(int irq, void *dev_id) +{ + struct ipu_dc *dc = dev_id; + u32 reg; + + reg = readl(dc->base + DC_WR_CH_CONF); + reg &= ~DC_WR_CH_CONF_PROG_TYPE_MASK; + writel(reg, dc->base + DC_WR_CH_CONF); + + /* The Freescale BSP kernel clears DIx_COUNTER_RELEASE here */ + + complete(&dc->priv->comp); + return IRQ_HANDLED; +} + void ipu_dc_disable_channel(struct ipu_dc *dc) { struct ipu_dc_priv *priv = dc->priv; + int irq, ret; u32 val; - int irq = 0, timeout = 50; + /* TODO: Handle MEM_FG_SYNC differently from MEM_BG_SYNC */ if (dc->chno == 1) - irq = IPU_IRQ_DC_FC_1; + irq = priv->dc_irq; else if (dc->chno == 5) - irq = IPU_IRQ_DP_SF_END; + irq = priv->dp_irq; else return; - /* should wait for the interrupt here */ - mdelay(50); - - if (dc->di == 0) - val = 0x00000002; - else - val = 0x00000020; - - /* Wait for DC triple buffer to empty */ - while ((readl(priv->dc_reg + DC_STAT) & val) != val) { - usleep_range(2000, 20000); - timeout -= 2; - if (timeout <= 0) - break; + init_completion(&priv->comp); + enable_irq(irq); + ret = wait_for_completion_timeout(&priv->comp, msecs_to_jiffies(50)); 
+ disable_irq(irq); + if (ret <= 0) { + dev_warn(priv->dev, "DC stop timeout after 50 ms\n"); + + val = readl(dc->base + DC_WR_CH_CONF); + val &= ~DC_WR_CH_CONF_PROG_TYPE_MASK; + writel(val, dc->base + DC_WR_CH_CONF); } - - val = readl(dc->base + DC_WR_CH_CONF); - val &= ~DC_WR_CH_CONF_PROG_TYPE_MASK; - writel(val, dc->base + DC_WR_CH_CONF); } EXPORT_SYMBOL_GPL(ipu_dc_disable_channel); +void ipu_dc_disable(struct ipu_soc *ipu) +{ + ipu_module_disable(ipu, IPU_CONF_DC_EN); +} +EXPORT_SYMBOL_GPL(ipu_dc_disable); + static void ipu_dc_map_config(struct ipu_dc_priv *priv, enum ipu_dc_map map, int byte_num, int offset, int mask) { @@ -340,7 +365,7 @@ struct ipu_dc_priv *priv; static int channel_offsets[] = { 0, 0x1c, 0x38, 0x54, 0x58, 0x5c, 0x78, 0, 0x94, 0xb4}; - int i; + int i, ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) @@ -361,6 +386,23 @@ priv->channels[i].base = priv->dc_reg + channel_offsets[i]; } + priv->dc_irq = ipu_map_irq(ipu, IPU_IRQ_DC_FC_1); + if (!priv->dc_irq) + return -EINVAL; + ret = devm_request_irq(dev, priv->dc_irq, dc_irq_handler, 0, NULL, + &priv->channels[1]); + if (ret < 0) + return ret; + disable_irq(priv->dc_irq); + priv->dp_irq = ipu_map_irq(ipu, IPU_IRQ_DP_SF_END); + if (!priv->dp_irq) + return -EINVAL; + ret = devm_request_irq(dev, priv->dp_irq, dc_irq_handler, 0, NULL, + &priv->channels[5]); + if (ret < 0) + return ret; + disable_irq(priv->dp_irq); + writel(DC_WR_CH_CONF_WORD_SIZE_24 | DC_WR_CH_CONF_DISP_ID_PARALLEL(1) | DC_WR_CH_CONF_PROG_DI_ID, priv->channels[1].base + DC_WR_CH_CONF); @@ -404,6 +446,12 @@ ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 1, 15, 0xff); /* green */ ipu_dc_map_config(priv, IPU_DC_MAP_BGR24, 0, 23, 0xff); /* blue */ + /* rgb666 */ + ipu_dc_map_clear(priv, IPU_DC_MAP_RGB666); + ipu_dc_map_config(priv, IPU_DC_MAP_RGB666, 0, 5, 0xfc); /* blue */ + ipu_dc_map_config(priv, IPU_DC_MAP_RGB666, 1, 11, 0xfc); /* green */ + ipu_dc_map_config(priv, IPU_DC_MAP_RGB666, 2, 17, 0xfc); /* red */ + return 0; } diff -Nur linux-3.15-rc4.orig/drivers/staging/imx-drm/ipu-v3/ipu-di.c linux-3.15-rc4/drivers/staging/imx-drm/ipu-v3/ipu-di.c --- linux-3.15-rc4.orig/drivers/staging/imx-drm/ipu-v3/ipu-di.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/staging/imx-drm/ipu-v3/ipu-di.c 2014-05-07 17:05:51.144167296 +0200 @@ -595,7 +595,7 @@ } } - if (!sig->clk_pol) + if (sig->clk_pol) di_gen |= DI_GEN_POLARITY_DISP_CLK; ipu_di_write(di, di_gen, DI_GENERAL); diff -Nur linux-3.15-rc4.orig/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c linux-3.15-rc4/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c --- linux-3.15-rc4.orig/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/staging/imx-drm/ipu-v3/ipu-dmfc.c 2014-05-07 17:05:51.152167344 +0200 @@ -28,7 +28,12 @@ #define DMFC_GENERAL1 0x0014 #define DMFC_GENERAL2 0x0018 #define DMFC_IC_CTRL 0x001c -#define DMFC_STAT 0x0020 +#define DMFC_WR_CHAN_ALT 0x0020 +#define DMFC_WR_CHAN_DEF_ALT 0x0024 +#define DMFC_DP_CHAN_ALT 0x0028 +#define DMFC_DP_CHAN_DEF_ALT 0x002c +#define DMFC_GENERAL1_ALT 0x0030 +#define DMFC_STAT 0x0034 #define DMFC_WR_CHAN_1_28 0 #define DMFC_WR_CHAN_2_41 8 @@ -133,6 +138,20 @@ } EXPORT_SYMBOL_GPL(ipu_dmfc_enable_channel); +static void ipu_dmfc_wait_fifos(struct ipu_dmfc_priv *priv) +{ + unsigned long timeout = jiffies + msecs_to_jiffies(1000); + + while ((readl(priv->base + DMFC_STAT) & 0x02fff000) != 0x02fff000) { + if (time_after(jiffies, timeout)) { + dev_warn(priv->dev, + "Timeout waiting for DMFC FIFOs to clear\n"); + 
break; + } + cpu_relax(); + } +} + void ipu_dmfc_disable_channel(struct dmfc_channel *dmfc) { struct ipu_dmfc_priv *priv = dmfc->priv; @@ -141,8 +160,10 @@ priv->use_count--; - if (!priv->use_count) + if (!priv->use_count) { + ipu_dmfc_wait_fifos(priv); ipu_module_disable(priv->ipu, IPU_CONF_DMFC_EN); + } if (priv->use_count < 0) priv->use_count = 0; diff -Nur linux-3.15-rc4.orig/drivers/staging/imx-drm/ipu-v3/ipu-dp.c linux-3.15-rc4/drivers/staging/imx-drm/ipu-v3/ipu-dp.c --- linux-3.15-rc4.orig/drivers/staging/imx-drm/ipu-v3/ipu-dp.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/staging/imx-drm/ipu-v3/ipu-dp.c 2014-05-07 17:05:51.160167394 +0200 @@ -215,10 +215,9 @@ } EXPORT_SYMBOL_GPL(ipu_dp_setup_channel); -int ipu_dp_enable_channel(struct ipu_dp *dp) +int ipu_dp_enable(struct ipu_soc *ipu) { - struct ipu_flow *flow = to_flow(dp); - struct ipu_dp_priv *priv = flow->priv; + struct ipu_dp_priv *priv = ipu->dp_priv; mutex_lock(&priv->mutex); @@ -227,15 +226,28 @@ priv->use_count++; - if (dp->foreground) { - u32 reg; + mutex_unlock(&priv->mutex); + + return 0; +} +EXPORT_SYMBOL_GPL(ipu_dp_enable); + +int ipu_dp_enable_channel(struct ipu_dp *dp) +{ + struct ipu_flow *flow = to_flow(dp); + struct ipu_dp_priv *priv = flow->priv; + u32 reg; + + if (!dp->foreground) + return 0; + + mutex_lock(&priv->mutex); - reg = readl(flow->base + DP_COM_CONF); - reg |= DP_COM_CONF_FG_EN; - writel(reg, flow->base + DP_COM_CONF); + reg = readl(flow->base + DP_COM_CONF); + reg |= DP_COM_CONF_FG_EN; + writel(reg, flow->base + DP_COM_CONF); - ipu_srm_dp_sync_update(priv->ipu); - } + ipu_srm_dp_sync_update(priv->ipu); mutex_unlock(&priv->mutex); @@ -247,25 +259,38 @@ { struct ipu_flow *flow = to_flow(dp); struct ipu_dp_priv *priv = flow->priv; + u32 reg, csc; + + if (!dp->foreground) + return; mutex_lock(&priv->mutex); - priv->use_count--; + reg = readl(flow->base + DP_COM_CONF); + csc = reg & DP_COM_CONF_CSC_DEF_MASK; + if (csc == DP_COM_CONF_CSC_DEF_FG) + reg &= ~DP_COM_CONF_CSC_DEF_MASK; + + reg &= ~DP_COM_CONF_FG_EN; + writel(reg, flow->base + DP_COM_CONF); + + writel(0, flow->base + DP_FG_POS); + ipu_srm_dp_sync_update(priv->ipu); + + if (ipu_idmac_channel_busy(priv->ipu, IPUV3_CHANNEL_MEM_BG_SYNC)) + ipu_wait_interrupt(priv->ipu, IPU_IRQ_DP_SF_END, 50); + + mutex_unlock(&priv->mutex); +} +EXPORT_SYMBOL_GPL(ipu_dp_disable_channel); - if (dp->foreground) { - u32 reg, csc; +void ipu_dp_disable(struct ipu_soc *ipu) +{ + struct ipu_dp_priv *priv = ipu->dp_priv; - reg = readl(flow->base + DP_COM_CONF); - csc = reg & DP_COM_CONF_CSC_DEF_MASK; - if (csc == DP_COM_CONF_CSC_DEF_FG) - reg &= ~DP_COM_CONF_CSC_DEF_MASK; - - reg &= ~DP_COM_CONF_FG_EN; - writel(reg, flow->base + DP_COM_CONF); - - writel(0, flow->base + DP_FG_POS); - ipu_srm_dp_sync_update(priv->ipu); - } + mutex_lock(&priv->mutex); + + priv->use_count--; if (!priv->use_count) ipu_module_disable(priv->ipu, IPU_CONF_DP_EN); @@ -275,7 +300,7 @@ mutex_unlock(&priv->mutex); } -EXPORT_SYMBOL_GPL(ipu_dp_disable_channel); +EXPORT_SYMBOL_GPL(ipu_dp_disable); struct ipu_dp *ipu_dp_get(struct ipu_soc *ipu, unsigned int flow) { diff -Nur linux-3.15-rc4.orig/drivers/staging/imx-drm/ipu-v3/ipu-prv.h linux-3.15-rc4/drivers/staging/imx-drm/ipu-v3/ipu-prv.h --- linux-3.15-rc4.orig/drivers/staging/imx-drm/ipu-v3/ipu-prv.h 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/staging/imx-drm/ipu-v3/ipu-prv.h 2014-05-07 17:05:51.172167469 +0200 @@ -185,6 +185,9 @@ int ipu_module_enable(struct ipu_soc *ipu, u32 mask); int ipu_module_disable(struct 
ipu_soc *ipu, u32 mask); +bool ipu_idmac_channel_busy(struct ipu_soc *ipu, unsigned int chno); +int ipu_wait_interrupt(struct ipu_soc *ipu, int irq, int ms); + int ipu_di_init(struct ipu_soc *ipu, struct device *dev, int id, unsigned long base, u32 module, struct clk *ipu_clk); void ipu_di_exit(struct ipu_soc *ipu, int id); diff -Nur linux-3.15-rc4.orig/drivers/staging/imx-drm/ipuv3-crtc.c linux-3.15-rc4/drivers/staging/imx-drm/ipuv3-crtc.c --- linux-3.15-rc4.orig/drivers/staging/imx-drm/ipuv3-crtc.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/staging/imx-drm/ipuv3-crtc.c 2014-05-07 17:05:51.172167469 +0200 @@ -60,24 +60,32 @@ static void ipu_fb_enable(struct ipu_crtc *ipu_crtc) { + struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); + if (ipu_crtc->enabled) return; - ipu_di_enable(ipu_crtc->di); - ipu_dc_enable_channel(ipu_crtc->dc); + ipu_dc_enable(ipu); ipu_plane_enable(ipu_crtc->plane[0]); + /* Start DC channel and DI after IDMAC */ + ipu_dc_enable_channel(ipu_crtc->dc); + ipu_di_enable(ipu_crtc->di); ipu_crtc->enabled = 1; } static void ipu_fb_disable(struct ipu_crtc *ipu_crtc) { + struct ipu_soc *ipu = dev_get_drvdata(ipu_crtc->dev->parent); + if (!ipu_crtc->enabled) return; - ipu_plane_disable(ipu_crtc->plane[0]); + /* Stop DC channel and DI before IDMAC */ ipu_dc_disable_channel(ipu_crtc->dc); ipu_di_disable(ipu_crtc->di); + ipu_plane_disable(ipu_crtc->plane[0]); + ipu_dc_disable(ipu); ipu_crtc->enabled = 0; } @@ -158,7 +166,7 @@ sig_cfg.Vsync_pol = 1; sig_cfg.enable_pol = 1; - sig_cfg.clk_pol = 1; + sig_cfg.clk_pol = 0; sig_cfg.width = mode->hdisplay; sig_cfg.height = mode->vdisplay; sig_cfg.pixel_fmt = out_pixel_fmt; diff -Nur linux-3.15-rc4.orig/drivers/staging/imx-drm/ipuv3-plane.c linux-3.15-rc4/drivers/staging/imx-drm/ipuv3-plane.c --- linux-3.15-rc4.orig/drivers/staging/imx-drm/ipuv3-plane.c 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/staging/imx-drm/ipuv3-plane.c 2014-05-07 17:05:51.176167493 +0200 @@ -239,6 +239,8 @@ void ipu_plane_enable(struct ipu_plane *ipu_plane) { + if (ipu_plane->dp) + ipu_dp_enable(ipu_plane->ipu); ipu_dmfc_enable_channel(ipu_plane->dmfc); ipu_idmac_enable_channel(ipu_plane->ipu_ch); if (ipu_plane->dp) @@ -257,6 +259,8 @@ ipu_dp_disable_channel(ipu_plane->dp); ipu_idmac_disable_channel(ipu_plane->ipu_ch); ipu_dmfc_disable_channel(ipu_plane->dmfc); + if (ipu_plane->dp) + ipu_dp_disable(ipu_plane->ipu); } static void ipu_plane_dpms(struct ipu_plane *ipu_plane, int mode) diff -Nur linux-3.15-rc4.orig/drivers/staging/imx-drm/Kconfig linux-3.15-rc4/drivers/staging/imx-drm/Kconfig --- linux-3.15-rc4.orig/drivers/staging/imx-drm/Kconfig 2014-05-05 03:14:42.000000000 +0200 +++ linux-3.15-rc4/drivers/staging/imx-drm/Kconfig 2014-05-07 17:05:51.176167493 +0200 @@ -35,6 +35,7 @@ config DRM_IMX_LDB tristate "Support for LVDS displays" depends on DRM_IMX && MFD_SYSCON + select DRM_PANEL help Choose this to enable the internal LVDS Display Bridge (LDB) found on i.MX53 and i.MX6 processors. @@ -60,3 +61,20 @@ depends on DRM_IMX help Choose this if you want to use HDMI on i.MX6. + +config DRM_DW_HDMI_AUDIO + tristate "Synopsis Designware Audio interface" + depends on DRM_IMX_HDMI != n + help + Support the Audio interface which is part of the Synopsis + Designware HDMI block. This is used in conjunction with + the i.MX HDMI driver. 
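As an illustrative aside, not part of the diff: both new options hang off DRM_IMX_HDMI, so a build that wants the audio helper (and the CEC helper added in the entry that follows) as modules would typically carry a config fragment along these lines; the =y/=m choices here are only an example:

CONFIG_DRM_IMX=y
CONFIG_DRM_IMX_HDMI=y
CONFIG_DRM_DW_HDMI_AUDIO=m
CONFIG_DRM_DW_HDMI_CEC=m

The corresponding obj-$(CONFIG_...) rules are added to the imx-drm Makefile in a later hunk of this patch.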
diff -Nur linux-3.15-rc4.orig/drivers/staging/imx-drm/Kconfig linux-3.15-rc4/drivers/staging/imx-drm/Kconfig
--- linux-3.15-rc4.orig/drivers/staging/imx-drm/Kconfig	2014-05-05 03:14:42.000000000 +0200
+++ linux-3.15-rc4/drivers/staging/imx-drm/Kconfig	2014-05-07 17:05:51.176167493 +0200
@@ -35,6 +35,7 @@
 config DRM_IMX_LDB
 	tristate "Support for LVDS displays"
 	depends on DRM_IMX && MFD_SYSCON
+	select DRM_PANEL
 	help
 	  Choose this to enable the internal LVDS Display Bridge (LDB)
 	  found on i.MX53 and i.MX6 processors.
@@ -60,3 +61,20 @@
 	depends on DRM_IMX
 	help
 	  Choose this if you want to use HDMI on i.MX6.
+
+config DRM_DW_HDMI_AUDIO
+	tristate "Synopsys DesignWare Audio interface"
+	depends on DRM_IMX_HDMI != n
+	help
+	  Support the Audio interface which is part of the Synopsys
+	  DesignWare HDMI block.  This is used in conjunction with
+	  the i.MX HDMI driver.
+
+config DRM_DW_HDMI_CEC
+	tristate "Synopsys DesignWare CEC interface"
+	depends on DRM_IMX_HDMI != n
+	select HDMI_CEC_CORE
+	help
+	  Support the CEC interface which is part of the Synopsys
+	  DesignWare HDMI block.  This is used in conjunction with
+	  the i.MX HDMI driver.
diff -Nur linux-3.15-rc4.orig/drivers/staging/imx-drm/Makefile linux-3.15-rc4/drivers/staging/imx-drm/Makefile
--- linux-3.15-rc4.orig/drivers/staging/imx-drm/Makefile	2014-05-05 03:14:42.000000000 +0200
+++ linux-3.15-rc4/drivers/staging/imx-drm/Makefile	2014-05-07 17:05:51.180167518 +0200
@@ -3,6 +3,7 @@
 
 obj-$(CONFIG_DRM_IMX) += imxdrm.o
 
+obj-$(CONFIG_DRM_IMX) += drm-ddc-connector.o
 obj-$(CONFIG_DRM_IMX_PARALLEL_DISPLAY) += parallel-display.o
 obj-$(CONFIG_DRM_IMX_TVE) += imx-tve.o
 obj-$(CONFIG_DRM_IMX_LDB) += imx-ldb.o
@@ -11,3 +12,5 @@
 imx-ipuv3-crtc-objs := ipuv3-crtc.o ipuv3-plane.o
 obj-$(CONFIG_DRM_IMX_IPUV3) += imx-ipuv3-crtc.o
 obj-$(CONFIG_DRM_IMX_HDMI) += imx-hdmi.o
+obj-$(CONFIG_DRM_DW_HDMI_AUDIO) += dw-hdmi-audio.o
+obj-$(CONFIG_DRM_DW_HDMI_CEC) += dw-hdmi-cec.o
diff -Nur linux-3.15-rc4.orig/drivers/staging/imx-drm/parallel-display.c linux-3.15-rc4/drivers/staging/imx-drm/parallel-display.c
--- linux-3.15-rc4.orig/drivers/staging/imx-drm/parallel-display.c	2014-05-05 03:14:42.000000000 +0200
+++ linux-3.15-rc4/drivers/staging/imx-drm/parallel-display.c	2014-05-07 17:05:51.180167518 +0200
@@ -219,6 +219,8 @@
 			imxpd->interface_pix_fmt = V4L2_PIX_FMT_RGB565;
 		else if (!strcmp(fmt, "bgr666"))
 			imxpd->interface_pix_fmt = V4L2_PIX_FMT_BGR666;
+		else if (!strcmp(fmt, "rgb666"))
+			imxpd->interface_pix_fmt = V4L2_PIX_FMT_RGB666;
 	}
 
 	panel_node = of_parse_phandle(np, "fsl,panel", 0);
diff -Nur linux-3.15-rc4.orig/include/linux/cec-dev.h linux-3.15-rc4/include/linux/cec-dev.h
--- linux-3.15-rc4.orig/include/linux/cec-dev.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.15-rc4/include/linux/cec-dev.h	2014-05-07 17:05:51.180167518 +0200
@@ -0,0 +1,69 @@
+#ifndef _LINUX_CEC_DEV_H
+#define _LINUX_CEC_DEV_H
+
+#include
+#include
+#include
+#include
+#include
+
+#include
+
+struct device;
+
+struct cec_dev {
+	struct cdev cdev;
+	dev_t devn;
+
+	struct mutex mutex;
+	unsigned users;
+
+	spinlock_t lock;
+	wait_queue_head_t waitq;
+	struct list_head events;
+	u8 write_busy;
+
+	u8 retries;
+	u16 addresses;
+	u16 physical;
+
+	int (*open)(struct cec_dev *);
+	void (*release)(struct cec_dev *);
+	void (*send_message)(struct cec_dev *, u8 *, size_t);
+	void (*set_address)(struct cec_dev *, unsigned);
+};
+
+void cec_dev_event(struct cec_dev *cec_dev, int type, u8 *msg, size_t len);
+
+static inline void cec_dev_receive(struct cec_dev *cec_dev, u8 *msg,
+	unsigned len)
+{
+	cec_dev_event(cec_dev, MESSAGE_TYPE_RECEIVE_SUCCESS, msg, len);
+}
+
+static inline void cec_dev_send_complete(struct cec_dev *cec_dev, int ack)
+{
+	cec_dev->retries = 0;
+	cec_dev->write_busy = 0;
+
+	cec_dev_event(cec_dev, ack ? MESSAGE_TYPE_SEND_SUCCESS :
+		      MESSAGE_TYPE_NOACK, NULL, 0);
+}
+
+static inline void cec_dev_disconnect(struct cec_dev *cec_dev)
+{
+	cec_dev->physical = 0;
+	cec_dev_event(cec_dev, MESSAGE_TYPE_DISCONNECTED, NULL, 0);
+}
+
+static inline void cec_dev_connect(struct cec_dev *cec_dev, u32 phys)
+{
+	cec_dev->physical = phys;
+	cec_dev_event(cec_dev, MESSAGE_TYPE_CONNECTED, NULL, 0);
+}
+
+void cec_dev_init(struct cec_dev *cec_dev, struct module *);
+int cec_dev_add(struct cec_dev *cec_dev, struct device *, const char *name);
+void cec_dev_remove(struct cec_dev *cec_dev);
+
+#endif
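The new include/linux/cec-dev.h above gives a CEC hardware driver a reusable character-device front end: the driver embeds struct cec_dev, fills in the callbacks, and reports hardware events through the cec_dev_* helpers. A hedged sketch of that glue follows; the foo_* names, the IRQ details and the register handling are invented for illustration, only the cec_dev_* calls and callback signatures come from this patch (the actual dw-hdmi-cec.c is not shown in these hunks):

#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/cec-dev.h>

struct foo_cec {
	struct cec_dev cec;
	void __iomem *base;
};

static void foo_cec_send_message(struct cec_dev *cec, u8 *msg, size_t len)
{
	/* write msg to the transmit FIFO and kick the transmitter here */
}

static void foo_cec_set_address(struct cec_dev *cec, unsigned addresses)
{
	/* program the claimed logical address mask into the hardware */
}

static irqreturn_t foo_cec_irq(int irq, void *data)
{
	struct foo_cec *foo = data;

	/* on a completed transmit: */
	cec_dev_send_complete(&foo->cec, 1);
	/* on a received frame: cec_dev_receive(&foo->cec, buf, len); */
	return IRQ_HANDLED;
}

static int foo_cec_probe(struct platform_device *pdev)
{
	struct foo_cec *foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);

	if (!foo)
		return -ENOMEM;
	foo->cec.send_message = foo_cec_send_message;
	foo->cec.set_address = foo_cec_set_address;
	cec_dev_init(&foo->cec, THIS_MODULE);
	return cec_dev_add(&foo->cec, &pdev->dev, "cec");
}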
diff -Nur linux-3.15-rc4.orig/include/linux/mmc/host.h linux-3.15-rc4/include/linux/mmc/host.h
--- linux-3.15-rc4.orig/include/linux/mmc/host.h	2014-05-05 03:14:42.000000000 +0200
+++ linux-3.15-rc4/include/linux/mmc/host.h	2014-05-07 17:05:51.180167518 +0200
@@ -278,6 +278,7 @@
 #define MMC_CAP2_PACKED_CMD	(MMC_CAP2_PACKED_RD | \
				 MMC_CAP2_PACKED_WR)
 #define MMC_CAP2_NO_PRESCAN_POWERUP (1 << 14)	/* Don't power up before scan */
+#define MMC_CAP2_SDIO_NOTHREAD (1 << 15)
 
 	mmc_pm_flag_t		pm_caps;	/* supported pm features */
 
@@ -293,6 +294,11 @@
 	unsigned long		clkgate_delay;
 #endif
 
+	/* card specific properties to deal with power and reset */
+	struct regulator *card_regulator;	/* External VCC needed by the card */
+	struct gpio_desc *card_reset_gpios[2];	/* External resets, active low */
+	struct clk *card_clk;	/* External clock needed by the card */
+
 	/* host specific block data */
 	unsigned int		max_seg_size;	/* see blk_queue_max_segment_size */
 	unsigned short		max_segs;	/* see blk_queue_max_segments */
@@ -391,6 +397,8 @@
 	wake_up_process(host->sdio_irq_thread);
 }
 
+void sdio_run_irqs(struct mmc_host *host);
+
 #ifdef CONFIG_REGULATOR
 int mmc_regulator_get_ocrmask(struct regulator *supply);
 int mmc_regulator_set_ocr(struct mmc_host *mmc,
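The new MMC_CAP2_SDIO_NOTHREAD capability and the sdio_run_irqs() declaration above let a host driver dispatch SDIO card interrupts from its own interrupt context instead of waking the dedicated sdio_irq_thread. A hedged sketch of that pattern; the sdhci changes that actually use it are not part of the hunks shown here, and the foo_* names are invented:

#include <linux/interrupt.h>
#include <linux/mmc/host.h>

/* Sketch: a host driver advertising in-context SDIO IRQ handling. */
static irqreturn_t foo_host_irq_thread(int irq, void *data)
{
	struct mmc_host *mmc = data;

	/* ...acknowledge and handle controller interrupts here... */

	/* Dispatch any pending SDIO card interrupt synchronously. */
	sdio_run_irqs(mmc);

	return IRQ_HANDLED;
}

static void foo_host_setup(struct mmc_host *mmc)
{
	/* Tell the core not to use sdio_irq_thread for this host. */
	mmc->caps2 |= MMC_CAP2_SDIO_NOTHREAD;
}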
diff -Nur linux-3.15-rc4.orig/include/linux/mmc/sdhci.h linux-3.15-rc4/include/linux/mmc/sdhci.h
--- linux-3.15-rc4.orig/include/linux/mmc/sdhci.h	2014-05-05 03:14:42.000000000 +0200
+++ linux-3.15-rc4/include/linux/mmc/sdhci.h	2014-05-07 17:05:51.180167518 +0200
@@ -57,12 +57,8 @@
 #define SDHCI_QUIRK_BROKEN_CARD_DETECTION		(1<<15)
 /* Controller reports inverted write-protect state */
 #define SDHCI_QUIRK_INVERTED_WRITE_PROTECT		(1<<16)
-/* Controller has nonstandard clock management */
-#define SDHCI_QUIRK_NONSTANDARD_CLOCK			(1<<17)
 /* Controller does not like fast PIO transfers */
 #define SDHCI_QUIRK_PIO_NEEDS_DELAY			(1<<18)
-/* Controller losing signal/interrupt enable states after reset */
-#define SDHCI_QUIRK_RESTORE_IRQS_AFTER_RESET		(1<<19)
 /* Controller has to be forced to use block size of 2048 bytes */
 #define SDHCI_QUIRK_FORCE_BLK_SZ_2048			(1<<20)
 /* Controller cannot do multi-block transfers */
@@ -147,6 +143,7 @@
 
 	bool runtime_suspended;	/* Host is runtime suspended */
 	bool bus_on;		/* Bus power prevents runtime suspend */
+	bool preset_enabled;	/* Preset is enabled */
 
 	struct mmc_request *mrq;	/* Current request */
 	struct mmc_command *cmd;	/* Current command */
@@ -164,8 +161,7 @@
 	dma_addr_t adma_addr;	/* Mapped ADMA descr. table */
 	dma_addr_t align_addr;	/* Mapped bounce buffer */
 
-	struct tasklet_struct card_tasklet;	/* Tasklet structures */
-	struct tasklet_struct finish_tasklet;
+	struct tasklet_struct finish_tasklet;	/* Tasklet structures */
 
 	struct timer_list timer;	/* Timer for timeouts */
 
@@ -177,6 +173,13 @@
 	unsigned int ocr_avail_mmc;
 	u32 ocr_mask;		/* available voltages */
 
+	unsigned		timing;		/* Current timing */
+
+	u32			thread_isr;
+
+	/* cached registers */
+	u32			ier;
+
 	wait_queue_head_t	buf_ready_int;	/* Waitqueue for Buffer Read Ready interrupt */
 	unsigned int		tuning_done;	/* Condition flag set when CMD19 succeeds */
 
diff -Nur linux-3.15-rc4.orig/include/uapi/linux/cec-dev.h linux-3.15-rc4/include/uapi/linux/cec-dev.h
--- linux-3.15-rc4.orig/include/uapi/linux/cec-dev.h	1970-01-01 01:00:00.000000000 +0100
+++ linux-3.15-rc4/include/uapi/linux/cec-dev.h	2014-05-07 17:05:51.180167518 +0200
@@ -0,0 +1,34 @@
+#ifndef _UAPI_LINUX_CEC_DEV_H
+#define _UAPI_LINUX_CEC_DEV_H
+
+#include
+#include
+
+#define MAX_MESSAGE_LEN		16
+
+enum {
+	HDMICEC_IOC_MAGIC = 'H',
+	/* This is wrong: we pass the argument as a number, not a pointer */
+	HDMICEC_IOC_O_SETLOGICALADDRESS = _IOW(HDMICEC_IOC_MAGIC, 1, unsigned char),
+	HDMICEC_IOC_SETLOGICALADDRESS = _IO(HDMICEC_IOC_MAGIC, 1),
+	HDMICEC_IOC_STARTDEVICE = _IO(HDMICEC_IOC_MAGIC, 2),
+	HDMICEC_IOC_STOPDEVICE = _IO(HDMICEC_IOC_MAGIC, 3),
+	HDMICEC_IOC_GETPHYADDRESS = _IOR(HDMICEC_IOC_MAGIC, 4, unsigned char[4]),
+};
+
+enum {
+	MESSAGE_TYPE_RECEIVE_SUCCESS = 1,
+	MESSAGE_TYPE_NOACK,
+	MESSAGE_TYPE_DISCONNECTED,
+	MESSAGE_TYPE_CONNECTED,
+	MESSAGE_TYPE_SEND_SUCCESS,
+	MESSAGE_TYPE_SEND_ERROR,
+};
+
+struct cec_user_event {
+	__u32 event_type;
+	__u32 msg_len;
+	__u8 msg[MAX_MESSAGE_LEN];
+};
+
+#endif
diff -Nur linux-3.15-rc4.orig/include/uapi/linux/videodev2.h linux-3.15-rc4/include/uapi/linux/videodev2.h
--- linux-3.15-rc4.orig/include/uapi/linux/videodev2.h	2014-05-05 03:14:42.000000000 +0200
+++ linux-3.15-rc4/include/uapi/linux/videodev2.h	2014-05-07 17:05:51.180167518 +0200
@@ -299,6 +299,7 @@
 #define V4L2_PIX_FMT_RGB555X v4l2_fourcc('R', 'G', 'B', 'Q') /* 16  RGB-5-5-5 BE */
 #define V4L2_PIX_FMT_RGB565X v4l2_fourcc('R', 'G', 'B', 'R') /* 16  RGB-5-6-5 BE */
 #define V4L2_PIX_FMT_BGR666  v4l2_fourcc('B', 'G', 'R', 'H') /* 18  BGR-6-6-6 */
+#define V4L2_PIX_FMT_RGB666  v4l2_fourcc('R', 'G', 'B', 'H') /* 18  RGB-6-6-6 */
 #define V4L2_PIX_FMT_BGR24   v4l2_fourcc('B', 'G', 'R', '3') /* 24  BGR-8-8-8 */
 #define V4L2_PIX_FMT_RGB24   v4l2_fourcc('R', 'G', 'B', '3') /* 24  RGB-8-8-8 */
 #define V4L2_PIX_FMT_BGR32   v4l2_fourcc('B', 'G', 'R', '4') /* 32  BGR-8-8-8-8 */
diff -Nur linux-3.15-rc4.orig/sound/soc/fsl/imx-pcm-dma.c linux-3.15-rc4/sound/soc/fsl/imx-pcm-dma.c
--- linux-3.15-rc4.orig/sound/soc/fsl/imx-pcm-dma.c	2014-05-05 03:14:42.000000000 +0200
+++ linux-3.15-rc4/sound/soc/fsl/imx-pcm-dma.c	2014-05-07 17:05:51.180167518 +0200
@@ -44,7 +44,7 @@
 	.buffer_bytes_max = IMX_SSI_DMABUF_SIZE,
 	.period_bytes_min = 128,
 	.period_bytes_max = 65535, /* Limited by SDMA engine */
-	.periods_min = 2,
+	.periods_min = 4,
 	.periods_max = 255,
 	.fifo_size = 0,
 };
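The include/uapi/linux/cec-dev.h header added above defines the userspace ABI of the new CEC character device. A hedged sketch of how an application might drive it; the "/dev/cec0" node name and the read() semantics (one struct cec_user_event per read) are assumptions, since the cec-dev.c implementation is not among the hunks shown, and only the ioctl numbers and structure layout come from this patch:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/cec-dev.h>

int main(void)
{
	unsigned char phys[4];
	struct cec_user_event ev;
	int fd = open("/dev/cec0", O_RDWR);

	if (fd < 0)
		return 1;

	/* The logical address is passed as a plain number, not a pointer. */
	ioctl(fd, HDMICEC_IOC_SETLOGICALADDRESS, 4 /* playback device */);
	ioctl(fd, HDMICEC_IOC_STARTDEVICE, 0);
	ioctl(fd, HDMICEC_IOC_GETPHYADDRESS, phys);

	if (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("event %u, %u byte(s)\n", ev.event_type, ev.msg_len);

	ioctl(fd, HDMICEC_IOC_STOPDEVICE, 0);
	close(fd);
	return 0;
}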