Diffstat (limited to 'target/linux/patches')
-rw-r--r--target/linux/patches/3.10.75/bsd-compatibility.patch (renamed from target/linux/patches/3.10.71/bsd-compatibility.patch)0
-rw-r--r--target/linux/patches/3.10.75/headers-install.patch (renamed from target/linux/patches/3.10.71/headers-install.patch)0
-rw-r--r--target/linux/patches/3.10.75/mkpiggy.patch (renamed from target/linux/patches/3.10.71/mkpiggy.patch)0
-rw-r--r--target/linux/patches/3.10.75/relocs.patch (renamed from target/linux/patches/3.10.71/relocs.patch)0
-rw-r--r--target/linux/patches/3.10.75/sgidefs.patch (renamed from target/linux/patches/3.10.71/sgidefs.patch)0
-rw-r--r--target/linux/patches/3.10.75/sortext.patch (renamed from target/linux/patches/3.10.71/sortext.patch)0
-rw-r--r--target/linux/patches/3.10.75/startup.patch (renamed from target/linux/patches/3.10.71/startup.patch)0
-rw-r--r--target/linux/patches/3.10.75/yaffs2.patch (renamed from target/linux/patches/3.10.71/yaffs2.patch)0
-rw-r--r--target/linux/patches/3.12.40/bsd-compatibility.patch (renamed from target/linux/patches/3.12.38/bsd-compatibility.patch)0
-rw-r--r--target/linux/patches/3.12.40/cleankernel.patch (renamed from target/linux/patches/3.12.38/cleankernel.patch)0
-rw-r--r--target/linux/patches/3.12.40/defaults.patch (renamed from target/linux/patches/3.12.38/defaults.patch)0
-rw-r--r--target/linux/patches/3.12.40/disable-netfilter.patch (renamed from target/linux/patches/3.12.38/disable-netfilter.patch)0
-rw-r--r--target/linux/patches/3.12.40/export-symbol-for-exmap.patch (renamed from target/linux/patches/3.12.38/export-symbol-for-exmap.patch)0
-rw-r--r--target/linux/patches/3.12.40/gemalto.patch (renamed from target/linux/patches/3.12.38/gemalto.patch)0
-rw-r--r--target/linux/patches/3.12.40/lemote-rfkill.patch (renamed from target/linux/patches/3.12.38/lemote-rfkill.patch)0
-rw-r--r--target/linux/patches/3.12.40/microblaze-ethernet.patch (renamed from target/linux/patches/3.12.38/microblaze-ethernet.patch)0
-rw-r--r--target/linux/patches/3.12.40/microblaze-setup.patch (renamed from target/linux/patches/3.12.38/microblaze-setup.patch)0
-rw-r--r--target/linux/patches/3.12.40/mips-lzo-fix.patch (renamed from target/linux/patches/3.12.38/mips-lzo-fix.patch)0
-rw-r--r--target/linux/patches/3.12.40/mkpiggy.patch (renamed from target/linux/patches/3.12.38/mkpiggy.patch)0
-rw-r--r--target/linux/patches/3.12.40/mtd-rootfs.patch (renamed from target/linux/patches/3.12.38/mtd-rootfs.patch)0
-rw-r--r--target/linux/patches/3.12.40/non-static.patch (renamed from target/linux/patches/3.12.38/non-static.patch)0
-rw-r--r--target/linux/patches/3.12.40/ppc64-missing-zlib.patch (renamed from target/linux/patches/3.12.38/ppc64-missing-zlib.patch)0
-rw-r--r--target/linux/patches/3.12.40/regmap-bool.patch (renamed from target/linux/patches/3.12.38/regmap-bool.patch)0
-rw-r--r--target/linux/patches/3.12.40/relocs.patch (renamed from target/linux/patches/3.12.38/relocs.patch)0
-rw-r--r--target/linux/patches/3.12.40/sgidefs.patch (renamed from target/linux/patches/3.12.38/sgidefs.patch)0
-rw-r--r--target/linux/patches/3.12.40/sortext.patch (renamed from target/linux/patches/3.12.38/sortext.patch)0
-rw-r--r--target/linux/patches/3.12.40/startup.patch (renamed from target/linux/patches/3.12.38/startup.patch)0
-rw-r--r--target/linux/patches/3.12.40/usb-defaults-off.patch (renamed from target/linux/patches/3.12.38/usb-defaults-off.patch)0
-rw-r--r--target/linux/patches/3.12.40/vga-cons-default-off.patch (renamed from target/linux/patches/3.12.38/vga-cons-default-off.patch)0
-rw-r--r--target/linux/patches/3.12.40/wlan-cf.patch (renamed from target/linux/patches/3.12.38/wlan-cf.patch)0
-rw-r--r--target/linux/patches/3.12.40/xargs.patch (renamed from target/linux/patches/3.12.38/xargs.patch)0
-rw-r--r--target/linux/patches/3.12.40/yaffs2.patch (renamed from target/linux/patches/3.12.38/yaffs2.patch)0
-rw-r--r--target/linux/patches/3.12.40/zlib-inflate.patch (renamed from target/linux/patches/3.12.38/zlib-inflate.patch)0
-rw-r--r--target/linux/patches/3.14.40/bsd-compatibility.patch (renamed from target/linux/patches/3.14.35/bsd-compatibility.patch)0
-rw-r--r--target/linux/patches/3.14.40/cleankernel.patch (renamed from target/linux/patches/3.14.35/cleankernel.patch)0
-rw-r--r--target/linux/patches/3.14.40/defaults.patch (renamed from target/linux/patches/3.14.35/defaults.patch)0
-rw-r--r--target/linux/patches/3.14.40/disable-netfilter.patch (renamed from target/linux/patches/3.14.35/disable-netfilter.patch)0
-rw-r--r--target/linux/patches/3.14.40/export-symbol-for-exmap.patch (renamed from target/linux/patches/3.14.35/export-symbol-for-exmap.patch)0
-rw-r--r--target/linux/patches/3.14.40/fblogo.patch (renamed from target/linux/patches/3.14.35/fblogo.patch)0
-rw-r--r--target/linux/patches/3.14.40/gemalto.patch (renamed from target/linux/patches/3.14.35/gemalto.patch)0
-rw-r--r--target/linux/patches/3.14.40/initramfs-nosizelimit.patch (renamed from target/linux/patches/3.14.35/initramfs-nosizelimit.patch)0
-rw-r--r--target/linux/patches/3.14.40/lemote-rfkill.patch (renamed from target/linux/patches/3.14.35/lemote-rfkill.patch)0
-rw-r--r--target/linux/patches/3.14.40/microblaze-axi.patch (renamed from target/linux/patches/3.14.35/microblaze-axi.patch)0
-rw-r--r--target/linux/patches/3.14.40/microblaze-ethernet.patch (renamed from target/linux/patches/3.14.35/microblaze-ethernet.patch)0
-rw-r--r--target/linux/patches/3.14.40/mkpiggy.patch (renamed from target/linux/patches/3.14.35/mkpiggy.patch)0
-rw-r--r--target/linux/patches/3.14.40/mptcp.patch (renamed from target/linux/patches/3.14.35/mptcp.patch)0
-rw-r--r--target/linux/patches/3.14.40/mtd-rootfs.patch (renamed from target/linux/patches/3.14.35/mtd-rootfs.patch)0
-rw-r--r--target/linux/patches/3.14.40/nfsv3-tcp.patch (renamed from target/linux/patches/3.14.35/nfsv3-tcp.patch)0
-rw-r--r--target/linux/patches/3.14.40/non-static.patch (renamed from target/linux/patches/3.14.35/non-static.patch)0
-rw-r--r--target/linux/patches/3.14.40/ppc64-missing-zlib.patch (renamed from target/linux/patches/3.14.35/ppc64-missing-zlib.patch)0
-rw-r--r--target/linux/patches/3.14.40/regmap-boolean.patch (renamed from target/linux/patches/3.14.35/regmap-boolean.patch)0
-rw-r--r--target/linux/patches/3.14.40/relocs.patch (renamed from target/linux/patches/3.14.35/relocs.patch)0
-rw-r--r--target/linux/patches/3.14.40/sgidefs.patch (renamed from target/linux/patches/3.14.35/sgidefs.patch)0
-rw-r--r--target/linux/patches/3.14.40/sortext.patch (renamed from target/linux/patches/3.14.35/sortext.patch)0
-rw-r--r--target/linux/patches/3.14.40/startup.patch (renamed from target/linux/patches/3.14.35/startup.patch)0
-rw-r--r--target/linux/patches/3.14.40/wlan-cf.patch (renamed from target/linux/patches/3.14.35/wlan-cf.patch)0
-rw-r--r--target/linux/patches/3.14.40/xargs.patch (renamed from target/linux/patches/3.14.35/xargs.patch)0
-rw-r--r--target/linux/patches/3.14.40/yaffs2.patch (renamed from target/linux/patches/3.14.35/yaffs2.patch)0
-rw-r--r--target/linux/patches/3.14.40/zlib-inflate.patch (renamed from target/linux/patches/3.14.35/zlib-inflate.patch)0
-rw-r--r--target/linux/patches/3.18.12/bsd-compatibility.patch (renamed from target/linux/patches/3.18.10/bsd-compatibility.patch)0
-rw-r--r--target/linux/patches/3.18.12/cleankernel.patch (renamed from target/linux/patches/3.18.10/cleankernel.patch)0
-rw-r--r--target/linux/patches/3.18.12/cris-header.patch (renamed from target/linux/patches/3.18.10/cris-header.patch)0
-rw-r--r--target/linux/patches/3.18.12/defaults.patch (renamed from target/linux/patches/3.18.10/defaults.patch)0
-rw-r--r--target/linux/patches/3.18.12/export-symbol-for-exmap.patch (renamed from target/linux/patches/3.18.10/export-symbol-for-exmap.patch)0
-rw-r--r--target/linux/patches/3.18.12/fblogo.patch (renamed from target/linux/patches/3.18.10/fblogo.patch)0
-rw-r--r--target/linux/patches/3.18.12/gemalto.patch (renamed from target/linux/patches/3.18.10/gemalto.patch)0
-rw-r--r--target/linux/patches/3.18.12/initramfs-nosizelimit.patch (renamed from target/linux/patches/3.18.10/initramfs-nosizelimit.patch)0
-rw-r--r--target/linux/patches/3.18.12/lemote-rfkill.patch (renamed from target/linux/patches/3.18.10/lemote-rfkill.patch)0
-rw-r--r--target/linux/patches/3.18.12/microblaze-ethernet.patch (renamed from target/linux/patches/3.18.10/microblaze-ethernet.patch)0
-rw-r--r--target/linux/patches/3.18.12/mkpiggy.patch (renamed from target/linux/patches/3.18.10/mkpiggy.patch)0
-rw-r--r--target/linux/patches/3.18.12/mtd-rootfs.patch (renamed from target/linux/patches/3.18.10/mtd-rootfs.patch)0
-rw-r--r--target/linux/patches/3.18.12/nfsv3-tcp.patch (renamed from target/linux/patches/3.18.10/nfsv3-tcp.patch)0
-rw-r--r--target/linux/patches/3.18.12/non-static.patch (renamed from target/linux/patches/3.18.10/non-static.patch)0
-rw-r--r--target/linux/patches/3.18.12/ppc64-missing-zlib.patch (renamed from target/linux/patches/3.18.10/ppc64-missing-zlib.patch)0
-rw-r--r--target/linux/patches/3.18.12/realtime.patch (renamed from target/linux/patches/3.18.10/realtime.patch)14803
-rw-r--r--target/linux/patches/3.18.12/regmap-bool.patch (renamed from target/linux/patches/3.18.10/regmap-bool.patch)0
-rw-r--r--target/linux/patches/3.18.12/relocs.patch (renamed from target/linux/patches/3.18.10/relocs.patch)0
-rw-r--r--target/linux/patches/3.18.12/sgidefs.patch (renamed from target/linux/patches/3.18.10/sgidefs.patch)0
-rw-r--r--target/linux/patches/3.18.12/sortext.patch (renamed from target/linux/patches/3.18.10/sortext.patch)0
-rw-r--r--target/linux/patches/3.18.12/startup.patch (renamed from target/linux/patches/3.18.10/startup.patch)0
-rw-r--r--target/linux/patches/3.18.12/wlan-cf.patch (renamed from target/linux/patches/3.18.10/wlan-cf.patch)0
-rw-r--r--target/linux/patches/3.18.12/xargs.patch (renamed from target/linux/patches/3.18.10/xargs.patch)0
-rw-r--r--target/linux/patches/3.18.12/yaffs2.patch (renamed from target/linux/patches/3.18.10/yaffs2.patch)0
-rw-r--r--target/linux/patches/3.19.5/bsd-compatibility.patch (renamed from target/linux/patches/3.19.1/bsd-compatibility.patch)0
-rw-r--r--target/linux/patches/3.19.5/cleankernel.patch (renamed from target/linux/patches/3.19.1/cleankernel.patch)0
-rw-r--r--target/linux/patches/3.19.5/cris-header.patch (renamed from target/linux/patches/3.19.1/cris-header.patch)0
-rw-r--r--target/linux/patches/3.19.5/export-symbol-for-exmap.patch (renamed from target/linux/patches/3.19.1/export-symbol-for-exmap.patch)0
-rw-r--r--target/linux/patches/3.19.5/fblogo.patch (renamed from target/linux/patches/3.19.1/fblogo.patch)0
-rw-r--r--target/linux/patches/3.19.5/gemalto.patch (renamed from target/linux/patches/3.19.1/gemalto.patch)0
-rw-r--r--target/linux/patches/3.19.5/initramfs-nosizelimit.patch (renamed from target/linux/patches/3.19.1/initramfs-nosizelimit.patch)0
-rw-r--r--target/linux/patches/3.19.5/lemote-rfkill.patch (renamed from target/linux/patches/3.19.1/lemote-rfkill.patch)0
-rw-r--r--target/linux/patches/3.19.5/microblaze-axi.patch (renamed from target/linux/patches/3.19.1/microblaze-axi.patch)0
-rw-r--r--target/linux/patches/3.19.5/microblaze-ethernet.patch (renamed from target/linux/patches/3.19.1/microblaze-ethernet.patch)0
-rw-r--r--target/linux/patches/3.19.5/mkpiggy.patch (renamed from target/linux/patches/3.19.1/mkpiggy.patch)0
-rw-r--r--target/linux/patches/3.19.5/mtd-rootfs.patch (renamed from target/linux/patches/3.19.1/mtd-rootfs.patch)0
-rw-r--r--target/linux/patches/3.19.5/nfsv3-tcp.patch (renamed from target/linux/patches/3.19.1/nfsv3-tcp.patch)0
-rw-r--r--target/linux/patches/3.19.5/non-static.patch (renamed from target/linux/patches/3.19.1/non-static.patch)0
-rw-r--r--target/linux/patches/3.19.5/ppc64-missing-zlib.patch (renamed from target/linux/patches/3.19.1/ppc64-missing-zlib.patch)0
-rw-r--r--target/linux/patches/3.19.5/regmap-bool.patch (renamed from target/linux/patches/3.19.1/regmap-bool.patch)0
-rw-r--r--target/linux/patches/3.19.5/relocs.patch (renamed from target/linux/patches/3.19.1/relocs.patch)0
-rw-r--r--target/linux/patches/3.19.5/sgidefs.patch (renamed from target/linux/patches/3.19.1/sgidefs.patch)0
-rw-r--r--target/linux/patches/3.19.5/sortext.patch (renamed from target/linux/patches/3.19.1/sortext.patch)0
-rw-r--r--target/linux/patches/3.19.5/sparc-aout.patch (renamed from target/linux/patches/3.19.1/sparc-aout.patch)0
-rw-r--r--target/linux/patches/3.19.5/startup.patch (renamed from target/linux/patches/3.19.1/startup.patch)0
-rw-r--r--target/linux/patches/3.19.5/wlan-cf.patch (renamed from target/linux/patches/3.19.1/wlan-cf.patch)0
-rw-r--r--target/linux/patches/3.19.5/xargs.patch (renamed from target/linux/patches/3.19.1/xargs.patch)0
-rw-r--r--target/linux/patches/3.19.5/yaffs2.patch (renamed from target/linux/patches/3.19.1/yaffs2.patch)0
-rw-r--r--target/linux/patches/3.4.107/aufs2.patch (renamed from target/linux/patches/3.4.106/aufs2.patch)0
-rw-r--r--target/linux/patches/3.4.107/bsd-compatibility.patch (renamed from target/linux/patches/3.4.106/bsd-compatibility.patch)0
-rw-r--r--target/linux/patches/3.4.107/defaults.patch (renamed from target/linux/patches/3.4.106/defaults.patch)0
-rw-r--r--target/linux/patches/3.4.107/gemalto.patch (renamed from target/linux/patches/3.4.106/gemalto.patch)0
-rw-r--r--target/linux/patches/3.4.107/lemote-rfkill.patch (renamed from target/linux/patches/3.4.106/lemote-rfkill.patch)0
-rw-r--r--target/linux/patches/3.4.107/linux-gcc-check.patch (renamed from target/linux/patches/3.4.106/linux-gcc-check.patch)0
-rw-r--r--target/linux/patches/3.4.107/mips-error.patch (renamed from target/linux/patches/3.4.106/mips-error.patch)0
-rw-r--r--target/linux/patches/3.4.107/mkpiggy.patch (renamed from target/linux/patches/3.4.106/mkpiggy.patch)0
-rw-r--r--target/linux/patches/3.4.107/module-alloc-size-check.patch (renamed from target/linux/patches/3.4.106/module-alloc-size-check.patch)0
-rw-r--r--target/linux/patches/3.4.107/non-static.patch (renamed from target/linux/patches/3.4.106/non-static.patch)0
-rw-r--r--target/linux/patches/3.4.107/relocs.patch (renamed from target/linux/patches/3.4.106/relocs.patch)0
-rw-r--r--target/linux/patches/3.4.107/sparc-aout.patch (renamed from target/linux/patches/3.4.106/sparc-aout.patch)0
-rw-r--r--target/linux/patches/3.4.107/sparc-include.patch (renamed from target/linux/patches/3.4.106/sparc-include.patch)0
-rw-r--r--target/linux/patches/3.4.107/startup.patch (renamed from target/linux/patches/3.4.106/startup.patch)0
-rw-r--r--target/linux/patches/3.4.107/usb-defaults-off.patch (renamed from target/linux/patches/3.4.106/usb-defaults-off.patch)0
-rw-r--r--target/linux/patches/3.4.107/vga-cons-default-off.patch (renamed from target/linux/patches/3.4.106/vga-cons-default-off.patch)0
-rw-r--r--target/linux/patches/3.4.107/wlan-cf.patch (renamed from target/linux/patches/3.4.106/wlan-cf.patch)0
-rw-r--r--target/linux/patches/3.4.107/yaffs2.patch (renamed from target/linux/patches/3.4.106/yaffs2.patch)0
-rw-r--r--target/linux/patches/3.4.107/zlib-inflate.patch (renamed from target/linux/patches/3.4.106/zlib-inflate.patch)0
126 files changed, 13427 insertions, 1376 deletions
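
The diffstat above records pure renames: each maintained kernel branch is bumped to a newer stable release (3.4.106 -> 3.4.107, 3.10.71 -> 3.10.75, 3.12.38 -> 3.12.40, 3.14.35 -> 3.14.40, 3.18.10 -> 3.18.12, 3.19.1 -> 3.19.5) and its patch directory moves with it. Every patch except 3.18.12/realtime.patch carries over unchanged (0 lines), while realtime.patch is regenerated against the new source tree and accounts for all of the churn (13427 insertions + 1376 deletions = its 14803 changed lines). As a rough sketch of how such a bump might be staged (a hypothetical helper script, not part of this repository; only the version pairs come from the diffstat above), git mv keeps the history as renames:

#!/bin/sh
# Hypothetical sketch: move each per-version patch directory so git
# records the stable-release bump as renames (the 0-line diffstat entries).
set -e
while read old new; do
	git mv "target/linux/patches/$old" "target/linux/patches/$new"
done <<EOF
3.4.106 3.4.107
3.10.71 3.10.75
3.12.38 3.12.40
3.14.35 3.14.40
3.18.10 3.18.12
3.19.1 3.19.5
EOF
# 3.18.12/realtime.patch is then refreshed by hand against the new tree,
# e.g. by re-running diff -Nur linux-3.18.12.orig linux-3.18.12,
# which is what the updated patch headers in the diff below show.
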
diff --git a/target/linux/patches/3.10.71/bsd-compatibility.patch b/target/linux/patches/3.10.75/bsd-compatibility.patch
index b954b658f..b954b658f 100644
--- a/target/linux/patches/3.10.71/bsd-compatibility.patch
+++ b/target/linux/patches/3.10.75/bsd-compatibility.patch
diff --git a/target/linux/patches/3.10.71/headers-install.patch b/target/linux/patches/3.10.75/headers-install.patch
index 46dc9fe13..46dc9fe13 100644
--- a/target/linux/patches/3.10.71/headers-install.patch
+++ b/target/linux/patches/3.10.75/headers-install.patch
diff --git a/target/linux/patches/3.10.71/mkpiggy.patch b/target/linux/patches/3.10.75/mkpiggy.patch
index d4e815cd2..d4e815cd2 100644
--- a/target/linux/patches/3.10.71/mkpiggy.patch
+++ b/target/linux/patches/3.10.75/mkpiggy.patch
diff --git a/target/linux/patches/3.10.71/relocs.patch b/target/linux/patches/3.10.75/relocs.patch
index 649b9e73e..649b9e73e 100644
--- a/target/linux/patches/3.10.71/relocs.patch
+++ b/target/linux/patches/3.10.75/relocs.patch
diff --git a/target/linux/patches/3.10.71/sgidefs.patch b/target/linux/patches/3.10.75/sgidefs.patch
index f00a284d9..f00a284d9 100644
--- a/target/linux/patches/3.10.71/sgidefs.patch
+++ b/target/linux/patches/3.10.75/sgidefs.patch
diff --git a/target/linux/patches/3.10.71/sortext.patch b/target/linux/patches/3.10.75/sortext.patch
index 65bbbb64b..65bbbb64b 100644
--- a/target/linux/patches/3.10.71/sortext.patch
+++ b/target/linux/patches/3.10.75/sortext.patch
diff --git a/target/linux/patches/3.10.71/startup.patch b/target/linux/patches/3.10.75/startup.patch
index 3ebc5db1e..3ebc5db1e 100644
--- a/target/linux/patches/3.10.71/startup.patch
+++ b/target/linux/patches/3.10.75/startup.patch
diff --git a/target/linux/patches/3.10.71/yaffs2.patch b/target/linux/patches/3.10.75/yaffs2.patch
index 172629530..172629530 100644
--- a/target/linux/patches/3.10.71/yaffs2.patch
+++ b/target/linux/patches/3.10.75/yaffs2.patch
diff --git a/target/linux/patches/3.12.38/bsd-compatibility.patch b/target/linux/patches/3.12.40/bsd-compatibility.patch
index b954b658f..b954b658f 100644
--- a/target/linux/patches/3.12.38/bsd-compatibility.patch
+++ b/target/linux/patches/3.12.40/bsd-compatibility.patch
diff --git a/target/linux/patches/3.12.38/cleankernel.patch b/target/linux/patches/3.12.40/cleankernel.patch
index d8c055dc3..d8c055dc3 100644
--- a/target/linux/patches/3.12.38/cleankernel.patch
+++ b/target/linux/patches/3.12.40/cleankernel.patch
diff --git a/target/linux/patches/3.12.38/defaults.patch b/target/linux/patches/3.12.40/defaults.patch
index 6cdca084e..6cdca084e 100644
--- a/target/linux/patches/3.12.38/defaults.patch
+++ b/target/linux/patches/3.12.40/defaults.patch
diff --git a/target/linux/patches/3.12.38/disable-netfilter.patch b/target/linux/patches/3.12.40/disable-netfilter.patch
index 7b1ca013a..7b1ca013a 100644
--- a/target/linux/patches/3.12.38/disable-netfilter.patch
+++ b/target/linux/patches/3.12.40/disable-netfilter.patch
diff --git a/target/linux/patches/3.12.38/export-symbol-for-exmap.patch b/target/linux/patches/3.12.40/export-symbol-for-exmap.patch
index 4f0fc8449..4f0fc8449 100644
--- a/target/linux/patches/3.12.38/export-symbol-for-exmap.patch
+++ b/target/linux/patches/3.12.40/export-symbol-for-exmap.patch
diff --git a/target/linux/patches/3.12.38/gemalto.patch b/target/linux/patches/3.12.40/gemalto.patch
index 65f7af1d7..65f7af1d7 100644
--- a/target/linux/patches/3.12.38/gemalto.patch
+++ b/target/linux/patches/3.12.40/gemalto.patch
diff --git a/target/linux/patches/3.12.38/lemote-rfkill.patch b/target/linux/patches/3.12.40/lemote-rfkill.patch
index a61488434..a61488434 100644
--- a/target/linux/patches/3.12.38/lemote-rfkill.patch
+++ b/target/linux/patches/3.12.40/lemote-rfkill.patch
diff --git a/target/linux/patches/3.12.38/microblaze-ethernet.patch b/target/linux/patches/3.12.40/microblaze-ethernet.patch
index 742ab477e..742ab477e 100644
--- a/target/linux/patches/3.12.38/microblaze-ethernet.patch
+++ b/target/linux/patches/3.12.40/microblaze-ethernet.patch
diff --git a/target/linux/patches/3.12.38/microblaze-setup.patch b/target/linux/patches/3.12.40/microblaze-setup.patch
index 43815f274..43815f274 100644
--- a/target/linux/patches/3.12.38/microblaze-setup.patch
+++ b/target/linux/patches/3.12.40/microblaze-setup.patch
diff --git a/target/linux/patches/3.12.38/mips-lzo-fix.patch b/target/linux/patches/3.12.40/mips-lzo-fix.patch
index 0740bdc6a..0740bdc6a 100644
--- a/target/linux/patches/3.12.38/mips-lzo-fix.patch
+++ b/target/linux/patches/3.12.40/mips-lzo-fix.patch
diff --git a/target/linux/patches/3.12.38/mkpiggy.patch b/target/linux/patches/3.12.40/mkpiggy.patch
index d4e815cd2..d4e815cd2 100644
--- a/target/linux/patches/3.12.38/mkpiggy.patch
+++ b/target/linux/patches/3.12.40/mkpiggy.patch
diff --git a/target/linux/patches/3.12.38/mtd-rootfs.patch b/target/linux/patches/3.12.40/mtd-rootfs.patch
index 775d5fc80..775d5fc80 100644
--- a/target/linux/patches/3.12.38/mtd-rootfs.patch
+++ b/target/linux/patches/3.12.40/mtd-rootfs.patch
diff --git a/target/linux/patches/3.12.38/non-static.patch b/target/linux/patches/3.12.40/non-static.patch
index a967703d0..a967703d0 100644
--- a/target/linux/patches/3.12.38/non-static.patch
+++ b/target/linux/patches/3.12.40/non-static.patch
diff --git a/target/linux/patches/3.12.38/ppc64-missing-zlib.patch b/target/linux/patches/3.12.40/ppc64-missing-zlib.patch
index c6e0616be..c6e0616be 100644
--- a/target/linux/patches/3.12.38/ppc64-missing-zlib.patch
+++ b/target/linux/patches/3.12.40/ppc64-missing-zlib.patch
diff --git a/target/linux/patches/3.12.38/regmap-bool.patch b/target/linux/patches/3.12.40/regmap-bool.patch
index c3fd9a318..c3fd9a318 100644
--- a/target/linux/patches/3.12.38/regmap-bool.patch
+++ b/target/linux/patches/3.12.40/regmap-bool.patch
diff --git a/target/linux/patches/3.12.38/relocs.patch b/target/linux/patches/3.12.40/relocs.patch
index 649b9e73e..649b9e73e 100644
--- a/target/linux/patches/3.12.38/relocs.patch
+++ b/target/linux/patches/3.12.40/relocs.patch
diff --git a/target/linux/patches/3.12.38/sgidefs.patch b/target/linux/patches/3.12.40/sgidefs.patch
index f00a284d9..f00a284d9 100644
--- a/target/linux/patches/3.12.38/sgidefs.patch
+++ b/target/linux/patches/3.12.40/sgidefs.patch
diff --git a/target/linux/patches/3.12.38/sortext.patch b/target/linux/patches/3.12.40/sortext.patch
index 8fd4e1d6b..8fd4e1d6b 100644
--- a/target/linux/patches/3.12.38/sortext.patch
+++ b/target/linux/patches/3.12.40/sortext.patch
diff --git a/target/linux/patches/3.12.38/startup.patch b/target/linux/patches/3.12.40/startup.patch
index 4cd477da1..4cd477da1 100644
--- a/target/linux/patches/3.12.38/startup.patch
+++ b/target/linux/patches/3.12.40/startup.patch
diff --git a/target/linux/patches/3.12.38/usb-defaults-off.patch b/target/linux/patches/3.12.40/usb-defaults-off.patch
index 54dff2bd4..54dff2bd4 100644
--- a/target/linux/patches/3.12.38/usb-defaults-off.patch
+++ b/target/linux/patches/3.12.40/usb-defaults-off.patch
diff --git a/target/linux/patches/3.12.38/vga-cons-default-off.patch b/target/linux/patches/3.12.40/vga-cons-default-off.patch
index 08a57f783..08a57f783 100644
--- a/target/linux/patches/3.12.38/vga-cons-default-off.patch
+++ b/target/linux/patches/3.12.40/vga-cons-default-off.patch
diff --git a/target/linux/patches/3.12.38/wlan-cf.patch b/target/linux/patches/3.12.40/wlan-cf.patch
index fc20759e2..fc20759e2 100644
--- a/target/linux/patches/3.12.38/wlan-cf.patch
+++ b/target/linux/patches/3.12.40/wlan-cf.patch
diff --git a/target/linux/patches/3.12.38/xargs.patch b/target/linux/patches/3.12.40/xargs.patch
index 2c7b3df59..2c7b3df59 100644
--- a/target/linux/patches/3.12.38/xargs.patch
+++ b/target/linux/patches/3.12.40/xargs.patch
diff --git a/target/linux/patches/3.12.38/yaffs2.patch b/target/linux/patches/3.12.40/yaffs2.patch
index 306814439..306814439 100644
--- a/target/linux/patches/3.12.38/yaffs2.patch
+++ b/target/linux/patches/3.12.40/yaffs2.patch
diff --git a/target/linux/patches/3.12.38/zlib-inflate.patch b/target/linux/patches/3.12.40/zlib-inflate.patch
index 58e1f6d21..58e1f6d21 100644
--- a/target/linux/patches/3.12.38/zlib-inflate.patch
+++ b/target/linux/patches/3.12.40/zlib-inflate.patch
diff --git a/target/linux/patches/3.14.35/bsd-compatibility.patch b/target/linux/patches/3.14.40/bsd-compatibility.patch
index b954b658f..b954b658f 100644
--- a/target/linux/patches/3.14.35/bsd-compatibility.patch
+++ b/target/linux/patches/3.14.40/bsd-compatibility.patch
diff --git a/target/linux/patches/3.14.35/cleankernel.patch b/target/linux/patches/3.14.40/cleankernel.patch
index d8c055dc3..d8c055dc3 100644
--- a/target/linux/patches/3.14.35/cleankernel.patch
+++ b/target/linux/patches/3.14.40/cleankernel.patch
diff --git a/target/linux/patches/3.14.35/defaults.patch b/target/linux/patches/3.14.40/defaults.patch
index f071fd1dd..f071fd1dd 100644
--- a/target/linux/patches/3.14.35/defaults.patch
+++ b/target/linux/patches/3.14.40/defaults.patch
diff --git a/target/linux/patches/3.14.35/disable-netfilter.patch b/target/linux/patches/3.14.40/disable-netfilter.patch
index 7b1ca013a..7b1ca013a 100644
--- a/target/linux/patches/3.14.35/disable-netfilter.patch
+++ b/target/linux/patches/3.14.40/disable-netfilter.patch
diff --git a/target/linux/patches/3.14.35/export-symbol-for-exmap.patch b/target/linux/patches/3.14.40/export-symbol-for-exmap.patch
index 4f0fc8449..4f0fc8449 100644
--- a/target/linux/patches/3.14.35/export-symbol-for-exmap.patch
+++ b/target/linux/patches/3.14.40/export-symbol-for-exmap.patch
diff --git a/target/linux/patches/3.14.35/fblogo.patch b/target/linux/patches/3.14.40/fblogo.patch
index cbbb4216f..cbbb4216f 100644
--- a/target/linux/patches/3.14.35/fblogo.patch
+++ b/target/linux/patches/3.14.40/fblogo.patch
diff --git a/target/linux/patches/3.14.35/gemalto.patch b/target/linux/patches/3.14.40/gemalto.patch
index 65f7af1d7..65f7af1d7 100644
--- a/target/linux/patches/3.14.35/gemalto.patch
+++ b/target/linux/patches/3.14.40/gemalto.patch
diff --git a/target/linux/patches/3.14.35/initramfs-nosizelimit.patch b/target/linux/patches/3.14.40/initramfs-nosizelimit.patch
index 40d2f6bd8..40d2f6bd8 100644
--- a/target/linux/patches/3.14.35/initramfs-nosizelimit.patch
+++ b/target/linux/patches/3.14.40/initramfs-nosizelimit.patch
diff --git a/target/linux/patches/3.14.35/lemote-rfkill.patch b/target/linux/patches/3.14.40/lemote-rfkill.patch
index a61488434..a61488434 100644
--- a/target/linux/patches/3.14.35/lemote-rfkill.patch
+++ b/target/linux/patches/3.14.40/lemote-rfkill.patch
diff --git a/target/linux/patches/3.14.35/microblaze-axi.patch b/target/linux/patches/3.14.40/microblaze-axi.patch
index 1a4b17d8c..1a4b17d8c 100644
--- a/target/linux/patches/3.14.35/microblaze-axi.patch
+++ b/target/linux/patches/3.14.40/microblaze-axi.patch
diff --git a/target/linux/patches/3.14.35/microblaze-ethernet.patch b/target/linux/patches/3.14.40/microblaze-ethernet.patch
index 742ab477e..742ab477e 100644
--- a/target/linux/patches/3.14.35/microblaze-ethernet.patch
+++ b/target/linux/patches/3.14.40/microblaze-ethernet.patch
diff --git a/target/linux/patches/3.14.35/mkpiggy.patch b/target/linux/patches/3.14.40/mkpiggy.patch
index 751678b74..751678b74 100644
--- a/target/linux/patches/3.14.35/mkpiggy.patch
+++ b/target/linux/patches/3.14.40/mkpiggy.patch
diff --git a/target/linux/patches/3.14.35/mptcp.patch b/target/linux/patches/3.14.40/mptcp.patch
index 9784e0577..9784e0577 100644
--- a/target/linux/patches/3.14.35/mptcp.patch
+++ b/target/linux/patches/3.14.40/mptcp.patch
diff --git a/target/linux/patches/3.14.35/mtd-rootfs.patch b/target/linux/patches/3.14.40/mtd-rootfs.patch
index 775d5fc80..775d5fc80 100644
--- a/target/linux/patches/3.14.35/mtd-rootfs.patch
+++ b/target/linux/patches/3.14.40/mtd-rootfs.patch
diff --git a/target/linux/patches/3.14.35/nfsv3-tcp.patch b/target/linux/patches/3.14.40/nfsv3-tcp.patch
index 68ed95b08..68ed95b08 100644
--- a/target/linux/patches/3.14.35/nfsv3-tcp.patch
+++ b/target/linux/patches/3.14.40/nfsv3-tcp.patch
diff --git a/target/linux/patches/3.14.35/non-static.patch b/target/linux/patches/3.14.40/non-static.patch
index a967703d0..a967703d0 100644
--- a/target/linux/patches/3.14.35/non-static.patch
+++ b/target/linux/patches/3.14.40/non-static.patch
diff --git a/target/linux/patches/3.14.35/ppc64-missing-zlib.patch b/target/linux/patches/3.14.40/ppc64-missing-zlib.patch
index c6e0616be..c6e0616be 100644
--- a/target/linux/patches/3.14.35/ppc64-missing-zlib.patch
+++ b/target/linux/patches/3.14.40/ppc64-missing-zlib.patch
diff --git a/target/linux/patches/3.14.35/regmap-boolean.patch b/target/linux/patches/3.14.40/regmap-boolean.patch
index d73620e4d..d73620e4d 100644
--- a/target/linux/patches/3.14.35/regmap-boolean.patch
+++ b/target/linux/patches/3.14.40/regmap-boolean.patch
diff --git a/target/linux/patches/3.14.35/relocs.patch b/target/linux/patches/3.14.40/relocs.patch
index 69a7c88a9..69a7c88a9 100644
--- a/target/linux/patches/3.14.35/relocs.patch
+++ b/target/linux/patches/3.14.40/relocs.patch
diff --git a/target/linux/patches/3.14.35/sgidefs.patch b/target/linux/patches/3.14.40/sgidefs.patch
index f00a284d9..f00a284d9 100644
--- a/target/linux/patches/3.14.35/sgidefs.patch
+++ b/target/linux/patches/3.14.40/sgidefs.patch
diff --git a/target/linux/patches/3.14.35/sortext.patch b/target/linux/patches/3.14.40/sortext.patch
index 8fd4e1d6b..8fd4e1d6b 100644
--- a/target/linux/patches/3.14.35/sortext.patch
+++ b/target/linux/patches/3.14.40/sortext.patch
diff --git a/target/linux/patches/3.14.35/startup.patch b/target/linux/patches/3.14.40/startup.patch
index d396b75e4..d396b75e4 100644
--- a/target/linux/patches/3.14.35/startup.patch
+++ b/target/linux/patches/3.14.40/startup.patch
diff --git a/target/linux/patches/3.14.35/wlan-cf.patch b/target/linux/patches/3.14.40/wlan-cf.patch
index fc20759e2..fc20759e2 100644
--- a/target/linux/patches/3.14.35/wlan-cf.patch
+++ b/target/linux/patches/3.14.40/wlan-cf.patch
diff --git a/target/linux/patches/3.14.35/xargs.patch b/target/linux/patches/3.14.40/xargs.patch
index 2c7b3df59..2c7b3df59 100644
--- a/target/linux/patches/3.14.35/xargs.patch
+++ b/target/linux/patches/3.14.40/xargs.patch
diff --git a/target/linux/patches/3.14.35/yaffs2.patch b/target/linux/patches/3.14.40/yaffs2.patch
index f075aa658..f075aa658 100644
--- a/target/linux/patches/3.14.35/yaffs2.patch
+++ b/target/linux/patches/3.14.40/yaffs2.patch
diff --git a/target/linux/patches/3.14.35/zlib-inflate.patch b/target/linux/patches/3.14.40/zlib-inflate.patch
index 58e1f6d21..58e1f6d21 100644
--- a/target/linux/patches/3.14.35/zlib-inflate.patch
+++ b/target/linux/patches/3.14.40/zlib-inflate.patch
diff --git a/target/linux/patches/3.18.10/bsd-compatibility.patch b/target/linux/patches/3.18.12/bsd-compatibility.patch
index b954b658f..b954b658f 100644
--- a/target/linux/patches/3.18.10/bsd-compatibility.patch
+++ b/target/linux/patches/3.18.12/bsd-compatibility.patch
diff --git a/target/linux/patches/3.18.10/cleankernel.patch b/target/linux/patches/3.18.12/cleankernel.patch
index d8c055dc3..d8c055dc3 100644
--- a/target/linux/patches/3.18.10/cleankernel.patch
+++ b/target/linux/patches/3.18.12/cleankernel.patch
diff --git a/target/linux/patches/3.18.10/cris-header.patch b/target/linux/patches/3.18.12/cris-header.patch
index 3db07e530..3db07e530 100644
--- a/target/linux/patches/3.18.10/cris-header.patch
+++ b/target/linux/patches/3.18.12/cris-header.patch
diff --git a/target/linux/patches/3.18.10/defaults.patch b/target/linux/patches/3.18.12/defaults.patch
index 6cdca084e..6cdca084e 100644
--- a/target/linux/patches/3.18.10/defaults.patch
+++ b/target/linux/patches/3.18.12/defaults.patch
diff --git a/target/linux/patches/3.18.10/export-symbol-for-exmap.patch b/target/linux/patches/3.18.12/export-symbol-for-exmap.patch
index 4f0fc8449..4f0fc8449 100644
--- a/target/linux/patches/3.18.10/export-symbol-for-exmap.patch
+++ b/target/linux/patches/3.18.12/export-symbol-for-exmap.patch
diff --git a/target/linux/patches/3.18.10/fblogo.patch b/target/linux/patches/3.18.12/fblogo.patch
index 5b9070242..5b9070242 100644
--- a/target/linux/patches/3.18.10/fblogo.patch
+++ b/target/linux/patches/3.18.12/fblogo.patch
diff --git a/target/linux/patches/3.18.10/gemalto.patch b/target/linux/patches/3.18.12/gemalto.patch
index 65f7af1d7..65f7af1d7 100644
--- a/target/linux/patches/3.18.10/gemalto.patch
+++ b/target/linux/patches/3.18.12/gemalto.patch
diff --git a/target/linux/patches/3.18.10/initramfs-nosizelimit.patch b/target/linux/patches/3.18.12/initramfs-nosizelimit.patch
index 40d2f6bd8..40d2f6bd8 100644
--- a/target/linux/patches/3.18.10/initramfs-nosizelimit.patch
+++ b/target/linux/patches/3.18.12/initramfs-nosizelimit.patch
diff --git a/target/linux/patches/3.18.10/lemote-rfkill.patch b/target/linux/patches/3.18.12/lemote-rfkill.patch
index a61488434..a61488434 100644
--- a/target/linux/patches/3.18.10/lemote-rfkill.patch
+++ b/target/linux/patches/3.18.12/lemote-rfkill.patch
diff --git a/target/linux/patches/3.18.10/microblaze-ethernet.patch b/target/linux/patches/3.18.12/microblaze-ethernet.patch
index 742ab477e..742ab477e 100644
--- a/target/linux/patches/3.18.10/microblaze-ethernet.patch
+++ b/target/linux/patches/3.18.12/microblaze-ethernet.patch
diff --git a/target/linux/patches/3.18.10/mkpiggy.patch b/target/linux/patches/3.18.12/mkpiggy.patch
index 751678b74..751678b74 100644
--- a/target/linux/patches/3.18.10/mkpiggy.patch
+++ b/target/linux/patches/3.18.12/mkpiggy.patch
diff --git a/target/linux/patches/3.18.10/mtd-rootfs.patch b/target/linux/patches/3.18.12/mtd-rootfs.patch
index 775d5fc80..775d5fc80 100644
--- a/target/linux/patches/3.18.10/mtd-rootfs.patch
+++ b/target/linux/patches/3.18.12/mtd-rootfs.patch
diff --git a/target/linux/patches/3.18.10/nfsv3-tcp.patch b/target/linux/patches/3.18.12/nfsv3-tcp.patch
index d5e07e1c2..d5e07e1c2 100644
--- a/target/linux/patches/3.18.10/nfsv3-tcp.patch
+++ b/target/linux/patches/3.18.12/nfsv3-tcp.patch
diff --git a/target/linux/patches/3.18.10/non-static.patch b/target/linux/patches/3.18.12/non-static.patch
index a967703d0..a967703d0 100644
--- a/target/linux/patches/3.18.10/non-static.patch
+++ b/target/linux/patches/3.18.12/non-static.patch
diff --git a/target/linux/patches/3.18.10/ppc64-missing-zlib.patch b/target/linux/patches/3.18.12/ppc64-missing-zlib.patch
index c6e0616be..c6e0616be 100644
--- a/target/linux/patches/3.18.10/ppc64-missing-zlib.patch
+++ b/target/linux/patches/3.18.12/ppc64-missing-zlib.patch
diff --git a/target/linux/patches/3.18.10/realtime.patch b/target/linux/patches/3.18.12/realtime.patch
index 87f669fbc..e91381e07 100644
--- a/target/linux/patches/3.18.10/realtime.patch
+++ b/target/linux/patches/3.18.12/realtime.patch
@@ -1,6 +1,6 @@
-diff -Nur linux-3.18.10.orig/arch/alpha/mm/fault.c linux-3.18.10/arch/alpha/mm/fault.c
---- linux-3.18.10.orig/arch/alpha/mm/fault.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/alpha/mm/fault.c 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/alpha/mm/fault.c linux-3.18.12/arch/alpha/mm/fault.c
+--- linux-3.18.12.orig/arch/alpha/mm/fault.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/alpha/mm/fault.c 2015-04-26 13:32:22.351684003 -0500
@@ -107,7 +107,7 @@
/* If we're in an interrupt context, or have no user context,
@@ -10,9 +10,9 @@ diff -Nur linux-3.18.10.orig/arch/alpha/mm/fault.c linux-3.18.10/arch/alpha/mm/f
goto no_context;
#ifdef CONFIG_ALPHA_LARGE_VMALLOC
-diff -Nur linux-3.18.10.orig/arch/arm/include/asm/cmpxchg.h linux-3.18.10/arch/arm/include/asm/cmpxchg.h
---- linux-3.18.10.orig/arch/arm/include/asm/cmpxchg.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm/include/asm/cmpxchg.h 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/arm/include/asm/cmpxchg.h linux-3.18.12/arch/arm/include/asm/cmpxchg.h
+--- linux-3.18.12.orig/arch/arm/include/asm/cmpxchg.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/include/asm/cmpxchg.h 2015-04-26 13:32:22.351684003 -0500
@@ -129,6 +129,8 @@
#else /* min ARCH >= ARMv6 */
@@ -22,9 +22,9 @@ diff -Nur linux-3.18.10.orig/arch/arm/include/asm/cmpxchg.h linux-3.18.10/arch/a
extern void __bad_cmpxchg(volatile void *ptr, int size);
/*
-diff -Nur linux-3.18.10.orig/arch/arm/include/asm/futex.h linux-3.18.10/arch/arm/include/asm/futex.h
---- linux-3.18.10.orig/arch/arm/include/asm/futex.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm/include/asm/futex.h 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/arm/include/asm/futex.h linux-3.18.12/arch/arm/include/asm/futex.h
+--- linux-3.18.12.orig/arch/arm/include/asm/futex.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/include/asm/futex.h 2015-04-26 13:32:22.351684003 -0500
@@ -93,6 +93,8 @@
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
return -EFAULT;
@@ -43,9 +43,9 @@ diff -Nur linux-3.18.10.orig/arch/arm/include/asm/futex.h linux-3.18.10/arch/arm
return ret;
}
-diff -Nur linux-3.18.10.orig/arch/arm/include/asm/switch_to.h linux-3.18.10/arch/arm/include/asm/switch_to.h
---- linux-3.18.10.orig/arch/arm/include/asm/switch_to.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm/include/asm/switch_to.h 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/arm/include/asm/switch_to.h linux-3.18.12/arch/arm/include/asm/switch_to.h
+--- linux-3.18.12.orig/arch/arm/include/asm/switch_to.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/include/asm/switch_to.h 2015-04-26 13:32:22.355684003 -0500
@@ -3,6 +3,13 @@
#include <linux/thread_info.h>
@@ -68,9 +68,9 @@ diff -Nur linux-3.18.10.orig/arch/arm/include/asm/switch_to.h linux-3.18.10/arch
last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
-diff -Nur linux-3.18.10.orig/arch/arm/include/asm/thread_info.h linux-3.18.10/arch/arm/include/asm/thread_info.h
---- linux-3.18.10.orig/arch/arm/include/asm/thread_info.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm/include/asm/thread_info.h 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/arm/include/asm/thread_info.h linux-3.18.12/arch/arm/include/asm/thread_info.h
+--- linux-3.18.12.orig/arch/arm/include/asm/thread_info.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/include/asm/thread_info.h 2015-04-26 13:32:22.355684003 -0500
@@ -51,6 +51,7 @@
struct thread_info {
unsigned long flags; /* low level flags */
@@ -95,9 +95,9 @@ diff -Nur linux-3.18.10.orig/arch/arm/include/asm/thread_info.h linux-3.18.10/ar
#define _TIF_UPROBE (1 << TIF_UPROBE)
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
-diff -Nur linux-3.18.10.orig/arch/arm/Kconfig linux-3.18.10/arch/arm/Kconfig
---- linux-3.18.10.orig/arch/arm/Kconfig 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm/Kconfig 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/arm/Kconfig linux-3.18.12/arch/arm/Kconfig
+--- linux-3.18.12.orig/arch/arm/Kconfig 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/Kconfig 2015-04-26 13:32:22.351684003 -0500
@@ -62,6 +62,7 @@
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
@@ -106,9 +106,9 @@ diff -Nur linux-3.18.10.orig/arch/arm/Kconfig linux-3.18.10/arch/arm/Kconfig
select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
select HAVE_REGS_AND_STACK_ACCESS_API
select HAVE_SYSCALL_TRACEPOINTS
-diff -Nur linux-3.18.10.orig/arch/arm/kernel/asm-offsets.c linux-3.18.10/arch/arm/kernel/asm-offsets.c
---- linux-3.18.10.orig/arch/arm/kernel/asm-offsets.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm/kernel/asm-offsets.c 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/arm/kernel/asm-offsets.c linux-3.18.12/arch/arm/kernel/asm-offsets.c
+--- linux-3.18.12.orig/arch/arm/kernel/asm-offsets.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/kernel/asm-offsets.c 2015-04-26 13:32:22.355684003 -0500
@@ -64,6 +64,7 @@
BLANK();
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
@@ -117,9 +117,9 @@ diff -Nur linux-3.18.10.orig/arch/arm/kernel/asm-offsets.c linux-3.18.10/arch/ar
DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_EXEC_DOMAIN, offsetof(struct thread_info, exec_domain));
-diff -Nur linux-3.18.10.orig/arch/arm/kernel/entry-armv.S linux-3.18.10/arch/arm/kernel/entry-armv.S
---- linux-3.18.10.orig/arch/arm/kernel/entry-armv.S 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm/kernel/entry-armv.S 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/arm/kernel/entry-armv.S linux-3.18.12/arch/arm/kernel/entry-armv.S
+--- linux-3.18.12.orig/arch/arm/kernel/entry-armv.S 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/kernel/entry-armv.S 2015-04-26 13:32:22.355684003 -0500
@@ -207,11 +207,18 @@
#ifdef CONFIG_PREEMPT
get_thread_info tsk
@@ -150,9 +150,9 @@ diff -Nur linux-3.18.10.orig/arch/arm/kernel/entry-armv.S linux-3.18.10/arch/arm
reteq r8 @ go again
b 1b
#endif
-diff -Nur linux-3.18.10.orig/arch/arm/kernel/process.c linux-3.18.10/arch/arm/kernel/process.c
---- linux-3.18.10.orig/arch/arm/kernel/process.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm/kernel/process.c 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/arm/kernel/process.c linux-3.18.12/arch/arm/kernel/process.c
+--- linux-3.18.12.orig/arch/arm/kernel/process.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/kernel/process.c 2015-04-26 13:32:22.355684003 -0500
@@ -431,6 +431,30 @@
}
@@ -184,9 +184,9 @@ diff -Nur linux-3.18.10.orig/arch/arm/kernel/process.c linux-3.18.10/arch/arm/ke
#ifdef CONFIG_KUSER_HELPERS
/*
* The vectors page is always readable from user space for the
-diff -Nur linux-3.18.10.orig/arch/arm/kernel/signal.c linux-3.18.10/arch/arm/kernel/signal.c
---- linux-3.18.10.orig/arch/arm/kernel/signal.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm/kernel/signal.c 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/arm/kernel/signal.c linux-3.18.12/arch/arm/kernel/signal.c
+--- linux-3.18.12.orig/arch/arm/kernel/signal.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/kernel/signal.c 2015-04-26 13:32:22.359684003 -0500
@@ -574,7 +574,8 @@
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
@@ -197,27 +197,9 @@ diff -Nur linux-3.18.10.orig/arch/arm/kernel/signal.c linux-3.18.10/arch/arm/ker
schedule();
} else {
if (unlikely(!user_mode(regs)))
-diff -Nur linux-3.18.10.orig/arch/arm/kernel/smp.c linux-3.18.10/arch/arm/kernel/smp.c
---- linux-3.18.10.orig/arch/arm/kernel/smp.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm/kernel/smp.c 2015-03-26 12:42:13.555582327 +0100
-@@ -506,12 +506,14 @@
- }
-
- #ifdef CONFIG_IRQ_WORK
-+#ifndef CONFIG_PREEMPT_RT_FULL
- void arch_irq_work_raise(void)
- {
- if (arch_irq_work_has_interrupt())
- smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
- }
- #endif
-+#endif
-
- #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
- void tick_broadcast(const struct cpumask *mask)
-diff -Nur linux-3.18.10.orig/arch/arm/kernel/unwind.c linux-3.18.10/arch/arm/kernel/unwind.c
---- linux-3.18.10.orig/arch/arm/kernel/unwind.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm/kernel/unwind.c 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/arm/kernel/unwind.c linux-3.18.12/arch/arm/kernel/unwind.c
+--- linux-3.18.12.orig/arch/arm/kernel/unwind.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/kernel/unwind.c 2015-04-26 13:32:22.359684003 -0500
@@ -93,7 +93,7 @@
static const struct unwind_idx *__origin_unwind_idx;
extern const struct unwind_idx __stop_unwind_idx[];
@@ -269,9 +251,45 @@ diff -Nur linux-3.18.10.orig/arch/arm/kernel/unwind.c linux-3.18.10/arch/arm/ker
kfree(tab);
}
-diff -Nur linux-3.18.10.orig/arch/arm/mach-at91/at91rm9200_time.c linux-3.18.10/arch/arm/mach-at91/at91rm9200_time.c
---- linux-3.18.10.orig/arch/arm/mach-at91/at91rm9200_time.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm/mach-at91/at91rm9200_time.c 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/arm/kvm/arm.c linux-3.18.12/arch/arm/kvm/arm.c
+--- linux-3.18.12.orig/arch/arm/kvm/arm.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/kvm/arm.c 2015-04-26 13:32:22.359684003 -0500
+@@ -441,9 +441,9 @@
+
+ static void vcpu_pause(struct kvm_vcpu *vcpu)
+ {
+- wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
++ struct swait_head *wq = kvm_arch_vcpu_wq(vcpu);
+
+- wait_event_interruptible(*wq, !vcpu->arch.pause);
++ swait_event_interruptible(*wq, !vcpu->arch.pause);
+ }
+
+ static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
+diff -Nur linux-3.18.12.orig/arch/arm/kvm/psci.c linux-3.18.12/arch/arm/kvm/psci.c
+--- linux-3.18.12.orig/arch/arm/kvm/psci.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/kvm/psci.c 2015-04-26 13:32:22.359684003 -0500
+@@ -66,7 +66,7 @@
+ {
+ struct kvm *kvm = source_vcpu->kvm;
+ struct kvm_vcpu *vcpu = NULL, *tmp;
+- wait_queue_head_t *wq;
++ struct swait_head *wq;
+ unsigned long cpu_id;
+ unsigned long context_id;
+ unsigned long mpidr;
+@@ -123,7 +123,7 @@
+ smp_mb(); /* Make sure the above is visible */
+
+ wq = kvm_arch_vcpu_wq(vcpu);
+- wake_up_interruptible(wq);
++ swait_wake_interruptible(wq);
+
+ return PSCI_RET_SUCCESS;
+ }
+diff -Nur linux-3.18.12.orig/arch/arm/mach-at91/at91rm9200_time.c linux-3.18.12/arch/arm/mach-at91/at91rm9200_time.c
+--- linux-3.18.12.orig/arch/arm/mach-at91/at91rm9200_time.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/mach-at91/at91rm9200_time.c 2015-04-26 13:32:22.359684003 -0500
@@ -135,6 +135,7 @@
break;
case CLOCK_EVT_MODE_SHUTDOWN:
@@ -280,9 +298,9 @@ diff -Nur linux-3.18.10.orig/arch/arm/mach-at91/at91rm9200_time.c linux-3.18.10/
case CLOCK_EVT_MODE_RESUME:
irqmask = 0;
break;
-diff -Nur linux-3.18.10.orig/arch/arm/mach-exynos/platsmp.c linux-3.18.10/arch/arm/mach-exynos/platsmp.c
---- linux-3.18.10.orig/arch/arm/mach-exynos/platsmp.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm/mach-exynos/platsmp.c 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/arm/mach-exynos/platsmp.c linux-3.18.12/arch/arm/mach-exynos/platsmp.c
+--- linux-3.18.12.orig/arch/arm/mach-exynos/platsmp.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/mach-exynos/platsmp.c 2015-04-26 13:32:22.359684003 -0500
@@ -137,7 +137,7 @@
return (void __iomem *)(S5P_VA_SCU);
}
@@ -330,9 +348,9 @@ diff -Nur linux-3.18.10.orig/arch/arm/mach-exynos/platsmp.c linux-3.18.10/arch/a
return pen_release != -1 ? ret : 0;
}
-diff -Nur linux-3.18.10.orig/arch/arm/mach-hisi/platmcpm.c linux-3.18.10/arch/arm/mach-hisi/platmcpm.c
---- linux-3.18.10.orig/arch/arm/mach-hisi/platmcpm.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm/mach-hisi/platmcpm.c 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/arm/mach-hisi/platmcpm.c linux-3.18.12/arch/arm/mach-hisi/platmcpm.c
+--- linux-3.18.12.orig/arch/arm/mach-hisi/platmcpm.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/mach-hisi/platmcpm.c 2015-04-26 13:32:22.363684003 -0500
@@ -57,7 +57,7 @@
static void __iomem *sysctrl, *fabric;
@@ -435,9 +453,9 @@ diff -Nur linux-3.18.10.orig/arch/arm/mach-hisi/platmcpm.c linux-3.18.10/arch/ar
}
static void __naked hip04_mcpm_power_up_setup(unsigned int affinity_level)
-diff -Nur linux-3.18.10.orig/arch/arm/mach-omap2/omap-smp.c linux-3.18.10/arch/arm/mach-omap2/omap-smp.c
---- linux-3.18.10.orig/arch/arm/mach-omap2/omap-smp.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm/mach-omap2/omap-smp.c 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/arm/mach-omap2/omap-smp.c linux-3.18.12/arch/arm/mach-omap2/omap-smp.c
+--- linux-3.18.12.orig/arch/arm/mach-omap2/omap-smp.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/mach-omap2/omap-smp.c 2015-04-26 13:32:22.363684003 -0500
@@ -43,7 +43,7 @@
/* SCU base address */
static void __iomem *scu_base;
@@ -476,9 +494,9 @@ diff -Nur linux-3.18.10.orig/arch/arm/mach-omap2/omap-smp.c linux-3.18.10/arch/a
return 0;
}
-diff -Nur linux-3.18.10.orig/arch/arm/mach-prima2/platsmp.c linux-3.18.10/arch/arm/mach-prima2/platsmp.c
---- linux-3.18.10.orig/arch/arm/mach-prima2/platsmp.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm/mach-prima2/platsmp.c 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/arm/mach-prima2/platsmp.c linux-3.18.12/arch/arm/mach-prima2/platsmp.c
+--- linux-3.18.12.orig/arch/arm/mach-prima2/platsmp.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/mach-prima2/platsmp.c 2015-04-26 13:32:22.363684003 -0500
@@ -23,7 +23,7 @@
static void __iomem *scu_base;
static void __iomem *rsc_base;
@@ -517,9 +535,9 @@ diff -Nur linux-3.18.10.orig/arch/arm/mach-prima2/platsmp.c linux-3.18.10/arch/a
return pen_release != -1 ? -ENOSYS : 0;
}
-diff -Nur linux-3.18.10.orig/arch/arm/mach-qcom/platsmp.c linux-3.18.10/arch/arm/mach-qcom/platsmp.c
---- linux-3.18.10.orig/arch/arm/mach-qcom/platsmp.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm/mach-qcom/platsmp.c 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/arm/mach-qcom/platsmp.c linux-3.18.12/arch/arm/mach-qcom/platsmp.c
+--- linux-3.18.12.orig/arch/arm/mach-qcom/platsmp.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/mach-qcom/platsmp.c 2015-04-26 13:32:22.363684003 -0500
@@ -46,7 +46,7 @@
extern void secondary_startup(void);
@@ -558,9 +576,9 @@ diff -Nur linux-3.18.10.orig/arch/arm/mach-qcom/platsmp.c linux-3.18.10/arch/arm
return ret;
}
-diff -Nur linux-3.18.10.orig/arch/arm/mach-spear/platsmp.c linux-3.18.10/arch/arm/mach-spear/platsmp.c
---- linux-3.18.10.orig/arch/arm/mach-spear/platsmp.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm/mach-spear/platsmp.c 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/arm/mach-spear/platsmp.c linux-3.18.12/arch/arm/mach-spear/platsmp.c
+--- linux-3.18.12.orig/arch/arm/mach-spear/platsmp.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/mach-spear/platsmp.c 2015-04-26 13:32:22.363684003 -0500
@@ -32,7 +32,7 @@
sync_cache_w(&pen_release);
}
@@ -599,9 +617,9 @@ diff -Nur linux-3.18.10.orig/arch/arm/mach-spear/platsmp.c linux-3.18.10/arch/ar
return pen_release != -1 ? -ENOSYS : 0;
}
-diff -Nur linux-3.18.10.orig/arch/arm/mach-sti/platsmp.c linux-3.18.10/arch/arm/mach-sti/platsmp.c
---- linux-3.18.10.orig/arch/arm/mach-sti/platsmp.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm/mach-sti/platsmp.c 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/arm/mach-sti/platsmp.c linux-3.18.12/arch/arm/mach-sti/platsmp.c
+--- linux-3.18.12.orig/arch/arm/mach-sti/platsmp.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/mach-sti/platsmp.c 2015-04-26 13:32:22.363684003 -0500
@@ -34,7 +34,7 @@
sync_cache_w(&pen_release);
}
@@ -640,9 +658,9 @@ diff -Nur linux-3.18.10.orig/arch/arm/mach-sti/platsmp.c linux-3.18.10/arch/arm/
return pen_release != -1 ? -ENOSYS : 0;
}
-diff -Nur linux-3.18.10.orig/arch/arm/mach-ux500/platsmp.c linux-3.18.10/arch/arm/mach-ux500/platsmp.c
---- linux-3.18.10.orig/arch/arm/mach-ux500/platsmp.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm/mach-ux500/platsmp.c 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/arm/mach-ux500/platsmp.c linux-3.18.12/arch/arm/mach-ux500/platsmp.c
+--- linux-3.18.12.orig/arch/arm/mach-ux500/platsmp.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/mach-ux500/platsmp.c 2015-04-26 13:32:22.363684003 -0500
@@ -51,7 +51,7 @@
return NULL;
}
@@ -681,9 +699,9 @@ diff -Nur linux-3.18.10.orig/arch/arm/mach-ux500/platsmp.c linux-3.18.10/arch/ar
return pen_release != -1 ? -ENOSYS : 0;
}
-diff -Nur linux-3.18.10.orig/arch/arm/mm/fault.c linux-3.18.10/arch/arm/mm/fault.c
---- linux-3.18.10.orig/arch/arm/mm/fault.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm/mm/fault.c 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/arm/mm/fault.c linux-3.18.12/arch/arm/mm/fault.c
+--- linux-3.18.12.orig/arch/arm/mm/fault.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/mm/fault.c 2015-04-26 13:32:22.367684003 -0500
@@ -277,7 +277,7 @@
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -713,9 +731,9 @@ diff -Nur linux-3.18.10.orig/arch/arm/mm/fault.c linux-3.18.10/arch/arm/mm/fault
do_bad_area(addr, fsr, regs);
return 0;
}
-diff -Nur linux-3.18.10.orig/arch/arm/mm/highmem.c linux-3.18.10/arch/arm/mm/highmem.c
---- linux-3.18.10.orig/arch/arm/mm/highmem.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm/mm/highmem.c 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/arm/mm/highmem.c linux-3.18.12/arch/arm/mm/highmem.c
+--- linux-3.18.12.orig/arch/arm/mm/highmem.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/mm/highmem.c 2015-04-26 13:32:22.367684003 -0500
@@ -53,6 +53,7 @@
void *kmap_atomic(struct page *page)
@@ -802,9 +820,9 @@ diff -Nur linux-3.18.10.orig/arch/arm/mm/highmem.c linux-3.18.10/arch/arm/mm/hig
+ }
+}
+#endif
-diff -Nur linux-3.18.10.orig/arch/arm/plat-versatile/platsmp.c linux-3.18.10/arch/arm/plat-versatile/platsmp.c
---- linux-3.18.10.orig/arch/arm/plat-versatile/platsmp.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm/plat-versatile/platsmp.c 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/arm/plat-versatile/platsmp.c linux-3.18.12/arch/arm/plat-versatile/platsmp.c
+--- linux-3.18.12.orig/arch/arm/plat-versatile/platsmp.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/arm/plat-versatile/platsmp.c 2015-04-26 13:32:22.367684003 -0500
@@ -30,7 +30,7 @@
sync_cache_w(&pen_release);
}
@@ -843,27 +861,9 @@ diff -Nur linux-3.18.10.orig/arch/arm/plat-versatile/platsmp.c linux-3.18.10/arc
return pen_release != -1 ? -ENOSYS : 0;
}
-diff -Nur linux-3.18.10.orig/arch/arm64/kernel/smp.c linux-3.18.10/arch/arm64/kernel/smp.c
---- linux-3.18.10.orig/arch/arm64/kernel/smp.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/arm64/kernel/smp.c 2015-03-26 12:42:13.559582331 +0100
-@@ -529,12 +529,14 @@
- }
-
- #ifdef CONFIG_IRQ_WORK
-+#ifndef CONFIG_PREEMPT_RT_FULL
- void arch_irq_work_raise(void)
- {
- if (__smp_cross_call)
- smp_cross_call(cpumask_of(smp_processor_id()), IPI_IRQ_WORK);
- }
- #endif
-+#endif
-
- static DEFINE_RAW_SPINLOCK(stop_lock);
-
-diff -Nur linux-3.18.10.orig/arch/avr32/mm/fault.c linux-3.18.10/arch/avr32/mm/fault.c
---- linux-3.18.10.orig/arch/avr32/mm/fault.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/avr32/mm/fault.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/avr32/mm/fault.c linux-3.18.12/arch/avr32/mm/fault.c
+--- linux-3.18.12.orig/arch/avr32/mm/fault.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/avr32/mm/fault.c 2015-04-26 13:32:22.367684003 -0500
@@ -81,7 +81,7 @@
* If we're in an interrupt or have no user context, we must
* not take the fault...
@@ -873,9 +873,9 @@ diff -Nur linux-3.18.10.orig/arch/avr32/mm/fault.c linux-3.18.10/arch/avr32/mm/f
goto no_context;
local_irq_enable();
-diff -Nur linux-3.18.10.orig/arch/cris/mm/fault.c linux-3.18.10/arch/cris/mm/fault.c
---- linux-3.18.10.orig/arch/cris/mm/fault.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/cris/mm/fault.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/cris/mm/fault.c linux-3.18.12/arch/cris/mm/fault.c
+--- linux-3.18.12.orig/arch/cris/mm/fault.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/cris/mm/fault.c 2015-04-26 13:32:22.367684003 -0500
@@ -113,7 +113,7 @@
* user context, we must not take the fault.
*/
@@ -885,9 +885,9 @@ diff -Nur linux-3.18.10.orig/arch/cris/mm/fault.c linux-3.18.10/arch/cris/mm/fau
goto no_context;
if (user_mode(regs))
-diff -Nur linux-3.18.10.orig/arch/frv/mm/fault.c linux-3.18.10/arch/frv/mm/fault.c
---- linux-3.18.10.orig/arch/frv/mm/fault.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/frv/mm/fault.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/frv/mm/fault.c linux-3.18.12/arch/frv/mm/fault.c
+--- linux-3.18.12.orig/arch/frv/mm/fault.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/frv/mm/fault.c 2015-04-26 13:32:22.367684003 -0500
@@ -78,7 +78,7 @@
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -897,9 +897,9 @@ diff -Nur linux-3.18.10.orig/arch/frv/mm/fault.c linux-3.18.10/arch/frv/mm/fault
goto no_context;
if (user_mode(__frame))
-diff -Nur linux-3.18.10.orig/arch/ia64/mm/fault.c linux-3.18.10/arch/ia64/mm/fault.c
---- linux-3.18.10.orig/arch/ia64/mm/fault.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/ia64/mm/fault.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/ia64/mm/fault.c linux-3.18.12/arch/ia64/mm/fault.c
+--- linux-3.18.12.orig/arch/ia64/mm/fault.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/ia64/mm/fault.c 2015-04-26 13:32:22.367684003 -0500
@@ -96,7 +96,7 @@
/*
* If we're in an interrupt or have no user context, we must not take the fault..
@@ -909,9 +909,9 @@ diff -Nur linux-3.18.10.orig/arch/ia64/mm/fault.c linux-3.18.10/arch/ia64/mm/fau
goto no_context;
#ifdef CONFIG_VIRTUAL_MEM_MAP
-diff -Nur linux-3.18.10.orig/arch/Kconfig linux-3.18.10/arch/Kconfig
---- linux-3.18.10.orig/arch/Kconfig 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/Kconfig 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/arch/Kconfig linux-3.18.12/arch/Kconfig
+--- linux-3.18.12.orig/arch/Kconfig 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/Kconfig 2015-04-26 13:32:22.351684003 -0500
@@ -6,6 +6,7 @@
tristate "OProfile system profiling"
depends on PROFILING
@@ -920,9 +920,9 @@ diff -Nur linux-3.18.10.orig/arch/Kconfig linux-3.18.10/arch/Kconfig
select RING_BUFFER
select RING_BUFFER_ALLOW_SWAP
help
-diff -Nur linux-3.18.10.orig/arch/m32r/mm/fault.c linux-3.18.10/arch/m32r/mm/fault.c
---- linux-3.18.10.orig/arch/m32r/mm/fault.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/m32r/mm/fault.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/m32r/mm/fault.c linux-3.18.12/arch/m32r/mm/fault.c
+--- linux-3.18.12.orig/arch/m32r/mm/fault.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/m32r/mm/fault.c 2015-04-26 13:32:22.367684003 -0500
@@ -114,7 +114,7 @@
* If we're in an interrupt or have no user context or are running in an
* atomic region then we must not take the fault..
@@ -932,9 +932,9 @@ diff -Nur linux-3.18.10.orig/arch/m32r/mm/fault.c linux-3.18.10/arch/m32r/mm/fau
goto bad_area_nosemaphore;
if (error_code & ACE_USERMODE)
-diff -Nur linux-3.18.10.orig/arch/m68k/mm/fault.c linux-3.18.10/arch/m68k/mm/fault.c
---- linux-3.18.10.orig/arch/m68k/mm/fault.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/m68k/mm/fault.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/m68k/mm/fault.c linux-3.18.12/arch/m68k/mm/fault.c
+--- linux-3.18.12.orig/arch/m68k/mm/fault.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/m68k/mm/fault.c 2015-04-26 13:32:22.367684003 -0500
@@ -81,7 +81,7 @@
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -944,9 +944,9 @@ diff -Nur linux-3.18.10.orig/arch/m68k/mm/fault.c linux-3.18.10/arch/m68k/mm/fau
goto no_context;
if (user_mode(regs))
-diff -Nur linux-3.18.10.orig/arch/microblaze/mm/fault.c linux-3.18.10/arch/microblaze/mm/fault.c
---- linux-3.18.10.orig/arch/microblaze/mm/fault.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/microblaze/mm/fault.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/microblaze/mm/fault.c linux-3.18.12/arch/microblaze/mm/fault.c
+--- linux-3.18.12.orig/arch/microblaze/mm/fault.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/microblaze/mm/fault.c 2015-04-26 13:32:22.367684003 -0500
@@ -107,7 +107,7 @@
if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
is_write = 0;
@@ -956,9 +956,9 @@ diff -Nur linux-3.18.10.orig/arch/microblaze/mm/fault.c linux-3.18.10/arch/micro
if (kernel_mode(regs))
goto bad_area_nosemaphore;
-diff -Nur linux-3.18.10.orig/arch/mips/Kconfig linux-3.18.10/arch/mips/Kconfig
---- linux-3.18.10.orig/arch/mips/Kconfig 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/mips/Kconfig 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/mips/Kconfig linux-3.18.12/arch/mips/Kconfig
+--- linux-3.18.12.orig/arch/mips/Kconfig 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/mips/Kconfig 2015-04-26 13:32:22.367684003 -0500
@@ -2196,7 +2196,7 @@
#
config HIGHMEM
@@ -968,9 +968,9 @@ diff -Nur linux-3.18.10.orig/arch/mips/Kconfig linux-3.18.10/arch/mips/Kconfig
config CPU_SUPPORTS_HIGHMEM
bool
-diff -Nur linux-3.18.10.orig/arch/mips/kernel/signal.c linux-3.18.10/arch/mips/kernel/signal.c
---- linux-3.18.10.orig/arch/mips/kernel/signal.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/mips/kernel/signal.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/mips/kernel/signal.c linux-3.18.12/arch/mips/kernel/signal.c
+--- linux-3.18.12.orig/arch/mips/kernel/signal.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/mips/kernel/signal.c 2015-04-26 13:32:22.367684003 -0500
@@ -613,6 +613,7 @@
__u32 thread_info_flags)
{
@@ -979,9 +979,9 @@ diff -Nur linux-3.18.10.orig/arch/mips/kernel/signal.c linux-3.18.10/arch/mips/k
user_exit();
-diff -Nur linux-3.18.10.orig/arch/mips/mm/fault.c linux-3.18.10/arch/mips/mm/fault.c
---- linux-3.18.10.orig/arch/mips/mm/fault.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/mips/mm/fault.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/mips/mm/fault.c linux-3.18.12/arch/mips/mm/fault.c
+--- linux-3.18.12.orig/arch/mips/mm/fault.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/mips/mm/fault.c 2015-04-26 13:32:22.367684003 -0500
@@ -89,7 +89,7 @@
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -991,9 +991,9 @@ diff -Nur linux-3.18.10.orig/arch/mips/mm/fault.c linux-3.18.10/arch/mips/mm/fau
goto bad_area_nosemaphore;
if (user_mode(regs))
-diff -Nur linux-3.18.10.orig/arch/mips/mm/init.c linux-3.18.10/arch/mips/mm/init.c
---- linux-3.18.10.orig/arch/mips/mm/init.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/mips/mm/init.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/mips/mm/init.c linux-3.18.12/arch/mips/mm/init.c
+--- linux-3.18.12.orig/arch/mips/mm/init.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/mips/mm/init.c 2015-04-26 13:32:22.367684003 -0500
@@ -90,7 +90,7 @@
BUG_ON(Page_dcache_dirty(page));
@@ -1012,9 +1012,9 @@ diff -Nur linux-3.18.10.orig/arch/mips/mm/init.c linux-3.18.10/arch/mips/mm/init
}
void copy_user_highpage(struct page *to, struct page *from,
-diff -Nur linux-3.18.10.orig/arch/mn10300/mm/fault.c linux-3.18.10/arch/mn10300/mm/fault.c
---- linux-3.18.10.orig/arch/mn10300/mm/fault.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/mn10300/mm/fault.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/mn10300/mm/fault.c linux-3.18.12/arch/mn10300/mm/fault.c
+--- linux-3.18.12.orig/arch/mn10300/mm/fault.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/mn10300/mm/fault.c 2015-04-26 13:32:22.367684003 -0500
@@ -168,7 +168,7 @@
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -1024,9 +1024,9 @@ diff -Nur linux-3.18.10.orig/arch/mn10300/mm/fault.c linux-3.18.10/arch/mn10300/
goto no_context;
if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
-diff -Nur linux-3.18.10.orig/arch/parisc/mm/fault.c linux-3.18.10/arch/parisc/mm/fault.c
---- linux-3.18.10.orig/arch/parisc/mm/fault.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/parisc/mm/fault.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/parisc/mm/fault.c linux-3.18.12/arch/parisc/mm/fault.c
+--- linux-3.18.12.orig/arch/parisc/mm/fault.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/parisc/mm/fault.c 2015-04-26 13:32:22.367684003 -0500
@@ -207,7 +207,7 @@
int fault;
unsigned int flags;
@@ -1036,9 +1036,30 @@ diff -Nur linux-3.18.10.orig/arch/parisc/mm/fault.c linux-3.18.10/arch/parisc/mm
goto no_context;
tsk = current;
-diff -Nur linux-3.18.10.orig/arch/powerpc/include/asm/thread_info.h linux-3.18.10/arch/powerpc/include/asm/thread_info.h
---- linux-3.18.10.orig/arch/powerpc/include/asm/thread_info.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/powerpc/include/asm/thread_info.h 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/powerpc/include/asm/kvm_host.h linux-3.18.12/arch/powerpc/include/asm/kvm_host.h
+--- linux-3.18.12.orig/arch/powerpc/include/asm/kvm_host.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/powerpc/include/asm/kvm_host.h 2015-04-26 13:32:22.367684003 -0500
+@@ -296,7 +296,7 @@
+ u8 in_guest;
+ struct list_head runnable_threads;
+ spinlock_t lock;
+- wait_queue_head_t wq;
++ struct swait_head wq;
+ u64 stolen_tb;
+ u64 preempt_tb;
+ struct kvm_vcpu *runner;
+@@ -618,7 +618,7 @@
+ u8 prodded;
+ u32 last_inst;
+
+- wait_queue_head_t *wqp;
++ struct swait_head *wqp;
+ struct kvmppc_vcore *vcore;
+ int ret;
+ int trap;
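
Note: the kvm_host.h hunk above switches the vcore/vcpu wait queues to the RT tree's "simple waitqueues", which can be woken from contexts that must not take a sleeping lock. A minimal declaration/initialization sketch, assuming the swait API spelled out in these hunks (struct swait_head, init_swait_head) and the wait-simple.h header the RT series provides elsewhere:

    #include <linux/wait-simple.h>  /* RT-tree header; an assumption here */

    struct demo_vcore {
            spinlock_t lock;
            struct swait_head wq;   /* was: wait_queue_head_t wq; */
    };

    static void demo_vcore_init(struct demo_vcore *vc)
    {
            spin_lock_init(&vc->lock);
            init_swait_head(&vc->wq);  /* was: init_waitqueue_head(&vc->wq); */
    }
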
+diff -Nur linux-3.18.12.orig/arch/powerpc/include/asm/thread_info.h linux-3.18.12/arch/powerpc/include/asm/thread_info.h
+--- linux-3.18.12.orig/arch/powerpc/include/asm/thread_info.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/powerpc/include/asm/thread_info.h 2015-04-26 13:32:22.367684003 -0500
@@ -43,6 +43,8 @@
int cpu; /* cpu we're on */
int preempt_count; /* 0 => preemptable,
@@ -1085,9 +1106,9 @@ diff -Nur linux-3.18.10.orig/arch/powerpc/include/asm/thread_info.h linux-3.18.1
/* Bits in local_flags */
/* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
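
Note: the two lines this thread_info.h hunk adds (not expanded in the context above) introduce the preempt_lazy_count field that much of the series keys on: "lazy" preemption lets SCHED_OTHER tasks defer a reschedule while RT-class tasks still preempt immediately. The asm-offsets.c, entry_32.S and entry_64.S hunks below wire that field into the exception-return path. A hedged generic-C sketch of the counting discipline (helper names here are illustrative, not the series' exact ones):

    /* Illustrative only: enter/leave a lazy-preempt-disabled region. */
    static inline void demo_preempt_lazy_disable(void)
    {
            current_thread_info()->preempt_lazy_count++;
            barrier();
    }

    static inline void demo_preempt_lazy_enable(void)
    {
            barrier();
            current_thread_info()->preempt_lazy_count--;
    }
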
-diff -Nur linux-3.18.10.orig/arch/powerpc/Kconfig linux-3.18.10/arch/powerpc/Kconfig
---- linux-3.18.10.orig/arch/powerpc/Kconfig 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/powerpc/Kconfig 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/powerpc/Kconfig linux-3.18.12/arch/powerpc/Kconfig
+--- linux-3.18.12.orig/arch/powerpc/Kconfig 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/powerpc/Kconfig 2015-04-26 13:32:22.367684003 -0500
@@ -60,10 +60,11 @@
config RWSEM_GENERIC_SPINLOCK
@@ -1118,9 +1139,9 @@ diff -Nur linux-3.18.10.orig/arch/powerpc/Kconfig linux-3.18.10/arch/powerpc/Kco
source kernel/Kconfig.hz
source kernel/Kconfig.preempt
-diff -Nur linux-3.18.10.orig/arch/powerpc/kernel/asm-offsets.c linux-3.18.10/arch/powerpc/kernel/asm-offsets.c
---- linux-3.18.10.orig/arch/powerpc/kernel/asm-offsets.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/powerpc/kernel/asm-offsets.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/powerpc/kernel/asm-offsets.c linux-3.18.12/arch/powerpc/kernel/asm-offsets.c
+--- linux-3.18.12.orig/arch/powerpc/kernel/asm-offsets.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/powerpc/kernel/asm-offsets.c 2015-04-26 13:32:22.371684003 -0500
@@ -159,6 +159,7 @@
DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
@@ -1129,9 +1150,9 @@ diff -Nur linux-3.18.10.orig/arch/powerpc/kernel/asm-offsets.c linux-3.18.10/arc
DEFINE(TI_TASK, offsetof(struct thread_info, task));
DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
-diff -Nur linux-3.18.10.orig/arch/powerpc/kernel/entry_32.S linux-3.18.10/arch/powerpc/kernel/entry_32.S
---- linux-3.18.10.orig/arch/powerpc/kernel/entry_32.S 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/powerpc/kernel/entry_32.S 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/powerpc/kernel/entry_32.S linux-3.18.12/arch/powerpc/kernel/entry_32.S
+--- linux-3.18.12.orig/arch/powerpc/kernel/entry_32.S 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/powerpc/kernel/entry_32.S 2015-04-26 13:32:22.371684003 -0500
@@ -890,7 +890,14 @@
cmpwi 0,r0,0 /* if non-zero, just restore regs and return */
bne restore
@@ -1180,9 +1201,9 @@ diff -Nur linux-3.18.10.orig/arch/powerpc/kernel/entry_32.S linux-3.18.10/arch/p
bne- do_resched
andi. r0,r9,_TIF_USER_WORK_MASK
beq restore_user
-diff -Nur linux-3.18.10.orig/arch/powerpc/kernel/entry_64.S linux-3.18.10/arch/powerpc/kernel/entry_64.S
---- linux-3.18.10.orig/arch/powerpc/kernel/entry_64.S 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/powerpc/kernel/entry_64.S 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/powerpc/kernel/entry_64.S linux-3.18.12/arch/powerpc/kernel/entry_64.S
+--- linux-3.18.12.orig/arch/powerpc/kernel/entry_64.S 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/powerpc/kernel/entry_64.S 2015-04-26 13:32:22.371684003 -0500
@@ -644,7 +644,7 @@
#else
beq restore
@@ -1221,9 +1242,9 @@ diff -Nur linux-3.18.10.orig/arch/powerpc/kernel/entry_64.S linux-3.18.10/arch/p
bne 1b
/*
-diff -Nur linux-3.18.10.orig/arch/powerpc/kernel/irq.c linux-3.18.10/arch/powerpc/kernel/irq.c
---- linux-3.18.10.orig/arch/powerpc/kernel/irq.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/powerpc/kernel/irq.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/powerpc/kernel/irq.c linux-3.18.12/arch/powerpc/kernel/irq.c
+--- linux-3.18.12.orig/arch/powerpc/kernel/irq.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/powerpc/kernel/irq.c 2015-04-26 13:32:22.371684003 -0500
@@ -615,6 +615,7 @@
}
}
@@ -1240,9 +1261,9 @@ diff -Nur linux-3.18.10.orig/arch/powerpc/kernel/irq.c linux-3.18.10/arch/powerp
irq_hw_number_t virq_to_hw(unsigned int virq)
{
-diff -Nur linux-3.18.10.orig/arch/powerpc/kernel/misc_32.S linux-3.18.10/arch/powerpc/kernel/misc_32.S
---- linux-3.18.10.orig/arch/powerpc/kernel/misc_32.S 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/powerpc/kernel/misc_32.S 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/powerpc/kernel/misc_32.S linux-3.18.12/arch/powerpc/kernel/misc_32.S
+--- linux-3.18.12.orig/arch/powerpc/kernel/misc_32.S 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/powerpc/kernel/misc_32.S 2015-04-26 13:32:22.371684003 -0500
@@ -40,6 +40,7 @@
* We store the saved ksp_limit in the unused part
* of the STACK_FRAME_OVERHEAD
@@ -1259,9 +1280,9 @@ diff -Nur linux-3.18.10.orig/arch/powerpc/kernel/misc_32.S linux-3.18.10/arch/po
/*
* void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
-diff -Nur linux-3.18.10.orig/arch/powerpc/kernel/misc_64.S linux-3.18.10/arch/powerpc/kernel/misc_64.S
---- linux-3.18.10.orig/arch/powerpc/kernel/misc_64.S 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/powerpc/kernel/misc_64.S 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/powerpc/kernel/misc_64.S linux-3.18.12/arch/powerpc/kernel/misc_64.S
+--- linux-3.18.12.orig/arch/powerpc/kernel/misc_64.S 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/powerpc/kernel/misc_64.S 2015-04-26 13:32:22.371684003 -0500
@@ -29,6 +29,7 @@
.text
@@ -1278,21 +1299,85 @@ diff -Nur linux-3.18.10.orig/arch/powerpc/kernel/misc_64.S linux-3.18.10/arch/po
_GLOBAL(call_do_irq)
mflr r0
-diff -Nur linux-3.18.10.orig/arch/powerpc/kernel/time.c linux-3.18.10/arch/powerpc/kernel/time.c
---- linux-3.18.10.orig/arch/powerpc/kernel/time.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/powerpc/kernel/time.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/powerpc/kernel/time.c linux-3.18.12/arch/powerpc/kernel/time.c
+--- linux-3.18.12.orig/arch/powerpc/kernel/time.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/powerpc/kernel/time.c 2015-04-26 13:32:22.371684003 -0500
@@ -424,7 +424,7 @@
EXPORT_SYMBOL(profile_pc);
#endif
-#ifdef CONFIG_IRQ_WORK
-+#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
++#if defined(CONFIG_IRQ_WORK)
/*
* 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
-diff -Nur linux-3.18.10.orig/arch/powerpc/mm/fault.c linux-3.18.10/arch/powerpc/mm/fault.c
---- linux-3.18.10.orig/arch/powerpc/mm/fault.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/powerpc/mm/fault.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/powerpc/kvm/book3s_hv.c linux-3.18.12/arch/powerpc/kvm/book3s_hv.c
+--- linux-3.18.12.orig/arch/powerpc/kvm/book3s_hv.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/powerpc/kvm/book3s_hv.c 2015-04-26 13:32:22.371684003 -0500
+@@ -84,11 +84,11 @@
+ {
+ int me;
+ int cpu = vcpu->cpu;
+- wait_queue_head_t *wqp;
++ struct swait_head *wqp;
+
+ wqp = kvm_arch_vcpu_wq(vcpu);
+- if (waitqueue_active(wqp)) {
+- wake_up_interruptible(wqp);
++ if (swaitqueue_active(wqp)) {
++ swait_wake_interruptible(wqp);
+ ++vcpu->stat.halt_wakeup;
+ }
+
+@@ -639,8 +639,8 @@
+ tvcpu->arch.prodded = 1;
+ smp_mb();
+ if (vcpu->arch.ceded) {
+- if (waitqueue_active(&vcpu->wq)) {
+- wake_up_interruptible(&vcpu->wq);
++ if (swaitqueue_active(&vcpu->wq)) {
++ swait_wake_interruptible(&vcpu->wq);
+ vcpu->stat.halt_wakeup++;
+ }
+ }
+@@ -1357,7 +1357,7 @@
+
+ INIT_LIST_HEAD(&vcore->runnable_threads);
+ spin_lock_init(&vcore->lock);
+- init_waitqueue_head(&vcore->wq);
++ init_swait_head(&vcore->wq);
+ vcore->preempt_tb = TB_NIL;
+ vcore->lpcr = kvm->arch.lpcr;
+ vcore->first_vcpuid = core * threads_per_subcore;
+@@ -1826,13 +1826,13 @@
+ */
+ static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
+ {
+- DEFINE_WAIT(wait);
++ DEFINE_SWAITER(wait);
+
+- prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
++ swait_prepare(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+ vc->vcore_state = VCORE_SLEEPING;
+ spin_unlock(&vc->lock);
+ schedule();
+- finish_wait(&vc->wq, &wait);
++ swait_finish(&vc->wq, &wait);
+ spin_lock(&vc->lock);
+ vc->vcore_state = VCORE_INACTIVE;
+ }
+@@ -1873,7 +1873,7 @@
+ kvmppc_create_dtl_entry(vcpu, vc);
+ kvmppc_start_thread(vcpu);
+ } else if (vc->vcore_state == VCORE_SLEEPING) {
+- wake_up(&vc->wq);
++ swait_wake(&vc->wq);
+ }
+
+ }
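
Note: the book3s_hv.c hunk shows both halves of the simple-waitqueue pattern in one place: sleepers swap DEFINE_WAIT/prepare_to_wait/finish_wait for the swait equivalents, and wakers pair swaitqueue_active() with swait_wake*(). Condensed from the hunk above, using the demo type sketched earlier:

    static void demo_vcore_blocked(struct demo_vcore *vc)
    {
            DEFINE_SWAITER(wait);           /* was: DEFINE_WAIT(wait); */

            swait_prepare(&vc->wq, &wait, TASK_INTERRUPTIBLE);
            spin_unlock(&vc->lock);
            schedule();                     /* sleep until kicked */
            swait_finish(&vc->wq, &wait);
            spin_lock(&vc->lock);
    }

    static void demo_vcore_kick(struct demo_vcore *vc)
    {
            if (swaitqueue_active(&vc->wq)) /* was: waitqueue_active() */
                    swait_wake_interruptible(&vc->wq);
    }
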
+diff -Nur linux-3.18.12.orig/arch/powerpc/mm/fault.c linux-3.18.12/arch/powerpc/mm/fault.c
+--- linux-3.18.12.orig/arch/powerpc/mm/fault.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/powerpc/mm/fault.c 2015-04-26 13:32:22.371684003 -0500
@@ -273,7 +273,7 @@
if (!arch_irq_disabled_regs(regs))
local_irq_enable();
@@ -1302,9 +1387,58 @@ diff -Nur linux-3.18.10.orig/arch/powerpc/mm/fault.c linux-3.18.10/arch/powerpc/
if (!user_mode(regs)) {
rc = SIGSEGV;
goto bail;
-diff -Nur linux-3.18.10.orig/arch/s390/mm/fault.c linux-3.18.10/arch/s390/mm/fault.c
---- linux-3.18.10.orig/arch/s390/mm/fault.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/s390/mm/fault.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/s390/include/asm/kvm_host.h linux-3.18.12/arch/s390/include/asm/kvm_host.h
+--- linux-3.18.12.orig/arch/s390/include/asm/kvm_host.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/s390/include/asm/kvm_host.h 2015-04-26 13:32:22.371684003 -0500
+@@ -311,7 +311,7 @@
+ struct list_head list;
+ atomic_t active;
+ struct kvm_s390_float_interrupt *float_int;
+- wait_queue_head_t *wq;
++ struct swait_head *wq;
+ atomic_t *cpuflags;
+ unsigned int action_bits;
+ };
+diff -Nur linux-3.18.12.orig/arch/s390/kvm/interrupt.c linux-3.18.12/arch/s390/kvm/interrupt.c
+--- linux-3.18.12.orig/arch/s390/kvm/interrupt.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/s390/kvm/interrupt.c 2015-04-26 13:32:22.371684003 -0500
+@@ -619,13 +619,13 @@
+
+ void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
+ {
+- if (waitqueue_active(&vcpu->wq)) {
++ if (swaitqueue_active(&vcpu->wq)) {
+ /*
+ * The vcpu gave up the cpu voluntarily, mark it as a good
+ * yield-candidate.
+ */
+ vcpu->preempted = true;
+- wake_up_interruptible(&vcpu->wq);
++ swait_wake_interruptible(&vcpu->wq);
+ vcpu->stat.halt_wakeup++;
+ }
+ }
+@@ -746,7 +746,7 @@
+ spin_lock(&li->lock);
+ list_add(&inti->list, &li->list);
+ atomic_set(&li->active, 1);
+- BUG_ON(waitqueue_active(li->wq));
++ BUG_ON(swaitqueue_active(li->wq));
+ spin_unlock(&li->lock);
+ return 0;
+ }
+@@ -771,7 +771,7 @@
+ spin_lock(&li->lock);
+ list_add(&inti->list, &li->list);
+ atomic_set(&li->active, 1);
+- BUG_ON(waitqueue_active(li->wq));
++ BUG_ON(swaitqueue_active(li->wq));
+ spin_unlock(&li->lock);
+ return 0;
+ }
+diff -Nur linux-3.18.12.orig/arch/s390/mm/fault.c linux-3.18.12/arch/s390/mm/fault.c
+--- linux-3.18.12.orig/arch/s390/mm/fault.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/s390/mm/fault.c 2015-04-26 13:32:22.371684003 -0500
@@ -435,7 +435,8 @@
* user context.
*/
@@ -1315,9 +1449,9 @@ diff -Nur linux-3.18.10.orig/arch/s390/mm/fault.c linux-3.18.10/arch/s390/mm/fau
goto out;
address = trans_exc_code & __FAIL_ADDR_MASK;
-diff -Nur linux-3.18.10.orig/arch/score/mm/fault.c linux-3.18.10/arch/score/mm/fault.c
---- linux-3.18.10.orig/arch/score/mm/fault.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/score/mm/fault.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/score/mm/fault.c linux-3.18.12/arch/score/mm/fault.c
+--- linux-3.18.12.orig/arch/score/mm/fault.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/score/mm/fault.c 2015-04-26 13:32:22.371684003 -0500
@@ -73,7 +73,7 @@
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -1327,9 +1461,9 @@ diff -Nur linux-3.18.10.orig/arch/score/mm/fault.c linux-3.18.10/arch/score/mm/f
goto bad_area_nosemaphore;
if (user_mode(regs))
-diff -Nur linux-3.18.10.orig/arch/sh/kernel/irq.c linux-3.18.10/arch/sh/kernel/irq.c
---- linux-3.18.10.orig/arch/sh/kernel/irq.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/sh/kernel/irq.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/sh/kernel/irq.c linux-3.18.12/arch/sh/kernel/irq.c
+--- linux-3.18.12.orig/arch/sh/kernel/irq.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/sh/kernel/irq.c 2015-04-26 13:32:22.371684003 -0500
@@ -149,6 +149,7 @@
hardirq_ctx[cpu] = NULL;
}
@@ -1346,9 +1480,9 @@ diff -Nur linux-3.18.10.orig/arch/sh/kernel/irq.c linux-3.18.10/arch/sh/kernel/i
#else
static inline void handle_one_irq(unsigned int irq)
{
-diff -Nur linux-3.18.10.orig/arch/sh/mm/fault.c linux-3.18.10/arch/sh/mm/fault.c
---- linux-3.18.10.orig/arch/sh/mm/fault.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/sh/mm/fault.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/sh/mm/fault.c linux-3.18.12/arch/sh/mm/fault.c
+--- linux-3.18.12.orig/arch/sh/mm/fault.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/sh/mm/fault.c 2015-04-26 13:32:22.371684003 -0500
@@ -440,7 +440,7 @@
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
@@ -1358,9 +1492,9 @@ diff -Nur linux-3.18.10.orig/arch/sh/mm/fault.c linux-3.18.10/arch/sh/mm/fault.c
bad_area_nosemaphore(regs, error_code, address);
return;
}
-diff -Nur linux-3.18.10.orig/arch/sparc/Kconfig linux-3.18.10/arch/sparc/Kconfig
---- linux-3.18.10.orig/arch/sparc/Kconfig 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/sparc/Kconfig 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/sparc/Kconfig linux-3.18.12/arch/sparc/Kconfig
+--- linux-3.18.12.orig/arch/sparc/Kconfig 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/sparc/Kconfig 2015-04-26 13:32:22.371684003 -0500
@@ -182,12 +182,10 @@
source kernel/Kconfig.hz
@@ -1387,9 +1521,9 @@ diff -Nur linux-3.18.10.orig/arch/sparc/Kconfig linux-3.18.10/arch/sparc/Kconfig
config COMPAT
bool
depends on SPARC64
-diff -Nur linux-3.18.10.orig/arch/sparc/kernel/irq_64.c linux-3.18.10/arch/sparc/kernel/irq_64.c
---- linux-3.18.10.orig/arch/sparc/kernel/irq_64.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/sparc/kernel/irq_64.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/sparc/kernel/irq_64.c linux-3.18.12/arch/sparc/kernel/irq_64.c
+--- linux-3.18.12.orig/arch/sparc/kernel/irq_64.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/sparc/kernel/irq_64.c 2015-04-26 13:32:22.375684003 -0500
@@ -849,6 +849,7 @@
set_irq_regs(old_regs);
}
@@ -1406,25 +1540,9 @@ diff -Nur linux-3.18.10.orig/arch/sparc/kernel/irq_64.c linux-3.18.10/arch/sparc
#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(void)
-diff -Nur linux-3.18.10.orig/arch/sparc/kernel/pcr.c linux-3.18.10/arch/sparc/kernel/pcr.c
---- linux-3.18.10.orig/arch/sparc/kernel/pcr.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/sparc/kernel/pcr.c 2015-03-26 12:42:13.559582331 +0100
-@@ -43,10 +43,12 @@
- set_irq_regs(old_regs);
- }
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- void arch_irq_work_raise(void)
- {
- set_softint(1 << PIL_DEFERRED_PCR_WORK);
- }
-+#endif
-
- const struct pcr_ops *pcr_ops;
- EXPORT_SYMBOL_GPL(pcr_ops);
-diff -Nur linux-3.18.10.orig/arch/sparc/kernel/setup_32.c linux-3.18.10/arch/sparc/kernel/setup_32.c
---- linux-3.18.10.orig/arch/sparc/kernel/setup_32.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/sparc/kernel/setup_32.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/sparc/kernel/setup_32.c linux-3.18.12/arch/sparc/kernel/setup_32.c
+--- linux-3.18.12.orig/arch/sparc/kernel/setup_32.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/sparc/kernel/setup_32.c 2015-04-26 13:32:22.375684003 -0500
@@ -309,6 +309,7 @@
boot_flags_init(*cmdline_p);
@@ -1433,9 +1551,9 @@ diff -Nur linux-3.18.10.orig/arch/sparc/kernel/setup_32.c linux-3.18.10/arch/spa
register_console(&prom_early_console);
printk("ARCH: ");
-diff -Nur linux-3.18.10.orig/arch/sparc/kernel/setup_64.c linux-3.18.10/arch/sparc/kernel/setup_64.c
---- linux-3.18.10.orig/arch/sparc/kernel/setup_64.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/sparc/kernel/setup_64.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/sparc/kernel/setup_64.c linux-3.18.12/arch/sparc/kernel/setup_64.c
+--- linux-3.18.12.orig/arch/sparc/kernel/setup_64.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/sparc/kernel/setup_64.c 2015-04-26 13:32:22.375684003 -0500
@@ -563,6 +563,12 @@
pause_patch();
}
@@ -1458,9 +1576,9 @@ diff -Nur linux-3.18.10.orig/arch/sparc/kernel/setup_64.c linux-3.18.10/arch/spa
if (tlb_type == hypervisor)
printk("ARCH: SUN4V\n");
-diff -Nur linux-3.18.10.orig/arch/sparc/mm/fault_32.c linux-3.18.10/arch/sparc/mm/fault_32.c
---- linux-3.18.10.orig/arch/sparc/mm/fault_32.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/sparc/mm/fault_32.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/sparc/mm/fault_32.c linux-3.18.12/arch/sparc/mm/fault_32.c
+--- linux-3.18.12.orig/arch/sparc/mm/fault_32.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/sparc/mm/fault_32.c 2015-04-26 13:32:22.375684003 -0500
@@ -196,7 +196,7 @@
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -1470,9 +1588,9 @@ diff -Nur linux-3.18.10.orig/arch/sparc/mm/fault_32.c linux-3.18.10/arch/sparc/m
goto no_context;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-diff -Nur linux-3.18.10.orig/arch/sparc/mm/fault_64.c linux-3.18.10/arch/sparc/mm/fault_64.c
---- linux-3.18.10.orig/arch/sparc/mm/fault_64.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/sparc/mm/fault_64.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/sparc/mm/fault_64.c linux-3.18.12/arch/sparc/mm/fault_64.c
+--- linux-3.18.12.orig/arch/sparc/mm/fault_64.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/sparc/mm/fault_64.c 2015-04-26 13:32:22.375684003 -0500
@@ -330,7 +330,7 @@
* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -1482,9 +1600,9 @@ diff -Nur linux-3.18.10.orig/arch/sparc/mm/fault_64.c linux-3.18.10/arch/sparc/m
goto intr_or_no_mm;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
-diff -Nur linux-3.18.10.orig/arch/tile/mm/fault.c linux-3.18.10/arch/tile/mm/fault.c
---- linux-3.18.10.orig/arch/tile/mm/fault.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/tile/mm/fault.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/tile/mm/fault.c linux-3.18.12/arch/tile/mm/fault.c
+--- linux-3.18.12.orig/arch/tile/mm/fault.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/tile/mm/fault.c 2015-04-26 13:32:22.375684003 -0500
@@ -357,7 +357,7 @@
* If we're in an interrupt, have no user context or are running in an
* atomic region then we must not take the fault.
@@ -1494,9 +1612,9 @@ diff -Nur linux-3.18.10.orig/arch/tile/mm/fault.c linux-3.18.10/arch/tile/mm/fau
vma = NULL; /* happy compiler */
goto bad_area_nosemaphore;
}
-diff -Nur linux-3.18.10.orig/arch/um/kernel/trap.c linux-3.18.10/arch/um/kernel/trap.c
---- linux-3.18.10.orig/arch/um/kernel/trap.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/um/kernel/trap.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/um/kernel/trap.c linux-3.18.12/arch/um/kernel/trap.c
+--- linux-3.18.12.orig/arch/um/kernel/trap.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/um/kernel/trap.c 2015-04-26 13:32:22.375684003 -0500
@@ -38,7 +38,7 @@
* If the fault was during atomic operation, don't take the fault, just
* fail.
@@ -1506,9 +1624,9 @@ diff -Nur linux-3.18.10.orig/arch/um/kernel/trap.c linux-3.18.10/arch/um/kernel/
goto out_nosemaphore;
if (is_user)
-diff -Nur linux-3.18.10.orig/arch/x86/crypto/aesni-intel_glue.c linux-3.18.10/arch/x86/crypto/aesni-intel_glue.c
---- linux-3.18.10.orig/arch/x86/crypto/aesni-intel_glue.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/crypto/aesni-intel_glue.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/crypto/aesni-intel_glue.c linux-3.18.12/arch/x86/crypto/aesni-intel_glue.c
+--- linux-3.18.12.orig/arch/x86/crypto/aesni-intel_glue.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/crypto/aesni-intel_glue.c 2015-04-26 13:32:22.375684003 -0500
@@ -381,14 +381,14 @@
err = blkcipher_walk_virt(desc, &walk);
desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
@@ -1601,9 +1719,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/crypto/aesni-intel_glue.c linux-3.18.10/ar
return err;
}
-diff -Nur linux-3.18.10.orig/arch/x86/crypto/cast5_avx_glue.c linux-3.18.10/arch/x86/crypto/cast5_avx_glue.c
---- linux-3.18.10.orig/arch/x86/crypto/cast5_avx_glue.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/crypto/cast5_avx_glue.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/crypto/cast5_avx_glue.c linux-3.18.12/arch/x86/crypto/cast5_avx_glue.c
+--- linux-3.18.12.orig/arch/x86/crypto/cast5_avx_glue.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/crypto/cast5_avx_glue.c 2015-04-26 13:32:22.375684003 -0500
@@ -60,7 +60,7 @@
static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
bool enc)
@@ -1683,9 +1801,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/crypto/cast5_avx_glue.c linux-3.18.10/arch
if (walk.nbytes) {
ctr_crypt_final(desc, &walk);
err = blkcipher_walk_done(desc, &walk, 0);
-diff -Nur linux-3.18.10.orig/arch/x86/crypto/glue_helper.c linux-3.18.10/arch/x86/crypto/glue_helper.c
---- linux-3.18.10.orig/arch/x86/crypto/glue_helper.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/crypto/glue_helper.c 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/crypto/glue_helper.c linux-3.18.12/arch/x86/crypto/glue_helper.c
+--- linux-3.18.12.orig/arch/x86/crypto/glue_helper.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/crypto/glue_helper.c 2015-04-26 13:32:22.375684003 -0500
@@ -39,7 +39,7 @@
void *ctx = crypto_blkcipher_ctx(desc->tfm);
const unsigned int bsize = 128 / 8;
@@ -1801,9 +1919,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/crypto/glue_helper.c linux-3.18.10/arch/x8
return err;
}
EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
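
Note: the three x86 crypto hunks above (aesni-intel_glue.c, cast5_avx_glue.c, glue_helper.c) share one goal: FPU sections disable preemption, so instead of holding the FPU across a whole blkcipher request, the glue code now brackets each walk step. That bounds the preempt-off window by one chunk, which matters on RT. A condensed sketch of the resulting loop shape (cipher-specific details omitted; kernel_fpu_begin/end stand in for the per-cipher wrappers actually used):

    static int demo_ecb_crypt(struct blkcipher_desc *desc,
                              struct blkcipher_walk *walk)
    {
            int err = blkcipher_walk_virt(desc, walk);

            while (walk->nbytes) {
                    kernel_fpu_begin();  /* short, per-chunk FPU section */
                    /* ... encrypt walk->src.virt.addr into walk->dst.virt.addr ... */
                    kernel_fpu_end();    /* preemptible again between chunks */
                    err = blkcipher_walk_done(desc, walk, 0);
            }
            return err;
    }
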
-diff -Nur linux-3.18.10.orig/arch/x86/include/asm/preempt.h linux-3.18.10/arch/x86/include/asm/preempt.h
---- linux-3.18.10.orig/arch/x86/include/asm/preempt.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/include/asm/preempt.h 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/include/asm/preempt.h linux-3.18.12/arch/x86/include/asm/preempt.h
+--- linux-3.18.12.orig/arch/x86/include/asm/preempt.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/include/asm/preempt.h 2015-04-26 13:32:22.375684003 -0500
@@ -85,17 +85,33 @@
* a decrement which hits zero means we have no preempt_count and should
* reschedule.
@@ -1839,9 +1957,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/include/asm/preempt.h linux-3.18.10/arch/x
}
#ifdef CONFIG_PREEMPT
-diff -Nur linux-3.18.10.orig/arch/x86/include/asm/signal.h linux-3.18.10/arch/x86/include/asm/signal.h
---- linux-3.18.10.orig/arch/x86/include/asm/signal.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/include/asm/signal.h 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/include/asm/signal.h linux-3.18.12/arch/x86/include/asm/signal.h
+--- linux-3.18.12.orig/arch/x86/include/asm/signal.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/include/asm/signal.h 2015-04-26 13:32:22.375684003 -0500
@@ -23,6 +23,19 @@
unsigned long sig[_NSIG_WORDS];
} sigset_t;
@@ -1862,9 +1980,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/include/asm/signal.h linux-3.18.10/arch/x8
#ifndef CONFIG_COMPAT
typedef sigset_t compat_sigset_t;
#endif
-diff -Nur linux-3.18.10.orig/arch/x86/include/asm/stackprotector.h linux-3.18.10/arch/x86/include/asm/stackprotector.h
---- linux-3.18.10.orig/arch/x86/include/asm/stackprotector.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/include/asm/stackprotector.h 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/include/asm/stackprotector.h linux-3.18.12/arch/x86/include/asm/stackprotector.h
+--- linux-3.18.12.orig/arch/x86/include/asm/stackprotector.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/include/asm/stackprotector.h 2015-04-26 13:32:22.375684003 -0500
@@ -57,7 +57,7 @@
*/
static __always_inline void boot_init_stack_canary(void)
@@ -1891,9 +2009,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/include/asm/stackprotector.h linux-3.18.10
tsc = __native_read_tsc();
canary += tsc + (tsc << 32UL);
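
Note: the stackprotector hunk addresses an RT-specific ordering problem: boot_init_stack_canary() runs in a context that must not block, and on RT get_random_bytes() can take sleeping locks, so the random-pool call is guarded out there and the canary relies on the TSC mix-in alone. A sketch of the guarded form, matching the visible tail of the hunk:

    static __always_inline void demo_init_stack_canary(void)
    {
            u64 canary = 0;
            u64 tsc;

    #ifndef CONFIG_PREEMPT_RT_FULL
            /* May take sleeping locks on RT; skipped there. */
            get_random_bytes(&canary, sizeof(canary));
    #endif
            tsc = __native_read_tsc();
            canary += tsc + (tsc << 32UL);
            /* ... install canary in the per-cpu slot ... */
    }
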
-diff -Nur linux-3.18.10.orig/arch/x86/include/asm/thread_info.h linux-3.18.10/arch/x86/include/asm/thread_info.h
---- linux-3.18.10.orig/arch/x86/include/asm/thread_info.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/include/asm/thread_info.h 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/include/asm/thread_info.h linux-3.18.12/arch/x86/include/asm/thread_info.h
+--- linux-3.18.12.orig/arch/x86/include/asm/thread_info.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/include/asm/thread_info.h 2015-04-26 13:32:22.375684003 -0500
@@ -30,6 +30,8 @@
__u32 status; /* thread synchronous flags */
__u32 cpu; /* current CPU */
@@ -1928,9 +2046,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/include/asm/thread_info.h linux-3.18.10/ar
#define STACK_WARN (THREAD_SIZE/8)
#define KERNEL_STACK_OFFSET (5*(BITS_PER_LONG/8))
-diff -Nur linux-3.18.10.orig/arch/x86/include/asm/uv/uv_bau.h linux-3.18.10/arch/x86/include/asm/uv/uv_bau.h
---- linux-3.18.10.orig/arch/x86/include/asm/uv/uv_bau.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/include/asm/uv/uv_bau.h 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/include/asm/uv/uv_bau.h linux-3.18.12/arch/x86/include/asm/uv/uv_bau.h
+--- linux-3.18.12.orig/arch/x86/include/asm/uv/uv_bau.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/include/asm/uv/uv_bau.h 2015-04-26 13:32:22.375684003 -0500
@@ -615,9 +615,9 @@
cycles_t send_message;
cycles_t period_end;
@@ -1964,9 +2082,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/include/asm/uv/uv_bau.h linux-3.18.10/arch
return 1;
}
-diff -Nur linux-3.18.10.orig/arch/x86/include/asm/uv/uv_hub.h linux-3.18.10/arch/x86/include/asm/uv/uv_hub.h
---- linux-3.18.10.orig/arch/x86/include/asm/uv/uv_hub.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/include/asm/uv/uv_hub.h 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/include/asm/uv/uv_hub.h linux-3.18.12/arch/x86/include/asm/uv/uv_hub.h
+--- linux-3.18.12.orig/arch/x86/include/asm/uv/uv_hub.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/include/asm/uv/uv_hub.h 2015-04-26 13:32:22.375684003 -0500
@@ -492,7 +492,7 @@
unsigned short nr_online_cpus;
unsigned short pnode;
@@ -1976,9 +2094,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/include/asm/uv/uv_hub.h linux-3.18.10/arch
unsigned long nmi_count; /* obsolete, see uv_hub_nmi */
};
extern struct uv_blade_info *uv_blade_info;
-diff -Nur linux-3.18.10.orig/arch/x86/Kconfig linux-3.18.10/arch/x86/Kconfig
---- linux-3.18.10.orig/arch/x86/Kconfig 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/Kconfig 2015-03-26 12:42:13.559582331 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/Kconfig linux-3.18.12/arch/x86/Kconfig
+--- linux-3.18.12.orig/arch/x86/Kconfig 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/Kconfig 2015-04-26 13:32:22.375684003 -0500
@@ -21,6 +21,7 @@
### Arch settings
config X86
@@ -2009,9 +2127,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/Kconfig linux-3.18.10/arch/x86/Kconfig
---help---
Enable maximum number of CPUS and NUMA Nodes for this architecture.
If unsure, say N.
-diff -Nur linux-3.18.10.orig/arch/x86/kernel/apic/io_apic.c linux-3.18.10/arch/x86/kernel/apic/io_apic.c
---- linux-3.18.10.orig/arch/x86/kernel/apic/io_apic.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/kernel/apic/io_apic.c 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/kernel/apic/io_apic.c linux-3.18.12/arch/x86/kernel/apic/io_apic.c
+--- linux-3.18.12.orig/arch/x86/kernel/apic/io_apic.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/kernel/apic/io_apic.c 2015-04-26 13:32:22.379684003 -0500
@@ -2494,7 +2494,8 @@
static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
{
@@ -2022,9 +2140,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/kernel/apic/io_apic.c linux-3.18.10/arch/x
mask_ioapic(cfg);
return true;
}
-diff -Nur linux-3.18.10.orig/arch/x86/kernel/apic/x2apic_uv_x.c linux-3.18.10/arch/x86/kernel/apic/x2apic_uv_x.c
---- linux-3.18.10.orig/arch/x86/kernel/apic/x2apic_uv_x.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/kernel/apic/x2apic_uv_x.c 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/kernel/apic/x2apic_uv_x.c linux-3.18.12/arch/x86/kernel/apic/x2apic_uv_x.c
+--- linux-3.18.12.orig/arch/x86/kernel/apic/x2apic_uv_x.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/kernel/apic/x2apic_uv_x.c 2015-04-26 13:32:22.379684003 -0500
@@ -918,7 +918,7 @@
uv_blade_info[blade].pnode = pnode;
uv_blade_info[blade].nr_possible_cpus = 0;
@@ -2034,9 +2152,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/kernel/apic/x2apic_uv_x.c linux-3.18.10/ar
min_pnode = min(pnode, min_pnode);
max_pnode = max(pnode, max_pnode);
blade++;
-diff -Nur linux-3.18.10.orig/arch/x86/kernel/asm-offsets.c linux-3.18.10/arch/x86/kernel/asm-offsets.c
---- linux-3.18.10.orig/arch/x86/kernel/asm-offsets.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/kernel/asm-offsets.c 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/kernel/asm-offsets.c linux-3.18.12/arch/x86/kernel/asm-offsets.c
+--- linux-3.18.12.orig/arch/x86/kernel/asm-offsets.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/kernel/asm-offsets.c 2015-04-26 13:32:22.379684003 -0500
@@ -32,6 +32,7 @@
OFFSET(TI_flags, thread_info, flags);
OFFSET(TI_status, thread_info, status);
@@ -2051,9 +2169,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/kernel/asm-offsets.c linux-3.18.10/arch/x8
DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
+ DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
}
-diff -Nur linux-3.18.10.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-3.18.10/arch/x86/kernel/cpu/mcheck/mce.c
---- linux-3.18.10.orig/arch/x86/kernel/cpu/mcheck/mce.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/kernel/cpu/mcheck/mce.c 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-3.18.12/arch/x86/kernel/cpu/mcheck/mce.c
+--- linux-3.18.12.orig/arch/x86/kernel/cpu/mcheck/mce.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/kernel/cpu/mcheck/mce.c 2015-04-26 13:32:22.379684003 -0500
@@ -41,6 +41,8 @@
#include <linux/debugfs.h>
#include <linux/irq_work.h>
@@ -2306,9 +2424,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-3.18.10/arch
if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
err = -ENOMEM;
goto err_out;
-diff -Nur linux-3.18.10.orig/arch/x86/kernel/entry_32.S linux-3.18.10/arch/x86/kernel/entry_32.S
---- linux-3.18.10.orig/arch/x86/kernel/entry_32.S 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/kernel/entry_32.S 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/kernel/entry_32.S linux-3.18.12/arch/x86/kernel/entry_32.S
+--- linux-3.18.12.orig/arch/x86/kernel/entry_32.S 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/kernel/entry_32.S 2015-04-26 13:32:22.379684003 -0500
@@ -359,8 +359,24 @@
ENTRY(resume_kernel)
DISABLE_INTERRUPTS(CLBR_ANY)
@@ -2352,9 +2470,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/kernel/entry_32.S linux-3.18.10/arch/x86/k
jnz work_resched
work_notifysig: # deal with pending signals and
-diff -Nur linux-3.18.10.orig/arch/x86/kernel/entry_64.S linux-3.18.10/arch/x86/kernel/entry_64.S
---- linux-3.18.10.orig/arch/x86/kernel/entry_64.S 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/kernel/entry_64.S 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/kernel/entry_64.S linux-3.18.12/arch/x86/kernel/entry_64.S
+--- linux-3.18.12.orig/arch/x86/kernel/entry_64.S 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/kernel/entry_64.S 2015-04-26 13:32:22.379684003 -0500
@@ -454,8 +454,8 @@
/* Handle reschedules */
/* edx: work, edi: workmask */
@@ -2436,9 +2554,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/kernel/entry_64.S linux-3.18.10/arch/x86/k
jnz paranoid_schedule
movl %ebx,%edx /* arg3: thread flags */
TRACE_IRQS_ON
-diff -Nur linux-3.18.10.orig/arch/x86/kernel/irq_32.c linux-3.18.10/arch/x86/kernel/irq_32.c
---- linux-3.18.10.orig/arch/x86/kernel/irq_32.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/kernel/irq_32.c 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/kernel/irq_32.c linux-3.18.12/arch/x86/kernel/irq_32.c
+--- linux-3.18.12.orig/arch/x86/kernel/irq_32.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/kernel/irq_32.c 2015-04-26 13:32:22.379684003 -0500
@@ -142,6 +142,7 @@
cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu));
}
@@ -2455,25 +2573,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/kernel/irq_32.c linux-3.18.10/arch/x86/ker
bool handle_irq(unsigned irq, struct pt_regs *regs)
{
-diff -Nur linux-3.18.10.orig/arch/x86/kernel/irq_work.c linux-3.18.10/arch/x86/kernel/irq_work.c
---- linux-3.18.10.orig/arch/x86/kernel/irq_work.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/kernel/irq_work.c 2015-03-26 12:42:13.563582336 +0100
-@@ -38,6 +38,7 @@
- exiting_irq();
- }
-
-+#ifndef CONFIG_PREEMPT_RT_FULL
- void arch_irq_work_raise(void)
- {
- #ifdef CONFIG_X86_LOCAL_APIC
-@@ -48,3 +49,4 @@
- apic_wait_icr_idle();
- #endif
- }
-+#endif
-diff -Nur linux-3.18.10.orig/arch/x86/kernel/process_32.c linux-3.18.10/arch/x86/kernel/process_32.c
---- linux-3.18.10.orig/arch/x86/kernel/process_32.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/kernel/process_32.c 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/kernel/process_32.c linux-3.18.12/arch/x86/kernel/process_32.c
+--- linux-3.18.12.orig/arch/x86/kernel/process_32.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/kernel/process_32.c 2015-04-26 13:32:22.379684003 -0500
@@ -35,6 +35,7 @@
#include <linux/uaccess.h>
#include <linux/io.h>
@@ -2527,9 +2629,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/kernel/process_32.c linux-3.18.10/arch/x86
/*
* Leave lazy mode, flushing any hypercalls made here.
* This must be done before restoring TLS segments so
-diff -Nur linux-3.18.10.orig/arch/x86/kernel/signal.c linux-3.18.10/arch/x86/kernel/signal.c
---- linux-3.18.10.orig/arch/x86/kernel/signal.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/kernel/signal.c 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/kernel/signal.c linux-3.18.12/arch/x86/kernel/signal.c
+--- linux-3.18.12.orig/arch/x86/kernel/signal.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/kernel/signal.c 2015-04-26 13:32:22.379684003 -0500
@@ -746,6 +746,14 @@
mce_notify_process();
#endif /* CONFIG_X86_64 && CONFIG_X86_MCE */
@@ -2545,9 +2647,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/kernel/signal.c linux-3.18.10/arch/x86/ker
if (thread_info_flags & _TIF_UPROBE)
uprobe_notify_resume(regs);
-diff -Nur linux-3.18.10.orig/arch/x86/kernel/traps.c linux-3.18.10/arch/x86/kernel/traps.c
---- linux-3.18.10.orig/arch/x86/kernel/traps.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/kernel/traps.c 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/kernel/traps.c linux-3.18.12/arch/x86/kernel/traps.c
+--- linux-3.18.12.orig/arch/x86/kernel/traps.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/kernel/traps.c 2015-04-26 13:32:22.379684003 -0500
@@ -87,9 +87,21 @@
local_irq_enable();
}
@@ -2622,10 +2724,115 @@ diff -Nur linux-3.18.10.orig/arch/x86/kernel/traps.c linux-3.18.10/arch/x86/kern
debug_stack_usage_dec();
exit:
-diff -Nur linux-3.18.10.orig/arch/x86/kvm/x86.c linux-3.18.10/arch/x86/kvm/x86.c
---- linux-3.18.10.orig/arch/x86/kvm/x86.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/kvm/x86.c 2015-03-26 12:42:13.563582336 +0100
-@@ -5773,6 +5773,13 @@
+diff -Nur linux-3.18.12.orig/arch/x86/kvm/lapic.c linux-3.18.12/arch/x86/kvm/lapic.c
+--- linux-3.18.12.orig/arch/x86/kvm/lapic.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/kvm/lapic.c 2015-04-26 13:32:22.379684003 -0500
+@@ -1034,8 +1034,38 @@
+ apic->divide_count);
+ }
+
++
++static enum hrtimer_restart apic_timer_fn(struct hrtimer *data);
++
++static void apic_timer_expired(struct hrtimer *data)
++{
++ int ret, i = 0;
++ enum hrtimer_restart r;
++ struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
++
++ r = apic_timer_fn(data);
++
++ if (r == HRTIMER_RESTART) {
++ do {
++ ret = hrtimer_start_expires(data, HRTIMER_MODE_ABS);
++ if (ret == -ETIME)
++ hrtimer_add_expires_ns(&ktimer->timer,
++ ktimer->period);
++ i++;
++ } while (ret == -ETIME && i < 10);
++
++ if (ret == -ETIME) {
++ printk_once(KERN_ERR "%s: failed to reprogram timer\n",
++ __func__);
++ WARN_ON_ONCE(1);
++ }
++ }
++}
++
++
+ static void start_apic_timer(struct kvm_lapic *apic)
+ {
++ int ret;
+ ktime_t now;
+ atomic_set(&apic->lapic_timer.pending, 0);
+
+@@ -1065,9 +1095,11 @@
+ }
+ }
+
+- hrtimer_start(&apic->lapic_timer.timer,
++ ret = hrtimer_start(&apic->lapic_timer.timer,
+ ktime_add_ns(now, apic->lapic_timer.period),
+ HRTIMER_MODE_ABS);
++ if (ret == -ETIME)
++ apic_timer_expired(&apic->lapic_timer.timer);
+
+ apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
+ PRIx64 ", "
+@@ -1097,8 +1129,10 @@
+ ns = (tscdeadline - guest_tsc) * 1000000ULL;
+ do_div(ns, this_tsc_khz);
+ }
+- hrtimer_start(&apic->lapic_timer.timer,
++ ret = hrtimer_start(&apic->lapic_timer.timer,
+ ktime_add_ns(now, ns), HRTIMER_MODE_ABS);
++ if (ret == -ETIME)
++ apic_timer_expired(&apic->lapic_timer.timer);
+
+ local_irq_restore(flags);
+ }
+@@ -1539,7 +1573,7 @@
+ struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
+ struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
+ struct kvm_vcpu *vcpu = apic->vcpu;
+- wait_queue_head_t *q = &vcpu->wq;
++ struct swait_head *q = &vcpu->wq;
+
+ /*
+ * There is a race window between reading and incrementing, but we do
+@@ -1553,8 +1587,8 @@
+ kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
+ }
+
+- if (waitqueue_active(q))
+- wake_up_interruptible(q);
++ if (swaitqueue_active(q))
++ swait_wake_interruptible(q);
+
+ if (lapic_is_periodic(apic)) {
+ hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
+@@ -1587,6 +1621,7 @@
+ hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
+ HRTIMER_MODE_ABS);
+ apic->lapic_timer.timer.function = apic_timer_fn;
++ apic->lapic_timer.timer.irqsafe = 1;
+
+ /*
+ * APIC is created enabled. This will prevent kvm_lapic_set_base from
+@@ -1707,7 +1742,8 @@
+
+ timer = &vcpu->arch.apic->lapic_timer.timer;
+ if (hrtimer_cancel(timer))
+- hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
++ if (hrtimer_start_expires(timer, HRTIMER_MODE_ABS) == -ETIME)
++ apic_timer_expired(timer);
+ }
+
+ /*
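
Note: the lapic.c hunk above captures the RT hrtimer contract change: hrtimer_start() on an already-expired timer can return -ETIME instead of expiring it in place (expiry runs from softirq context on RT), so callers must invoke the expiry handler themselves; the hunk also marks the timer irqsafe. The caller-side pattern, lifted from the hunk:

    ret = hrtimer_start(&apic->lapic_timer.timer,
                        ktime_add_ns(now, apic->lapic_timer.period),
                        HRTIMER_MODE_ABS);
    if (ret == -ETIME)
            /* Already past expiry: run the handler ourselves. */
            apic_timer_expired(&apic->lapic_timer.timer);
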
+diff -Nur linux-3.18.12.orig/arch/x86/kvm/x86.c linux-3.18.12/arch/x86/kvm/x86.c
+--- linux-3.18.12.orig/arch/x86/kvm/x86.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/kvm/x86.c 2015-04-26 13:32:22.383684003 -0500
+@@ -5772,6 +5772,13 @@
goto out;
}
@@ -2639,9 +2846,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/kvm/x86.c linux-3.18.10/arch/x86/kvm/x86.c
r = kvm_mmu_module_init();
if (r)
goto out_free_percpu;
-diff -Nur linux-3.18.10.orig/arch/x86/mm/fault.c linux-3.18.10/arch/x86/mm/fault.c
---- linux-3.18.10.orig/arch/x86/mm/fault.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/mm/fault.c 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/mm/fault.c linux-3.18.12/arch/x86/mm/fault.c
+--- linux-3.18.12.orig/arch/x86/mm/fault.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/mm/fault.c 2015-04-26 13:32:22.383684003 -0500
@@ -1128,7 +1128,7 @@
* If we're in an interrupt, have no user context or are running
* in an atomic region then we must not take the fault:
@@ -2651,9 +2858,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/mm/fault.c linux-3.18.10/arch/x86/mm/fault
bad_area_nosemaphore(regs, error_code, address);
return;
}
-diff -Nur linux-3.18.10.orig/arch/x86/mm/highmem_32.c linux-3.18.10/arch/x86/mm/highmem_32.c
---- linux-3.18.10.orig/arch/x86/mm/highmem_32.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/mm/highmem_32.c 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/mm/highmem_32.c linux-3.18.12/arch/x86/mm/highmem_32.c
+--- linux-3.18.12.orig/arch/x86/mm/highmem_32.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/mm/highmem_32.c 2015-04-26 13:32:22.383684003 -0500
@@ -32,6 +32,7 @@
*/
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
@@ -2684,9 +2891,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/mm/highmem_32.c linux-3.18.10/arch/x86/mm/
kpte_clear_flush(kmap_pte-idx, vaddr);
kmap_atomic_idx_pop();
arch_flush_lazy_mmu_mode();
-diff -Nur linux-3.18.10.orig/arch/x86/mm/iomap_32.c linux-3.18.10/arch/x86/mm/iomap_32.c
---- linux-3.18.10.orig/arch/x86/mm/iomap_32.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/mm/iomap_32.c 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/mm/iomap_32.c linux-3.18.12/arch/x86/mm/iomap_32.c
+--- linux-3.18.12.orig/arch/x86/mm/iomap_32.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/mm/iomap_32.c 2015-04-26 13:32:22.383684003 -0500
@@ -56,6 +56,7 @@
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
@@ -2719,9 +2926,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/mm/iomap_32.c linux-3.18.10/arch/x86/mm/io
kpte_clear_flush(kmap_pte-idx, vaddr);
kmap_atomic_idx_pop();
}
-diff -Nur linux-3.18.10.orig/arch/x86/platform/uv/tlb_uv.c linux-3.18.10/arch/x86/platform/uv/tlb_uv.c
---- linux-3.18.10.orig/arch/x86/platform/uv/tlb_uv.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/platform/uv/tlb_uv.c 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/platform/uv/tlb_uv.c linux-3.18.12/arch/x86/platform/uv/tlb_uv.c
+--- linux-3.18.12.orig/arch/x86/platform/uv/tlb_uv.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/platform/uv/tlb_uv.c 2015-04-26 13:32:22.383684003 -0500
@@ -714,9 +714,9 @@
quiesce_local_uvhub(hmaster);
@@ -2808,9 +3015,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/platform/uv/tlb_uv.c linux-3.18.10/arch/x8
}
}
-diff -Nur linux-3.18.10.orig/arch/x86/platform/uv/uv_time.c linux-3.18.10/arch/x86/platform/uv/uv_time.c
---- linux-3.18.10.orig/arch/x86/platform/uv/uv_time.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/x86/platform/uv/uv_time.c 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/arch/x86/platform/uv/uv_time.c linux-3.18.12/arch/x86/platform/uv/uv_time.c
+--- linux-3.18.12.orig/arch/x86/platform/uv/uv_time.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/x86/platform/uv/uv_time.c 2015-04-26 13:32:22.383684003 -0500
@@ -58,7 +58,7 @@
/* There is one of these allocated per node */
@@ -2891,9 +3098,9 @@ diff -Nur linux-3.18.10.orig/arch/x86/platform/uv/uv_time.c linux-3.18.10/arch/x
}
/*
-diff -Nur linux-3.18.10.orig/arch/xtensa/mm/fault.c linux-3.18.10/arch/xtensa/mm/fault.c
---- linux-3.18.10.orig/arch/xtensa/mm/fault.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/arch/xtensa/mm/fault.c 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/arch/xtensa/mm/fault.c linux-3.18.12/arch/xtensa/mm/fault.c
+--- linux-3.18.12.orig/arch/xtensa/mm/fault.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/arch/xtensa/mm/fault.c 2015-04-26 13:32:22.383684003 -0500
@@ -57,7 +57,7 @@
/* If we're in an interrupt or have no user
* context, we must not take the fault..
@@ -2903,9 +3110,9 @@ diff -Nur linux-3.18.10.orig/arch/xtensa/mm/fault.c linux-3.18.10/arch/xtensa/mm
bad_page_fault(regs, address, SIGSEGV);
return;
}
-diff -Nur linux-3.18.10.orig/block/blk-core.c linux-3.18.10/block/blk-core.c
---- linux-3.18.10.orig/block/blk-core.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/block/blk-core.c 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/block/blk-core.c linux-3.18.12/block/blk-core.c
+--- linux-3.18.12.orig/block/blk-core.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/block/blk-core.c 2015-04-26 13:32:22.383684003 -0500
@@ -100,6 +100,9 @@
INIT_LIST_HEAD(&rq->queuelist);
@@ -2981,9 +3188,9 @@ diff -Nur linux-3.18.10.orig/block/blk-core.c linux-3.18.10/block/blk-core.c
}
void blk_finish_plug(struct blk_plug *plug)
-diff -Nur linux-3.18.10.orig/block/blk-ioc.c linux-3.18.10/block/blk-ioc.c
---- linux-3.18.10.orig/block/blk-ioc.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/block/blk-ioc.c 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/block/blk-ioc.c linux-3.18.12/block/blk-ioc.c
+--- linux-3.18.12.orig/block/blk-ioc.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/block/blk-ioc.c 2015-04-26 13:32:22.383684003 -0500
@@ -7,6 +7,7 @@
#include <linux/bio.h>
#include <linux/blkdev.h>
@@ -3010,9 +3217,9 @@ diff -Nur linux-3.18.10.orig/block/blk-ioc.c linux-3.18.10/block/blk-ioc.c
goto retry;
}
}
-diff -Nur linux-3.18.10.orig/block/blk-iopoll.c linux-3.18.10/block/blk-iopoll.c
---- linux-3.18.10.orig/block/blk-iopoll.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/block/blk-iopoll.c 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/block/blk-iopoll.c linux-3.18.12/block/blk-iopoll.c
+--- linux-3.18.12.orig/block/blk-iopoll.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/block/blk-iopoll.c 2015-04-26 13:32:22.383684003 -0500
@@ -35,6 +35,7 @@
list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
@@ -3037,9 +3244,9 @@ diff -Nur linux-3.18.10.orig/block/blk-iopoll.c linux-3.18.10/block/blk-iopoll.c
}
return NOTIFY_OK;
-diff -Nur linux-3.18.10.orig/block/blk-mq.c linux-3.18.10/block/blk-mq.c
---- linux-3.18.10.orig/block/blk-mq.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/block/blk-mq.c 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/block/blk-mq.c linux-3.18.12/block/blk-mq.c
+--- linux-3.18.12.orig/block/blk-mq.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/block/blk-mq.c 2015-04-26 13:32:22.383684003 -0500
@@ -85,7 +85,7 @@
if (percpu_ref_tryget_live(&q->mq_usage_counter))
return 0;
@@ -3080,7 +3287,7 @@ diff -Nur linux-3.18.10.orig/block/blk-mq.c linux-3.18.10/block/blk-mq.c
rq->resid_len = 0;
rq->sense = NULL;
-+#if CONFIG_PREEMPT_RT_FULL
++#ifdef CONFIG_PREEMPT_RT_FULL
+ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
INIT_LIST_HEAD(&rq->timeout_list);
@@ -3123,7 +3330,7 @@ diff -Nur linux-3.18.10.orig/block/blk-mq.c linux-3.18.10/block/blk-mq.c
shared = cpus_share_cache(cpu, ctx->cpu);
if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
-+#if CONFIG_PREEMPT_RT_FULL
++#ifdef CONFIG_PREEMPT_RT_FULL
+ schedule_work_on(ctx->cpu, &rq->work);
+#else
rq->csd.func = __blk_mq_complete_request_remote;
@@ -3184,9 +3391,9 @@ diff -Nur linux-3.18.10.orig/block/blk-mq.c linux-3.18.10/block/blk-mq.c
return blk_mq_hctx_cpu_offline(hctx, cpu);
else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
return blk_mq_hctx_cpu_online(hctx, cpu);
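
Note: the blk-mq.c hunks switch cross-CPU request completion under RT from the IPI-based csd path to a per-request work item: the request gains an INIT_WORK() in its init path, and the completion code calls schedule_work_on() for the owning CPU instead of arming rq->csd. A sketch of the dispatch site as the hunk shapes it:

    if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
    #ifdef CONFIG_PREEMPT_RT_FULL
            /* Complete in process context on the owning CPU. */
            schedule_work_on(ctx->cpu, &rq->work);
    #else
            rq->csd.func = __blk_mq_complete_request_remote;
            rq->csd.info = rq;
            smp_call_function_single_async(ctx->cpu, &rq->csd);
    #endif
    }
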
-diff -Nur linux-3.18.10.orig/block/blk-mq-cpu.c linux-3.18.10/block/blk-mq-cpu.c
---- linux-3.18.10.orig/block/blk-mq-cpu.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/block/blk-mq-cpu.c 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/block/blk-mq-cpu.c linux-3.18.12/block/blk-mq-cpu.c
+--- linux-3.18.12.orig/block/blk-mq-cpu.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/block/blk-mq-cpu.c 2015-04-26 13:32:22.383684003 -0500
@@ -16,7 +16,7 @@
#include "blk-mq.h"
@@ -3238,9 +3445,9 @@ diff -Nur linux-3.18.10.orig/block/blk-mq-cpu.c linux-3.18.10/block/blk-mq-cpu.c
}
void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
-diff -Nur linux-3.18.10.orig/block/blk-mq.h linux-3.18.10/block/blk-mq.h
---- linux-3.18.10.orig/block/blk-mq.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/block/blk-mq.h 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/block/blk-mq.h linux-3.18.12/block/blk-mq.h
+--- linux-3.18.12.orig/block/blk-mq.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/block/blk-mq.h 2015-04-26 13:32:22.383684003 -0500
@@ -73,7 +73,10 @@
static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
unsigned int cpu)
@@ -3268,9 +3475,9 @@ diff -Nur linux-3.18.10.orig/block/blk-mq.h linux-3.18.10/block/blk-mq.h
}
struct blk_mq_alloc_data {
-diff -Nur linux-3.18.10.orig/block/blk-softirq.c linux-3.18.10/block/blk-softirq.c
---- linux-3.18.10.orig/block/blk-softirq.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/block/blk-softirq.c 2015-03-26 12:42:13.563582336 +0100
+diff -Nur linux-3.18.12.orig/block/blk-softirq.c linux-3.18.12/block/blk-softirq.c
+--- linux-3.18.12.orig/block/blk-softirq.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/block/blk-softirq.c 2015-04-26 13:32:22.387684003 -0500
@@ -51,6 +51,7 @@
raise_softirq_irqoff(BLOCK_SOFTIRQ);
@@ -3295,9 +3502,9 @@ diff -Nur linux-3.18.10.orig/block/blk-softirq.c linux-3.18.10/block/blk-softirq
}
/**
-diff -Nur linux-3.18.10.orig/block/bounce.c linux-3.18.10/block/bounce.c
---- linux-3.18.10.orig/block/bounce.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/block/bounce.c 2015-03-26 12:42:13.567582340 +0100
+diff -Nur linux-3.18.12.orig/block/bounce.c linux-3.18.12/block/bounce.c
+--- linux-3.18.12.orig/block/bounce.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/block/bounce.c 2015-04-26 13:32:22.387684003 -0500
@@ -54,11 +54,11 @@
unsigned long flags;
unsigned char *vto;
@@ -3312,9 +3519,9 @@ diff -Nur linux-3.18.10.orig/block/bounce.c linux-3.18.10/block/bounce.c
}
#else /* CONFIG_HIGHMEM */
-diff -Nur linux-3.18.10.orig/crypto/algapi.c linux-3.18.10/crypto/algapi.c
---- linux-3.18.10.orig/crypto/algapi.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/crypto/algapi.c 2015-03-26 12:42:13.567582340 +0100
+diff -Nur linux-3.18.12.orig/crypto/algapi.c linux-3.18.12/crypto/algapi.c
+--- linux-3.18.12.orig/crypto/algapi.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/crypto/algapi.c 2015-04-26 13:32:22.387684003 -0500
@@ -698,13 +698,13 @@
int crypto_register_notifier(struct notifier_block *nb)
@@ -3331,9 +3538,9 @@ diff -Nur linux-3.18.10.orig/crypto/algapi.c linux-3.18.10/crypto/algapi.c
}
EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
-diff -Nur linux-3.18.10.orig/crypto/api.c linux-3.18.10/crypto/api.c
---- linux-3.18.10.orig/crypto/api.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/crypto/api.c 2015-03-26 12:42:13.567582340 +0100
+diff -Nur linux-3.18.12.orig/crypto/api.c linux-3.18.12/crypto/api.c
+--- linux-3.18.12.orig/crypto/api.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/crypto/api.c 2015-04-26 13:32:22.387684003 -0500
@@ -31,7 +31,7 @@
DECLARE_RWSEM(crypto_alg_sem);
EXPORT_SYMBOL_GPL(crypto_alg_sem);
@@ -3356,9 +3563,9 @@ diff -Nur linux-3.18.10.orig/crypto/api.c linux-3.18.10/crypto/api.c
}
return ok;
-diff -Nur linux-3.18.10.orig/crypto/internal.h linux-3.18.10/crypto/internal.h
---- linux-3.18.10.orig/crypto/internal.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/crypto/internal.h 2015-03-26 12:42:13.567582340 +0100
+diff -Nur linux-3.18.12.orig/crypto/internal.h linux-3.18.12/crypto/internal.h
+--- linux-3.18.12.orig/crypto/internal.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/crypto/internal.h 2015-04-26 13:32:22.387684003 -0500
@@ -48,7 +48,7 @@
extern struct list_head crypto_alg_list;
@@ -3377,9 +3584,9 @@ diff -Nur linux-3.18.10.orig/crypto/internal.h linux-3.18.10/crypto/internal.h
}
#endif /* _CRYPTO_INTERNAL_H */
-diff -Nur linux-3.18.10.orig/Documentation/hwlat_detector.txt linux-3.18.10/Documentation/hwlat_detector.txt
---- linux-3.18.10.orig/Documentation/hwlat_detector.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.10/Documentation/hwlat_detector.txt 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/Documentation/hwlat_detector.txt linux-3.18.12/Documentation/hwlat_detector.txt
+--- linux-3.18.12.orig/Documentation/hwlat_detector.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.12/Documentation/hwlat_detector.txt 2015-04-26 13:32:22.347684003 -0500
@@ -0,0 +1,64 @@
+Introduction:
+-------------
@@ -3445,9 +3652,9 @@ diff -Nur linux-3.18.10.orig/Documentation/hwlat_detector.txt linux-3.18.10/Docu
+observe any latencies that exceed the threshold (initially 100 usecs),
+then we write to a global sample ring buffer of 8K samples, which is
+consumed by reading from the "sample" (pipe) debugfs file interface.
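
The "sample" pipe described above can be drained by any blocking reader once debugfs is mounted. A minimal userspace sketch, assuming the usual /sys/kernel/debug mount point and the file layout named in this documentation text:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[256];
            ssize_t n;
            /* path assumed from the documentation above */
            int fd = open("/sys/kernel/debug/hwlat_detector/sample", O_RDONLY);

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            /* each read blocks until the detector records a latency sample */
            while ((n = read(fd, buf, sizeof(buf) - 1)) > 0) {
                    buf[n] = '\0';
                    fputs(buf, stdout);
            }
            close(fd);
            return 0;
    }
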
-diff -Nur linux-3.18.10.orig/Documentation/sysrq.txt linux-3.18.10/Documentation/sysrq.txt
---- linux-3.18.10.orig/Documentation/sysrq.txt 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/Documentation/sysrq.txt 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/Documentation/sysrq.txt linux-3.18.12/Documentation/sysrq.txt
+--- linux-3.18.12.orig/Documentation/sysrq.txt 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/Documentation/sysrq.txt 2015-04-26 13:32:22.347684003 -0500
@@ -59,10 +59,17 @@
On other - If you know of the key combos for other architectures, please
let me know so I can add them to this section.
@@ -3468,9 +3675,9 @@ diff -Nur linux-3.18.10.orig/Documentation/sysrq.txt linux-3.18.10/Documentation
* What are the 'command' keys?
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'b' - Will immediately reboot the system without syncing or unmounting
-diff -Nur linux-3.18.10.orig/Documentation/trace/histograms.txt linux-3.18.10/Documentation/trace/histograms.txt
---- linux-3.18.10.orig/Documentation/trace/histograms.txt 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.10/Documentation/trace/histograms.txt 2015-03-26 12:42:13.555582327 +0100
+diff -Nur linux-3.18.12.orig/Documentation/trace/histograms.txt linux-3.18.12/Documentation/trace/histograms.txt
+--- linux-3.18.12.orig/Documentation/trace/histograms.txt 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.12/Documentation/trace/histograms.txt 2015-04-26 13:32:22.351684003 -0500
@@ -0,0 +1,186 @@
+ Using the Linux Kernel Latency Histograms
+
@@ -3658,9 +3865,9 @@ diff -Nur linux-3.18.10.orig/Documentation/trace/histograms.txt linux-3.18.10/Do
+is provided.
+
+These data are also reset when the wakeup histogram is reset.
-diff -Nur linux-3.18.10.orig/drivers/acpi/acpica/acglobal.h linux-3.18.10/drivers/acpi/acpica/acglobal.h
---- linux-3.18.10.orig/drivers/acpi/acpica/acglobal.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/acpi/acpica/acglobal.h 2015-03-26 12:42:13.567582340 +0100
+diff -Nur linux-3.18.12.orig/drivers/acpi/acpica/acglobal.h linux-3.18.12/drivers/acpi/acpica/acglobal.h
+--- linux-3.18.12.orig/drivers/acpi/acpica/acglobal.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/acpi/acpica/acglobal.h 2015-04-26 13:32:22.387684003 -0500
@@ -112,7 +112,7 @@
* interrupt level
*/
@@ -3670,9 +3877,9 @@ diff -Nur linux-3.18.10.orig/drivers/acpi/acpica/acglobal.h linux-3.18.10/driver
ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock);
/* Mutex for _OSI support */
-diff -Nur linux-3.18.10.orig/drivers/acpi/acpica/hwregs.c linux-3.18.10/drivers/acpi/acpica/hwregs.c
---- linux-3.18.10.orig/drivers/acpi/acpica/hwregs.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/acpi/acpica/hwregs.c 2015-03-26 12:42:13.567582340 +0100
+diff -Nur linux-3.18.12.orig/drivers/acpi/acpica/hwregs.c linux-3.18.12/drivers/acpi/acpica/hwregs.c
+--- linux-3.18.12.orig/drivers/acpi/acpica/hwregs.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/acpi/acpica/hwregs.c 2015-04-26 13:32:22.387684003 -0500
@@ -269,14 +269,14 @@
ACPI_BITMASK_ALL_FIXED_STATUS,
ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
@@ -3690,9 +3897,9 @@ diff -Nur linux-3.18.10.orig/drivers/acpi/acpica/hwregs.c linux-3.18.10/drivers/
if (ACPI_FAILURE(status)) {
goto exit;
-diff -Nur linux-3.18.10.orig/drivers/acpi/acpica/hwxface.c linux-3.18.10/drivers/acpi/acpica/hwxface.c
---- linux-3.18.10.orig/drivers/acpi/acpica/hwxface.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/acpi/acpica/hwxface.c 2015-03-26 12:42:13.567582340 +0100
+diff -Nur linux-3.18.12.orig/drivers/acpi/acpica/hwxface.c linux-3.18.12/drivers/acpi/acpica/hwxface.c
+--- linux-3.18.12.orig/drivers/acpi/acpica/hwxface.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/acpi/acpica/hwxface.c 2015-04-26 13:32:22.387684003 -0500
@@ -374,7 +374,7 @@
return_ACPI_STATUS(AE_BAD_PARAMETER);
}
@@ -3711,9 +3918,9 @@ diff -Nur linux-3.18.10.orig/drivers/acpi/acpica/hwxface.c linux-3.18.10/drivers
return_ACPI_STATUS(status);
}
-diff -Nur linux-3.18.10.orig/drivers/acpi/acpica/utmutex.c linux-3.18.10/drivers/acpi/acpica/utmutex.c
---- linux-3.18.10.orig/drivers/acpi/acpica/utmutex.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/acpi/acpica/utmutex.c 2015-03-26 12:42:13.567582340 +0100
+diff -Nur linux-3.18.12.orig/drivers/acpi/acpica/utmutex.c linux-3.18.12/drivers/acpi/acpica/utmutex.c
+--- linux-3.18.12.orig/drivers/acpi/acpica/utmutex.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/acpi/acpica/utmutex.c 2015-04-26 13:32:22.387684003 -0500
@@ -88,7 +88,7 @@
return_ACPI_STATUS (status);
}
@@ -3732,9 +3939,9 @@ diff -Nur linux-3.18.10.orig/drivers/acpi/acpica/utmutex.c linux-3.18.10/drivers
acpi_os_delete_lock(acpi_gbl_reference_count_lock);
/* Delete the reader/writer lock */
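
The ACPICA hunks above follow a standard RT-series recipe: a lock taken in genuinely atomic, interrupts-off paths is converted to a raw_spinlock_t, which keeps true spinning semantics even on PREEMPT_RT, where a plain spinlock_t becomes a sleeping lock. A minimal sketch of the pattern, with hw_lock as a stand-in name for the converted ACPI lock:

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(hw_lock);   /* stand-in name for illustration */

    static void touch_hw_registers(void)
    {
            unsigned long flags;

            /* never sleeps, even with PREEMPT_RT_FULL enabled */
            raw_spin_lock_irqsave(&hw_lock, flags);
            /* ... access the hardware registers ... */
            raw_spin_unlock_irqrestore(&hw_lock, flags);
    }
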
-diff -Nur linux-3.18.10.orig/drivers/ata/libata-sff.c linux-3.18.10/drivers/ata/libata-sff.c
---- linux-3.18.10.orig/drivers/ata/libata-sff.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/ata/libata-sff.c 2015-03-26 12:42:13.567582340 +0100
+diff -Nur linux-3.18.12.orig/drivers/ata/libata-sff.c linux-3.18.12/drivers/ata/libata-sff.c
+--- linux-3.18.12.orig/drivers/ata/libata-sff.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/ata/libata-sff.c 2015-04-26 13:32:22.387684003 -0500
@@ -678,9 +678,9 @@
unsigned long flags;
unsigned int consumed;
@@ -3783,9 +3990,9 @@ diff -Nur linux-3.18.10.orig/drivers/ata/libata-sff.c linux-3.18.10/drivers/ata/
} else {
buf = page_address(page);
consumed = ap->ops->sff_data_xfer(dev, buf + offset,
-diff -Nur linux-3.18.10.orig/drivers/char/random.c linux-3.18.10/drivers/char/random.c
---- linux-3.18.10.orig/drivers/char/random.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/char/random.c 2015-03-26 12:42:13.567582340 +0100
+diff -Nur linux-3.18.12.orig/drivers/char/random.c linux-3.18.12/drivers/char/random.c
+--- linux-3.18.12.orig/drivers/char/random.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/char/random.c 2015-04-26 13:32:22.387684003 -0500
@@ -776,8 +776,6 @@
} sample;
long delta, delta2, delta3;
@@ -3837,9 +4044,9 @@ diff -Nur linux-3.18.10.orig/drivers/char/random.c linux-3.18.10/drivers/char/ra
fast_mix(fast_pool);
add_interrupt_bench(cycles);
-diff -Nur linux-3.18.10.orig/drivers/clocksource/tcb_clksrc.c linux-3.18.10/drivers/clocksource/tcb_clksrc.c
---- linux-3.18.10.orig/drivers/clocksource/tcb_clksrc.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/clocksource/tcb_clksrc.c 2015-03-26 12:42:13.567582340 +0100
+diff -Nur linux-3.18.12.orig/drivers/clocksource/tcb_clksrc.c linux-3.18.12/drivers/clocksource/tcb_clksrc.c
+--- linux-3.18.12.orig/drivers/clocksource/tcb_clksrc.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/clocksource/tcb_clksrc.c 2015-04-26 13:32:22.387684003 -0500
@@ -23,8 +23,7 @@
* this 32 bit free-running counter. the second channel is not used.
*
@@ -3954,9 +4161,9 @@ diff -Nur linux-3.18.10.orig/drivers/clocksource/tcb_clksrc.c linux-3.18.10/driv
if (ret)
goto err_unregister_clksrc;
-diff -Nur linux-3.18.10.orig/drivers/clocksource/timer-atmel-pit.c linux-3.18.10/drivers/clocksource/timer-atmel-pit.c
---- linux-3.18.10.orig/drivers/clocksource/timer-atmel-pit.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/clocksource/timer-atmel-pit.c 2015-03-26 12:42:13.567582340 +0100
+diff -Nur linux-3.18.12.orig/drivers/clocksource/timer-atmel-pit.c linux-3.18.12/drivers/clocksource/timer-atmel-pit.c
+--- linux-3.18.12.orig/drivers/clocksource/timer-atmel-pit.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/clocksource/timer-atmel-pit.c 2015-04-26 13:32:22.387684003 -0500
@@ -90,6 +90,7 @@
return elapsed;
}
@@ -3982,9 +4189,21 @@ diff -Nur linux-3.18.10.orig/drivers/clocksource/timer-atmel-pit.c linux-3.18.10
break;
case CLOCK_EVT_MODE_RESUME:
break;
-diff -Nur linux-3.18.10.orig/drivers/gpio/gpio-omap.c linux-3.18.10/drivers/gpio/gpio-omap.c
---- linux-3.18.10.orig/drivers/gpio/gpio-omap.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/gpio/gpio-omap.c 2015-03-26 12:42:13.567582340 +0100
+diff -Nur linux-3.18.12.orig/drivers/cpufreq/Kconfig.x86 linux-3.18.12/drivers/cpufreq/Kconfig.x86
+--- linux-3.18.12.orig/drivers/cpufreq/Kconfig.x86 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/cpufreq/Kconfig.x86 2015-04-26 13:32:22.387684003 -0500
+@@ -113,7 +113,7 @@
+
+ config X86_POWERNOW_K8
+ tristate "AMD Opteron/Athlon64 PowerNow!"
+- depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
++ depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ && !PREEMPT_RT_BASE
+ help
+ This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
+ Support for K10 and newer processors is now in acpi-cpufreq.
+diff -Nur linux-3.18.12.orig/drivers/gpio/gpio-omap.c linux-3.18.12/drivers/gpio/gpio-omap.c
+--- linux-3.18.12.orig/drivers/gpio/gpio-omap.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/gpio/gpio-omap.c 2015-04-26 13:32:22.387684003 -0500
@@ -57,7 +57,7 @@
u32 saved_datain;
u32 level_mask;
@@ -4259,9 +4478,9 @@ diff -Nur linux-3.18.10.orig/drivers/gpio/gpio-omap.c linux-3.18.10/drivers/gpio
return 0;
}
-diff -Nur linux-3.18.10.orig/drivers/gpu/drm/i915/i915_gem.c linux-3.18.10/drivers/gpu/drm/i915/i915_gem.c
---- linux-3.18.10.orig/drivers/gpu/drm/i915/i915_gem.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/gpu/drm/i915/i915_gem.c 2015-03-26 12:42:13.567582340 +0100
+diff -Nur linux-3.18.12.orig/drivers/gpu/drm/i915/i915_gem.c linux-3.18.12/drivers/gpu/drm/i915/i915_gem.c
+--- linux-3.18.12.orig/drivers/gpu/drm/i915/i915_gem.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/gpu/drm/i915/i915_gem.c 2015-04-26 13:32:22.391684003 -0500
@@ -5144,7 +5144,7 @@
if (!mutex_is_locked(mutex))
return false;
@@ -4271,9 +4490,9 @@ diff -Nur linux-3.18.10.orig/drivers/gpu/drm/i915/i915_gem.c linux-3.18.10/drive
return mutex->owner == task;
#else
/* Since UP may be pre-empted, we cannot assume that we own the lock */
-diff -Nur linux-3.18.10.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.18.10/drivers/gpu/drm/i915/i915_gem_execbuffer.c
---- linux-3.18.10.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2015-03-26 12:42:13.567582340 +0100
+diff -Nur linux-3.18.12.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.18.12/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+--- linux-3.18.12.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2015-04-26 13:32:22.391684003 -0500
@@ -1170,7 +1170,9 @@
return ret;
}
@@ -4284,148 +4503,9 @@ diff -Nur linux-3.18.10.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-3.
i915_gem_execbuffer_move_to_active(vmas, ring);
i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
-diff -Nur linux-3.18.10.orig/drivers/gpu/drm/radeon/evergreen.c linux-3.18.10/drivers/gpu/drm/radeon/evergreen.c
---- linux-3.18.10.orig/drivers/gpu/drm/radeon/evergreen.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/gpu/drm/radeon/evergreen.c 2015-03-26 12:42:16.827586167 +0100
-@@ -4589,6 +4589,9 @@
- WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, afmt5);
- WREG32(AFMT_AUDIO_PACKET_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, afmt6);
-
-+ /* posting read */
-+ RREG32(SRBM_STATUS);
-+
- return 0;
- }
-
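
The radeon hunks removed here and below (evergreen.c, r600.c, radeon_fence.c, rs600.c, si.c) drop out of the refreshed patch, presumably because equivalent fixes landed upstream between 3.18.10 and 3.18.12. The hunk above adds a "posting read": on PCI/PCIe, MMIO writes can sit in posting buffers, and reading any register back from the same device forces them to complete. Sketched with the driver's own accessors as they appear in these hunks:

    WREG32(AFMT_AUDIO_PACKET_CONTROL + offset, value);  /* write may be posted */
    (void)RREG32(SRBM_STATUS);   /* posting read: flushes the queued writes */
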
-diff -Nur linux-3.18.10.orig/drivers/gpu/drm/radeon/r600.c linux-3.18.10/drivers/gpu/drm/radeon/r600.c
---- linux-3.18.10.orig/drivers/gpu/drm/radeon/r600.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/gpu/drm/radeon/r600.c 2015-03-26 12:42:18.651588307 +0100
-@@ -3787,6 +3787,9 @@
- WREG32(RV770_CG_THERMAL_INT, thermal_int);
- }
-
-+ /* posting read */
-+ RREG32(R_000E50_SRBM_STATUS);
-+
- return 0;
- }
-
-diff -Nur linux-3.18.10.orig/drivers/gpu/drm/radeon/radeon_fence.c linux-3.18.10/drivers/gpu/drm/radeon/radeon_fence.c
---- linux-3.18.10.orig/drivers/gpu/drm/radeon/radeon_fence.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/gpu/drm/radeon/radeon_fence.c 2015-03-26 12:42:18.651588307 +0100
-@@ -1029,37 +1029,59 @@
- return test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags);
- }
-
-+struct radeon_wait_cb {
-+ struct fence_cb base;
-+ struct task_struct *task;
-+};
-+
-+static void
-+radeon_fence_wait_cb(struct fence *fence, struct fence_cb *cb)
-+{
-+ struct radeon_wait_cb *wait =
-+ container_of(cb, struct radeon_wait_cb, base);
-+
-+ wake_up_process(wait->task);
-+}
-+
- static signed long radeon_fence_default_wait(struct fence *f, bool intr,
- signed long t)
- {
- struct radeon_fence *fence = to_radeon_fence(f);
- struct radeon_device *rdev = fence->rdev;
-- bool signaled;
-+ struct radeon_wait_cb cb;
-+
-+ cb.task = current;
-+
-+ if (fence_add_callback(f, &cb.base, radeon_fence_wait_cb))
-+ return t;
-
-- fence_enable_sw_signaling(&fence->base);
-+ while (t > 0) {
-+ if (intr)
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ else
-+ set_current_state(TASK_UNINTERRUPTIBLE);
-+
-+ /*
-+ * radeon_test_signaled must be called after
-+ * set_current_state to prevent a race with wake_up_process
-+ */
-+ if (radeon_test_signaled(fence))
-+ break;
-+
-+ if (rdev->needs_reset) {
-+ t = -EDEADLK;
-+ break;
-+ }
-+
-+ t = schedule_timeout(t);
-+
-+ if (t > 0 && intr && signal_pending(current))
-+ t = -ERESTARTSYS;
-+ }
-
-- /*
-- * This function has to return -EDEADLK, but cannot hold
-- * exclusive_lock during the wait because some callers
-- * may already hold it. This means checking needs_reset without
-- * lock, and not fiddling with any gpu internals.
-- *
-- * The callback installed with fence_enable_sw_signaling will
-- * run before our wait_event_*timeout call, so we will see
-- * both the signaled fence and the changes to needs_reset.
-- */
--
-- if (intr)
-- t = wait_event_interruptible_timeout(rdev->fence_queue,
-- ((signaled = radeon_test_signaled(fence)) ||
-- rdev->needs_reset), t);
-- else
-- t = wait_event_timeout(rdev->fence_queue,
-- ((signaled = radeon_test_signaled(fence)) ||
-- rdev->needs_reset), t);
-+ __set_current_state(TASK_RUNNING);
-+ fence_remove_callback(f, &cb.base);
-
-- if (t > 0 && !signaled)
-- return -EDEADLK;
- return t;
- }
-
-diff -Nur linux-3.18.10.orig/drivers/gpu/drm/radeon/rs600.c linux-3.18.10/drivers/gpu/drm/radeon/rs600.c
---- linux-3.18.10.orig/drivers/gpu/drm/radeon/rs600.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/gpu/drm/radeon/rs600.c 2015-03-26 12:42:18.651588307 +0100
-@@ -693,6 +693,10 @@
- WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
- if (ASIC_IS_DCE2(rdev))
- WREG32(R_007408_HDMI0_AUDIO_PACKET_CONTROL, hdmi0);
-+
-+ /* posting read */
-+ RREG32(R_000040_GEN_INT_CNTL);
-+
- return 0;
- }
-
-diff -Nur linux-3.18.10.orig/drivers/gpu/drm/radeon/si.c linux-3.18.10/drivers/gpu/drm/radeon/si.c
---- linux-3.18.10.orig/drivers/gpu/drm/radeon/si.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/gpu/drm/radeon/si.c 2015-03-26 12:42:18.655588312 +0100
-@@ -6192,6 +6192,9 @@
-
- WREG32(CG_THERMAL_INT, thermal_int);
-
-+ /* posting read */
-+ RREG32(SRBM_STATUS);
-+
- return 0;
- }
-
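
The largest removed hunk above (radeon_fence.c) had replaced a wait_event-based wait with an open-coded sleep loop, and its in-line comment names the crucial ordering: the wake condition must be re-checked after set_current_state(), otherwise a wake_up_process() arriving between the check and the state change could be lost. The skeleton of that pattern, with "condition" and "timeout" as placeholders:

    signed long t = timeout;

    for (;;) {
            set_current_state(TASK_UNINTERRUPTIBLE);
            if (condition)          /* re-check AFTER the state change */
                    break;
            t = schedule_timeout(t);
            if (t == 0)
                    break;          /* timed out */
    }
    __set_current_state(TASK_RUNNING);
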
-diff -Nur linux-3.18.10.orig/drivers/i2c/busses/i2c-omap.c linux-3.18.10/drivers/i2c/busses/i2c-omap.c
---- linux-3.18.10.orig/drivers/i2c/busses/i2c-omap.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/i2c/busses/i2c-omap.c 2015-03-26 12:42:18.655588312 +0100
+diff -Nur linux-3.18.12.orig/drivers/i2c/busses/i2c-omap.c linux-3.18.12/drivers/i2c/busses/i2c-omap.c
+--- linux-3.18.12.orig/drivers/i2c/busses/i2c-omap.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/i2c/busses/i2c-omap.c 2015-04-26 13:32:22.391684003 -0500
@@ -875,15 +875,12 @@
u16 mask;
u16 stat;
@@ -4443,9 +4523,9 @@ diff -Nur linux-3.18.10.orig/drivers/i2c/busses/i2c-omap.c linux-3.18.10/drivers
return ret;
}
-diff -Nur linux-3.18.10.orig/drivers/ide/alim15x3.c linux-3.18.10/drivers/ide/alim15x3.c
---- linux-3.18.10.orig/drivers/ide/alim15x3.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/ide/alim15x3.c 2015-03-26 12:42:18.655588312 +0100
+diff -Nur linux-3.18.12.orig/drivers/ide/alim15x3.c linux-3.18.12/drivers/ide/alim15x3.c
+--- linux-3.18.12.orig/drivers/ide/alim15x3.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/ide/alim15x3.c 2015-04-26 13:32:22.391684003 -0500
@@ -234,7 +234,7 @@
isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
@@ -4464,9 +4544,9 @@ diff -Nur linux-3.18.10.orig/drivers/ide/alim15x3.c linux-3.18.10/drivers/ide/al
return 0;
}
-diff -Nur linux-3.18.10.orig/drivers/ide/hpt366.c linux-3.18.10/drivers/ide/hpt366.c
---- linux-3.18.10.orig/drivers/ide/hpt366.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/ide/hpt366.c 2015-03-26 12:42:18.655588312 +0100
+diff -Nur linux-3.18.12.orig/drivers/ide/hpt366.c linux-3.18.12/drivers/ide/hpt366.c
+--- linux-3.18.12.orig/drivers/ide/hpt366.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/ide/hpt366.c 2015-04-26 13:32:22.391684003 -0500
@@ -1241,7 +1241,7 @@
dma_old = inb(base + 2);
@@ -4485,9 +4565,9 @@ diff -Nur linux-3.18.10.orig/drivers/ide/hpt366.c linux-3.18.10/drivers/ide/hpt3
printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n",
hwif->name, base, base + 7);
-diff -Nur linux-3.18.10.orig/drivers/ide/ide-io.c linux-3.18.10/drivers/ide/ide-io.c
---- linux-3.18.10.orig/drivers/ide/ide-io.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/ide/ide-io.c 2015-03-26 12:42:18.655588312 +0100
+diff -Nur linux-3.18.12.orig/drivers/ide/ide-io.c linux-3.18.12/drivers/ide/ide-io.c
+--- linux-3.18.12.orig/drivers/ide/ide-io.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/ide/ide-io.c 2015-04-26 13:32:22.391684003 -0500
@@ -659,7 +659,7 @@
/* disable_irq_nosync ?? */
disable_irq(hwif->irq);
@@ -4497,9 +4577,9 @@ diff -Nur linux-3.18.10.orig/drivers/ide/ide-io.c linux-3.18.10/drivers/ide/ide-
if (hwif->polling) {
startstop = handler(drive);
} else if (drive_is_ready(drive)) {
-diff -Nur linux-3.18.10.orig/drivers/ide/ide-iops.c linux-3.18.10/drivers/ide/ide-iops.c
---- linux-3.18.10.orig/drivers/ide/ide-iops.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/ide/ide-iops.c 2015-03-26 12:42:18.655588312 +0100
+diff -Nur linux-3.18.12.orig/drivers/ide/ide-iops.c linux-3.18.12/drivers/ide/ide-iops.c
+--- linux-3.18.12.orig/drivers/ide/ide-iops.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/ide/ide-iops.c 2015-04-26 13:32:22.391684003 -0500
@@ -129,12 +129,12 @@
if ((stat & ATA_BUSY) == 0)
break;
@@ -4515,9 +4595,9 @@ diff -Nur linux-3.18.10.orig/drivers/ide/ide-iops.c linux-3.18.10/drivers/ide/id
}
/*
* Allow status to settle, then read it again.
-diff -Nur linux-3.18.10.orig/drivers/ide/ide-io-std.c linux-3.18.10/drivers/ide/ide-io-std.c
---- linux-3.18.10.orig/drivers/ide/ide-io-std.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/ide/ide-io-std.c 2015-03-26 12:42:18.655588312 +0100
+diff -Nur linux-3.18.12.orig/drivers/ide/ide-io-std.c linux-3.18.12/drivers/ide/ide-io-std.c
+--- linux-3.18.12.orig/drivers/ide/ide-io-std.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/ide/ide-io-std.c 2015-04-26 13:32:22.391684003 -0500
@@ -175,7 +175,7 @@
unsigned long uninitialized_var(flags);
@@ -4554,9 +4634,9 @@ diff -Nur linux-3.18.10.orig/drivers/ide/ide-io-std.c linux-3.18.10/drivers/ide/
if (((len + 1) & 3) < 2)
return;
-diff -Nur linux-3.18.10.orig/drivers/ide/ide-probe.c linux-3.18.10/drivers/ide/ide-probe.c
---- linux-3.18.10.orig/drivers/ide/ide-probe.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/ide/ide-probe.c 2015-03-26 12:42:18.655588312 +0100
+diff -Nur linux-3.18.12.orig/drivers/ide/ide-probe.c linux-3.18.12/drivers/ide/ide-probe.c
+--- linux-3.18.12.orig/drivers/ide/ide-probe.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/ide/ide-probe.c 2015-04-26 13:32:22.391684003 -0500
@@ -196,10 +196,10 @@
int bswap = 1;
@@ -4570,9 +4650,9 @@ diff -Nur linux-3.18.10.orig/drivers/ide/ide-probe.c linux-3.18.10/drivers/ide/i
drive->dev_flags |= IDE_DFLAG_ID_READ;
#ifdef DEBUG
-diff -Nur linux-3.18.10.orig/drivers/ide/ide-taskfile.c linux-3.18.10/drivers/ide/ide-taskfile.c
---- linux-3.18.10.orig/drivers/ide/ide-taskfile.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/ide/ide-taskfile.c 2015-03-26 12:42:18.655588312 +0100
+diff -Nur linux-3.18.12.orig/drivers/ide/ide-taskfile.c linux-3.18.12/drivers/ide/ide-taskfile.c
+--- linux-3.18.12.orig/drivers/ide/ide-taskfile.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/ide/ide-taskfile.c 2015-04-26 13:32:22.391684003 -0500
@@ -250,7 +250,7 @@
page_is_high = PageHighMem(page);
@@ -4600,9 +4680,9 @@ diff -Nur linux-3.18.10.orig/drivers/ide/ide-taskfile.c linux-3.18.10/drivers/id
ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
-diff -Nur linux-3.18.10.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c linux-3.18.10/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
---- linux-3.18.10.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2015-03-26 12:42:18.655588312 +0100
+diff -Nur linux-3.18.12.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c linux-3.18.12/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+--- linux-3.18.12.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2015-04-26 13:32:22.391684003 -0500
@@ -796,7 +796,7 @@
ipoib_mcast_stop_thread(dev, 0);
@@ -4621,9 +4701,9 @@ diff -Nur linux-3.18.10.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c linu
/* We have to cancel outside of the spinlock */
list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
-diff -Nur linux-3.18.10.orig/drivers/input/gameport/gameport.c linux-3.18.10/drivers/input/gameport/gameport.c
---- linux-3.18.10.orig/drivers/input/gameport/gameport.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/input/gameport/gameport.c 2015-03-26 12:42:18.655588312 +0100
+diff -Nur linux-3.18.12.orig/drivers/input/gameport/gameport.c linux-3.18.12/drivers/input/gameport/gameport.c
+--- linux-3.18.12.orig/drivers/input/gameport/gameport.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/input/gameport/gameport.c 2015-04-26 13:32:22.391684003 -0500
@@ -124,12 +124,12 @@
tx = 1 << 30;
@@ -4653,9 +4733,9 @@ diff -Nur linux-3.18.10.orig/drivers/input/gameport/gameport.c linux-3.18.10/dri
udelay(i * 10);
if (t2 - t1 < tx) tx = t2 - t1;
}
-diff -Nur linux-3.18.10.orig/drivers/leds/trigger/Kconfig linux-3.18.10/drivers/leds/trigger/Kconfig
---- linux-3.18.10.orig/drivers/leds/trigger/Kconfig 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/leds/trigger/Kconfig 2015-03-26 12:42:18.655588312 +0100
+diff -Nur linux-3.18.12.orig/drivers/leds/trigger/Kconfig linux-3.18.12/drivers/leds/trigger/Kconfig
+--- linux-3.18.12.orig/drivers/leds/trigger/Kconfig 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/leds/trigger/Kconfig 2015-04-26 13:32:22.391684003 -0500
@@ -61,7 +61,7 @@
config LEDS_TRIGGER_CPU
@@ -4665,9 +4745,9 @@ diff -Nur linux-3.18.10.orig/drivers/leds/trigger/Kconfig linux-3.18.10/drivers/
help
This allows LEDs to be controlled by active CPUs. This shows
the active CPUs across an array of LEDs so you can see which
-diff -Nur linux-3.18.10.orig/drivers/md/bcache/Kconfig linux-3.18.10/drivers/md/bcache/Kconfig
---- linux-3.18.10.orig/drivers/md/bcache/Kconfig 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/md/bcache/Kconfig 2015-03-26 12:42:18.655588312 +0100
+diff -Nur linux-3.18.12.orig/drivers/md/bcache/Kconfig linux-3.18.12/drivers/md/bcache/Kconfig
+--- linux-3.18.12.orig/drivers/md/bcache/Kconfig 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/md/bcache/Kconfig 2015-04-26 13:32:22.391684003 -0500
@@ -1,6 +1,7 @@
config BCACHE
@@ -4676,9 +4756,9 @@ diff -Nur linux-3.18.10.orig/drivers/md/bcache/Kconfig linux-3.18.10/drivers/md/
---help---
Allows a block device to be used as cache for other devices; uses
a btree for indexing and the layout is optimized for SSDs.
-diff -Nur linux-3.18.10.orig/drivers/md/dm.c linux-3.18.10/drivers/md/dm.c
---- linux-3.18.10.orig/drivers/md/dm.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/md/dm.c 2015-03-26 12:42:18.655588312 +0100
+diff -Nur linux-3.18.12.orig/drivers/md/dm.c linux-3.18.12/drivers/md/dm.c
+--- linux-3.18.12.orig/drivers/md/dm.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/md/dm.c 2015-04-26 13:32:22.395684003 -0500
@@ -1898,14 +1898,14 @@
if (map_request(ti, clone, md))
goto requeued;
@@ -4696,9 +4776,9 @@ diff -Nur linux-3.18.10.orig/drivers/md/dm.c linux-3.18.10/drivers/md/dm.c
spin_lock(q->queue_lock);
delay_and_out:
-diff -Nur linux-3.18.10.orig/drivers/md/raid5.c linux-3.18.10/drivers/md/raid5.c
---- linux-3.18.10.orig/drivers/md/raid5.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/md/raid5.c 2015-03-26 12:42:18.655588312 +0100
+diff -Nur linux-3.18.12.orig/drivers/md/raid5.c linux-3.18.12/drivers/md/raid5.c
+--- linux-3.18.12.orig/drivers/md/raid5.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/md/raid5.c 2015-04-26 13:32:22.395684003 -0500
@@ -1649,8 +1649,9 @@
struct raid5_percpu *percpu;
unsigned long cpu;
@@ -4728,9 +4808,9 @@ diff -Nur linux-3.18.10.orig/drivers/md/raid5.c linux-3.18.10/drivers/md/raid5.c
}
put_online_cpus();
-diff -Nur linux-3.18.10.orig/drivers/md/raid5.h linux-3.18.10/drivers/md/raid5.h
---- linux-3.18.10.orig/drivers/md/raid5.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/md/raid5.h 2015-03-26 12:42:18.655588312 +0100
+diff -Nur linux-3.18.12.orig/drivers/md/raid5.h linux-3.18.12/drivers/md/raid5.h
+--- linux-3.18.12.orig/drivers/md/raid5.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/md/raid5.h 2015-04-26 13:32:22.395684003 -0500
@@ -457,6 +457,7 @@
int recovery_disabled;
/* per cpu variables */
@@ -4739,9 +4819,9 @@ diff -Nur linux-3.18.10.orig/drivers/md/raid5.h linux-3.18.10/drivers/md/raid5.h
struct page *spare_page; /* Used when checking P/Q in raid6 */
void *scribble; /* space for constructing buffer
* lists and performing address
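
The raid5 hunks apply a recurring RT transformation: get_cpu() pins the task by disabling preemption for the whole parity computation, which is a latency hit, so the per-CPU scratch data is instead guarded by a lock and preemption stays enabled. A hypothetical sketch of that shape — the lock member and the get_cpu_light()/put_cpu_light() helpers are RT-patch constructs assumed here, not visible in this excerpt:

    struct raid5_percpu {
            spinlock_t      lock;        /* assumed member added by the hunk */
            struct page     *spare_page;
            void            *scribble;
    };

    cpu = get_cpu_light();               /* CPU id without a long preempt-off section */
    percpu = per_cpu_ptr(conf->percpu, cpu);
    spin_lock(&percpu->lock);            /* serialize users of the scratch buffers */
    /* ... compute parity using percpu->scribble ... */
    spin_unlock(&percpu->lock);
    put_cpu_light();
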
-diff -Nur linux-3.18.10.orig/drivers/misc/hwlat_detector.c linux-3.18.10/drivers/misc/hwlat_detector.c
---- linux-3.18.10.orig/drivers/misc/hwlat_detector.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.10/drivers/misc/hwlat_detector.c 2015-03-26 12:42:18.655588312 +0100
+diff -Nur linux-3.18.12.orig/drivers/misc/hwlat_detector.c linux-3.18.12/drivers/misc/hwlat_detector.c
+--- linux-3.18.12.orig/drivers/misc/hwlat_detector.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.12/drivers/misc/hwlat_detector.c 2015-04-26 13:32:22.395684003 -0500
@@ -0,0 +1,1240 @@
+/*
+ * hwlat_detector.c - A simple Hardware Latency detector.
@@ -5983,9 +6063,9 @@ diff -Nur linux-3.18.10.orig/drivers/misc/hwlat_detector.c linux-3.18.10/drivers
+
+module_init(detector_init);
+module_exit(detector_exit);
-diff -Nur linux-3.18.10.orig/drivers/misc/Kconfig linux-3.18.10/drivers/misc/Kconfig
---- linux-3.18.10.orig/drivers/misc/Kconfig 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/misc/Kconfig 2015-03-26 12:42:18.655588312 +0100
+diff -Nur linux-3.18.12.orig/drivers/misc/Kconfig linux-3.18.12/drivers/misc/Kconfig
+--- linux-3.18.12.orig/drivers/misc/Kconfig 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/misc/Kconfig 2015-04-26 13:32:22.395684003 -0500
@@ -54,6 +54,7 @@
config ATMEL_TCLIB
bool "Atmel AT32/AT91 Timer/Counter Library"
@@ -6056,9 +6136,9 @@ diff -Nur linux-3.18.10.orig/drivers/misc/Kconfig linux-3.18.10/drivers/misc/Kco
config PHANTOM
tristate "Sensable PHANToM (PCI)"
depends on PCI
-diff -Nur linux-3.18.10.orig/drivers/misc/Makefile linux-3.18.10/drivers/misc/Makefile
---- linux-3.18.10.orig/drivers/misc/Makefile 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/misc/Makefile 2015-03-26 12:42:18.655588312 +0100
+diff -Nur linux-3.18.12.orig/drivers/misc/Makefile linux-3.18.12/drivers/misc/Makefile
+--- linux-3.18.12.orig/drivers/misc/Makefile 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/misc/Makefile 2015-04-26 13:32:22.395684003 -0500
@@ -38,6 +38,7 @@
obj-$(CONFIG_HMC6352) += hmc6352.o
obj-y += eeprom/
@@ -6067,9 +6147,9 @@ diff -Nur linux-3.18.10.orig/drivers/misc/Makefile linux-3.18.10/drivers/misc/Ma
obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o
obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
-diff -Nur linux-3.18.10.orig/drivers/mmc/host/mmci.c linux-3.18.10/drivers/mmc/host/mmci.c
---- linux-3.18.10.orig/drivers/mmc/host/mmci.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/mmc/host/mmci.c 2015-03-26 12:42:18.655588312 +0100
+diff -Nur linux-3.18.12.orig/drivers/mmc/host/mmci.c linux-3.18.12/drivers/mmc/host/mmci.c
+--- linux-3.18.12.orig/drivers/mmc/host/mmci.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/mmc/host/mmci.c 2015-04-26 13:32:22.395684003 -0500
@@ -1153,15 +1153,12 @@
struct sg_mapping_iter *sg_miter = &host->sg_miter;
struct variant_data *variant = host->variant;
@@ -6095,9 +6175,9 @@ diff -Nur linux-3.18.10.orig/drivers/mmc/host/mmci.c linux-3.18.10/drivers/mmc/h
/*
* If we have less than the fifo 'half-full' threshold to transfer,
* trigger a PIO interrupt as soon as any data is available.
-diff -Nur linux-3.18.10.orig/drivers/mmc/host/sdhci.c linux-3.18.10/drivers/mmc/host/sdhci.c
---- linux-3.18.10.orig/drivers/mmc/host/sdhci.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/mmc/host/sdhci.c 2015-03-26 12:42:18.655588312 +0100
+diff -Nur linux-3.18.12.orig/drivers/mmc/host/sdhci.c linux-3.18.12/drivers/mmc/host/sdhci.c
+--- linux-3.18.12.orig/drivers/mmc/host/sdhci.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/mmc/host/sdhci.c 2015-04-26 13:32:22.399684003 -0500
@@ -2565,6 +2565,31 @@
return isr ? IRQ_HANDLED : IRQ_NONE;
}
@@ -6151,9 +6231,9 @@ diff -Nur linux-3.18.10.orig/drivers/mmc/host/sdhci.c linux-3.18.10/drivers/mmc/
if (ret) {
pr_err("%s: Failed to request IRQ %d: %d\n",
mmc_hostname(mmc), host->irq, ret);
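
Judging by the hunk sizes, the sdhci change splits the interrupt handler into a hard half and a threaded half and adjusts the IRQ registration accordingly; the body is elided in this excerpt, so the following shows only the general shape of such a conversion. foo_irq, foo_thread_irq, and device_raised_irq are illustrative names:

    /* hard half: runs in IRQ context, does the minimum */
    static irqreturn_t foo_irq(int irq, void *dev)
    {
            if (!device_raised_irq(dev))    /* hypothetical check */
                    return IRQ_NONE;
            return IRQ_WAKE_THREAD;         /* defer the real work */
    }

    /* threaded half: runs in a schedulable kernel thread, so on
       PREEMPT_RT it may take sleeping locks */
    static irqreturn_t foo_thread_irq(int irq, void *dev)
    {
            /* ... heavier processing ... */
            return IRQ_HANDLED;
    }

    ret = request_threaded_irq(irq, foo_irq, foo_thread_irq,
                               IRQF_SHARED, "foo", dev);
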
-diff -Nur linux-3.18.10.orig/drivers/net/ethernet/3com/3c59x.c linux-3.18.10/drivers/net/ethernet/3com/3c59x.c
---- linux-3.18.10.orig/drivers/net/ethernet/3com/3c59x.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/net/ethernet/3com/3c59x.c 2015-03-26 12:42:18.655588312 +0100
+diff -Nur linux-3.18.12.orig/drivers/net/ethernet/3com/3c59x.c linux-3.18.12/drivers/net/ethernet/3com/3c59x.c
+--- linux-3.18.12.orig/drivers/net/ethernet/3com/3c59x.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/net/ethernet/3com/3c59x.c 2015-04-26 13:32:22.399684003 -0500
@@ -842,9 +842,9 @@
{
struct vortex_private *vp = netdev_priv(dev);
@@ -6181,9 +6261,9 @@ diff -Nur linux-3.18.10.orig/drivers/net/ethernet/3com/3c59x.c linux-3.18.10/dri
}
}
-diff -Nur linux-3.18.10.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c linux-3.18.10/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
---- linux-3.18.10.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2015-03-26 12:42:18.659588317 +0100
+diff -Nur linux-3.18.12.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c linux-3.18.12/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+--- linux-3.18.12.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2015-04-26 13:32:22.399684003 -0500
@@ -2213,11 +2213,7 @@
}
@@ -6197,9 +6277,9 @@ diff -Nur linux-3.18.10.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c lin
if (atl1c_tpd_avail(adapter, type) < tpd_req) {
/* no enough descriptor, just stop queue */
-diff -Nur linux-3.18.10.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c linux-3.18.10/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
---- linux-3.18.10.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2015-03-26 12:42:18.659588317 +0100
+diff -Nur linux-3.18.12.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c linux-3.18.12/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+--- linux-3.18.12.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2015-04-26 13:32:22.399684003 -0500
@@ -1880,8 +1880,7 @@
return NETDEV_TX_OK;
}
@@ -6210,9 +6290,9 @@ diff -Nur linux-3.18.10.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c lin
if (atl1e_tpd_avail(adapter) < tpd_req) {
/* no enough descriptor, just stop queue */
-diff -Nur linux-3.18.10.orig/drivers/net/ethernet/chelsio/cxgb/sge.c linux-3.18.10/drivers/net/ethernet/chelsio/cxgb/sge.c
---- linux-3.18.10.orig/drivers/net/ethernet/chelsio/cxgb/sge.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/net/ethernet/chelsio/cxgb/sge.c 2015-03-26 12:42:18.659588317 +0100
+diff -Nur linux-3.18.12.orig/drivers/net/ethernet/chelsio/cxgb/sge.c linux-3.18.12/drivers/net/ethernet/chelsio/cxgb/sge.c
+--- linux-3.18.12.orig/drivers/net/ethernet/chelsio/cxgb/sge.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/net/ethernet/chelsio/cxgb/sge.c 2015-04-26 13:32:22.399684003 -0500
@@ -1663,8 +1663,7 @@
struct cmdQ *q = &sge->cmdQ[qid];
unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
@@ -6223,9 +6303,9 @@ diff -Nur linux-3.18.10.orig/drivers/net/ethernet/chelsio/cxgb/sge.c linux-3.18.
reclaim_completed_tx(sge, q);
-diff -Nur linux-3.18.10.orig/drivers/net/ethernet/freescale/gianfar.c linux-3.18.10/drivers/net/ethernet/freescale/gianfar.c
---- linux-3.18.10.orig/drivers/net/ethernet/freescale/gianfar.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/net/ethernet/freescale/gianfar.c 2015-03-26 12:42:18.659588317 +0100
+diff -Nur linux-3.18.12.orig/drivers/net/ethernet/freescale/gianfar.c linux-3.18.12/drivers/net/ethernet/freescale/gianfar.c
+--- linux-3.18.12.orig/drivers/net/ethernet/freescale/gianfar.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/net/ethernet/freescale/gianfar.c 2015-04-26 13:32:22.399684003 -0500
@@ -1483,7 +1483,7 @@
if (netif_running(ndev)) {
@@ -6279,9 +6359,9 @@ diff -Nur linux-3.18.10.orig/drivers/net/ethernet/freescale/gianfar.c linux-3.18
}
netif_dbg(priv, tx_err, dev, "Transmit Error\n");
}
-diff -Nur linux-3.18.10.orig/drivers/net/ethernet/neterion/s2io.c linux-3.18.10/drivers/net/ethernet/neterion/s2io.c
---- linux-3.18.10.orig/drivers/net/ethernet/neterion/s2io.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/net/ethernet/neterion/s2io.c 2015-03-26 12:42:18.659588317 +0100
+diff -Nur linux-3.18.12.orig/drivers/net/ethernet/neterion/s2io.c linux-3.18.12/drivers/net/ethernet/neterion/s2io.c
+--- linux-3.18.12.orig/drivers/net/ethernet/neterion/s2io.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/net/ethernet/neterion/s2io.c 2015-04-26 13:32:22.403684003 -0500
@@ -4084,12 +4084,7 @@
[skb->priority & (MAX_TX_FIFOS - 1)];
fifo = &mac_control->fifos[queue];
@@ -6296,9 +6376,9 @@ diff -Nur linux-3.18.10.orig/drivers/net/ethernet/neterion/s2io.c linux-3.18.10/
if (sp->config.multiq) {
if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
-diff -Nur linux-3.18.10.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c linux-3.18.10/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
---- linux-3.18.10.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2015-03-26 12:42:18.659588317 +0100
+diff -Nur linux-3.18.12.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c linux-3.18.12/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+--- linux-3.18.12.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2015-04-26 13:32:22.403684003 -0500
@@ -2137,10 +2137,8 @@
struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
unsigned long flags;
@@ -6312,9 +6392,9 @@ diff -Nur linux-3.18.10.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.
if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
netif_stop_queue(netdev);
spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
-diff -Nur linux-3.18.10.orig/drivers/net/ethernet/realtek/8139too.c linux-3.18.10/drivers/net/ethernet/realtek/8139too.c
---- linux-3.18.10.orig/drivers/net/ethernet/realtek/8139too.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/net/ethernet/realtek/8139too.c 2015-03-26 12:42:18.659588317 +0100
+diff -Nur linux-3.18.12.orig/drivers/net/ethernet/realtek/8139too.c linux-3.18.12/drivers/net/ethernet/realtek/8139too.c
+--- linux-3.18.12.orig/drivers/net/ethernet/realtek/8139too.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/net/ethernet/realtek/8139too.c 2015-04-26 13:32:22.403684003 -0500
@@ -2215,7 +2215,7 @@
struct rtl8139_private *tp = netdev_priv(dev);
const int irq = tp->pci_dev->irq;
@@ -6324,9 +6404,9 @@ diff -Nur linux-3.18.10.orig/drivers/net/ethernet/realtek/8139too.c linux-3.18.1
rtl8139_interrupt(irq, dev);
enable_irq(irq);
}
-diff -Nur linux-3.18.10.orig/drivers/net/ethernet/tehuti/tehuti.c linux-3.18.10/drivers/net/ethernet/tehuti/tehuti.c
---- linux-3.18.10.orig/drivers/net/ethernet/tehuti/tehuti.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/net/ethernet/tehuti/tehuti.c 2015-03-26 12:42:18.659588317 +0100
+diff -Nur linux-3.18.12.orig/drivers/net/ethernet/tehuti/tehuti.c linux-3.18.12/drivers/net/ethernet/tehuti/tehuti.c
+--- linux-3.18.12.orig/drivers/net/ethernet/tehuti/tehuti.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/net/ethernet/tehuti/tehuti.c 2015-04-26 13:32:22.403684003 -0500
@@ -1629,13 +1629,8 @@
unsigned long flags;
@@ -6343,9 +6423,9 @@ diff -Nur linux-3.18.10.orig/drivers/net/ethernet/tehuti/tehuti.c linux-3.18.10/
/* build tx descriptor */
BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */
-diff -Nur linux-3.18.10.orig/drivers/net/rionet.c linux-3.18.10/drivers/net/rionet.c
---- linux-3.18.10.orig/drivers/net/rionet.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/net/rionet.c 2015-03-26 12:42:18.659588317 +0100
+diff -Nur linux-3.18.12.orig/drivers/net/rionet.c linux-3.18.12/drivers/net/rionet.c
+--- linux-3.18.12.orig/drivers/net/rionet.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/net/rionet.c 2015-04-26 13:32:22.403684003 -0500
@@ -174,11 +174,7 @@
unsigned long flags;
int add_num = 1;
@@ -6359,9 +6439,9 @@ diff -Nur linux-3.18.10.orig/drivers/net/rionet.c linux-3.18.10/drivers/net/rion
if (is_multicast_ether_addr(eth->h_dest))
add_num = nets[rnet->mport->id].nact;
-diff -Nur linux-3.18.10.orig/drivers/net/wireless/orinoco/orinoco_usb.c linux-3.18.10/drivers/net/wireless/orinoco/orinoco_usb.c
---- linux-3.18.10.orig/drivers/net/wireless/orinoco/orinoco_usb.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/net/wireless/orinoco/orinoco_usb.c 2015-03-26 12:42:18.659588317 +0100
+diff -Nur linux-3.18.12.orig/drivers/net/wireless/orinoco/orinoco_usb.c linux-3.18.12/drivers/net/wireless/orinoco/orinoco_usb.c
+--- linux-3.18.12.orig/drivers/net/wireless/orinoco/orinoco_usb.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/net/wireless/orinoco/orinoco_usb.c 2015-04-26 13:32:22.403684003 -0500
@@ -699,7 +699,7 @@
while (!ctx->done.done && msecs--)
udelay(1000);
@@ -6371,9 +6451,9 @@ diff -Nur linux-3.18.10.orig/drivers/net/wireless/orinoco/orinoco_usb.c linux-3.
ctx->done.done);
}
break;
-diff -Nur linux-3.18.10.orig/drivers/pci/access.c linux-3.18.10/drivers/pci/access.c
---- linux-3.18.10.orig/drivers/pci/access.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/pci/access.c 2015-03-26 12:42:18.659588317 +0100
+diff -Nur linux-3.18.12.orig/drivers/pci/access.c linux-3.18.12/drivers/pci/access.c
+--- linux-3.18.12.orig/drivers/pci/access.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/pci/access.c 2015-04-26 13:32:22.403684003 -0500
@@ -434,7 +434,7 @@
WARN_ON(!dev->block_cfg_access);
@@ -6383,9 +6463,9 @@ diff -Nur linux-3.18.10.orig/drivers/pci/access.c linux-3.18.10/drivers/pci/acce
raw_spin_unlock_irqrestore(&pci_lock, flags);
}
EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
-diff -Nur linux-3.18.10.orig/drivers/scsi/fcoe/fcoe.c linux-3.18.10/drivers/scsi/fcoe/fcoe.c
---- linux-3.18.10.orig/drivers/scsi/fcoe/fcoe.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/scsi/fcoe/fcoe.c 2015-03-26 12:42:18.659588317 +0100
+diff -Nur linux-3.18.12.orig/drivers/scsi/fcoe/fcoe.c linux-3.18.12/drivers/scsi/fcoe/fcoe.c
+--- linux-3.18.12.orig/drivers/scsi/fcoe/fcoe.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/scsi/fcoe/fcoe.c 2015-04-26 13:32:22.403684003 -0500
@@ -1286,7 +1286,7 @@
struct sk_buff *skb;
#ifdef CONFIG_SMP
@@ -6449,9 +6529,9 @@ diff -Nur linux-3.18.10.orig/drivers/scsi/fcoe/fcoe.c linux-3.18.10/drivers/scsi
kfree_skb(skb);
}
-diff -Nur linux-3.18.10.orig/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.18.10/drivers/scsi/fcoe/fcoe_ctlr.c
---- linux-3.18.10.orig/drivers/scsi/fcoe/fcoe_ctlr.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/scsi/fcoe/fcoe_ctlr.c 2015-03-26 12:42:18.659588317 +0100
+diff -Nur linux-3.18.12.orig/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.18.12/drivers/scsi/fcoe/fcoe_ctlr.c
+--- linux-3.18.12.orig/drivers/scsi/fcoe/fcoe_ctlr.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/scsi/fcoe/fcoe_ctlr.c 2015-04-26 13:32:22.403684003 -0500
@@ -831,7 +831,7 @@
INIT_LIST_HEAD(&del_list);
@@ -6470,9 +6550,9 @@ diff -Nur linux-3.18.10.orig/drivers/scsi/fcoe/fcoe_ctlr.c linux-3.18.10/drivers
list_for_each_entry_safe(fcf, next, &del_list, list) {
/* Removes fcf from current list */
-diff -Nur linux-3.18.10.orig/drivers/scsi/libfc/fc_exch.c linux-3.18.10/drivers/scsi/libfc/fc_exch.c
---- linux-3.18.10.orig/drivers/scsi/libfc/fc_exch.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/scsi/libfc/fc_exch.c 2015-03-26 12:42:18.659588317 +0100
+diff -Nur linux-3.18.12.orig/drivers/scsi/libfc/fc_exch.c linux-3.18.12/drivers/scsi/libfc/fc_exch.c
+--- linux-3.18.12.orig/drivers/scsi/libfc/fc_exch.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/scsi/libfc/fc_exch.c 2015-04-26 13:32:22.403684003 -0500
@@ -816,10 +816,10 @@
}
memset(ep, 0, sizeof(*ep));
@@ -6486,9 +6566,9 @@ diff -Nur linux-3.18.10.orig/drivers/scsi/libfc/fc_exch.c linux-3.18.10/drivers/
/* peek cache of free slot */
if (pool->left != FC_XID_UNKNOWN) {
-diff -Nur linux-3.18.10.orig/drivers/scsi/libsas/sas_ata.c linux-3.18.10/drivers/scsi/libsas/sas_ata.c
---- linux-3.18.10.orig/drivers/scsi/libsas/sas_ata.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/scsi/libsas/sas_ata.c 2015-03-26 12:42:18.659588317 +0100
+diff -Nur linux-3.18.12.orig/drivers/scsi/libsas/sas_ata.c linux-3.18.12/drivers/scsi/libsas/sas_ata.c
+--- linux-3.18.12.orig/drivers/scsi/libsas/sas_ata.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/scsi/libsas/sas_ata.c 2015-04-26 13:32:22.407684003 -0500
@@ -191,7 +191,7 @@
/* TODO: audit callers to ensure they are ready for qc_issue to
* unconditionally re-enable interrupts
@@ -6507,9 +6587,9 @@ diff -Nur linux-3.18.10.orig/drivers/scsi/libsas/sas_ata.c linux-3.18.10/drivers
return ret;
}
-diff -Nur linux-3.18.10.orig/drivers/scsi/qla2xxx/qla_inline.h linux-3.18.10/drivers/scsi/qla2xxx/qla_inline.h
---- linux-3.18.10.orig/drivers/scsi/qla2xxx/qla_inline.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/scsi/qla2xxx/qla_inline.h 2015-03-26 12:42:18.659588317 +0100
+diff -Nur linux-3.18.12.orig/drivers/scsi/qla2xxx/qla_inline.h linux-3.18.12/drivers/scsi/qla2xxx/qla_inline.h
+--- linux-3.18.12.orig/drivers/scsi/qla2xxx/qla_inline.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/scsi/qla2xxx/qla_inline.h 2015-04-26 13:32:22.407684003 -0500
@@ -59,12 +59,12 @@
{
unsigned long flags;
@@ -6525,9 +6605,9 @@ diff -Nur linux-3.18.10.orig/drivers/scsi/qla2xxx/qla_inline.h linux-3.18.10/dri
}
static inline uint8_t *
-diff -Nur linux-3.18.10.orig/drivers/thermal/x86_pkg_temp_thermal.c linux-3.18.10/drivers/thermal/x86_pkg_temp_thermal.c
---- linux-3.18.10.orig/drivers/thermal/x86_pkg_temp_thermal.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/thermal/x86_pkg_temp_thermal.c 2015-03-26 12:42:18.659588317 +0100
+diff -Nur linux-3.18.12.orig/drivers/thermal/x86_pkg_temp_thermal.c linux-3.18.12/drivers/thermal/x86_pkg_temp_thermal.c
+--- linux-3.18.12.orig/drivers/thermal/x86_pkg_temp_thermal.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/thermal/x86_pkg_temp_thermal.c 2015-04-26 13:32:22.407684003 -0500
@@ -29,6 +29,7 @@
#include <linux/pm.h>
#include <linux/thermal.h>
@@ -6630,9 +6710,9 @@ diff -Nur linux-3.18.10.orig/drivers/thermal/x86_pkg_temp_thermal.c linux-3.18.1
for_each_online_cpu(i)
cancel_delayed_work_sync(
&per_cpu(pkg_temp_thermal_threshold_work, i));
-diff -Nur linux-3.18.10.orig/drivers/tty/serial/8250/8250_core.c linux-3.18.10/drivers/tty/serial/8250/8250_core.c
---- linux-3.18.10.orig/drivers/tty/serial/8250/8250_core.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/tty/serial/8250/8250_core.c 2015-03-26 12:42:18.659588317 +0100
+diff -Nur linux-3.18.12.orig/drivers/tty/serial/8250/8250_core.c linux-3.18.12/drivers/tty/serial/8250/8250_core.c
+--- linux-3.18.12.orig/drivers/tty/serial/8250/8250_core.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/tty/serial/8250/8250_core.c 2015-04-26 13:32:22.407684003 -0500
@@ -37,6 +37,7 @@
#include <linux/nmi.h>
#include <linux/mutex.h>
@@ -6668,9 +6748,9 @@ diff -Nur linux-3.18.10.orig/drivers/tty/serial/8250/8250_core.c linux-3.18.10/d
locked = spin_trylock_irqsave(&port->lock, flags);
else
spin_lock_irqsave(&port->lock, flags);
-diff -Nur linux-3.18.10.orig/drivers/tty/serial/amba-pl011.c linux-3.18.10/drivers/tty/serial/amba-pl011.c
---- linux-3.18.10.orig/drivers/tty/serial/amba-pl011.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/tty/serial/amba-pl011.c 2015-03-26 12:42:18.659588317 +0100
+diff -Nur linux-3.18.12.orig/drivers/tty/serial/amba-pl011.c linux-3.18.12/drivers/tty/serial/amba-pl011.c
+--- linux-3.18.12.orig/drivers/tty/serial/amba-pl011.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/tty/serial/amba-pl011.c 2015-04-26 13:32:22.407684003 -0500
@@ -1935,13 +1935,19 @@
clk_enable(uap->clk);
@@ -6704,9 +6784,9 @@ diff -Nur linux-3.18.10.orig/drivers/tty/serial/amba-pl011.c linux-3.18.10/drive
clk_disable(uap->clk);
}
-diff -Nur linux-3.18.10.orig/drivers/tty/serial/omap-serial.c linux-3.18.10/drivers/tty/serial/omap-serial.c
---- linux-3.18.10.orig/drivers/tty/serial/omap-serial.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/tty/serial/omap-serial.c 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/drivers/tty/serial/omap-serial.c linux-3.18.12/drivers/tty/serial/omap-serial.c
+--- linux-3.18.12.orig/drivers/tty/serial/omap-serial.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/tty/serial/omap-serial.c 2015-04-26 13:32:22.407684003 -0500
@@ -1270,13 +1270,10 @@
pm_runtime_get_sync(up->dev);
@@ -6734,9 +6814,9 @@ diff -Nur linux-3.18.10.orig/drivers/tty/serial/omap-serial.c linux-3.18.10/driv
}
static int __init
-diff -Nur linux-3.18.10.orig/drivers/usb/core/hcd.c linux-3.18.10/drivers/usb/core/hcd.c
---- linux-3.18.10.orig/drivers/usb/core/hcd.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/usb/core/hcd.c 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/drivers/usb/core/hcd.c linux-3.18.12/drivers/usb/core/hcd.c
+--- linux-3.18.12.orig/drivers/usb/core/hcd.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/usb/core/hcd.c 2015-04-26 13:32:22.407684003 -0500
@@ -1681,9 +1681,9 @@
* and no one may trigger the above deadlock situation when
* running complete() in tasklet.
@@ -6749,9 +6829,9 @@ diff -Nur linux-3.18.10.orig/drivers/usb/core/hcd.c linux-3.18.10/drivers/usb/co
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
-diff -Nur linux-3.18.10.orig/drivers/usb/gadget/function/f_fs.c linux-3.18.10/drivers/usb/gadget/function/f_fs.c
---- linux-3.18.10.orig/drivers/usb/gadget/function/f_fs.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/usb/gadget/function/f_fs.c 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/drivers/usb/gadget/function/f_fs.c linux-3.18.12/drivers/usb/gadget/function/f_fs.c
+--- linux-3.18.12.orig/drivers/usb/gadget/function/f_fs.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/usb/gadget/function/f_fs.c 2015-04-26 13:32:22.407684003 -0500
@@ -1428,7 +1428,7 @@
pr_info("%s(): freeing\n", __func__);
ffs_data_clear(ffs);
@@ -6761,9 +6841,9 @@ diff -Nur linux-3.18.10.orig/drivers/usb/gadget/function/f_fs.c linux-3.18.10/dr
kfree(ffs->dev_name);
kfree(ffs);
}
-diff -Nur linux-3.18.10.orig/drivers/usb/gadget/legacy/inode.c linux-3.18.10/drivers/usb/gadget/legacy/inode.c
---- linux-3.18.10.orig/drivers/usb/gadget/legacy/inode.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/drivers/usb/gadget/legacy/inode.c 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/drivers/usb/gadget/legacy/inode.c linux-3.18.12/drivers/usb/gadget/legacy/inode.c
+--- linux-3.18.12.orig/drivers/usb/gadget/legacy/inode.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/drivers/usb/gadget/legacy/inode.c 2015-04-26 13:32:22.407684003 -0500
@@ -339,7 +339,7 @@
spin_unlock_irq (&epdata->dev->lock);
@@ -6782,9 +6862,9 @@ diff -Nur linux-3.18.10.orig/drivers/usb/gadget/legacy/inode.c linux-3.18.10/dri
if (epdata->status == -ECONNRESET)
epdata->status = -EINTR;
} else {
-diff -Nur linux-3.18.10.orig/fs/aio.c linux-3.18.10/fs/aio.c
---- linux-3.18.10.orig/fs/aio.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/fs/aio.c 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/fs/aio.c linux-3.18.12/fs/aio.c
+--- linux-3.18.12.orig/fs/aio.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/fs/aio.c 2015-04-26 13:32:22.407684003 -0500
@@ -40,6 +40,7 @@
#include <linux/ramfs.h>
#include <linux/percpu-refcount.h>
@@ -6860,9 +6940,9 @@ diff -Nur linux-3.18.10.orig/fs/aio.c linux-3.18.10/fs/aio.c
static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
{
unsigned i, new_nr;
-diff -Nur linux-3.18.10.orig/fs/autofs4/autofs_i.h linux-3.18.10/fs/autofs4/autofs_i.h
---- linux-3.18.10.orig/fs/autofs4/autofs_i.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/fs/autofs4/autofs_i.h 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/fs/autofs4/autofs_i.h linux-3.18.12/fs/autofs4/autofs_i.h
+--- linux-3.18.12.orig/fs/autofs4/autofs_i.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/fs/autofs4/autofs_i.h 2015-04-26 13:32:22.411684003 -0500
@@ -34,6 +34,7 @@
#include <linux/sched.h>
#include <linux/mount.h>
@@ -6871,9 +6951,9 @@ diff -Nur linux-3.18.10.orig/fs/autofs4/autofs_i.h linux-3.18.10/fs/autofs4/auto
#include <asm/current.h>
#include <asm/uaccess.h>
-diff -Nur linux-3.18.10.orig/fs/autofs4/expire.c linux-3.18.10/fs/autofs4/expire.c
---- linux-3.18.10.orig/fs/autofs4/expire.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/fs/autofs4/expire.c 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/fs/autofs4/expire.c linux-3.18.12/fs/autofs4/expire.c
+--- linux-3.18.12.orig/fs/autofs4/expire.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/fs/autofs4/expire.c 2015-04-26 13:32:22.411684003 -0500
@@ -151,7 +151,7 @@
parent = p->d_parent;
if (!spin_trylock(&parent->d_lock)) {
@@ -6883,9 +6963,9 @@ diff -Nur linux-3.18.10.orig/fs/autofs4/expire.c linux-3.18.10/fs/autofs4/expire
goto relock;
}
spin_unlock(&p->d_lock);
-diff -Nur linux-3.18.10.orig/fs/buffer.c linux-3.18.10/fs/buffer.c
---- linux-3.18.10.orig/fs/buffer.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/fs/buffer.c 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/fs/buffer.c linux-3.18.12/fs/buffer.c
+--- linux-3.18.12.orig/fs/buffer.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/fs/buffer.c 2015-04-26 13:32:22.411684003 -0500
@@ -301,8 +301,7 @@
* decide that the page is now completely done.
*/
@@ -6953,9 +7033,9 @@ diff -Nur linux-3.18.10.orig/fs/buffer.c linux-3.18.10/fs/buffer.c
preempt_disable();
__this_cpu_inc(bh_accounting.nr);
recalc_bh_state();
-diff -Nur linux-3.18.10.orig/fs/dcache.c linux-3.18.10/fs/dcache.c
---- linux-3.18.10.orig/fs/dcache.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/fs/dcache.c 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/fs/dcache.c linux-3.18.12/fs/dcache.c
+--- linux-3.18.12.orig/fs/dcache.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/fs/dcache.c 2015-04-26 13:32:22.411684003 -0500
@@ -19,6 +19,7 @@
#include <linux/mm.h>
#include <linux/fs.h>
@@ -6982,9 +7062,9 @@ diff -Nur linux-3.18.10.orig/fs/dcache.c linux-3.18.10/fs/dcache.c
goto again;
}
dentry->d_flags &= ~DCACHE_CANT_MOUNT;
-diff -Nur linux-3.18.10.orig/fs/eventpoll.c linux-3.18.10/fs/eventpoll.c
---- linux-3.18.10.orig/fs/eventpoll.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/fs/eventpoll.c 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/fs/eventpoll.c linux-3.18.12/fs/eventpoll.c
+--- linux-3.18.12.orig/fs/eventpoll.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/fs/eventpoll.c 2015-04-26 13:32:22.411684003 -0500
@@ -505,12 +505,12 @@
*/
static void ep_poll_safewake(wait_queue_head_t *wq)
@@ -7000,9 +7080,9 @@ diff -Nur linux-3.18.10.orig/fs/eventpoll.c linux-3.18.10/fs/eventpoll.c
}
static void ep_remove_wait_queue(struct eppoll_entry *pwq)
-diff -Nur linux-3.18.10.orig/fs/exec.c linux-3.18.10/fs/exec.c
---- linux-3.18.10.orig/fs/exec.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/fs/exec.c 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/fs/exec.c linux-3.18.12/fs/exec.c
+--- linux-3.18.12.orig/fs/exec.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/fs/exec.c 2015-04-26 13:32:22.411684003 -0500
@@ -841,12 +841,14 @@
}
}
@@ -7018,9 +7098,9 @@ diff -Nur linux-3.18.10.orig/fs/exec.c linux-3.18.10/fs/exec.c
task_unlock(tsk);
if (old_mm) {
up_read(&old_mm->mmap_sem);
-diff -Nur linux-3.18.10.orig/fs/jbd/checkpoint.c linux-3.18.10/fs/jbd/checkpoint.c
---- linux-3.18.10.orig/fs/jbd/checkpoint.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/fs/jbd/checkpoint.c 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/fs/jbd/checkpoint.c linux-3.18.12/fs/jbd/checkpoint.c
+--- linux-3.18.12.orig/fs/jbd/checkpoint.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/fs/jbd/checkpoint.c 2015-04-26 13:32:22.411684003 -0500
@@ -129,6 +129,8 @@
if (journal->j_flags & JFS_ABORT)
return;
@@ -7030,9 +7110,9 @@ diff -Nur linux-3.18.10.orig/fs/jbd/checkpoint.c linux-3.18.10/fs/jbd/checkpoint
mutex_lock(&journal->j_checkpoint_mutex);
/*
-diff -Nur linux-3.18.10.orig/fs/jbd2/checkpoint.c linux-3.18.10/fs/jbd2/checkpoint.c
---- linux-3.18.10.orig/fs/jbd2/checkpoint.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/fs/jbd2/checkpoint.c 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/fs/jbd2/checkpoint.c linux-3.18.12/fs/jbd2/checkpoint.c
+--- linux-3.18.12.orig/fs/jbd2/checkpoint.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/fs/jbd2/checkpoint.c 2015-04-26 13:32:22.411684003 -0500
@@ -116,6 +116,8 @@
nblocks = jbd2_space_needed(journal);
while (jbd2_log_space_left(journal) < nblocks) {
@@ -7042,9 +7122,9 @@ diff -Nur linux-3.18.10.orig/fs/jbd2/checkpoint.c linux-3.18.10/fs/jbd2/checkpoi
mutex_lock(&journal->j_checkpoint_mutex);
/*
-diff -Nur linux-3.18.10.orig/fs/namespace.c linux-3.18.10/fs/namespace.c
---- linux-3.18.10.orig/fs/namespace.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/fs/namespace.c 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/fs/namespace.c linux-3.18.12/fs/namespace.c
+--- linux-3.18.12.orig/fs/namespace.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/fs/namespace.c 2015-04-26 13:32:22.411684003 -0500
@@ -14,6 +14,7 @@
#include <linux/mnt_namespace.h>
#include <linux/user_namespace.h>
@@ -7067,9 +7147,9 @@ diff -Nur linux-3.18.10.orig/fs/namespace.c linux-3.18.10/fs/namespace.c
/*
* After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
* be set to match its requirements. So we must not load that until
-diff -Nur linux-3.18.10.orig/fs/ntfs/aops.c linux-3.18.10/fs/ntfs/aops.c
---- linux-3.18.10.orig/fs/ntfs/aops.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/fs/ntfs/aops.c 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/fs/ntfs/aops.c linux-3.18.12/fs/ntfs/aops.c
+--- linux-3.18.12.orig/fs/ntfs/aops.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/fs/ntfs/aops.c 2015-04-26 13:32:22.411684003 -0500
@@ -107,8 +107,7 @@
"0x%llx.", (unsigned long long)bh->b_blocknr);
}
@@ -7117,9 +7197,9 @@ diff -Nur linux-3.18.10.orig/fs/ntfs/aops.c linux-3.18.10/fs/ntfs/aops.c
}
/**
-diff -Nur linux-3.18.10.orig/fs/timerfd.c linux-3.18.10/fs/timerfd.c
---- linux-3.18.10.orig/fs/timerfd.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/fs/timerfd.c 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/fs/timerfd.c linux-3.18.12/fs/timerfd.c
+--- linux-3.18.12.orig/fs/timerfd.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/fs/timerfd.c 2015-04-26 13:32:22.411684003 -0500
@@ -449,7 +449,10 @@
break;
}
@@ -7132,9 +7212,9 @@ diff -Nur linux-3.18.10.orig/fs/timerfd.c linux-3.18.10/fs/timerfd.c
}
/*
-diff -Nur linux-3.18.10.orig/include/acpi/platform/aclinux.h linux-3.18.10/include/acpi/platform/aclinux.h
---- linux-3.18.10.orig/include/acpi/platform/aclinux.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/acpi/platform/aclinux.h 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/include/acpi/platform/aclinux.h linux-3.18.12/include/acpi/platform/aclinux.h
+--- linux-3.18.12.orig/include/acpi/platform/aclinux.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/acpi/platform/aclinux.h 2015-04-26 13:32:22.415684003 -0500
@@ -123,6 +123,7 @@
#define acpi_cache_t struct kmem_cache
@@ -7164,9 +7244,9 @@ diff -Nur linux-3.18.10.orig/include/acpi/platform/aclinux.h linux-3.18.10/inclu
/*
* OSL interfaces used by debugger/disassembler
*/
-diff -Nur linux-3.18.10.orig/include/asm-generic/bug.h linux-3.18.10/include/asm-generic/bug.h
---- linux-3.18.10.orig/include/asm-generic/bug.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/asm-generic/bug.h 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/include/asm-generic/bug.h linux-3.18.12/include/asm-generic/bug.h
+--- linux-3.18.12.orig/include/asm-generic/bug.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/asm-generic/bug.h 2015-04-26 13:32:22.415684003 -0500
@@ -206,6 +206,20 @@
# define WARN_ON_SMP(x) ({0;})
#endif
@@ -7188,9 +7268,9 @@ diff -Nur linux-3.18.10.orig/include/asm-generic/bug.h linux-3.18.10/include/asm
#endif /* __ASSEMBLY__ */
#endif
-diff -Nur linux-3.18.10.orig/include/linux/blkdev.h linux-3.18.10/include/linux/blkdev.h
---- linux-3.18.10.orig/include/linux/blkdev.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/blkdev.h 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/include/linux/blkdev.h linux-3.18.12/include/linux/blkdev.h
+--- linux-3.18.12.orig/include/linux/blkdev.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/blkdev.h 2015-04-26 13:32:22.415684003 -0500
@@ -101,6 +101,7 @@
struct list_head queuelist;
union {
@@ -7208,9 +7288,9 @@ diff -Nur linux-3.18.10.orig/include/linux/blkdev.h linux-3.18.10/include/linux/
struct percpu_ref mq_usage_counter;
struct list_head all_q_node;
-diff -Nur linux-3.18.10.orig/include/linux/blk-mq.h linux-3.18.10/include/linux/blk-mq.h
---- linux-3.18.10.orig/include/linux/blk-mq.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/blk-mq.h 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/include/linux/blk-mq.h linux-3.18.12/include/linux/blk-mq.h
+--- linux-3.18.12.orig/include/linux/blk-mq.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/blk-mq.h 2015-04-26 13:32:22.415684003 -0500
@@ -169,6 +169,7 @@
struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
@@ -7219,9 +7299,9 @@ diff -Nur linux-3.18.10.orig/include/linux/blk-mq.h linux-3.18.10/include/linux/
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, int error);
-diff -Nur linux-3.18.10.orig/include/linux/bottom_half.h linux-3.18.10/include/linux/bottom_half.h
---- linux-3.18.10.orig/include/linux/bottom_half.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/bottom_half.h 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/include/linux/bottom_half.h linux-3.18.12/include/linux/bottom_half.h
+--- linux-3.18.12.orig/include/linux/bottom_half.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/bottom_half.h 2015-04-26 13:32:22.415684003 -0500
@@ -4,6 +4,17 @@
#include <linux/preempt.h>
#include <linux/preempt_mask.h>
@@ -7247,9 +7327,9 @@ diff -Nur linux-3.18.10.orig/include/linux/bottom_half.h linux-3.18.10/include/l
+#endif
#endif /* _LINUX_BH_H */
-diff -Nur linux-3.18.10.orig/include/linux/buffer_head.h linux-3.18.10/include/linux/buffer_head.h
---- linux-3.18.10.orig/include/linux/buffer_head.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/buffer_head.h 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/include/linux/buffer_head.h linux-3.18.12/include/linux/buffer_head.h
+--- linux-3.18.12.orig/include/linux/buffer_head.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/buffer_head.h 2015-04-26 13:32:22.415684003 -0500
@@ -75,8 +75,52 @@
struct address_space *b_assoc_map; /* mapping this buffer is
associated with */
@@ -7303,9 +7383,9 @@ diff -Nur linux-3.18.10.orig/include/linux/buffer_head.h linux-3.18.10/include/l
/*
* macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
* and buffer_foo() functions.
-diff -Nur linux-3.18.10.orig/include/linux/cgroup.h linux-3.18.10/include/linux/cgroup.h
---- linux-3.18.10.orig/include/linux/cgroup.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/cgroup.h 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/include/linux/cgroup.h linux-3.18.12/include/linux/cgroup.h
+--- linux-3.18.12.orig/include/linux/cgroup.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/cgroup.h 2015-04-26 13:32:22.415684003 -0500
@@ -22,6 +22,7 @@
#include <linux/seq_file.h>
#include <linux/kernfs.h>
@@ -7322,9 +7402,9 @@ diff -Nur linux-3.18.10.orig/include/linux/cgroup.h linux-3.18.10/include/linux/
};
/* bits in struct cgroup_subsys_state flags field */
-diff -Nur linux-3.18.10.orig/include/linux/completion.h linux-3.18.10/include/linux/completion.h
---- linux-3.18.10.orig/include/linux/completion.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/completion.h 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/include/linux/completion.h linux-3.18.12/include/linux/completion.h
+--- linux-3.18.12.orig/include/linux/completion.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/completion.h 2015-04-26 13:32:22.415684003 -0500
@@ -7,8 +7,7 @@
* Atomic wait-for-completion handler data structures.
* See kernel/sched/completion.c for details.
@@ -7358,9 +7438,9 @@ diff -Nur linux-3.18.10.orig/include/linux/completion.h linux-3.18.10/include/li
}
/**
-diff -Nur linux-3.18.10.orig/include/linux/cpu.h linux-3.18.10/include/linux/cpu.h
---- linux-3.18.10.orig/include/linux/cpu.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/cpu.h 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/include/linux/cpu.h linux-3.18.12/include/linux/cpu.h
+--- linux-3.18.12.orig/include/linux/cpu.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/cpu.h 2015-04-26 13:32:22.415684003 -0500
@@ -217,6 +217,8 @@
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
@@ -7379,9 +7459,9 @@ diff -Nur linux-3.18.10.orig/include/linux/cpu.h linux-3.18.10/include/linux/cpu
#define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
#define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0)
/* These aren't inline functions due to a GCC bug. */
-diff -Nur linux-3.18.10.orig/include/linux/delay.h linux-3.18.10/include/linux/delay.h
---- linux-3.18.10.orig/include/linux/delay.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/delay.h 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/include/linux/delay.h linux-3.18.12/include/linux/delay.h
+--- linux-3.18.12.orig/include/linux/delay.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/delay.h 2015-04-26 13:32:22.415684003 -0500
@@ -52,4 +52,10 @@
msleep(seconds * 1000);
}
@@ -7393,9 +7473,9 @@ diff -Nur linux-3.18.10.orig/include/linux/delay.h linux-3.18.10/include/linux/d
+#endif
+
#endif /* defined(_LINUX_DELAY_H) */
-diff -Nur linux-3.18.10.orig/include/linux/ftrace_event.h linux-3.18.10/include/linux/ftrace_event.h
---- linux-3.18.10.orig/include/linux/ftrace_event.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/ftrace_event.h 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/include/linux/ftrace_event.h linux-3.18.12/include/linux/ftrace_event.h
+--- linux-3.18.12.orig/include/linux/ftrace_event.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/ftrace_event.h 2015-04-26 13:32:22.415684003 -0500
@@ -61,6 +61,9 @@
unsigned char flags;
unsigned char preempt_count;
@@ -7406,9 +7486,9 @@ diff -Nur linux-3.18.10.orig/include/linux/ftrace_event.h linux-3.18.10/include/
};
#define FTRACE_MAX_EVENT \
-diff -Nur linux-3.18.10.orig/include/linux/highmem.h linux-3.18.10/include/linux/highmem.h
---- linux-3.18.10.orig/include/linux/highmem.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/highmem.h 2015-03-26 12:42:18.663588322 +0100
+diff -Nur linux-3.18.12.orig/include/linux/highmem.h linux-3.18.12/include/linux/highmem.h
+--- linux-3.18.12.orig/include/linux/highmem.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/highmem.h 2015-04-26 13:32:22.415684003 -0500
@@ -7,6 +7,7 @@
#include <linux/mm.h>
#include <linux/uaccess.h>
@@ -7473,9 +7553,9 @@ diff -Nur linux-3.18.10.orig/include/linux/highmem.h linux-3.18.10/include/linux
#endif
}
-diff -Nur linux-3.18.10.orig/include/linux/hrtimer.h linux-3.18.10/include/linux/hrtimer.h
---- linux-3.18.10.orig/include/linux/hrtimer.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/hrtimer.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/hrtimer.h linux-3.18.12/include/linux/hrtimer.h
+--- linux-3.18.12.orig/include/linux/hrtimer.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/hrtimer.h 2015-04-26 13:32:22.415684003 -0500
@@ -111,6 +111,11 @@
enum hrtimer_restart (*function)(struct hrtimer *);
struct hrtimer_clock_base *base;
@@ -7520,9 +7600,9 @@ diff -Nur linux-3.18.10.orig/include/linux/hrtimer.h linux-3.18.10/include/linux
/* Query timers: */
extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
-diff -Nur linux-3.18.10.orig/include/linux/idr.h linux-3.18.10/include/linux/idr.h
---- linux-3.18.10.orig/include/linux/idr.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/idr.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/idr.h linux-3.18.12/include/linux/idr.h
+--- linux-3.18.12.orig/include/linux/idr.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/idr.h 2015-04-26 13:32:22.415684003 -0500
@@ -95,10 +95,14 @@
* Each idr_preload() should be matched with an invocation of this
* function. See idr_preload() for details.
@@ -7538,9 +7618,9 @@ diff -Nur linux-3.18.10.orig/include/linux/idr.h linux-3.18.10/include/linux/idr
/**
* idr_find - return pointer for given id
-diff -Nur linux-3.18.10.orig/include/linux/init_task.h linux-3.18.10/include/linux/init_task.h
---- linux-3.18.10.orig/include/linux/init_task.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/init_task.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/init_task.h linux-3.18.12/include/linux/init_task.h
+--- linux-3.18.12.orig/include/linux/init_task.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/init_task.h 2015-04-26 13:32:22.415684003 -0500
@@ -147,9 +147,16 @@
# define INIT_PERF_EVENTS(tsk)
#endif
@@ -7567,9 +7647,9 @@ diff -Nur linux-3.18.10.orig/include/linux/init_task.h linux-3.18.10/include/lin
.pids = { \
[PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \
[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \
-diff -Nur linux-3.18.10.orig/include/linux/interrupt.h linux-3.18.10/include/linux/interrupt.h
---- linux-3.18.10.orig/include/linux/interrupt.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/interrupt.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/interrupt.h linux-3.18.12/include/linux/interrupt.h
+--- linux-3.18.12.orig/include/linux/interrupt.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/interrupt.h 2015-04-26 13:32:22.415684003 -0500
@@ -57,6 +57,7 @@
* IRQF_NO_THREAD - Interrupt cannot be threaded
* IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
@@ -7736,9 +7816,9 @@ diff -Nur linux-3.18.10.orig/include/linux/interrupt.h linux-3.18.10/include/lin
/*
* Autoprobing for irqs:
*
-diff -Nur linux-3.18.10.orig/include/linux/irqdesc.h linux-3.18.10/include/linux/irqdesc.h
---- linux-3.18.10.orig/include/linux/irqdesc.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/irqdesc.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/irqdesc.h linux-3.18.12/include/linux/irqdesc.h
+--- linux-3.18.12.orig/include/linux/irqdesc.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/irqdesc.h 2015-04-26 13:32:22.415684003 -0500
@@ -63,6 +63,7 @@
unsigned int irqs_unhandled;
atomic_t threads_handled;
@@ -7747,9 +7827,9 @@ diff -Nur linux-3.18.10.orig/include/linux/irqdesc.h linux-3.18.10/include/linux
raw_spinlock_t lock;
struct cpumask *percpu_enabled;
#ifdef CONFIG_SMP
-diff -Nur linux-3.18.10.orig/include/linux/irqflags.h linux-3.18.10/include/linux/irqflags.h
---- linux-3.18.10.orig/include/linux/irqflags.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/irqflags.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/irqflags.h linux-3.18.12/include/linux/irqflags.h
+--- linux-3.18.12.orig/include/linux/irqflags.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/irqflags.h 2015-04-26 13:32:22.415684003 -0500
@@ -25,8 +25,6 @@
# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
# define trace_hardirq_enter() do { current->hardirq_context++; } while (0)
@@ -7800,9 +7880,9 @@ diff -Nur linux-3.18.10.orig/include/linux/irqflags.h linux-3.18.10/include/linu
+#endif
+
#endif
-diff -Nur linux-3.18.10.orig/include/linux/irq.h linux-3.18.10/include/linux/irq.h
---- linux-3.18.10.orig/include/linux/irq.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/irq.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/irq.h linux-3.18.12/include/linux/irq.h
+--- linux-3.18.12.orig/include/linux/irq.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/irq.h 2015-04-26 13:32:22.415684003 -0500
@@ -73,6 +73,7 @@
* IRQ_IS_POLLED - Always polled by another interrupt. Exclude
* it from the spurious interrupt detection
@@ -7827,9 +7907,9 @@ diff -Nur linux-3.18.10.orig/include/linux/irq.h linux-3.18.10/include/linux/irq
#define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING)
-diff -Nur linux-3.18.10.orig/include/linux/irq_work.h linux-3.18.10/include/linux/irq_work.h
---- linux-3.18.10.orig/include/linux/irq_work.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/irq_work.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/irq_work.h linux-3.18.12/include/linux/irq_work.h
+--- linux-3.18.12.orig/include/linux/irq_work.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/irq_work.h 2015-04-26 13:32:22.415684003 -0500
@@ -16,6 +16,7 @@
#define IRQ_WORK_BUSY 2UL
#define IRQ_WORK_FLAGS 3UL
@@ -7838,9 +7918,9 @@ diff -Nur linux-3.18.10.orig/include/linux/irq_work.h linux-3.18.10/include/linu
struct irq_work {
unsigned long flags;
-diff -Nur linux-3.18.10.orig/include/linux/jbd_common.h linux-3.18.10/include/linux/jbd_common.h
---- linux-3.18.10.orig/include/linux/jbd_common.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/jbd_common.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/jbd_common.h linux-3.18.12/include/linux/jbd_common.h
+--- linux-3.18.12.orig/include/linux/jbd_common.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/jbd_common.h 2015-04-26 13:32:22.415684003 -0500
@@ -15,32 +15,56 @@
static inline void jbd_lock_bh_state(struct buffer_head *bh)
@@ -7898,9 +7978,9 @@ diff -Nur linux-3.18.10.orig/include/linux/jbd_common.h linux-3.18.10/include/li
}
#endif
-diff -Nur linux-3.18.10.orig/include/linux/jump_label.h linux-3.18.10/include/linux/jump_label.h
---- linux-3.18.10.orig/include/linux/jump_label.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/jump_label.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/jump_label.h linux-3.18.12/include/linux/jump_label.h
+--- linux-3.18.12.orig/include/linux/jump_label.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/jump_label.h 2015-04-26 13:32:22.419684003 -0500
@@ -55,7 +55,8 @@
"%s used before call to jump_label_init", \
__func__)
@@ -7911,9 +7991,9 @@ diff -Nur linux-3.18.10.orig/include/linux/jump_label.h linux-3.18.10/include/li
struct static_key {
atomic_t enabled;
-diff -Nur linux-3.18.10.orig/include/linux/kdb.h linux-3.18.10/include/linux/kdb.h
---- linux-3.18.10.orig/include/linux/kdb.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/kdb.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/kdb.h linux-3.18.12/include/linux/kdb.h
+--- linux-3.18.12.orig/include/linux/kdb.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/kdb.h 2015-04-26 13:32:22.419684003 -0500
@@ -116,7 +116,7 @@
extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args);
extern __printf(1, 2) int kdb_printf(const char *, ...);
@@ -7931,9 +8011,9 @@ diff -Nur linux-3.18.10.orig/include/linux/kdb.h linux-3.18.10/include/linux/kdb
static inline void kdb_init(int level) {}
static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
char *help, short minlen) { return 0; }
-diff -Nur linux-3.18.10.orig/include/linux/kernel.h linux-3.18.10/include/linux/kernel.h
---- linux-3.18.10.orig/include/linux/kernel.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/kernel.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/kernel.h linux-3.18.12/include/linux/kernel.h
+--- linux-3.18.12.orig/include/linux/kernel.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/kernel.h 2015-04-26 13:32:22.419684003 -0500
@@ -451,6 +451,7 @@
SYSTEM_HALT,
SYSTEM_POWER_OFF,
@@ -7942,9 +8022,30 @@ diff -Nur linux-3.18.10.orig/include/linux/kernel.h linux-3.18.10/include/linux/
} system_state;
#define TAINT_PROPRIETARY_MODULE 0
-diff -Nur linux-3.18.10.orig/include/linux/lglock.h linux-3.18.10/include/linux/lglock.h
---- linux-3.18.10.orig/include/linux/lglock.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/lglock.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/kvm_host.h linux-3.18.12/include/linux/kvm_host.h
+--- linux-3.18.12.orig/include/linux/kvm_host.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/kvm_host.h 2015-04-26 13:32:22.419684003 -0500
+@@ -244,7 +244,7 @@
+
+ int fpu_active;
+ int guest_fpu_loaded, guest_xcr0_loaded;
+- wait_queue_head_t wq;
++ struct swait_head wq;
+ struct pid *pid;
+ int sigset_active;
+ sigset_t sigset;
+@@ -687,7 +687,7 @@
+ }
+ #endif
+
+-static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
++static inline struct swait_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
+ {
+ #ifdef __KVM_HAVE_ARCH_WQP
+ return vcpu->arch.wqp;
+diff -Nur linux-3.18.12.orig/include/linux/lglock.h linux-3.18.12/include/linux/lglock.h
+--- linux-3.18.12.orig/include/linux/lglock.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/lglock.h 2015-04-26 13:32:22.419684003 -0500
@@ -34,22 +34,39 @@
#endif
@@ -8000,9 +8101,9 @@ diff -Nur linux-3.18.10.orig/include/linux/lglock.h linux-3.18.10/include/linux/
#else
/* When !CONFIG_SMP, map lglock to spinlock */
#define lglock spinlock
-diff -Nur linux-3.18.10.orig/include/linux/list_bl.h linux-3.18.10/include/linux/list_bl.h
---- linux-3.18.10.orig/include/linux/list_bl.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/list_bl.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/list_bl.h linux-3.18.12/include/linux/list_bl.h
+--- linux-3.18.12.orig/include/linux/list_bl.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/list_bl.h 2015-04-26 13:32:22.419684003 -0500
@@ -2,6 +2,7 @@
#define _LINUX_LIST_BL_H
@@ -8063,9 +8164,9 @@ diff -Nur linux-3.18.10.orig/include/linux/list_bl.h linux-3.18.10/include/linux
}
static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
-diff -Nur linux-3.18.10.orig/include/linux/locallock.h linux-3.18.10/include/linux/locallock.h
---- linux-3.18.10.orig/include/linux/locallock.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.10/include/linux/locallock.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/locallock.h linux-3.18.12/include/linux/locallock.h
+--- linux-3.18.12.orig/include/linux/locallock.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.12/include/linux/locallock.h 2015-04-26 13:32:22.419684003 -0500
@@ -0,0 +1,270 @@
+#ifndef _LINUX_LOCALLOCK_H
+#define _LINUX_LOCALLOCK_H
@@ -8337,9 +8438,9 @@ diff -Nur linux-3.18.10.orig/include/linux/locallock.h linux-3.18.10/include/lin
+#endif
+
+#endif
-diff -Nur linux-3.18.10.orig/include/linux/mm_types.h linux-3.18.10/include/linux/mm_types.h
---- linux-3.18.10.orig/include/linux/mm_types.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/mm_types.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/mm_types.h linux-3.18.12/include/linux/mm_types.h
+--- linux-3.18.12.orig/include/linux/mm_types.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/mm_types.h 2015-04-26 13:32:22.419684003 -0500
@@ -11,6 +11,7 @@
#include <linux/completion.h>
#include <linux/cpumask.h>
@@ -8358,9 +8459,9 @@ diff -Nur linux-3.18.10.orig/include/linux/mm_types.h linux-3.18.10/include/linu
};
static inline void mm_init_cpumask(struct mm_struct *mm)
-diff -Nur linux-3.18.10.orig/include/linux/mutex.h linux-3.18.10/include/linux/mutex.h
---- linux-3.18.10.orig/include/linux/mutex.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/mutex.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/mutex.h linux-3.18.12/include/linux/mutex.h
+--- linux-3.18.12.orig/include/linux/mutex.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/mutex.h 2015-04-26 13:32:22.419684003 -0500
@@ -19,6 +19,17 @@
#include <asm/processor.h>
#include <linux/osq_lock.h>
@@ -8402,9 +8503,9 @@ diff -Nur linux-3.18.10.orig/include/linux/mutex.h linux-3.18.10/include/linux/m
extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
#endif /* __LINUX_MUTEX_H */
-diff -Nur linux-3.18.10.orig/include/linux/mutex_rt.h linux-3.18.10/include/linux/mutex_rt.h
---- linux-3.18.10.orig/include/linux/mutex_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.10/include/linux/mutex_rt.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/mutex_rt.h linux-3.18.12/include/linux/mutex_rt.h
+--- linux-3.18.12.orig/include/linux/mutex_rt.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.12/include/linux/mutex_rt.h 2015-04-26 13:32:22.419684003 -0500
@@ -0,0 +1,84 @@
+#ifndef __LINUX_MUTEX_RT_H
+#define __LINUX_MUTEX_RT_H
@@ -8490,9 +8591,9 @@ diff -Nur linux-3.18.10.orig/include/linux/mutex_rt.h linux-3.18.10/include/linu
+} while (0)
+
+#endif
-diff -Nur linux-3.18.10.orig/include/linux/netdevice.h linux-3.18.10/include/linux/netdevice.h
---- linux-3.18.10.orig/include/linux/netdevice.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/netdevice.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/netdevice.h linux-3.18.12/include/linux/netdevice.h
+--- linux-3.18.12.orig/include/linux/netdevice.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/netdevice.h 2015-04-26 13:32:22.419684003 -0500
@@ -2345,6 +2345,7 @@
unsigned int dropped;
struct sk_buff_head input_pkt_queue;
@@ -8501,9 +8602,9 @@ diff -Nur linux-3.18.10.orig/include/linux/netdevice.h linux-3.18.10/include/lin
#ifdef CONFIG_NET_FLOW_LIMIT
struct sd_flow_limit __rcu *flow_limit;
-diff -Nur linux-3.18.10.orig/include/linux/netfilter/x_tables.h linux-3.18.10/include/linux/netfilter/x_tables.h
---- linux-3.18.10.orig/include/linux/netfilter/x_tables.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/netfilter/x_tables.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/netfilter/x_tables.h linux-3.18.12/include/linux/netfilter/x_tables.h
+--- linux-3.18.12.orig/include/linux/netfilter/x_tables.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/netfilter/x_tables.h 2015-04-26 13:32:22.419684003 -0500
@@ -3,6 +3,7 @@
@@ -8539,9 +8640,9 @@ diff -Nur linux-3.18.10.orig/include/linux/netfilter/x_tables.h linux-3.18.10/in
}
/*
-diff -Nur linux-3.18.10.orig/include/linux/notifier.h linux-3.18.10/include/linux/notifier.h
---- linux-3.18.10.orig/include/linux/notifier.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/notifier.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/notifier.h linux-3.18.12/include/linux/notifier.h
+--- linux-3.18.12.orig/include/linux/notifier.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/notifier.h 2015-04-26 13:32:22.419684003 -0500
@@ -6,7 +6,7 @@
*
* Alan Cox <Alan.Cox@linux.org>
@@ -8621,9 +8722,9 @@ diff -Nur linux-3.18.10.orig/include/linux/notifier.h linux-3.18.10/include/linu
/* CPU notfiers are defined in include/linux/cpu.h. */
/* netdevice notifiers are defined in include/linux/netdevice.h */
-diff -Nur linux-3.18.10.orig/include/linux/percpu.h linux-3.18.10/include/linux/percpu.h
---- linux-3.18.10.orig/include/linux/percpu.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/percpu.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/percpu.h linux-3.18.12/include/linux/percpu.h
+--- linux-3.18.12.orig/include/linux/percpu.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/percpu.h 2015-04-26 13:32:22.419684003 -0500
@@ -23,6 +23,35 @@
PERCPU_MODULE_RESERVE)
#endif
@@ -8660,9 +8761,9 @@ diff -Nur linux-3.18.10.orig/include/linux/percpu.h linux-3.18.10/include/linux/
/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10)
-diff -Nur linux-3.18.10.orig/include/linux/pid.h linux-3.18.10/include/linux/pid.h
---- linux-3.18.10.orig/include/linux/pid.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/pid.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/pid.h linux-3.18.12/include/linux/pid.h
+--- linux-3.18.12.orig/include/linux/pid.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/pid.h 2015-04-26 13:32:22.419684003 -0500
@@ -2,6 +2,7 @@
#define _LINUX_PID_H
@@ -8671,9 +8772,9 @@ diff -Nur linux-3.18.10.orig/include/linux/pid.h linux-3.18.10/include/linux/pid
enum pid_type
{
-diff -Nur linux-3.18.10.orig/include/linux/preempt.h linux-3.18.10/include/linux/preempt.h
---- linux-3.18.10.orig/include/linux/preempt.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/preempt.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/preempt.h linux-3.18.12/include/linux/preempt.h
+--- linux-3.18.12.orig/include/linux/preempt.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/preempt.h 2015-04-26 13:32:22.419684003 -0500
@@ -33,6 +33,20 @@
#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)
@@ -8777,9 +8878,9 @@ diff -Nur linux-3.18.10.orig/include/linux/preempt.h linux-3.18.10/include/linux
#ifdef CONFIG_PREEMPT_NOTIFIERS
struct preempt_notifier;
-diff -Nur linux-3.18.10.orig/include/linux/preempt_mask.h linux-3.18.10/include/linux/preempt_mask.h
---- linux-3.18.10.orig/include/linux/preempt_mask.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/preempt_mask.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/preempt_mask.h linux-3.18.12/include/linux/preempt_mask.h
+--- linux-3.18.12.orig/include/linux/preempt_mask.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/preempt_mask.h 2015-04-26 13:32:22.419684003 -0500
@@ -44,16 +44,26 @@
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET (1UL << NMI_SHIFT)
@@ -8817,9 +8918,9 @@ diff -Nur linux-3.18.10.orig/include/linux/preempt_mask.h linux-3.18.10/include/
/*
* Are we in NMI context?
-diff -Nur linux-3.18.10.orig/include/linux/printk.h linux-3.18.10/include/linux/printk.h
---- linux-3.18.10.orig/include/linux/printk.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/printk.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/printk.h linux-3.18.12/include/linux/printk.h
+--- linux-3.18.12.orig/include/linux/printk.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/printk.h 2015-04-26 13:32:22.419684003 -0500
@@ -119,9 +119,11 @@
extern asmlinkage __printf(1, 2)
void early_printk(const char *fmt, ...);
@@ -8840,9 +8941,9 @@ diff -Nur linux-3.18.10.orig/include/linux/printk.h linux-3.18.10/include/linux/
extern int printk_delay_msec;
extern int dmesg_restrict;
extern int kptr_restrict;
-diff -Nur linux-3.18.10.orig/include/linux/radix-tree.h linux-3.18.10/include/linux/radix-tree.h
---- linux-3.18.10.orig/include/linux/radix-tree.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/radix-tree.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/radix-tree.h linux-3.18.12/include/linux/radix-tree.h
+--- linux-3.18.12.orig/include/linux/radix-tree.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/radix-tree.h 2015-04-26 13:32:22.419684003 -0500
@@ -277,8 +277,13 @@
unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
void ***results, unsigned long *indices,
@@ -8866,9 +8967,9 @@ diff -Nur linux-3.18.10.orig/include/linux/radix-tree.h linux-3.18.10/include/li
}
/**
-diff -Nur linux-3.18.10.orig/include/linux/random.h linux-3.18.10/include/linux/random.h
---- linux-3.18.10.orig/include/linux/random.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/random.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/random.h linux-3.18.12/include/linux/random.h
+--- linux-3.18.12.orig/include/linux/random.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/random.h 2015-04-26 13:32:22.423684003 -0500
@@ -11,7 +11,7 @@
extern void add_device_randomness(const void *, unsigned int);
extern void add_input_randomness(unsigned int type, unsigned int code,
@@ -8878,9 +8979,9 @@ diff -Nur linux-3.18.10.orig/include/linux/random.h linux-3.18.10/include/linux/
extern void get_random_bytes(void *buf, int nbytes);
extern void get_random_bytes_arch(void *buf, int nbytes);
-diff -Nur linux-3.18.10.orig/include/linux/rcupdate.h linux-3.18.10/include/linux/rcupdate.h
---- linux-3.18.10.orig/include/linux/rcupdate.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/rcupdate.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/rcupdate.h linux-3.18.12/include/linux/rcupdate.h
+--- linux-3.18.12.orig/include/linux/rcupdate.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/rcupdate.h 2015-04-26 13:32:22.423684003 -0500
@@ -147,6 +147,9 @@
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
@@ -8965,9 +9066,9 @@ diff -Nur linux-3.18.10.orig/include/linux/rcupdate.h linux-3.18.10/include/linu
local_bh_enable();
}
-diff -Nur linux-3.18.10.orig/include/linux/rcutree.h linux-3.18.10/include/linux/rcutree.h
---- linux-3.18.10.orig/include/linux/rcutree.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/rcutree.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/rcutree.h linux-3.18.12/include/linux/rcutree.h
+--- linux-3.18.12.orig/include/linux/rcutree.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/rcutree.h 2015-04-26 13:32:22.423684003 -0500
@@ -46,7 +46,11 @@
rcu_note_context_switch(cpu);
}
@@ -9018,9 +9119,9 @@ diff -Nur linux-3.18.10.orig/include/linux/rcutree.h linux-3.18.10/include/linux
+#endif
+
#endif /* __LINUX_RCUTREE_H */
-diff -Nur linux-3.18.10.orig/include/linux/rtmutex.h linux-3.18.10/include/linux/rtmutex.h
---- linux-3.18.10.orig/include/linux/rtmutex.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/rtmutex.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/rtmutex.h linux-3.18.12/include/linux/rtmutex.h
+--- linux-3.18.12.orig/include/linux/rtmutex.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/rtmutex.h 2015-04-26 13:32:22.423684003 -0500
@@ -14,10 +14,14 @@
#include <linux/linkage.h>
@@ -9094,9 +9195,9 @@ diff -Nur linux-3.18.10.orig/include/linux/rtmutex.h linux-3.18.10/include/linux
extern int rt_mutex_timed_lock(struct rt_mutex *lock,
struct hrtimer_sleeper *timeout);
-diff -Nur linux-3.18.10.orig/include/linux/rwlock_rt.h linux-3.18.10/include/linux/rwlock_rt.h
---- linux-3.18.10.orig/include/linux/rwlock_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.10/include/linux/rwlock_rt.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/rwlock_rt.h linux-3.18.12/include/linux/rwlock_rt.h
+--- linux-3.18.12.orig/include/linux/rwlock_rt.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.12/include/linux/rwlock_rt.h 2015-04-26 13:32:22.423684003 -0500
@@ -0,0 +1,99 @@
+#ifndef __LINUX_RWLOCK_RT_H
+#define __LINUX_RWLOCK_RT_H
@@ -9197,9 +9298,9 @@ diff -Nur linux-3.18.10.orig/include/linux/rwlock_rt.h linux-3.18.10/include/lin
+ } while (0)
+
+#endif
-diff -Nur linux-3.18.10.orig/include/linux/rwlock_types.h linux-3.18.10/include/linux/rwlock_types.h
---- linux-3.18.10.orig/include/linux/rwlock_types.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/rwlock_types.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/rwlock_types.h linux-3.18.12/include/linux/rwlock_types.h
+--- linux-3.18.12.orig/include/linux/rwlock_types.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/rwlock_types.h 2015-04-26 13:32:22.423684003 -0500
@@ -1,6 +1,10 @@
#ifndef __LINUX_RWLOCK_TYPES_H
#define __LINUX_RWLOCK_TYPES_H
@@ -9220,9 +9321,9 @@ diff -Nur linux-3.18.10.orig/include/linux/rwlock_types.h linux-3.18.10/include/
+ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
#endif /* __LINUX_RWLOCK_TYPES_H */
-diff -Nur linux-3.18.10.orig/include/linux/rwlock_types_rt.h linux-3.18.10/include/linux/rwlock_types_rt.h
---- linux-3.18.10.orig/include/linux/rwlock_types_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.10/include/linux/rwlock_types_rt.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/rwlock_types_rt.h linux-3.18.12/include/linux/rwlock_types_rt.h
+--- linux-3.18.12.orig/include/linux/rwlock_types_rt.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.12/include/linux/rwlock_types_rt.h 2015-04-26 13:32:22.423684003 -0500
@@ -0,0 +1,33 @@
+#ifndef __LINUX_RWLOCK_TYPES_RT_H
+#define __LINUX_RWLOCK_TYPES_RT_H
@@ -9257,9 +9358,9 @@ diff -Nur linux-3.18.10.orig/include/linux/rwlock_types_rt.h linux-3.18.10/inclu
+ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
+
+#endif
-diff -Nur linux-3.18.10.orig/include/linux/rwsem.h linux-3.18.10/include/linux/rwsem.h
---- linux-3.18.10.orig/include/linux/rwsem.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/rwsem.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/rwsem.h linux-3.18.12/include/linux/rwsem.h
+--- linux-3.18.12.orig/include/linux/rwsem.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/rwsem.h 2015-04-26 13:32:22.423684003 -0500
@@ -18,6 +18,10 @@
#include <linux/osq_lock.h>
#endif
@@ -9278,9 +9379,9 @@ diff -Nur linux-3.18.10.orig/include/linux/rwsem.h linux-3.18.10/include/linux/r
+#endif /* !PREEMPT_RT_FULL */
+
#endif /* _LINUX_RWSEM_H */
-diff -Nur linux-3.18.10.orig/include/linux/rwsem_rt.h linux-3.18.10/include/linux/rwsem_rt.h
---- linux-3.18.10.orig/include/linux/rwsem_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.10/include/linux/rwsem_rt.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/rwsem_rt.h linux-3.18.12/include/linux/rwsem_rt.h
+--- linux-3.18.12.orig/include/linux/rwsem_rt.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.12/include/linux/rwsem_rt.h 2015-04-26 13:32:22.423684003 -0500
@@ -0,0 +1,134 @@
+#ifndef _LINUX_RWSEM_RT_H
+#define _LINUX_RWSEM_RT_H
@@ -9416,9 +9517,9 @@ diff -Nur linux-3.18.10.orig/include/linux/rwsem_rt.h linux-3.18.10/include/linu
+}
+#endif
+#endif
-diff -Nur linux-3.18.10.orig/include/linux/sched.h linux-3.18.10/include/linux/sched.h
---- linux-3.18.10.orig/include/linux/sched.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/sched.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/sched.h linux-3.18.12/include/linux/sched.h
+--- linux-3.18.12.orig/include/linux/sched.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/sched.h 2015-04-26 13:32:22.423684003 -0500
@@ -26,6 +26,7 @@
#include <linux/nodemask.h>
#include <linux/mm_types.h>
@@ -9802,9 +9903,9 @@ diff -Nur linux-3.18.10.orig/include/linux/sched.h linux-3.18.10/include/linux/s
extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
-diff -Nur linux-3.18.10.orig/include/linux/seqlock.h linux-3.18.10/include/linux/seqlock.h
---- linux-3.18.10.orig/include/linux/seqlock.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/seqlock.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/seqlock.h linux-3.18.12/include/linux/seqlock.h
+--- linux-3.18.12.orig/include/linux/seqlock.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/seqlock.h 2015-04-26 13:32:22.423684003 -0500
@@ -219,20 +219,30 @@
return __read_seqcount_retry(s, start);
}
@@ -9934,9 +10035,9 @@ diff -Nur linux-3.18.10.orig/include/linux/seqlock.h linux-3.18.10/include/linux
spin_unlock_irqrestore(&sl->lock, flags);
}
-diff -Nur linux-3.18.10.orig/include/linux/signal.h linux-3.18.10/include/linux/signal.h
---- linux-3.18.10.orig/include/linux/signal.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/signal.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/signal.h linux-3.18.12/include/linux/signal.h
+--- linux-3.18.12.orig/include/linux/signal.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/signal.h 2015-04-26 13:32:22.423684003 -0500
@@ -218,6 +218,7 @@
}
@@ -9945,9 +10046,9 @@ diff -Nur linux-3.18.10.orig/include/linux/signal.h linux-3.18.10/include/linux/
/* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
static inline int valid_signal(unsigned long sig)
-diff -Nur linux-3.18.10.orig/include/linux/skbuff.h linux-3.18.10/include/linux/skbuff.h
---- linux-3.18.10.orig/include/linux/skbuff.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/skbuff.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/skbuff.h linux-3.18.12/include/linux/skbuff.h
+--- linux-3.18.12.orig/include/linux/skbuff.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/skbuff.h 2015-04-26 13:32:22.423684003 -0500
@@ -172,6 +172,7 @@
__u32 qlen;
@@ -9969,9 +10070,9 @@ diff -Nur linux-3.18.10.orig/include/linux/skbuff.h linux-3.18.10/include/linux/
static inline void skb_queue_head_init_class(struct sk_buff_head *list,
struct lock_class_key *class)
{
-diff -Nur linux-3.18.10.orig/include/linux/smp.h linux-3.18.10/include/linux/smp.h
---- linux-3.18.10.orig/include/linux/smp.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/smp.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/smp.h linux-3.18.12/include/linux/smp.h
+--- linux-3.18.12.orig/include/linux/smp.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/smp.h 2015-04-26 13:32:22.423684003 -0500
@@ -178,6 +178,9 @@
#define get_cpu() ({ preempt_disable(); smp_processor_id(); })
#define put_cpu() preempt_enable()
@@ -9982,9 +10083,9 @@ diff -Nur linux-3.18.10.orig/include/linux/smp.h linux-3.18.10/include/linux/smp
/*
* Callback to arch code if there's nosmp or maxcpus=0 on the
* boot command line:
-diff -Nur linux-3.18.10.orig/include/linux/spinlock_api_smp.h linux-3.18.10/include/linux/spinlock_api_smp.h
---- linux-3.18.10.orig/include/linux/spinlock_api_smp.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/spinlock_api_smp.h 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/include/linux/spinlock_api_smp.h linux-3.18.12/include/linux/spinlock_api_smp.h
+--- linux-3.18.12.orig/include/linux/spinlock_api_smp.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/spinlock_api_smp.h 2015-04-26 13:32:22.423684003 -0500
@@ -187,6 +187,8 @@
return 0;
}
@@ -9995,9 +10096,9 @@ diff -Nur linux-3.18.10.orig/include/linux/spinlock_api_smp.h linux-3.18.10/incl
+#endif
#endif /* __LINUX_SPINLOCK_API_SMP_H */
-diff -Nur linux-3.18.10.orig/include/linux/spinlock.h linux-3.18.10/include/linux/spinlock.h
---- linux-3.18.10.orig/include/linux/spinlock.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/spinlock.h 2015-03-26 12:42:18.667588326 +0100
+diff -Nur linux-3.18.12.orig/include/linux/spinlock.h linux-3.18.12/include/linux/spinlock.h
+--- linux-3.18.12.orig/include/linux/spinlock.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/spinlock.h 2015-04-26 13:32:22.423684003 -0500
@@ -278,7 +278,11 @@
#define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
@@ -10029,9 +10130,9 @@ diff -Nur linux-3.18.10.orig/include/linux/spinlock.h linux-3.18.10/include/linu
+#endif /* !PREEMPT_RT_FULL */
+
#endif /* __LINUX_SPINLOCK_H */
-diff -Nur linux-3.18.10.orig/include/linux/spinlock_rt.h linux-3.18.10/include/linux/spinlock_rt.h
---- linux-3.18.10.orig/include/linux/spinlock_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.10/include/linux/spinlock_rt.h 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/include/linux/spinlock_rt.h linux-3.18.12/include/linux/spinlock_rt.h
+--- linux-3.18.12.orig/include/linux/spinlock_rt.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.12/include/linux/spinlock_rt.h 2015-04-26 13:32:22.423684003 -0500
@@ -0,0 +1,167 @@
+#ifndef __LINUX_SPINLOCK_RT_H
+#define __LINUX_SPINLOCK_RT_H
@@ -10200,9 +10301,9 @@ diff -Nur linux-3.18.10.orig/include/linux/spinlock_rt.h linux-3.18.10/include/l
+ atomic_dec_and_spin_lock(atomic, lock)
+
+#endif
-diff -Nur linux-3.18.10.orig/include/linux/spinlock_types.h linux-3.18.10/include/linux/spinlock_types.h
---- linux-3.18.10.orig/include/linux/spinlock_types.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/spinlock_types.h 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/include/linux/spinlock_types.h linux-3.18.12/include/linux/spinlock_types.h
+--- linux-3.18.12.orig/include/linux/spinlock_types.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/spinlock_types.h 2015-04-26 13:32:22.423684003 -0500
@@ -9,80 +9,15 @@
* Released under the General Public License (GPL).
*/
@@ -10291,9 +10392,9 @@ diff -Nur linux-3.18.10.orig/include/linux/spinlock_types.h linux-3.18.10/includ
-#include <linux/rwlock_types.h>
-
#endif /* __LINUX_SPINLOCK_TYPES_H */
-diff -Nur linux-3.18.10.orig/include/linux/spinlock_types_nort.h linux-3.18.10/include/linux/spinlock_types_nort.h
---- linux-3.18.10.orig/include/linux/spinlock_types_nort.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.10/include/linux/spinlock_types_nort.h 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/include/linux/spinlock_types_nort.h linux-3.18.12/include/linux/spinlock_types_nort.h
+--- linux-3.18.12.orig/include/linux/spinlock_types_nort.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.12/include/linux/spinlock_types_nort.h 2015-04-26 13:32:22.423684003 -0500
@@ -0,0 +1,33 @@
+#ifndef __LINUX_SPINLOCK_TYPES_NORT_H
+#define __LINUX_SPINLOCK_TYPES_NORT_H
@@ -10328,9 +10429,9 @@ diff -Nur linux-3.18.10.orig/include/linux/spinlock_types_nort.h linux-3.18.10/i
+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+
+#endif
-diff -Nur linux-3.18.10.orig/include/linux/spinlock_types_raw.h linux-3.18.10/include/linux/spinlock_types_raw.h
---- linux-3.18.10.orig/include/linux/spinlock_types_raw.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.10/include/linux/spinlock_types_raw.h 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/include/linux/spinlock_types_raw.h linux-3.18.12/include/linux/spinlock_types_raw.h
+--- linux-3.18.12.orig/include/linux/spinlock_types_raw.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.12/include/linux/spinlock_types_raw.h 2015-04-26 13:32:22.423684003 -0500
@@ -0,0 +1,56 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
+#define __LINUX_SPINLOCK_TYPES_RAW_H
@@ -10388,9 +10489,9 @@ diff -Nur linux-3.18.10.orig/include/linux/spinlock_types_raw.h linux-3.18.10/in
+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+#endif
-diff -Nur linux-3.18.10.orig/include/linux/spinlock_types_rt.h linux-3.18.10/include/linux/spinlock_types_rt.h
---- linux-3.18.10.orig/include/linux/spinlock_types_rt.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.10/include/linux/spinlock_types_rt.h 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/include/linux/spinlock_types_rt.h linux-3.18.12/include/linux/spinlock_types_rt.h
+--- linux-3.18.12.orig/include/linux/spinlock_types_rt.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.12/include/linux/spinlock_types_rt.h 2015-04-26 13:32:22.423684003 -0500
@@ -0,0 +1,51 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RT_H
+#define __LINUX_SPINLOCK_TYPES_RT_H
@@ -10443,9 +10544,9 @@ diff -Nur linux-3.18.10.orig/include/linux/spinlock_types_rt.h linux-3.18.10/inc
+ spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name)
+
+#endif
-diff -Nur linux-3.18.10.orig/include/linux/srcu.h linux-3.18.10/include/linux/srcu.h
---- linux-3.18.10.orig/include/linux/srcu.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/srcu.h 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/include/linux/srcu.h linux-3.18.12/include/linux/srcu.h
+--- linux-3.18.12.orig/include/linux/srcu.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/srcu.h 2015-04-26 13:32:22.427684003 -0500
@@ -84,10 +84,10 @@
void process_srcu(struct work_struct *work);
@@ -10474,9 +10575,9 @@ diff -Nur linux-3.18.10.orig/include/linux/srcu.h linux-3.18.10/include/linux/sr
/**
* call_srcu() - Queue a callback for invocation after an SRCU grace period
-diff -Nur linux-3.18.10.orig/include/linux/swap.h linux-3.18.10/include/linux/swap.h
---- linux-3.18.10.orig/include/linux/swap.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/swap.h 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/include/linux/swap.h linux-3.18.12/include/linux/swap.h
+--- linux-3.18.12.orig/include/linux/swap.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/swap.h 2015-04-26 13:32:22.427684003 -0500
@@ -11,6 +11,7 @@
#include <linux/fs.h>
#include <linux/atomic.h>
@@ -10495,9 +10596,9 @@ diff -Nur linux-3.18.10.orig/include/linux/swap.h linux-3.18.10/include/linux/sw
static inline unsigned int workingset_node_pages(struct radix_tree_node *node)
{
-diff -Nur linux-3.18.10.orig/include/linux/sysctl.h linux-3.18.10/include/linux/sysctl.h
---- linux-3.18.10.orig/include/linux/sysctl.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/sysctl.h 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/include/linux/sysctl.h linux-3.18.12/include/linux/sysctl.h
+--- linux-3.18.12.orig/include/linux/sysctl.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/sysctl.h 2015-04-26 13:32:22.427684003 -0500
@@ -25,6 +25,7 @@
#include <linux/rcupdate.h>
#include <linux/wait.h>
@@ -10506,9 +10607,9 @@ diff -Nur linux-3.18.10.orig/include/linux/sysctl.h linux-3.18.10/include/linux/
#include <uapi/linux/sysctl.h>
/* For the /proc/sys support */
-diff -Nur linux-3.18.10.orig/include/linux/thread_info.h linux-3.18.10/include/linux/thread_info.h
---- linux-3.18.10.orig/include/linux/thread_info.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/thread_info.h 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/include/linux/thread_info.h linux-3.18.12/include/linux/thread_info.h
+--- linux-3.18.12.orig/include/linux/thread_info.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/thread_info.h 2015-04-26 13:32:22.427684003 -0500
@@ -102,7 +102,17 @@
#define test_thread_flag(flag) \
test_ti_thread_flag(current_thread_info(), flag)
@@ -10528,9 +10629,9 @@ diff -Nur linux-3.18.10.orig/include/linux/thread_info.h linux-3.18.10/include/l
#if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
/*
-diff -Nur linux-3.18.10.orig/include/linux/timer.h linux-3.18.10/include/linux/timer.h
---- linux-3.18.10.orig/include/linux/timer.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/timer.h 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/include/linux/timer.h linux-3.18.12/include/linux/timer.h
+--- linux-3.18.12.orig/include/linux/timer.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/timer.h 2015-04-26 13:32:22.427684003 -0500
@@ -241,7 +241,7 @@
extern int try_to_del_timer_sync(struct timer_list *timer);
@@ -10540,9 +10641,9 @@ diff -Nur linux-3.18.10.orig/include/linux/timer.h linux-3.18.10/include/linux/t
extern int del_timer_sync(struct timer_list *timer);
#else
# define del_timer_sync(t) del_timer(t)
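The timer.h hunk changes the condition guarding the real del_timer_sync(); as the #else branch above shows, configurations outside that condition collapse it to a plain del_timer(). A minimal sketch of the usage where that distinction matters, written against the 3.18-era timer API (my_timer and friends are illustrative):

#include <linux/timer.h>
#include <linux/jiffies.h>

static struct timer_list my_timer;

static void my_timeout(unsigned long data)
{
	/* runs in softirq context -- or in task context on RT */
}

static void start(void)
{
	setup_timer(&my_timer, my_timeout, 0);
	mod_timer(&my_timer, jiffies + HZ);	/* fire in about 1s */
}

static void stop(void)
{
	/*
	 * Unlike del_timer(), del_timer_sync() also waits for a
	 * concurrently running handler to finish before returning.
	 */
	del_timer_sync(&my_timer);
}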
-diff -Nur linux-3.18.10.orig/include/linux/uaccess.h linux-3.18.10/include/linux/uaccess.h
---- linux-3.18.10.orig/include/linux/uaccess.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/uaccess.h 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/include/linux/uaccess.h linux-3.18.12/include/linux/uaccess.h
+--- linux-3.18.12.orig/include/linux/uaccess.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/uaccess.h 2015-04-26 13:32:22.427684003 -0500
@@ -6,14 +6,9 @@
/*
@@ -10603,9 +10704,9 @@ diff -Nur linux-3.18.10.orig/include/linux/uaccess.h linux-3.18.10/include/linux
set_fs(old_fs); \
ret; \
})
-diff -Nur linux-3.18.10.orig/include/linux/uprobes.h linux-3.18.10/include/linux/uprobes.h
---- linux-3.18.10.orig/include/linux/uprobes.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/uprobes.h 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/include/linux/uprobes.h linux-3.18.12/include/linux/uprobes.h
+--- linux-3.18.12.orig/include/linux/uprobes.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/uprobes.h 2015-04-26 13:32:22.427684003 -0500
@@ -27,6 +27,7 @@
#include <linux/errno.h>
#include <linux/rbtree.h>
@@ -10614,9 +10715,9 @@ diff -Nur linux-3.18.10.orig/include/linux/uprobes.h linux-3.18.10/include/linux
struct vm_area_struct;
struct mm_struct;
-diff -Nur linux-3.18.10.orig/include/linux/vmstat.h linux-3.18.10/include/linux/vmstat.h
---- linux-3.18.10.orig/include/linux/vmstat.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/vmstat.h 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/include/linux/vmstat.h linux-3.18.12/include/linux/vmstat.h
+--- linux-3.18.12.orig/include/linux/vmstat.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/vmstat.h 2015-04-26 13:32:22.427684003 -0500
@@ -33,7 +33,9 @@
*/
static inline void __count_vm_event(enum vm_event_item item)
@@ -10637,9 +10738,9 @@ diff -Nur linux-3.18.10.orig/include/linux/vmstat.h linux-3.18.10/include/linux/
}
static inline void count_vm_events(enum vm_event_item item, long delta)
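The vmstat.h hunk touches the inline VM-event counters, which in mainline are built on raw per-cpu increments that assume preemption is already disabled at the call site. A generic sketch of the two per-cpu increment flavors involved; my_counter is illustrative, not the vmstat code itself:

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, my_counter);

static void count_preempt_already_off(void)
{
	/* cheapest form: caller guarantees no preemption/migration */
	raw_cpu_inc(my_counter);
}

static void count_from_anywhere(void)
{
	/* this_cpu_inc() is safe even if the task can migrate */
	this_cpu_inc(my_counter);
}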
-diff -Nur linux-3.18.10.orig/include/linux/wait.h linux-3.18.10/include/linux/wait.h
---- linux-3.18.10.orig/include/linux/wait.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/linux/wait.h 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/include/linux/wait.h linux-3.18.12/include/linux/wait.h
+--- linux-3.18.12.orig/include/linux/wait.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/linux/wait.h 2015-04-26 13:32:22.427684003 -0500
@@ -8,6 +8,7 @@
#include <linux/spinlock.h>
#include <asm/current.h>
@@ -10648,9 +10749,9 @@ diff -Nur linux-3.18.10.orig/include/linux/wait.h linux-3.18.10/include/linux/wa
typedef struct __wait_queue wait_queue_t;
typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
-diff -Nur linux-3.18.10.orig/include/linux/wait-simple.h linux-3.18.10/include/linux/wait-simple.h
---- linux-3.18.10.orig/include/linux/wait-simple.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.10/include/linux/wait-simple.h 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/include/linux/wait-simple.h linux-3.18.12/include/linux/wait-simple.h
+--- linux-3.18.12.orig/include/linux/wait-simple.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.12/include/linux/wait-simple.h 2015-04-26 13:32:22.427684003 -0500
@@ -0,0 +1,207 @@
+#ifndef _LINUX_WAIT_SIMPLE_H
+#define _LINUX_WAIT_SIMPLE_H
@@ -10859,9 +10960,9 @@ diff -Nur linux-3.18.10.orig/include/linux/wait-simple.h linux-3.18.10/include/l
+})
+
+#endif
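wait-simple.h is a new 207-line header (largely elided above) providing a reduced wait-queue variant for RT. Its own identifiers are not visible in this excerpt, so for comparison here is the stock wait.h pattern such a facility mirrors; my_wq and condition are illustrative:

#include <linux/wait.h>
#include <linux/sched.h>

static DECLARE_WAIT_QUEUE_HEAD(my_wq);
static int condition;

static int consumer(void *unused)
{
	/* sleep until the producer sets 'condition' */
	return wait_event_interruptible(my_wq, condition != 0);
}

static void producer(void)
{
	condition = 1;
	wake_up(&my_wq);	/* walk the wait queue, wake sleepers */
}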
-diff -Nur linux-3.18.10.orig/include/linux/work-simple.h linux-3.18.10/include/linux/work-simple.h
---- linux-3.18.10.orig/include/linux/work-simple.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.10/include/linux/work-simple.h 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/include/linux/work-simple.h linux-3.18.12/include/linux/work-simple.h
+--- linux-3.18.12.orig/include/linux/work-simple.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.12/include/linux/work-simple.h 2015-04-26 13:32:22.427684003 -0500
@@ -0,0 +1,24 @@
+#ifndef _LINUX_SWORK_H
+#define _LINUX_SWORK_H
@@ -10887,9 +10988,9 @@ diff -Nur linux-3.18.10.orig/include/linux/work-simple.h linux-3.18.10/include/l
+void swork_put(void);
+
+#endif /* _LINUX_SWORK_H */
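work-simple.h declares a minimal deferred-work facility; only the swork_put() declaration is visible above, the rest is elided. For readers mapping it onto something familiar, the analogous stock workqueue idiom (the my_work names are illustrative):

#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	/* runs later, in process context */
}

static DECLARE_WORK(my_work, my_work_fn);

static void kick(void)
{
	schedule_work(&my_work);	/* queue on the system workqueue */
}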
-diff -Nur linux-3.18.10.orig/include/net/dst.h linux-3.18.10/include/net/dst.h
---- linux-3.18.10.orig/include/net/dst.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/net/dst.h 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/include/net/dst.h linux-3.18.12/include/net/dst.h
+--- linux-3.18.12.orig/include/net/dst.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/net/dst.h 2015-04-26 13:32:22.427684003 -0500
@@ -403,7 +403,7 @@
static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
struct sk_buff *skb)
@@ -10899,9 +11000,9 @@ diff -Nur linux-3.18.10.orig/include/net/dst.h linux-3.18.10/include/net/dst.h
if (dst->pending_confirm) {
unsigned long now = jiffies;
-diff -Nur linux-3.18.10.orig/include/net/neighbour.h linux-3.18.10/include/net/neighbour.h
---- linux-3.18.10.orig/include/net/neighbour.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/net/neighbour.h 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/include/net/neighbour.h linux-3.18.12/include/net/neighbour.h
+--- linux-3.18.12.orig/include/net/neighbour.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/net/neighbour.h 2015-04-26 13:32:22.427684003 -0500
@@ -387,7 +387,7 @@
}
#endif
@@ -10920,9 +11021,9 @@ diff -Nur linux-3.18.10.orig/include/net/neighbour.h linux-3.18.10/include/net/n
const struct net_device *dev)
{
unsigned int seq;
-diff -Nur linux-3.18.10.orig/include/net/netns/ipv4.h linux-3.18.10/include/net/netns/ipv4.h
---- linux-3.18.10.orig/include/net/netns/ipv4.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/include/net/netns/ipv4.h 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/include/net/netns/ipv4.h linux-3.18.12/include/net/netns/ipv4.h
+--- linux-3.18.12.orig/include/net/netns/ipv4.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/include/net/netns/ipv4.h 2015-04-26 13:32:22.427684003 -0500
@@ -67,6 +67,7 @@
int sysctl_icmp_echo_ignore_all;
@@ -10931,9 +11032,9 @@ diff -Nur linux-3.18.10.orig/include/net/netns/ipv4.h linux-3.18.10/include/net/
int sysctl_icmp_ignore_bogus_error_responses;
int sysctl_icmp_ratelimit;
int sysctl_icmp_ratemask;
-diff -Nur linux-3.18.10.orig/include/trace/events/hist.h linux-3.18.10/include/trace/events/hist.h
---- linux-3.18.10.orig/include/trace/events/hist.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.10/include/trace/events/hist.h 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/include/trace/events/hist.h linux-3.18.12/include/trace/events/hist.h
+--- linux-3.18.12.orig/include/trace/events/hist.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.12/include/trace/events/hist.h 2015-04-26 13:32:22.427684003 -0500
@@ -0,0 +1,72 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hist
@@ -11007,9 +11108,9 @@ diff -Nur linux-3.18.10.orig/include/trace/events/hist.h linux-3.18.10/include/t
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
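hist.h follows the standard trace-event header convention visible in the excerpt: TRACE_SYSTEM is redefined at the top, and <trace/define_trace.h> is pulled in outside the include guard so the header can be expanded multiple times. A self-contained skeleton of that convention, with a hypothetical "demo" system standing in for the hist events:

#undef TRACE_SYSTEM
#define TRACE_SYSTEM demo

#if !defined(_TRACE_DEMO_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_DEMO_H

#include <linux/tracepoint.h>

TRACE_EVENT(demo_event,
	TP_PROTO(int value),
	TP_ARGS(value),
	TP_STRUCT__entry(__field(int, value)),
	TP_fast_assign(__entry->value = value;),
	TP_printk("value=%d", __entry->value)
);

#endif /* _TRACE_DEMO_H */

/* This part must be outside protection */
#include <trace/define_trace.h>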
-diff -Nur linux-3.18.10.orig/include/trace/events/latency_hist.h linux-3.18.10/include/trace/events/latency_hist.h
---- linux-3.18.10.orig/include/trace/events/latency_hist.h 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.10/include/trace/events/latency_hist.h 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/include/trace/events/latency_hist.h linux-3.18.12/include/trace/events/latency_hist.h
+--- linux-3.18.12.orig/include/trace/events/latency_hist.h 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.12/include/trace/events/latency_hist.h 2015-04-26 13:32:22.427684003 -0500
@@ -0,0 +1,29 @@
+#ifndef _LATENCY_HIST_H
+#define _LATENCY_HIST_H
@@ -11040,9 +11141,9 @@ diff -Nur linux-3.18.10.orig/include/trace/events/latency_hist.h linux-3.18.10/i
+}
+
+#endif /* _LATENCY_HIST_H */
-diff -Nur linux-3.18.10.orig/init/Kconfig linux-3.18.10/init/Kconfig
---- linux-3.18.10.orig/init/Kconfig 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/init/Kconfig 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/init/Kconfig linux-3.18.12/init/Kconfig
+--- linux-3.18.12.orig/init/Kconfig 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/init/Kconfig 2015-04-26 13:32:22.427684003 -0500
@@ -635,7 +635,7 @@
config RCU_FAST_NO_HZ
@@ -11085,9 +11186,9 @@ diff -Nur linux-3.18.10.orig/init/Kconfig linux-3.18.10/init/Kconfig
help
SLOB replaces the stock allocator with a drastically simpler
allocator. SLOB is generally more space efficient but
-diff -Nur linux-3.18.10.orig/init/main.c linux-3.18.10/init/main.c
---- linux-3.18.10.orig/init/main.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/init/main.c 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/init/main.c linux-3.18.12/init/main.c
+--- linux-3.18.12.orig/init/main.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/init/main.c 2015-04-26 13:32:22.427684003 -0500
@@ -533,6 +533,7 @@
setup_command_line(command_line);
setup_nr_cpu_ids();
@@ -11096,18 +11197,18 @@ diff -Nur linux-3.18.10.orig/init/main.c linux-3.18.10/init/main.c
smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */
build_all_zonelists(NULL, NULL);
-diff -Nur linux-3.18.10.orig/init/Makefile linux-3.18.10/init/Makefile
---- linux-3.18.10.orig/init/Makefile 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/init/Makefile 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/init/Makefile linux-3.18.12/init/Makefile
+--- linux-3.18.12.orig/init/Makefile 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/init/Makefile 2015-04-26 13:32:22.427684003 -0500
@@ -33,4 +33,4 @@
include/generated/compile.h: FORCE
@$($(quiet)chk_compile.h)
$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
+ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
-diff -Nur linux-3.18.10.orig/ipc/mqueue.c linux-3.18.10/ipc/mqueue.c
---- linux-3.18.10.orig/ipc/mqueue.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/ipc/mqueue.c 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/ipc/mqueue.c linux-3.18.12/ipc/mqueue.c
+--- linux-3.18.12.orig/ipc/mqueue.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/ipc/mqueue.c 2015-04-26 13:32:22.427684003 -0500
@@ -923,12 +923,17 @@
struct msg_msg *message,
struct ext_wait_queue *receiver)
@@ -11152,9 +11253,9 @@ diff -Nur linux-3.18.10.orig/ipc/mqueue.c linux-3.18.10/ipc/mqueue.c
}
SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
-diff -Nur linux-3.18.10.orig/ipc/msg.c linux-3.18.10/ipc/msg.c
---- linux-3.18.10.orig/ipc/msg.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/ipc/msg.c 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/ipc/msg.c linux-3.18.12/ipc/msg.c
+--- linux-3.18.12.orig/ipc/msg.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/ipc/msg.c 2015-04-26 13:32:22.427684003 -0500
@@ -188,6 +188,12 @@
struct msg_receiver *msr, *t;
@@ -11204,9 +11305,9 @@ diff -Nur linux-3.18.10.orig/ipc/msg.c linux-3.18.10/ipc/msg.c
return 0;
}
-diff -Nur linux-3.18.10.orig/ipc/sem.c linux-3.18.10/ipc/sem.c
---- linux-3.18.10.orig/ipc/sem.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/ipc/sem.c 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/ipc/sem.c linux-3.18.12/ipc/sem.c
+--- linux-3.18.12.orig/ipc/sem.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/ipc/sem.c 2015-04-26 13:32:22.431684003 -0500
@@ -673,6 +673,13 @@
static void wake_up_sem_queue_prepare(struct list_head *pt,
struct sem_queue *q, int error)
@@ -11245,9 +11346,9 @@ diff -Nur linux-3.18.10.orig/ipc/sem.c linux-3.18.10/ipc/sem.c
}
static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
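The mqueue, msg and sem hunks above all rework the IPC wakeup paths. A recurring pattern in such RT reworks (an assumption about intent here, not a quote of the patch) is to record the wakeup target under the spinlock and defer the actual wake_up_process() until after the lock is dropped. A stripped-down sketch of that shape; q_lock and waiter are illustrative:

#include <linux/sched.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(q_lock);
static struct task_struct *waiter;	/* protected by q_lock */

static void finish_one(void)
{
	struct task_struct *t;

	spin_lock(&q_lock);
	t = waiter;		/* only record the sleeper under the lock */
	waiter = NULL;
	spin_unlock(&q_lock);

	if (t)
		wake_up_process(t);	/* wakeup after the lock is dropped */
}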
-diff -Nur linux-3.18.10.orig/kernel/cgroup.c linux-3.18.10/kernel/cgroup.c
---- linux-3.18.10.orig/kernel/cgroup.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/cgroup.c 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/kernel/cgroup.c linux-3.18.12/kernel/cgroup.c
+--- linux-3.18.12.orig/kernel/cgroup.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/cgroup.c 2015-04-26 13:32:22.431684003 -0500
@@ -4355,10 +4355,10 @@
queue_work(cgroup_destroy_wq, &css->destroy_work);
}
@@ -11280,9 +11381,9 @@ diff -Nur linux-3.18.10.orig/kernel/cgroup.c linux-3.18.10/kernel/cgroup.c
/*
* Used to destroy pidlists and separate to serve as flush domain.
-diff -Nur linux-3.18.10.orig/kernel/cpu.c linux-3.18.10/kernel/cpu.c
---- linux-3.18.10.orig/kernel/cpu.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/cpu.c 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/kernel/cpu.c linux-3.18.12/kernel/cpu.c
+--- linux-3.18.12.orig/kernel/cpu.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/cpu.c 2015-04-26 13:32:22.431684003 -0500
@@ -86,6 +86,290 @@
#define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map)
#define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map)
@@ -11466,7 +11567,7 @@ diff -Nur linux-3.18.10.orig/kernel/cpu.c linux-3.18.10/kernel/cpu.c
+ * we don't want any more work on this CPU.
+ */
+ current->flags &= ~PF_NO_SETAFFINITY;
-+ do_set_cpus_allowed(current, cpu_present_mask);
++ set_cpus_allowed_ptr(current, cpu_present_mask);
+ migrate_me();
+ return 0;
+}
@@ -11662,9 +11763,9 @@ diff -Nur linux-3.18.10.orig/kernel/cpu.c linux-3.18.10/kernel/cpu.c
return err;
}
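Note the one-line change inside the hotplug helper above: when the pinned hotplug thread re-widens its affinity, it now calls set_cpus_allowed_ptr() (the checked scheduler interface) instead of do_set_cpus_allowed(). In isolation the call looks like this; pin_to and unpin are illustrative wrappers, not patch code:

#include <linux/cpumask.h>
#include <linux/sched.h>

static int pin_to(struct task_struct *p, int cpu)
{
	return set_cpus_allowed_ptr(p, cpumask_of(cpu));	/* restrict */
}

static int unpin(struct task_struct *p)
{
	return set_cpus_allowed_ptr(p, cpu_present_mask);	/* widen again */
}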
-diff -Nur linux-3.18.10.orig/kernel/debug/kdb/kdb_io.c linux-3.18.10/kernel/debug/kdb/kdb_io.c
---- linux-3.18.10.orig/kernel/debug/kdb/kdb_io.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/debug/kdb/kdb_io.c 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/kernel/debug/kdb/kdb_io.c linux-3.18.12/kernel/debug/kdb/kdb_io.c
+--- linux-3.18.12.orig/kernel/debug/kdb/kdb_io.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/debug/kdb/kdb_io.c 2015-04-26 13:32:22.431684003 -0500
@@ -554,7 +554,6 @@
int linecount;
int colcount;
@@ -11702,10 +11803,10 @@ diff -Nur linux-3.18.10.orig/kernel/debug/kdb/kdb_io.c linux-3.18.10/kernel/debu
return r;
}
-diff -Nur linux-3.18.10.orig/kernel/events/core.c linux-3.18.10/kernel/events/core.c
---- linux-3.18.10.orig/kernel/events/core.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/events/core.c 2015-03-26 12:42:18.675588336 +0100
-@@ -6336,6 +6336,7 @@
+diff -Nur linux-3.18.12.orig/kernel/events/core.c linux-3.18.12/kernel/events/core.c
+--- linux-3.18.12.orig/kernel/events/core.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/events/core.c 2015-04-26 13:32:22.431684003 -0500
+@@ -6346,6 +6346,7 @@
hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hwc->hrtimer.function = perf_swevent_hrtimer;
@@ -11713,9 +11814,8352 @@ diff -Nur linux-3.18.10.orig/kernel/events/core.c linux-3.18.10/kernel/events/co
/*
* Since hrtimers have a fixed rate, we can do a static freq->period
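The events/core.c hunk adds a single line right after the swevent hrtimer setup shown in context (the added line itself falls outside this excerpt). For orientation, the 3.18-era hrtimer idiom that setup belongs to; the demo_* names are illustrative:

#include <linux/hrtimer.h>
#include <linux/time.h>

static struct hrtimer demo_timer;

static enum hrtimer_restart demo_fire(struct hrtimer *t)
{
	/* periodic: push the expiry forward and ask to be re-armed */
	hrtimer_forward_now(t, ktime_set(0, 10 * NSEC_PER_MSEC));
	return HRTIMER_RESTART;
}

static void demo_start(void)
{
	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_fire;
	hrtimer_start(&demo_timer, ktime_set(0, 10 * NSEC_PER_MSEC),
		      HRTIMER_MODE_REL);
}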
-diff -Nur linux-3.18.10.orig/kernel/exit.c linux-3.18.10/kernel/exit.c
---- linux-3.18.10.orig/kernel/exit.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/exit.c 2015-03-26 12:42:18.675588336 +0100
+diff -Nur linux-3.18.12.orig/kernel/events/core.c.orig linux-3.18.12/kernel/events/core.c.orig
+--- linux-3.18.12.orig/kernel/events/core.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.12/kernel/events/core.c.orig 2015-04-20 14:48:02.000000000 -0500
+@@ -0,0 +1,8339 @@
++/*
++ * Performance events core code:
++ *
++ * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
++ * Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
++ * Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
++ * Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
++ *
++ * For licensing details see kernel-base/COPYING
++ */
++
++#include <linux/fs.h>
++#include <linux/mm.h>
++#include <linux/cpu.h>
++#include <linux/smp.h>
++#include <linux/idr.h>
++#include <linux/file.h>
++#include <linux/poll.h>
++#include <linux/slab.h>
++#include <linux/hash.h>
++#include <linux/tick.h>
++#include <linux/sysfs.h>
++#include <linux/dcache.h>
++#include <linux/percpu.h>
++#include <linux/ptrace.h>
++#include <linux/reboot.h>
++#include <linux/vmstat.h>
++#include <linux/device.h>
++#include <linux/export.h>
++#include <linux/vmalloc.h>
++#include <linux/hardirq.h>
++#include <linux/rculist.h>
++#include <linux/uaccess.h>
++#include <linux/syscalls.h>
++#include <linux/anon_inodes.h>
++#include <linux/kernel_stat.h>
++#include <linux/perf_event.h>
++#include <linux/ftrace_event.h>
++#include <linux/hw_breakpoint.h>
++#include <linux/mm_types.h>
++#include <linux/cgroup.h>
++#include <linux/module.h>
++#include <linux/mman.h>
++#include <linux/compat.h>
++
++#include "internal.h"
++
++#include <asm/irq_regs.h>
++
++static struct workqueue_struct *perf_wq;
++
++struct remote_function_call {
++ struct task_struct *p;
++ int (*func)(void *info);
++ void *info;
++ int ret;
++};
++
++static void remote_function(void *data)
++{
++ struct remote_function_call *tfc = data;
++ struct task_struct *p = tfc->p;
++
++ if (p) {
++ tfc->ret = -EAGAIN;
++ if (task_cpu(p) != smp_processor_id() || !task_curr(p))
++ return;
++ }
++
++ tfc->ret = tfc->func(tfc->info);
++}
++
++/**
++ * task_function_call - call a function on the cpu on which a task runs
++ * @p: the task to evaluate
++ * @func: the function to be called
++ * @info: the function call argument
++ *
++ * Calls the function @func when the task is currently running. This might
++ * be on the current CPU, which just calls the function directly
++ *
++ * returns: @func return value, or
++ * -ESRCH - when the process isn't running
++ * -EAGAIN - when the process moved away
++ */
++static int
++task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
++{
++ struct remote_function_call data = {
++ .p = p,
++ .func = func,
++ .info = info,
++ .ret = -ESRCH, /* No such (running) process */
++ };
++
++ if (task_curr(p))
++ smp_call_function_single(task_cpu(p), remote_function, &data, 1);
++
++ return data.ret;
++}
++
++/**
++ * cpu_function_call - call a function on the cpu
++ * @func: the function to be called
++ * @info: the function call argument
++ *
++ * Calls the function @func on the remote cpu.
++ *
++ * returns: @func return value or -ENXIO when the cpu is offline
++ */
++static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
++{
++ struct remote_function_call data = {
++ .p = NULL,
++ .func = func,
++ .info = info,
++ .ret = -ENXIO, /* No such CPU */
++ };
++
++ smp_call_function_single(cpu, remote_function, &data, 1);
++
++ return data.ret;
++}
++
++#define EVENT_OWNER_KERNEL ((void *) -1)
++
++static bool is_kernel_event(struct perf_event *event)
++{
++ return event->owner == EVENT_OWNER_KERNEL;
++}
++
++#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
++ PERF_FLAG_FD_OUTPUT |\
++ PERF_FLAG_PID_CGROUP |\
++ PERF_FLAG_FD_CLOEXEC)
++
++/*
++ * branch priv levels that need permission checks
++ */
++#define PERF_SAMPLE_BRANCH_PERM_PLM \
++ (PERF_SAMPLE_BRANCH_KERNEL |\
++ PERF_SAMPLE_BRANCH_HV)
++
++enum event_type_t {
++ EVENT_FLEXIBLE = 0x1,
++ EVENT_PINNED = 0x2,
++ EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
++};
++
++/*
++ * perf_sched_events : >0 events exist
++ * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
++ */
++struct static_key_deferred perf_sched_events __read_mostly;
++static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
++static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
++
++static atomic_t nr_mmap_events __read_mostly;
++static atomic_t nr_comm_events __read_mostly;
++static atomic_t nr_task_events __read_mostly;
++static atomic_t nr_freq_events __read_mostly;
++
++static LIST_HEAD(pmus);
++static DEFINE_MUTEX(pmus_lock);
++static struct srcu_struct pmus_srcu;
++
++/*
++ * perf event paranoia level:
++ * -1 - not paranoid at all
++ * 0 - disallow raw tracepoint access for unpriv
++ * 1 - disallow cpu events for unpriv
++ * 2 - disallow kernel profiling for unpriv
++ */
++int sysctl_perf_event_paranoid __read_mostly = 1;
++
++/* Minimum for 512 kiB + 1 user control page */
++int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
++
++/*
++ * max perf event sample rate
++ */
++#define DEFAULT_MAX_SAMPLE_RATE 100000
++#define DEFAULT_SAMPLE_PERIOD_NS (NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
++#define DEFAULT_CPU_TIME_MAX_PERCENT 25
++
++int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;
++
++static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
++static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;
++
++static int perf_sample_allowed_ns __read_mostly =
++ DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100;
++
++void update_perf_cpu_limits(void)
++{
++ u64 tmp = perf_sample_period_ns;
++
++ tmp *= sysctl_perf_cpu_time_max_percent;
++ do_div(tmp, 100);
++ ACCESS_ONCE(perf_sample_allowed_ns) = tmp;
++}
++
++static int perf_rotate_context(struct perf_cpu_context *cpuctx);
++
++int perf_proc_update_handler(struct ctl_table *table, int write,
++ void __user *buffer, size_t *lenp,
++ loff_t *ppos)
++{
++ int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
++
++ if (ret || !write)
++ return ret;
++
++ max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
++ perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
++ update_perf_cpu_limits();
++
++ return 0;
++}
++
++int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;
++
++int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
++ void __user *buffer, size_t *lenp,
++ loff_t *ppos)
++{
++ int ret = proc_dointvec(table, write, buffer, lenp, ppos);
++
++ if (ret || !write)
++ return ret;
++
++ update_perf_cpu_limits();
++
++ return 0;
++}
++
++/*
++ * perf samples are done in some very critical code paths (NMIs).
++ * If they take too much CPU time, the system can lock up and not
++ * get any real work done. This will drop the sample rate when
++ * we detect that events are taking too long.
++ */
++#define NR_ACCUMULATED_SAMPLES 128
++static DEFINE_PER_CPU(u64, running_sample_length);
++
++static void perf_duration_warn(struct irq_work *w)
++{
++ u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
++ u64 avg_local_sample_len;
++ u64 local_samples_len;
++
++ local_samples_len = __this_cpu_read(running_sample_length);
++ avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
++
++ printk_ratelimited(KERN_WARNING
++ "perf interrupt took too long (%lld > %lld), lowering "
++ "kernel.perf_event_max_sample_rate to %d\n",
++ avg_local_sample_len, allowed_ns >> 1,
++ sysctl_perf_event_sample_rate);
++}
++
++static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
++
++void perf_sample_event_took(u64 sample_len_ns)
++{
++ u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
++ u64 avg_local_sample_len;
++ u64 local_samples_len;
++
++ if (allowed_ns == 0)
++ return;
++
++ /* decay the counter by 1 average sample */
++ local_samples_len = __this_cpu_read(running_sample_length);
++ local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
++ local_samples_len += sample_len_ns;
++ __this_cpu_write(running_sample_length, local_samples_len);
++
++ /*
++ * note: this will be biased artificially low until we have
++ * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
++ * from having to maintain a count.
++ */
++ avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
++
++ if (avg_local_sample_len <= allowed_ns)
++ return;
++
++ if (max_samples_per_tick <= 1)
++ return;
++
++ max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2);
++ sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
++ perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
++
++ update_perf_cpu_limits();
++
++ if (!irq_work_queue(&perf_duration_work)) {
++ early_printk("perf interrupt took too long (%lld > %lld), lowering "
++ "kernel.perf_event_max_sample_rate to %d\n",
++ avg_local_sample_len, allowed_ns >> 1,
++ sysctl_perf_event_sample_rate);
++ }
++}
++
++static atomic64_t perf_event_id;
++
++static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
++ enum event_type_t event_type);
++
++static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
++ enum event_type_t event_type,
++ struct task_struct *task);
++
++static void update_context_time(struct perf_event_context *ctx);
++static u64 perf_event_time(struct perf_event *event);
++
++void __weak perf_event_print_debug(void) { }
++
++extern __weak const char *perf_pmu_name(void)
++{
++ return "pmu";
++}
++
++static inline u64 perf_clock(void)
++{
++ return local_clock();
++}
++
++static inline struct perf_cpu_context *
++__get_cpu_context(struct perf_event_context *ctx)
++{
++ return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
++}
++
++static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
++ struct perf_event_context *ctx)
++{
++ raw_spin_lock(&cpuctx->ctx.lock);
++ if (ctx)
++ raw_spin_lock(&ctx->lock);
++}
++
++static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
++ struct perf_event_context *ctx)
++{
++ if (ctx)
++ raw_spin_unlock(&ctx->lock);
++ raw_spin_unlock(&cpuctx->ctx.lock);
++}
++
++#ifdef CONFIG_CGROUP_PERF
++
++/*
++ * perf_cgroup_info keeps track of time_enabled for a cgroup.
++ * This is a per-cpu dynamically allocated data structure.
++ */
++struct perf_cgroup_info {
++ u64 time;
++ u64 timestamp;
++};
++
++struct perf_cgroup {
++ struct cgroup_subsys_state css;
++ struct perf_cgroup_info __percpu *info;
++};
++
++/*
++ * Must ensure cgroup is pinned (css_get) before calling
++ * this function. In other words, we cannot call this function
++ * if there is no cgroup event for the current CPU context.
++ */
++static inline struct perf_cgroup *
++perf_cgroup_from_task(struct task_struct *task)
++{
++ return container_of(task_css(task, perf_event_cgrp_id),
++ struct perf_cgroup, css);
++}
++
++static inline bool
++perf_cgroup_match(struct perf_event *event)
++{
++ struct perf_event_context *ctx = event->ctx;
++ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
++
++ /* @event doesn't care about cgroup */
++ if (!event->cgrp)
++ return true;
++
++ /* wants specific cgroup scope but @cpuctx isn't associated with any */
++ if (!cpuctx->cgrp)
++ return false;
++
++ /*
++ * Cgroup scoping is recursive. An event enabled for a cgroup is
++ * also enabled for all its descendant cgroups. If @cpuctx's
++ * cgroup is a descendant of @event's (the test covers identity
++ * case), it's a match.
++ */
++ return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
++ event->cgrp->css.cgroup);
++}
++
++static inline void perf_detach_cgroup(struct perf_event *event)
++{
++ css_put(&event->cgrp->css);
++ event->cgrp = NULL;
++}
++
++static inline int is_cgroup_event(struct perf_event *event)
++{
++ return event->cgrp != NULL;
++}
++
++static inline u64 perf_cgroup_event_time(struct perf_event *event)
++{
++ struct perf_cgroup_info *t;
++
++ t = per_cpu_ptr(event->cgrp->info, event->cpu);
++ return t->time;
++}
++
++static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
++{
++ struct perf_cgroup_info *info;
++ u64 now;
++
++ now = perf_clock();
++
++ info = this_cpu_ptr(cgrp->info);
++
++ info->time += now - info->timestamp;
++ info->timestamp = now;
++}
++
++static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
++{
++ struct perf_cgroup *cgrp_out = cpuctx->cgrp;
++ if (cgrp_out)
++ __update_cgrp_time(cgrp_out);
++}
++
++static inline void update_cgrp_time_from_event(struct perf_event *event)
++{
++ struct perf_cgroup *cgrp;
++
++ /*
++ * ensure we access cgroup data only when needed and
++ * when we know the cgroup is pinned (css_get)
++ */
++ if (!is_cgroup_event(event))
++ return;
++
++ cgrp = perf_cgroup_from_task(current);
++ /*
++ * Do not update time when cgroup is not active
++ */
++ if (cgrp == event->cgrp)
++ __update_cgrp_time(event->cgrp);
++}
++
++static inline void
++perf_cgroup_set_timestamp(struct task_struct *task,
++ struct perf_event_context *ctx)
++{
++ struct perf_cgroup *cgrp;
++ struct perf_cgroup_info *info;
++
++ /*
++ * ctx->lock held by caller
++ * ensure we do not access cgroup data
++ * unless we have the cgroup pinned (css_get)
++ */
++ if (!task || !ctx->nr_cgroups)
++ return;
++
++ cgrp = perf_cgroup_from_task(task);
++ info = this_cpu_ptr(cgrp->info);
++ info->timestamp = ctx->timestamp;
++}
++
++#define PERF_CGROUP_SWOUT 0x1 /* cgroup switch out every event */
++#define PERF_CGROUP_SWIN 0x2 /* cgroup switch in events based on task */
++
++/*
++ * reschedule events based on the cgroup constraint of task.
++ *
++ * mode SWOUT : schedule out everything
++ * mode SWIN : schedule in based on cgroup for next
++ */
++void perf_cgroup_switch(struct task_struct *task, int mode)
++{
++ struct perf_cpu_context *cpuctx;
++ struct pmu *pmu;
++ unsigned long flags;
++
++ /*
++ * disable interrupts to avoid getting nr_cgroup
++ * changes via __perf_event_disable(). Also
++ * avoids preemption.
++ */
++ local_irq_save(flags);
++
++ /*
++ * we reschedule only in the presence of cgroup
++ * constrained events.
++ */
++ rcu_read_lock();
++
++ list_for_each_entry_rcu(pmu, &pmus, entry) {
++ cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
++ if (cpuctx->unique_pmu != pmu)
++ continue; /* ensure we process each cpuctx once */
++
++ /*
++ * perf_cgroup_events says at least one
++ * context on this CPU has cgroup events.
++ *
++ * ctx->nr_cgroups reports the number of cgroup
++ * events for a context.
++ */
++ if (cpuctx->ctx.nr_cgroups > 0) {
++ perf_ctx_lock(cpuctx, cpuctx->task_ctx);
++ perf_pmu_disable(cpuctx->ctx.pmu);
++
++ if (mode & PERF_CGROUP_SWOUT) {
++ cpu_ctx_sched_out(cpuctx, EVENT_ALL);
++ /*
++ * must not be done before ctxswout due
++ * to event_filter_match() in event_sched_out()
++ */
++ cpuctx->cgrp = NULL;
++ }
++
++ if (mode & PERF_CGROUP_SWIN) {
++ WARN_ON_ONCE(cpuctx->cgrp);
++ /*
++ * set cgrp before ctxsw in to allow
++ * event_filter_match() to not have to pass
++ * task around
++ */
++ cpuctx->cgrp = perf_cgroup_from_task(task);
++ cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
++ }
++ perf_pmu_enable(cpuctx->ctx.pmu);
++ perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
++ }
++ }
++
++ rcu_read_unlock();
++
++ local_irq_restore(flags);
++}
++
++static inline void perf_cgroup_sched_out(struct task_struct *task,
++ struct task_struct *next)
++{
++ struct perf_cgroup *cgrp1;
++ struct perf_cgroup *cgrp2 = NULL;
++
++ /*
++ * we come here when we know perf_cgroup_events > 0
++ */
++ cgrp1 = perf_cgroup_from_task(task);
++
++ /*
++ * next is NULL when called from perf_event_enable_on_exec()
++ * that will systematically cause a cgroup_switch()
++ */
++ if (next)
++ cgrp2 = perf_cgroup_from_task(next);
++
++ /*
++ * only schedule out current cgroup events if we know
++ * that we are switching to a different cgroup. Otherwise,
++ * do not touch the cgroup events.
++ */
++ if (cgrp1 != cgrp2)
++ perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
++}
++
++static inline void perf_cgroup_sched_in(struct task_struct *prev,
++ struct task_struct *task)
++{
++ struct perf_cgroup *cgrp1;
++ struct perf_cgroup *cgrp2 = NULL;
++
++ /*
++ * we come here when we know perf_cgroup_events > 0
++ */
++ cgrp1 = perf_cgroup_from_task(task);
++
++ /* prev can never be NULL */
++ cgrp2 = perf_cgroup_from_task(prev);
++
++ /*
++ * only need to schedule in cgroup events if we are changing
++ * cgroup during ctxsw. Cgroup events were not scheduled
++ * out of ctxsw out if that was not the case.
++ */
++ if (cgrp1 != cgrp2)
++ perf_cgroup_switch(task, PERF_CGROUP_SWIN);
++}
++
++static inline int perf_cgroup_connect(int fd, struct perf_event *event,
++ struct perf_event_attr *attr,
++ struct perf_event *group_leader)
++{
++ struct perf_cgroup *cgrp;
++ struct cgroup_subsys_state *css;
++ struct fd f = fdget(fd);
++ int ret = 0;
++
++ if (!f.file)
++ return -EBADF;
++
++ css = css_tryget_online_from_dir(f.file->f_dentry,
++ &perf_event_cgrp_subsys);
++ if (IS_ERR(css)) {
++ ret = PTR_ERR(css);
++ goto out;
++ }
++
++ cgrp = container_of(css, struct perf_cgroup, css);
++ event->cgrp = cgrp;
++
++ /*
++ * all events in a group must monitor
++ * the same cgroup because a task belongs
++ * to only one perf cgroup at a time
++ */
++ if (group_leader && group_leader->cgrp != cgrp) {
++ perf_detach_cgroup(event);
++ ret = -EINVAL;
++ }
++out:
++ fdput(f);
++ return ret;
++}
++
++static inline void
++perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
++{
++ struct perf_cgroup_info *t;
++ t = per_cpu_ptr(event->cgrp->info, event->cpu);
++ event->shadow_ctx_time = now - t->timestamp;
++}
++
++static inline void
++perf_cgroup_defer_enabled(struct perf_event *event)
++{
++ /*
++ * when the current task's perf cgroup does not match
++ * the event's, we need to remember to call the
++ * perf_mark_enable() function the first time a task with
++ * a matching perf cgroup is scheduled in.
++ */
++ if (is_cgroup_event(event) && !perf_cgroup_match(event))
++ event->cgrp_defer_enabled = 1;
++}
++
++static inline void
++perf_cgroup_mark_enabled(struct perf_event *event,
++ struct perf_event_context *ctx)
++{
++ struct perf_event *sub;
++ u64 tstamp = perf_event_time(event);
++
++ if (!event->cgrp_defer_enabled)
++ return;
++
++ event->cgrp_defer_enabled = 0;
++
++ event->tstamp_enabled = tstamp - event->total_time_enabled;
++ list_for_each_entry(sub, &event->sibling_list, group_entry) {
++ if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
++ sub->tstamp_enabled = tstamp - sub->total_time_enabled;
++ sub->cgrp_defer_enabled = 0;
++ }
++ }
++}
++#else /* !CONFIG_CGROUP_PERF */
++
++static inline bool
++perf_cgroup_match(struct perf_event *event)
++{
++ return true;
++}
++
++static inline void perf_detach_cgroup(struct perf_event *event)
++{}
++
++static inline int is_cgroup_event(struct perf_event *event)
++{
++ return 0;
++}
++
++static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
++{
++ return 0;
++}
++
++static inline void update_cgrp_time_from_event(struct perf_event *event)
++{
++}
++
++static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
++{
++}
++
++static inline void perf_cgroup_sched_out(struct task_struct *task,
++ struct task_struct *next)
++{
++}
++
++static inline void perf_cgroup_sched_in(struct task_struct *prev,
++ struct task_struct *task)
++{
++}
++
++static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
++ struct perf_event_attr *attr,
++ struct perf_event *group_leader)
++{
++ return -EINVAL;
++}
++
++static inline void
++perf_cgroup_set_timestamp(struct task_struct *task,
++ struct perf_event_context *ctx)
++{
++}
++
++void
++perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
++{
++}
++
++static inline void
++perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
++{
++}
++
++static inline u64 perf_cgroup_event_time(struct perf_event *event)
++{
++ return 0;
++}
++
++static inline void
++perf_cgroup_defer_enabled(struct perf_event *event)
++{
++}
++
++static inline void
++perf_cgroup_mark_enabled(struct perf_event *event,
++ struct perf_event_context *ctx)
++{
++}
++#endif
++
++/*
++ * set default to be dependent on timer tick just
++ * like original code
++ */
++#define PERF_CPU_HRTIMER (1000 / HZ)
++/*
++ * function must be called with interrupts disabled
++ */
++static enum hrtimer_restart perf_cpu_hrtimer_handler(struct hrtimer *hr)
++{
++ struct perf_cpu_context *cpuctx;
++ enum hrtimer_restart ret = HRTIMER_NORESTART;
++ int rotations = 0;
++
++ WARN_ON(!irqs_disabled());
++
++ cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
++
++ rotations = perf_rotate_context(cpuctx);
++
++ /*
++ * arm timer if needed
++ */
++ if (rotations) {
++ hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
++ ret = HRTIMER_RESTART;
++ }
++
++ return ret;
++}
++
++/* CPU is going down */
++void perf_cpu_hrtimer_cancel(int cpu)
++{
++ struct perf_cpu_context *cpuctx;
++ struct pmu *pmu;
++ unsigned long flags;
++
++ if (WARN_ON(cpu != smp_processor_id()))
++ return;
++
++ local_irq_save(flags);
++
++ rcu_read_lock();
++
++ list_for_each_entry_rcu(pmu, &pmus, entry) {
++ cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
++
++ if (pmu->task_ctx_nr == perf_sw_context)
++ continue;
++
++ hrtimer_cancel(&cpuctx->hrtimer);
++ }
++
++ rcu_read_unlock();
++
++ local_irq_restore(flags);
++}
++
++static void __perf_cpu_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
++{
++ struct hrtimer *hr = &cpuctx->hrtimer;
++ struct pmu *pmu = cpuctx->ctx.pmu;
++ int timer;
++
++ /* no multiplexing needed for SW PMU */
++ if (pmu->task_ctx_nr == perf_sw_context)
++ return;
++
++ /*
++ * check default is sane, if not set then force to
++ * default interval (1/tick)
++ */
++ timer = pmu->hrtimer_interval_ms;
++ if (timer < 1)
++ timer = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
++
++ cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
++
++ hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
++ hr->function = perf_cpu_hrtimer_handler;
++}
++
++static void perf_cpu_hrtimer_restart(struct perf_cpu_context *cpuctx)
++{
++ struct hrtimer *hr = &cpuctx->hrtimer;
++ struct pmu *pmu = cpuctx->ctx.pmu;
++
++ /* not for SW PMU */
++ if (pmu->task_ctx_nr == perf_sw_context)
++ return;
++
++ if (hrtimer_active(hr))
++ return;
++
++ if (!hrtimer_callback_running(hr))
++ __hrtimer_start_range_ns(hr, cpuctx->hrtimer_interval,
++ 0, HRTIMER_MODE_REL_PINNED, 0);
++}
++
++void perf_pmu_disable(struct pmu *pmu)
++{
++ int *count = this_cpu_ptr(pmu->pmu_disable_count);
++ if (!(*count)++)
++ pmu->pmu_disable(pmu);
++}
++
++void perf_pmu_enable(struct pmu *pmu)
++{
++ int *count = this_cpu_ptr(pmu->pmu_disable_count);
++ if (!--(*count))
++ pmu->pmu_enable(pmu);
++}
++
++static DEFINE_PER_CPU(struct list_head, rotation_list);
++
++/*
++ * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
++ * because they're strictly cpu affine and rotate_start is called with IRQs
++ * disabled, while rotate_context is called from IRQ context.
++ */
++static void perf_pmu_rotate_start(struct pmu *pmu)
++{
++ struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
++ struct list_head *head = this_cpu_ptr(&rotation_list);
++
++ WARN_ON(!irqs_disabled());
++
++ if (list_empty(&cpuctx->rotation_list))
++ list_add(&cpuctx->rotation_list, head);
++}
++
++static void get_ctx(struct perf_event_context *ctx)
++{
++ WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
++}
++
++static void put_ctx(struct perf_event_context *ctx)
++{
++ if (atomic_dec_and_test(&ctx->refcount)) {
++ if (ctx->parent_ctx)
++ put_ctx(ctx->parent_ctx);
++ if (ctx->task)
++ put_task_struct(ctx->task);
++ kfree_rcu(ctx, rcu_head);
++ }
++}
++
++/*
++ * This must be done under the ctx->lock, such as to serialize against
++ * context_equiv(), therefore we cannot call put_ctx() since that might end up
++ * calling scheduler related locks and ctx->lock nests inside those.
++ */
++static __must_check struct perf_event_context *
++unclone_ctx(struct perf_event_context *ctx)
++{
++ struct perf_event_context *parent_ctx = ctx->parent_ctx;
++
++ lockdep_assert_held(&ctx->lock);
++
++ if (parent_ctx)
++ ctx->parent_ctx = NULL;
++ ctx->generation++;
++
++ return parent_ctx;
++}
++
++static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
++{
++ /*
++ * only top level events have the pid namespace they were created in
++ */
++ if (event->parent)
++ event = event->parent;
++
++ return task_tgid_nr_ns(p, event->ns);
++}
++
++static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
++{
++ /*
++ * only top level events have the pid namespace they were created in
++ */
++ if (event->parent)
++ event = event->parent;
++
++ return task_pid_nr_ns(p, event->ns);
++}
++
++/*
++ * If we inherit events we want to return the parent event id
++ * to userspace.
++ */
++static u64 primary_event_id(struct perf_event *event)
++{
++ u64 id = event->id;
++
++ if (event->parent)
++ id = event->parent->id;
++
++ return id;
++}
++
++/*
++ * Get the perf_event_context for a task and lock it.
++ * This has to cope with the fact that until it is locked,
++ * the context could get moved to another task.
++ */
++static struct perf_event_context *
++perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
++{
++ struct perf_event_context *ctx;
++
++retry:
++ /*
++ * One of the few rules of preemptible RCU is that one cannot do
++ * rcu_read_unlock() while holding a scheduler (or nested) lock when
++ * part of the read side critical section was preemptible -- see
++ * rcu_read_unlock_special().
++ *
++ * Since ctx->lock nests under rq->lock we must ensure the entire read
++ * side critical section is non-preemptible.
++ */
++ preempt_disable();
++ rcu_read_lock();
++ ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
++ if (ctx) {
++ /*
++ * If this context is a clone of another, it might
++ * get swapped for another underneath us by
++ * perf_event_task_sched_out, though the
++ * rcu_read_lock() protects us from any context
++ * getting freed. Lock the context and check if it
++ * got swapped before we could get the lock, and retry
++ * if so. If we locked the right context, then it
++ * can't get swapped on us any more.
++ */
++ raw_spin_lock_irqsave(&ctx->lock, *flags);
++ if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
++ raw_spin_unlock_irqrestore(&ctx->lock, *flags);
++ rcu_read_unlock();
++ preempt_enable();
++ goto retry;
++ }
++
++ if (!atomic_inc_not_zero(&ctx->refcount)) {
++ raw_spin_unlock_irqrestore(&ctx->lock, *flags);
++ ctx = NULL;
++ }
++ }
++ rcu_read_unlock();
++ preempt_enable();
++ return ctx;
++}
++
++/*
++ * Get the context for a task and increment its pin_count so it
++ * can't get swapped to another task. This also increments its
++ * reference count so that the context can't get freed.
++ */
++static struct perf_event_context *
++perf_pin_task_context(struct task_struct *task, int ctxn)
++{
++ struct perf_event_context *ctx;
++ unsigned long flags;
++
++ ctx = perf_lock_task_context(task, ctxn, &flags);
++ if (ctx) {
++ ++ctx->pin_count;
++ raw_spin_unlock_irqrestore(&ctx->lock, flags);
++ }
++ return ctx;
++}
++
++static void perf_unpin_context(struct perf_event_context *ctx)
++{
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&ctx->lock, flags);
++ --ctx->pin_count;
++ raw_spin_unlock_irqrestore(&ctx->lock, flags);
++}
++
++/*
++ * Update the record of the current time in a context.
++ */
++static void update_context_time(struct perf_event_context *ctx)
++{
++ u64 now = perf_clock();
++
++ ctx->time += now - ctx->timestamp;
++ ctx->timestamp = now;
++}
++
++static u64 perf_event_time(struct perf_event *event)
++{
++ struct perf_event_context *ctx = event->ctx;
++
++ if (is_cgroup_event(event))
++ return perf_cgroup_event_time(event);
++
++ return ctx ? ctx->time : 0;
++}
++
++/*
++ * Update the total_time_enabled and total_time_running fields for a event.
++ * The caller of this function needs to hold the ctx->lock.
++ */
++static void update_event_times(struct perf_event *event)
++{
++ struct perf_event_context *ctx = event->ctx;
++ u64 run_end;
++
++ if (event->state < PERF_EVENT_STATE_INACTIVE ||
++ event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
++ return;
++ /*
++ * in cgroup mode, time_enabled represents
++ * the time the event was enabled AND active
++ * tasks were in the monitored cgroup. This is
++ * independent of the activity of the context as
++ * there may be a mix of cgroup and non-cgroup events.
++ *
++ * That is why we treat cgroup events differently
++ * here.
++ */
++ if (is_cgroup_event(event))
++ run_end = perf_cgroup_event_time(event);
++ else if (ctx->is_active)
++ run_end = ctx->time;
++ else
++ run_end = event->tstamp_stopped;
++
++ event->total_time_enabled = run_end - event->tstamp_enabled;
++
++ if (event->state == PERF_EVENT_STATE_INACTIVE)
++ run_end = event->tstamp_stopped;
++ else
++ run_end = perf_event_time(event);
++
++ event->total_time_running = run_end - event->tstamp_running;
++
++}
++
++/*
++ * Update total_time_enabled and total_time_running for all events in a group.
++ */
++static void update_group_times(struct perf_event *leader)
++{
++ struct perf_event *event;
++
++ update_event_times(leader);
++ list_for_each_entry(event, &leader->sibling_list, group_entry)
++ update_event_times(event);
++}
++
++static struct list_head *
++ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
++{
++ if (event->attr.pinned)
++ return &ctx->pinned_groups;
++ else
++ return &ctx->flexible_groups;
++}
++
++/*
++ * Add a event from the lists for its context.
++ * Must be called with ctx->mutex and ctx->lock held.
++ */
++static void
++list_add_event(struct perf_event *event, struct perf_event_context *ctx)
++{
++ WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
++ event->attach_state |= PERF_ATTACH_CONTEXT;
++
++ /*
++ * If we're a stand alone event or group leader, we go to the context
++ * list, group events are kept attached to the group so that
++ * perf_group_detach can, at all times, locate all siblings.
++ */
++ if (event->group_leader == event) {
++ struct list_head *list;
++
++ if (is_software_event(event))
++ event->group_flags |= PERF_GROUP_SOFTWARE;
++
++ list = ctx_group_list(event, ctx);
++ list_add_tail(&event->group_entry, list);
++ }
++
++ if (is_cgroup_event(event))
++ ctx->nr_cgroups++;
++
++ if (has_branch_stack(event))
++ ctx->nr_branch_stack++;
++
++ list_add_rcu(&event->event_entry, &ctx->event_list);
++ if (!ctx->nr_events)
++ perf_pmu_rotate_start(ctx->pmu);
++ ctx->nr_events++;
++ if (event->attr.inherit_stat)
++ ctx->nr_stat++;
++
++ ctx->generation++;
++}
++
++/*
++ * Initialize event state based on the perf_event_attr::disabled.
++ */
++static inline void perf_event__state_init(struct perf_event *event)
++{
++ event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF :
++ PERF_EVENT_STATE_INACTIVE;
++}
++
++/*
++ * Called at perf_event creation and when events are attached/detached from a
++ * group.
++ */
++static void perf_event__read_size(struct perf_event *event)
++{
++ int entry = sizeof(u64); /* value */
++ int size = 0;
++ int nr = 1;
++
++ if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
++ size += sizeof(u64);
++
++ if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
++ size += sizeof(u64);
++
++ if (event->attr.read_format & PERF_FORMAT_ID)
++ entry += sizeof(u64);
++
++ if (event->attr.read_format & PERF_FORMAT_GROUP) {
++ nr += event->group_leader->nr_siblings;
++ size += sizeof(u64);
++ }
++
++ size += entry * nr;
++ event->read_size = size;
++}
++
++static void perf_event__header_size(struct perf_event *event)
++{
++ struct perf_sample_data *data;
++ u64 sample_type = event->attr.sample_type;
++ u16 size = 0;
++
++ perf_event__read_size(event);
++
++ if (sample_type & PERF_SAMPLE_IP)
++ size += sizeof(data->ip);
++
++ if (sample_type & PERF_SAMPLE_ADDR)
++ size += sizeof(data->addr);
++
++ if (sample_type & PERF_SAMPLE_PERIOD)
++ size += sizeof(data->period);
++
++ if (sample_type & PERF_SAMPLE_WEIGHT)
++ size += sizeof(data->weight);
++
++ if (sample_type & PERF_SAMPLE_READ)
++ size += event->read_size;
++
++ if (sample_type & PERF_SAMPLE_DATA_SRC)
++ size += sizeof(data->data_src.val);
++
++ if (sample_type & PERF_SAMPLE_TRANSACTION)
++ size += sizeof(data->txn);
++
++ event->header_size = size;
++}
++
++static void perf_event__id_header_size(struct perf_event *event)
++{
++ struct perf_sample_data *data;
++ u64 sample_type = event->attr.sample_type;
++ u16 size = 0;
++
++ if (sample_type & PERF_SAMPLE_TID)
++ size += sizeof(data->tid_entry);
++
++ if (sample_type & PERF_SAMPLE_TIME)
++ size += sizeof(data->time);
++
++ if (sample_type & PERF_SAMPLE_IDENTIFIER)
++ size += sizeof(data->id);
++
++ if (sample_type & PERF_SAMPLE_ID)
++ size += sizeof(data->id);
++
++ if (sample_type & PERF_SAMPLE_STREAM_ID)
++ size += sizeof(data->stream_id);
++
++ if (sample_type & PERF_SAMPLE_CPU)
++ size += sizeof(data->cpu_entry);
++
++ event->id_header_size = size;
++}
++
++static void perf_group_attach(struct perf_event *event)
++{
++ struct perf_event *group_leader = event->group_leader, *pos;
++
++ /*
++ * We can have double attach due to group movement in perf_event_open.
++ */
++ if (event->attach_state & PERF_ATTACH_GROUP)
++ return;
++
++ event->attach_state |= PERF_ATTACH_GROUP;
++
++ if (group_leader == event)
++ return;
++
++ if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
++ !is_software_event(event))
++ group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;
++
++ list_add_tail(&event->group_entry, &group_leader->sibling_list);
++ group_leader->nr_siblings++;
++
++ perf_event__header_size(group_leader);
++
++ list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
++ perf_event__header_size(pos);
++}
++
++/*
++ * Remove a event from the lists for its context.
++ * Must be called with ctx->mutex and ctx->lock held.
++ */
++static void
++list_del_event(struct perf_event *event, struct perf_event_context *ctx)
++{
++ struct perf_cpu_context *cpuctx;
++ /*
++ * We can have double detach due to exit/hot-unplug + close.
++ */
++ if (!(event->attach_state & PERF_ATTACH_CONTEXT))
++ return;
++
++ event->attach_state &= ~PERF_ATTACH_CONTEXT;
++
++ if (is_cgroup_event(event)) {
++ ctx->nr_cgroups--;
++ cpuctx = __get_cpu_context(ctx);
++ /*
++ * if there are no more cgroup events
++ * then clear cgrp to avoid stale pointer
++ * in update_cgrp_time_from_cpuctx()
++ */
++ if (!ctx->nr_cgroups)
++ cpuctx->cgrp = NULL;
++ }
++
++ if (has_branch_stack(event))
++ ctx->nr_branch_stack--;
++
++ ctx->nr_events--;
++ if (event->attr.inherit_stat)
++ ctx->nr_stat--;
++
++ list_del_rcu(&event->event_entry);
++
++ if (event->group_leader == event)
++ list_del_init(&event->group_entry);
++
++ update_group_times(event);
++
++ /*
++ * If event was in error state, then keep it
++ * that way, otherwise bogus counts will be
++ * returned on read(). The only way to get out
++ * of error state is by explicit re-enabling
++ * of the event
++ */
++ if (event->state > PERF_EVENT_STATE_OFF)
++ event->state = PERF_EVENT_STATE_OFF;
++
++ ctx->generation++;
++}
++
++static void perf_group_detach(struct perf_event *event)
++{
++ struct perf_event *sibling, *tmp;
++ struct list_head *list = NULL;
++
++ /*
++ * We can have double detach due to exit/hot-unplug + close.
++ */
++ if (!(event->attach_state & PERF_ATTACH_GROUP))
++ return;
++
++ event->attach_state &= ~PERF_ATTACH_GROUP;
++
++ /*
++ * If this is a sibling, remove it from its group.
++ */
++ if (event->group_leader != event) {
++ list_del_init(&event->group_entry);
++ event->group_leader->nr_siblings--;
++ goto out;
++ }
++
++ if (!list_empty(&event->group_entry))
++ list = &event->group_entry;
++
++ /*
++ * If this was a group event with sibling events then
++ * upgrade the siblings to singleton events by adding them
++ * to whatever list we are on.
++ */
++ list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
++ if (list)
++ list_move_tail(&sibling->group_entry, list);
++ sibling->group_leader = sibling;
++
++ /* Inherit group flags from the previous leader */
++ sibling->group_flags = event->group_flags;
++ }
++
++out:
++ perf_event__header_size(event->group_leader);
++
++ list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
++ perf_event__header_size(tmp);
++}
++
++/*
++ * User event without the task.
++ */
++static bool is_orphaned_event(struct perf_event *event)
++{
++ return event && !is_kernel_event(event) && !event->owner;
++}
++
++/*
++ * Event has a parent but parent's task finished and it's
++ * alive only because of children holding a reference.
++ */
++static bool is_orphaned_child(struct perf_event *event)
++{
++ return is_orphaned_event(event->parent);
++}
++
++static void orphans_remove_work(struct work_struct *work);
++
++static void schedule_orphans_remove(struct perf_event_context *ctx)
++{
++ if (!ctx->task || ctx->orphans_remove_sched || !perf_wq)
++ return;
++
++ if (queue_delayed_work(perf_wq, &ctx->orphans_remove, 1)) {
++ get_ctx(ctx);
++ ctx->orphans_remove_sched = true;
++ }
++}
++
++static int __init perf_workqueue_init(void)
++{
++ perf_wq = create_singlethread_workqueue("perf");
++ WARN(!perf_wq, "failed to create perf workqueue\n");
++ return perf_wq ? 0 : -1;
++}
++
++core_initcall(perf_workqueue_init);
++
++static inline int
++event_filter_match(struct perf_event *event)
++{
++ return (event->cpu == -1 || event->cpu == smp_processor_id())
++ && perf_cgroup_match(event);
++}
++
++static void
++event_sched_out(struct perf_event *event,
++ struct perf_cpu_context *cpuctx,
++ struct perf_event_context *ctx)
++{
++ u64 tstamp = perf_event_time(event);
++ u64 delta;
++ /*
++ * An event which could not be activated because of
++ * filter mismatch still needs to have its timings
++ * maintained, otherwise bogus information is returned
++ * via read() for time_enabled, time_running:
++ */
++ if (event->state == PERF_EVENT_STATE_INACTIVE
++ && !event_filter_match(event)) {
++ delta = tstamp - event->tstamp_stopped;
++ event->tstamp_running += delta;
++ event->tstamp_stopped = tstamp;
++ }
++
++ if (event->state != PERF_EVENT_STATE_ACTIVE)
++ return;
++
++ perf_pmu_disable(event->pmu);
++
++ event->state = PERF_EVENT_STATE_INACTIVE;
++ if (event->pending_disable) {
++ event->pending_disable = 0;
++ event->state = PERF_EVENT_STATE_OFF;
++ }
++ event->tstamp_stopped = tstamp;
++ event->pmu->del(event, 0);
++ event->oncpu = -1;
++
++ if (!is_software_event(event))
++ cpuctx->active_oncpu--;
++ ctx->nr_active--;
++ if (event->attr.freq && event->attr.sample_freq)
++ ctx->nr_freq--;
++ if (event->attr.exclusive || !cpuctx->active_oncpu)
++ cpuctx->exclusive = 0;
++
++ if (is_orphaned_child(event))
++ schedule_orphans_remove(ctx);
++
++ perf_pmu_enable(event->pmu);
++}
++
++static void
++group_sched_out(struct perf_event *group_event,
++ struct perf_cpu_context *cpuctx,
++ struct perf_event_context *ctx)
++{
++ struct perf_event *event;
++ int state = group_event->state;
++
++ event_sched_out(group_event, cpuctx, ctx);
++
++ /*
++ * Schedule out siblings (if any):
++ */
++ list_for_each_entry(event, &group_event->sibling_list, group_entry)
++ event_sched_out(event, cpuctx, ctx);
++
++ if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
++ cpuctx->exclusive = 0;
++}
++
++struct remove_event {
++ struct perf_event *event;
++ bool detach_group;
++};
++
++/*
++ * Cross CPU call to remove a performance event
++ *
++ * We disable the event on the hardware level first. After that we
++ * remove it from the context list.
++ */
++static int __perf_remove_from_context(void *info)
++{
++ struct remove_event *re = info;
++ struct perf_event *event = re->event;
++ struct perf_event_context *ctx = event->ctx;
++ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
++
++ raw_spin_lock(&ctx->lock);
++ event_sched_out(event, cpuctx, ctx);
++ if (re->detach_group)
++ perf_group_detach(event);
++ list_del_event(event, ctx);
++ if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
++ ctx->is_active = 0;
++ cpuctx->task_ctx = NULL;
++ }
++ raw_spin_unlock(&ctx->lock);
++
++ return 0;
++}
++
++
++/*
++ * Remove the event from a task's (or a CPU's) list of events.
++ *
++ * CPU events are removed with a smp call. For task events we only
++ * call when the task is on a CPU.
++ *
++ * If event->ctx is a cloned context, callers must make sure that
++ * every task struct that event->ctx->task could possibly point to
++ * remains valid. This is OK when called from perf_release since
++ * that only calls us on the top-level context, which can't be a clone.
++ * When called from perf_event_exit_task, it's OK because the
++ * context has been detached from its task.
++ */
++static void perf_remove_from_context(struct perf_event *event, bool detach_group)
++{
++ struct perf_event_context *ctx = event->ctx;
++ struct task_struct *task = ctx->task;
++ struct remove_event re = {
++ .event = event,
++ .detach_group = detach_group,
++ };
++
++ lockdep_assert_held(&ctx->mutex);
++
++ if (!task) {
++ /*
++ * Per cpu events are removed via an smp call. The removal can
++ * fail if the CPU is currently offline, but in that case we
++ * already called __perf_remove_from_context from
++ * perf_event_exit_cpu.
++ */
++ cpu_function_call(event->cpu, __perf_remove_from_context, &re);
++ return;
++ }
++
++retry:
++ if (!task_function_call(task, __perf_remove_from_context, &re))
++ return;
++
++ raw_spin_lock_irq(&ctx->lock);
++ /*
++ * If we failed to find a running task, but find the context active now
++ * that we've acquired the ctx->lock, retry.
++ */
++ if (ctx->is_active) {
++ raw_spin_unlock_irq(&ctx->lock);
++ /*
++ * Reload the task pointer, it might have been changed by
++ * a concurrent perf_event_context_sched_out().
++ */
++ task = ctx->task;
++ goto retry;
++ }
++
++ /*
++ * Since the task isn't running, it's safe to remove the event; our
++ * holding the ctx->lock ensures the task won't get scheduled in.
++ */
++ if (detach_group)
++ perf_group_detach(event);
++ list_del_event(event, ctx);
++ raw_spin_unlock_irq(&ctx->lock);
++}
++
++/*
++ * Cross CPU call to disable a performance event
++ */
++int __perf_event_disable(void *info)
++{
++ struct perf_event *event = info;
++ struct perf_event_context *ctx = event->ctx;
++ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
++
++ /*
++ * If this is a per-task event, need to check whether this
++ * event's task is the current task on this cpu.
++ *
++ * Can trigger due to concurrent perf_event_context_sched_out()
++ * flipping contexts around.
++ */
++ if (ctx->task && cpuctx->task_ctx != ctx)
++ return -EINVAL;
++
++ raw_spin_lock(&ctx->lock);
++
++ /*
++ * If the event is on, turn it off.
++ * If it is in error state, leave it in error state.
++ */
++ if (event->state >= PERF_EVENT_STATE_INACTIVE) {
++ update_context_time(ctx);
++ update_cgrp_time_from_event(event);
++ update_group_times(event);
++ if (event == event->group_leader)
++ group_sched_out(event, cpuctx, ctx);
++ else
++ event_sched_out(event, cpuctx, ctx);
++ event->state = PERF_EVENT_STATE_OFF;
++ }
++
++ raw_spin_unlock(&ctx->lock);
++
++ return 0;
++}
++
++/*
++ * Disable an event.
++ *
++ * If event->ctx is a cloned context, callers must make sure that
++ * every task struct that event->ctx->task could possibly point to
++ * remains valid. This condition is satisfied when called through
++ * perf_event_for_each_child or perf_event_for_each because they
++ * hold the top-level event's child_mutex, so any descendant that
++ * goes to exit will block in sync_child_event.
++ * When called from perf_pending_event it's OK because event->ctx
++ * is the current context on this CPU and preemption is disabled,
++ * hence we can't get into perf_event_task_sched_out for this context.
++ */
++void perf_event_disable(struct perf_event *event)
++{
++ struct perf_event_context *ctx = event->ctx;
++ struct task_struct *task = ctx->task;
++
++ if (!task) {
++ /*
++ * Disable the event on the cpu that it's on
++ */
++ cpu_function_call(event->cpu, __perf_event_disable, event);
++ return;
++ }
++
++retry:
++ if (!task_function_call(task, __perf_event_disable, event))
++ return;
++
++ raw_spin_lock_irq(&ctx->lock);
++ /*
++ * If the event is still active, we need to retry the cross-call.
++ */
++ if (event->state == PERF_EVENT_STATE_ACTIVE) {
++ raw_spin_unlock_irq(&ctx->lock);
++ /*
++ * Reload the task pointer, it might have been changed by
++ * a concurrent perf_event_context_sched_out().
++ */
++ task = ctx->task;
++ goto retry;
++ }
++
++ /*
++ * Since we have the lock this context can't be scheduled
++ * in, so we can change the state safely.
++ */
++ if (event->state == PERF_EVENT_STATE_INACTIVE) {
++ update_group_times(event);
++ event->state = PERF_EVENT_STATE_OFF;
++ }
++ raw_spin_unlock_irq(&ctx->lock);
++}
++EXPORT_SYMBOL_GPL(perf_event_disable);
++
++static void perf_set_shadow_time(struct perf_event *event,
++ struct perf_event_context *ctx,
++ u64 tstamp)
++{
++ /*
++ * use the correct time source for the time snapshot
++ *
++ * We could get by without this by leveraging the
++ * fact that to get to this function, the caller
++ * has most likely already called update_context_time()
++ * and update_cgrp_time_xx() and thus both timestamps
++ * are identical (or very close). Given that tstamp is
++ * already adjusted for cgroup, we could say that:
++ * tstamp - ctx->timestamp
++ * is equivalent to
++ * tstamp - cgrp->timestamp.
++ *
++ * Then, in perf_output_read(), the calculation would
++ * work with no changes because:
++ * - event is guaranteed scheduled in
++ * - no scheduled out in between
++ * - thus the timestamp would be the same
++ *
++ * But this is a bit hairy.
++ *
++ * So instead, we have an explicit cgroup call to remain
++ * within the time source all along. We believe it
++ * is cleaner and simpler to understand.
++ */
++ if (is_cgroup_event(event))
++ perf_cgroup_set_shadow_time(event, tstamp);
++ else
++ event->shadow_ctx_time = tstamp - ctx->timestamp;
++}
++
++#define MAX_INTERRUPTS (~0ULL)
++
++static void perf_log_throttle(struct perf_event *event, int enable);
++
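++/*
++ * Put a single event on the PMU via pmu->add(). On failure the event
++ * is flipped back to INACTIVE and -EAGAIN is returned so the caller
++ * can unwind any partially scheduled group.
++ */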
++static int
++event_sched_in(struct perf_event *event,
++ struct perf_cpu_context *cpuctx,
++ struct perf_event_context *ctx)
++{
++ u64 tstamp = perf_event_time(event);
++ int ret = 0;
++
++ lockdep_assert_held(&ctx->lock);
++
++ if (event->state <= PERF_EVENT_STATE_OFF)
++ return 0;
++
++ event->state = PERF_EVENT_STATE_ACTIVE;
++ event->oncpu = smp_processor_id();
++
++ /*
++ * Unthrottle events, since we scheduled we might have missed several
++ * ticks already, also for a heavily scheduling task there is little
++ * guarantee it'll get a tick in a timely manner.
++ */
++ if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
++ perf_log_throttle(event, 1);
++ event->hw.interrupts = 0;
++ }
++
++ /*
++ * The new state must be visible before we turn it on in the hardware:
++ */
++ smp_wmb();
++
++ perf_pmu_disable(event->pmu);
++
++ if (event->pmu->add(event, PERF_EF_START)) {
++ event->state = PERF_EVENT_STATE_INACTIVE;
++ event->oncpu = -1;
++ ret = -EAGAIN;
++ goto out;
++ }
++
++ event->tstamp_running += tstamp - event->tstamp_stopped;
++
++ perf_set_shadow_time(event, ctx, tstamp);
++
++ if (!is_software_event(event))
++ cpuctx->active_oncpu++;
++ ctx->nr_active++;
++ if (event->attr.freq && event->attr.sample_freq)
++ ctx->nr_freq++;
++
++ if (event->attr.exclusive)
++ cpuctx->exclusive = 1;
++
++ if (is_orphaned_child(event))
++ schedule_orphans_remove(ctx);
++
++out:
++ perf_pmu_enable(event->pmu);
++
++ return ret;
++}
++
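++/*
++ * Schedule in a whole group as one PMU transaction: start_txn(), add
++ * the leader and all siblings, then commit_txn(). On any failure the
++ * transaction is cancelled and the timings of the unscheduled members
++ * are simulated so group times stay consistent.
++ */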
++static int
++group_sched_in(struct perf_event *group_event,
++ struct perf_cpu_context *cpuctx,
++ struct perf_event_context *ctx)
++{
++ struct perf_event *event, *partial_group = NULL;
++ struct pmu *pmu = ctx->pmu;
++ u64 now = ctx->time;
++ bool simulate = false;
++
++ if (group_event->state == PERF_EVENT_STATE_OFF)
++ return 0;
++
++ pmu->start_txn(pmu);
++
++ if (event_sched_in(group_event, cpuctx, ctx)) {
++ pmu->cancel_txn(pmu);
++ perf_cpu_hrtimer_restart(cpuctx);
++ return -EAGAIN;
++ }
++
++ /*
++ * Schedule in siblings as one group (if any):
++ */
++ list_for_each_entry(event, &group_event->sibling_list, group_entry) {
++ if (event_sched_in(event, cpuctx, ctx)) {
++ partial_group = event;
++ goto group_error;
++ }
++ }
++
++ if (!pmu->commit_txn(pmu))
++ return 0;
++
++group_error:
++ /*
++ * Groups can be scheduled in as one unit only, so undo any
++ * partial group before returning:
++ * The events up to the failed event are scheduled out normally,
++ * tstamp_stopped will be updated.
++ *
++ * The failed events and the remaining siblings need to have
++ * their timings updated as if they had gone thru event_sched_in()
++ * and event_sched_out(). This is required to get consistent timings
++ * across the group. This also takes care of the case where the group
++ * could never be scheduled by ensuring tstamp_stopped is set to mark
++ * the time the event was actually stopped, such that time delta
++ * calculation in update_event_times() is correct.
++ */
++ list_for_each_entry(event, &group_event->sibling_list, group_entry) {
++ if (event == partial_group)
++ simulate = true;
++
++ if (simulate) {
++ event->tstamp_running += now - event->tstamp_stopped;
++ event->tstamp_stopped = now;
++ } else {
++ event_sched_out(event, cpuctx, ctx);
++ }
++ }
++ event_sched_out(group_event, cpuctx, ctx);
++
++ pmu->cancel_txn(pmu);
++
++ perf_cpu_hrtimer_restart(cpuctx);
++
++ return -EAGAIN;
++}
++
++/*
++ * Work out whether we can put this event group on the CPU now.
++ */
++static int group_can_go_on(struct perf_event *event,
++ struct perf_cpu_context *cpuctx,
++ int can_add_hw)
++{
++ /*
++ * Groups consisting entirely of software events can always go on.
++ */
++ if (event->group_flags & PERF_GROUP_SOFTWARE)
++ return 1;
++ /*
++ * If an exclusive group is already on, no other hardware
++ * events can go on.
++ */
++ if (cpuctx->exclusive)
++ return 0;
++ /*
++ * If this group is exclusive and there are already
++ * events on the CPU, it can't go on.
++ */
++ if (event->attr.exclusive && cpuctx->active_oncpu)
++ return 0;
++ /*
++ * Otherwise, try to add it if all previous groups were able
++ * to go on.
++ */
++ return can_add_hw;
++}
++
++static void add_event_to_ctx(struct perf_event *event,
++ struct perf_event_context *ctx)
++{
++ u64 tstamp = perf_event_time(event);
++
++ list_add_event(event, ctx);
++ perf_group_attach(event);
++ event->tstamp_enabled = tstamp;
++ event->tstamp_running = tstamp;
++ event->tstamp_stopped = tstamp;
++}
++
++static void task_ctx_sched_out(struct perf_event_context *ctx);
++static void
++ctx_sched_in(struct perf_event_context *ctx,
++ struct perf_cpu_context *cpuctx,
++ enum event_type_t event_type,
++ struct task_struct *task);
++
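++/*
++ * Schedule contexts in with pinned groups before flexible ones, so
++ * pinned events get first claim on the hardware counters.
++ */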
++static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
++ struct perf_event_context *ctx,
++ struct task_struct *task)
++{
++ cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
++ if (ctx)
++ ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
++ cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
++ if (ctx)
++ ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
++}
++
++/*
++ * Cross CPU call to install and enable a performance event
++ *
++ * Must be called with ctx->mutex held
++ */
++static int __perf_install_in_context(void *info)
++{
++ struct perf_event *event = info;
++ struct perf_event_context *ctx = event->ctx;
++ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
++ struct perf_event_context *task_ctx = cpuctx->task_ctx;
++ struct task_struct *task = current;
++
++ perf_ctx_lock(cpuctx, task_ctx);
++ perf_pmu_disable(cpuctx->ctx.pmu);
++
++ /*
++ * If there was an active task_ctx schedule it out.
++ */
++ if (task_ctx)
++ task_ctx_sched_out(task_ctx);
++
++ /*
++ * If the context we're installing events in is not the
++ * active task_ctx, flip them.
++ */
++ if (ctx->task && task_ctx != ctx) {
++ if (task_ctx)
++ raw_spin_unlock(&task_ctx->lock);
++ raw_spin_lock(&ctx->lock);
++ task_ctx = ctx;
++ }
++
++ if (task_ctx) {
++ cpuctx->task_ctx = task_ctx;
++ task = task_ctx->task;
++ }
++
++ cpu_ctx_sched_out(cpuctx, EVENT_ALL);
++
++ update_context_time(ctx);
++ /*
++ * update cgrp time only if current cgrp
++ * matches event->cgrp. Must be done before
++ * calling add_event_to_ctx()
++ */
++ update_cgrp_time_from_event(event);
++
++ add_event_to_ctx(event, ctx);
++
++ /*
++ * Schedule everything back in
++ */
++ perf_event_sched_in(cpuctx, task_ctx, task);
++
++ perf_pmu_enable(cpuctx->ctx.pmu);
++ perf_ctx_unlock(cpuctx, task_ctx);
++
++ return 0;
++}
++
++/*
++ * Attach a performance event to a context
++ *
++ * First we add the event to the list with the hardware enable bit
++ * in event->hw_config cleared.
++ *
++ * If the event is attached to a task which is on a CPU we use an smp
++ * call to enable it in the task context. The task might have been
++ * scheduled away, but we check this in the smp call again.
++ */
++static void
++perf_install_in_context(struct perf_event_context *ctx,
++ struct perf_event *event,
++ int cpu)
++{
++ struct task_struct *task = ctx->task;
++
++ lockdep_assert_held(&ctx->mutex);
++
++ event->ctx = ctx;
++ if (event->cpu != -1)
++ event->cpu = cpu;
++
++ if (!task) {
++ /*
++ * Per cpu events are installed via an smp call and
++ * the install is always successful.
++ */
++ cpu_function_call(cpu, __perf_install_in_context, event);
++ return;
++ }
++
++retry:
++ if (!task_function_call(task, __perf_install_in_context, event))
++ return;
++
++ raw_spin_lock_irq(&ctx->lock);
++ /*
++ * If we failed to find a running task, but find the context active now
++ * that we've acquired the ctx->lock, retry.
++ */
++ if (ctx->is_active) {
++ raw_spin_unlock_irq(&ctx->lock);
++ /*
++ * Reload the task pointer, it might have been changed by
++ * a concurrent perf_event_context_sched_out().
++ */
++ task = ctx->task;
++ goto retry;
++ }
++
++ /*
++ * Since the task isn't running, it's safe to add the event; our
++ * holding the ctx->lock ensures the task won't get scheduled in.
++ */
++ add_event_to_ctx(event, ctx);
++ raw_spin_unlock_irq(&ctx->lock);
++}
++
++/*
++ * Put an event into inactive state and update time fields.
++ * Enabling the leader of a group effectively enables all
++ * the group members that aren't explicitly disabled, so we
++ * have to update their ->tstamp_enabled also.
++ * Note: this works for group members as well as group leaders
++ * since the non-leader members' sibling_lists will be empty.
++ */
++static void __perf_event_mark_enabled(struct perf_event *event)
++{
++ struct perf_event *sub;
++ u64 tstamp = perf_event_time(event);
++
++ event->state = PERF_EVENT_STATE_INACTIVE;
++ event->tstamp_enabled = tstamp - event->total_time_enabled;
++ list_for_each_entry(sub, &event->sibling_list, group_entry) {
++ if (sub->state >= PERF_EVENT_STATE_INACTIVE)
++ sub->tstamp_enabled = tstamp - sub->total_time_enabled;
++ }
++}
++
++/*
++ * Cross CPU call to enable a performance event
++ */
++static int __perf_event_enable(void *info)
++{
++ struct perf_event *event = info;
++ struct perf_event_context *ctx = event->ctx;
++ struct perf_event *leader = event->group_leader;
++ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
++ int err;
++
++ /*
++ * There's a time window between the 'ctx->is_active' check
++ * in perf_event_enable() and this place having:
++ * - IRQs on
++ * - ctx->lock unlocked
++ *
++ * where the task could be killed and 'ctx' deactivated
++ * by perf_event_exit_task.
++ */
++ if (!ctx->is_active)
++ return -EINVAL;
++
++ raw_spin_lock(&ctx->lock);
++ update_context_time(ctx);
++
++ if (event->state >= PERF_EVENT_STATE_INACTIVE)
++ goto unlock;
++
++ /*
++ * set current task's cgroup time reference point
++ */
++ perf_cgroup_set_timestamp(current, ctx);
++
++ __perf_event_mark_enabled(event);
++
++ if (!event_filter_match(event)) {
++ if (is_cgroup_event(event))
++ perf_cgroup_defer_enabled(event);
++ goto unlock;
++ }
++
++ /*
++ * If the event is in a group and isn't the group leader,
++ * then don't put it on unless the group is on.
++ */
++ if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
++ goto unlock;
++
++ if (!group_can_go_on(event, cpuctx, 1)) {
++ err = -EEXIST;
++ } else {
++ if (event == leader)
++ err = group_sched_in(event, cpuctx, ctx);
++ else
++ err = event_sched_in(event, cpuctx, ctx);
++ }
++
++ if (err) {
++ /*
++ * If this event can't go on and it's part of a
++ * group, then the whole group has to come off.
++ */
++ if (leader != event) {
++ group_sched_out(leader, cpuctx, ctx);
++ perf_cpu_hrtimer_restart(cpuctx);
++ }
++ if (leader->attr.pinned) {
++ update_group_times(leader);
++ leader->state = PERF_EVENT_STATE_ERROR;
++ }
++ }
++
++unlock:
++ raw_spin_unlock(&ctx->lock);
++
++ return 0;
++}
++
++/*
++ * Enable an event.
++ *
++ * If event->ctx is a cloned context, callers must make sure that
++ * every task struct that event->ctx->task could possibly point to
++ * remains valid. This condition is satisfied when called through
++ * perf_event_for_each_child or perf_event_for_each as described
++ * for perf_event_disable.
++ */
++void perf_event_enable(struct perf_event *event)
++{
++ struct perf_event_context *ctx = event->ctx;
++ struct task_struct *task = ctx->task;
++
++ if (!task) {
++ /*
++ * Enable the event on the cpu that it's on
++ */
++ cpu_function_call(event->cpu, __perf_event_enable, event);
++ return;
++ }
++
++ raw_spin_lock_irq(&ctx->lock);
++ if (event->state >= PERF_EVENT_STATE_INACTIVE)
++ goto out;
++
++ /*
++ * If the event is in error state, clear that first.
++ * That way, if we see the event in error state below, we
++ * know that it has gone back into error state, as distinct
++ * from the task having been scheduled away before the
++ * cross-call arrived.
++ */
++ if (event->state == PERF_EVENT_STATE_ERROR)
++ event->state = PERF_EVENT_STATE_OFF;
++
++retry:
++ if (!ctx->is_active) {
++ __perf_event_mark_enabled(event);
++ goto out;
++ }
++
++ raw_spin_unlock_irq(&ctx->lock);
++
++ if (!task_function_call(task, __perf_event_enable, event))
++ return;
++
++ raw_spin_lock_irq(&ctx->lock);
++
++ /*
++ * If the context is active and the event is still off,
++ * we need to retry the cross-call.
++ */
++ if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
++ /*
++ * task could have been flipped by a concurrent
++ * perf_event_context_sched_out()
++ */
++ task = ctx->task;
++ goto retry;
++ }
++
++out:
++ raw_spin_unlock_irq(&ctx->lock);
++}
++EXPORT_SYMBOL_GPL(perf_event_enable);
++
++int perf_event_refresh(struct perf_event *event, int refresh)
++{
++ /*
++ * not supported on inherited events
++ */
++ if (event->attr.inherit || !is_sampling_event(event))
++ return -EINVAL;
++
++ atomic_add(refresh, &event->event_limit);
++ perf_event_enable(event);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(perf_event_refresh);
++
++static void ctx_sched_out(struct perf_event_context *ctx,
++ struct perf_cpu_context *cpuctx,
++ enum event_type_t event_type)
++{
++ struct perf_event *event;
++ int is_active = ctx->is_active;
++
++ ctx->is_active &= ~event_type;
++ if (likely(!ctx->nr_events))
++ return;
++
++ update_context_time(ctx);
++ update_cgrp_time_from_cpuctx(cpuctx);
++ if (!ctx->nr_active)
++ return;
++
++ perf_pmu_disable(ctx->pmu);
++ if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
++ list_for_each_entry(event, &ctx->pinned_groups, group_entry)
++ group_sched_out(event, cpuctx, ctx);
++ }
++
++ if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
++ list_for_each_entry(event, &ctx->flexible_groups, group_entry)
++ group_sched_out(event, cpuctx, ctx);
++ }
++ perf_pmu_enable(ctx->pmu);
++}
++
++/*
++ * Test whether two contexts are equivalent, i.e. whether they have both been
++ * cloned from the same version of the same context.
++ *
++ * Equivalence is measured using a generation number in the context that is
++ * incremented on each modification to it; see unclone_ctx(), list_add_event()
++ * and list_del_event().
++ */
++static int context_equiv(struct perf_event_context *ctx1,
++ struct perf_event_context *ctx2)
++{
++ lockdep_assert_held(&ctx1->lock);
++ lockdep_assert_held(&ctx2->lock);
++
++ /* Pinning disables the swap optimization */
++ if (ctx1->pin_count || ctx2->pin_count)
++ return 0;
++
++ /* If ctx1 is the parent of ctx2 */
++ if (ctx1 == ctx2->parent_ctx && ctx1->generation == ctx2->parent_gen)
++ return 1;
++
++ /* If ctx2 is the parent of ctx1 */
++ if (ctx1->parent_ctx == ctx2 && ctx1->parent_gen == ctx2->generation)
++ return 1;
++
++ /*
++ * If ctx1 and ctx2 have the same parent; we flatten the parent
++ * hierarchy, see perf_event_init_context().
++ */
++ if (ctx1->parent_ctx && ctx1->parent_ctx == ctx2->parent_ctx &&
++ ctx1->parent_gen == ctx2->parent_gen)
++ return 1;
++
++ /* Unmatched */
++ return 0;
++}
++
++static void __perf_event_sync_stat(struct perf_event *event,
++ struct perf_event *next_event)
++{
++ u64 value;
++
++ if (!event->attr.inherit_stat)
++ return;
++
++ /*
++ * Update the event value, we cannot use perf_event_read()
++ * because we're in the middle of a context switch and have IRQs
++ * disabled, which upsets smp_call_function_single(), however
++ * we know the event must be on the current CPU, therefore we
++ * don't need to use it.
++ */
++ switch (event->state) {
++ case PERF_EVENT_STATE_ACTIVE:
++ event->pmu->read(event);
++ /* fall-through */
++
++ case PERF_EVENT_STATE_INACTIVE:
++ update_event_times(event);
++ break;
++
++ default:
++ break;
++ }
++
++ /*
++ * In order to keep per-task stats reliable we need to flip the event
++ * values when we flip the contexts.
++ */
++ value = local64_read(&next_event->count);
++ value = local64_xchg(&event->count, value);
++ local64_set(&next_event->count, value);
++
++ swap(event->total_time_enabled, next_event->total_time_enabled);
++ swap(event->total_time_running, next_event->total_time_running);
++
++ /*
++ * Since we swizzled the values, update the user-visible data too.
++ */
++ perf_event_update_userpage(event);
++ perf_event_update_userpage(next_event);
++}
++
++static void perf_event_sync_stat(struct perf_event_context *ctx,
++ struct perf_event_context *next_ctx)
++{
++ struct perf_event *event, *next_event;
++
++ if (!ctx->nr_stat)
++ return;
++
++ update_context_time(ctx);
++
++ event = list_first_entry(&ctx->event_list,
++ struct perf_event, event_entry);
++
++ next_event = list_first_entry(&next_ctx->event_list,
++ struct perf_event, event_entry);
++
++ while (&event->event_entry != &ctx->event_list &&
++ &next_event->event_entry != &next_ctx->event_list) {
++
++ __perf_event_sync_stat(event, next_event);
++
++ event = list_next_entry(event, event_entry);
++ next_event = list_next_entry(next_event, event_entry);
++ }
++}
++
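++/*
++ * Context-switch optimization: when the outgoing and incoming tasks
++ * have equivalent (cloned) contexts, swap the context pointers between
++ * the tasks instead of scheduling every event out and back in.
++ */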
++static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
++ struct task_struct *next)
++{
++ struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
++ struct perf_event_context *next_ctx;
++ struct perf_event_context *parent, *next_parent;
++ struct perf_cpu_context *cpuctx;
++ int do_switch = 1;
++
++ if (likely(!ctx))
++ return;
++
++ cpuctx = __get_cpu_context(ctx);
++ if (!cpuctx->task_ctx)
++ return;
++
++ rcu_read_lock();
++ next_ctx = next->perf_event_ctxp[ctxn];
++ if (!next_ctx)
++ goto unlock;
++
++ parent = rcu_dereference(ctx->parent_ctx);
++ next_parent = rcu_dereference(next_ctx->parent_ctx);
++
++ /* If neither context has a parent context, they cannot be clones. */
++ if (!parent && !next_parent)
++ goto unlock;
++
++ if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
++ /*
++ * Looks like the two contexts are clones, so we might be
++ * able to optimize the context switch. We lock both
++ * contexts and check that they are clones under the
++ * lock (including re-checking that neither has been
++ * uncloned in the meantime). It doesn't matter which
++ * order we take the locks because no other cpu could
++ * be trying to lock both of these tasks.
++ */
++ raw_spin_lock(&ctx->lock);
++ raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
++ if (context_equiv(ctx, next_ctx)) {
++ /*
++ * XXX do we need a memory barrier of sorts
++ * wrt to rcu_dereference() of perf_event_ctxp
++ */
++ task->perf_event_ctxp[ctxn] = next_ctx;
++ next->perf_event_ctxp[ctxn] = ctx;
++ ctx->task = next;
++ next_ctx->task = task;
++ do_switch = 0;
++
++ perf_event_sync_stat(ctx, next_ctx);
++ }
++ raw_spin_unlock(&next_ctx->lock);
++ raw_spin_unlock(&ctx->lock);
++ }
++unlock:
++ rcu_read_unlock();
++
++ if (do_switch) {
++ raw_spin_lock(&ctx->lock);
++ ctx_sched_out(ctx, cpuctx, EVENT_ALL);
++ cpuctx->task_ctx = NULL;
++ raw_spin_unlock(&ctx->lock);
++ }
++}
++
++#define for_each_task_context_nr(ctxn) \
++ for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
++
++/*
++ * Called from scheduler to remove the events of the current task,
++ * with interrupts disabled.
++ *
++ * We stop each event and update the event value in event->count.
++ *
++ * This does not protect us against NMI, but disable()
++ * sets the disabled bit in the control field of event _before_
++ * accessing the event control register. If a NMI hits, then it will
++ * not restart the event.
++ */
++void __perf_event_task_sched_out(struct task_struct *task,
++ struct task_struct *next)
++{
++ int ctxn;
++
++ for_each_task_context_nr(ctxn)
++ perf_event_context_sched_out(task, ctxn, next);
++
++ /*
++ * If cgroup events exist on this CPU, then we need
++ * to check if we have to switch out PMU state;
++ * cgroup events are system-wide mode only.
++ */
++ if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
++ perf_cgroup_sched_out(task, next);
++}
++
++static void task_ctx_sched_out(struct perf_event_context *ctx)
++{
++ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
++
++ if (!cpuctx->task_ctx)
++ return;
++
++ if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
++ return;
++
++ ctx_sched_out(ctx, cpuctx, EVENT_ALL);
++ cpuctx->task_ctx = NULL;
++}
++
++/*
++ * Called with IRQs disabled
++ */
++static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
++ enum event_type_t event_type)
++{
++ ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
++}
++
++static void
++ctx_pinned_sched_in(struct perf_event_context *ctx,
++ struct perf_cpu_context *cpuctx)
++{
++ struct perf_event *event;
++
++ list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
++ if (event->state <= PERF_EVENT_STATE_OFF)
++ continue;
++ if (!event_filter_match(event))
++ continue;
++
++ /* may need to reset tstamp_enabled */
++ if (is_cgroup_event(event))
++ perf_cgroup_mark_enabled(event, ctx);
++
++ if (group_can_go_on(event, cpuctx, 1))
++ group_sched_in(event, cpuctx, ctx);
++
++ /*
++ * If this pinned group hasn't been scheduled,
++ * put it in error state.
++ */
++ if (event->state == PERF_EVENT_STATE_INACTIVE) {
++ update_group_times(event);
++ event->state = PERF_EVENT_STATE_ERROR;
++ }
++ }
++}
++
++static void
++ctx_flexible_sched_in(struct perf_event_context *ctx,
++ struct perf_cpu_context *cpuctx)
++{
++ struct perf_event *event;
++ int can_add_hw = 1;
++
++ list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
++ /* Ignore events in OFF or ERROR state */
++ if (event->state <= PERF_EVENT_STATE_OFF)
++ continue;
++ /*
++ * Listen to the 'cpu' scheduling filter constraint
++ * of events:
++ */
++ if (!event_filter_match(event))
++ continue;
++
++ /* may need to reset tstamp_enabled */
++ if (is_cgroup_event(event))
++ perf_cgroup_mark_enabled(event, ctx);
++
++ if (group_can_go_on(event, cpuctx, can_add_hw)) {
++ if (group_sched_in(event, cpuctx, ctx))
++ can_add_hw = 0;
++ }
++ }
++}
++
++static void
++ctx_sched_in(struct perf_event_context *ctx,
++ struct perf_cpu_context *cpuctx,
++ enum event_type_t event_type,
++ struct task_struct *task)
++{
++ u64 now;
++ int is_active = ctx->is_active;
++
++ ctx->is_active |= event_type;
++ if (likely(!ctx->nr_events))
++ return;
++
++ now = perf_clock();
++ ctx->timestamp = now;
++ perf_cgroup_set_timestamp(task, ctx);
++ /*
++ * First go through the list and put on any pinned groups
++ * in order to give them the best chance of going on.
++ */
++ if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
++ ctx_pinned_sched_in(ctx, cpuctx);
++
++ /* Then walk through the lower prio flexible groups */
++ if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
++ ctx_flexible_sched_in(ctx, cpuctx);
++}
++
++static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
++ enum event_type_t event_type,
++ struct task_struct *task)
++{
++ struct perf_event_context *ctx = &cpuctx->ctx;
++
++ ctx_sched_in(ctx, cpuctx, event_type, task);
++}
++
++static void perf_event_context_sched_in(struct perf_event_context *ctx,
++ struct task_struct *task)
++{
++ struct perf_cpu_context *cpuctx;
++
++ cpuctx = __get_cpu_context(ctx);
++ if (cpuctx->task_ctx == ctx)
++ return;
++
++ perf_ctx_lock(cpuctx, ctx);
++ perf_pmu_disable(ctx->pmu);
++ /*
++ * We want to keep the following priority order:
++ * cpu pinned (that don't need to move), task pinned,
++ * cpu flexible, task flexible.
++ */
++ cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
++
++ if (ctx->nr_events)
++ cpuctx->task_ctx = ctx;
++
++ perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
++
++ perf_pmu_enable(ctx->pmu);
++ perf_ctx_unlock(cpuctx, ctx);
++
++ /*
++ * Since these rotations are per-cpu, we need to ensure the
++ * cpu-context we got scheduled on is actually rotating.
++ */
++ perf_pmu_rotate_start(ctx->pmu);
++}
++
++/*
++ * When sampling the branch stack in system-wide mode, it may be necessary
++ * to flush the stack on context switch. This happens when the branch
++ * stack does not tag its entries with the pid of the current task.
++ * Otherwise it becomes impossible to associate a branch entry with a
++ * task. This ambiguity is more likely to appear when the branch stack
++ * supports priv level filtering and the user sets it to monitor only
++ * at the user level (which could be a useful measurement in system-wide
++ * mode). In that case, the risk is high of having a branch stack with
++ * branch from multiple tasks. Flushing may mean dropping the existing
++ * entries or stashing them somewhere in the PMU specific code layer.
++ *
++ * This function provides the context switch callback to the lower code
++ * layer. It is invoked ONLY when there is at least one system-wide context
++ * with at least one active event using taken branch sampling.
++ */
++static void perf_branch_stack_sched_in(struct task_struct *prev,
++ struct task_struct *task)
++{
++ struct perf_cpu_context *cpuctx;
++ struct pmu *pmu;
++ unsigned long flags;
++
++ /* no need to flush branch stack if not changing task */
++ if (prev == task)
++ return;
++
++ local_irq_save(flags);
++
++ rcu_read_lock();
++
++ list_for_each_entry_rcu(pmu, &pmus, entry) {
++ cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
++
++ /*
++ * check if the context has at least one
++ * event using PERF_SAMPLE_BRANCH_STACK
++ */
++ if (cpuctx->ctx.nr_branch_stack > 0
++ && pmu->flush_branch_stack) {
++
++ perf_ctx_lock(cpuctx, cpuctx->task_ctx);
++
++ perf_pmu_disable(pmu);
++
++ pmu->flush_branch_stack();
++
++ perf_pmu_enable(pmu);
++
++ perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
++ }
++ }
++
++ rcu_read_unlock();
++
++ local_irq_restore(flags);
++}
++
++/*
++ * Called from scheduler to add the events of the current task
++ * with interrupts disabled.
++ *
++ * We restore the event value and then enable it.
++ *
++ * This does not protect us against NMI, but enable()
++ * sets the enabled bit in the control field of event _before_
++ * accessing the event control register. If a NMI hits, then it will
++ * keep the event running.
++ */
++void __perf_event_task_sched_in(struct task_struct *prev,
++ struct task_struct *task)
++{
++ struct perf_event_context *ctx;
++ int ctxn;
++
++ for_each_task_context_nr(ctxn) {
++ ctx = task->perf_event_ctxp[ctxn];
++ if (likely(!ctx))
++ continue;
++
++ perf_event_context_sched_in(ctx, task);
++ }
++ /*
++ * If cgroup events exist on this CPU, then we need
++ * to check if we have to switch in PMU state;
++ * cgroup events are system-wide mode only.
++ */
++ if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
++ perf_cgroup_sched_in(prev, task);
++
++ /* check for system-wide branch_stack events */
++ if (atomic_read(this_cpu_ptr(&perf_branch_stack_events)))
++ perf_branch_stack_sched_in(prev, task);
++}
++
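++/*
++ * Compute the sample period that would yield the requested sample_freq
++ * given that @count events were observed in @nsec; the fls64() dance
++ * below keeps the intermediate products within 64 bits.
++ */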
++static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
++{
++ u64 frequency = event->attr.sample_freq;
++ u64 sec = NSEC_PER_SEC;
++ u64 divisor, dividend;
++
++ int count_fls, nsec_fls, frequency_fls, sec_fls;
++
++ count_fls = fls64(count);
++ nsec_fls = fls64(nsec);
++ frequency_fls = fls64(frequency);
++ sec_fls = 30;
++
++ /*
++ * We got @count in @nsec, with a target of sample_freq HZ
++ * the target period becomes:
++ *
++ * @count * 10^9
++ * period = -------------------
++ * @nsec * sample_freq
++ *
++ */
++
++ /*
++ * Reduce accuracy by one bit such that @a and @b converge
++ * to a similar magnitude.
++ */
++#define REDUCE_FLS(a, b) \
++do { \
++ if (a##_fls > b##_fls) { \
++ a >>= 1; \
++ a##_fls--; \
++ } else { \
++ b >>= 1; \
++ b##_fls--; \
++ } \
++} while (0)
++
++ /*
++ * Reduce accuracy until either term fits in a u64, then proceed with
++ * the other, so that finally we can do a u64/u64 division.
++ */
++ while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
++ REDUCE_FLS(nsec, frequency);
++ REDUCE_FLS(sec, count);
++ }
++
++ if (count_fls + sec_fls > 64) {
++ divisor = nsec * frequency;
++
++ while (count_fls + sec_fls > 64) {
++ REDUCE_FLS(count, sec);
++ divisor >>= 1;
++ }
++
++ dividend = count * sec;
++ } else {
++ dividend = count * sec;
++
++ while (nsec_fls + frequency_fls > 64) {
++ REDUCE_FLS(nsec, frequency);
++ dividend >>= 1;
++ }
++
++ divisor = nsec * frequency;
++ }
++
++ if (!divisor)
++ return dividend;
++
++ return div64_u64(dividend, divisor);
++}
++
++static DEFINE_PER_CPU(int, perf_throttled_count);
++static DEFINE_PER_CPU(u64, perf_throttled_seq);
++
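++/*
++ * Move hwc->sample_period towards the period computed from the recent
++ * rate, through a divide-by-8 low-pass filter to damp oscillation, and
++ * clamp an excessive period_left by restarting the event.
++ */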
++static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
++{
++ struct hw_perf_event *hwc = &event->hw;
++ s64 period, sample_period;
++ s64 delta;
++
++ period = perf_calculate_period(event, nsec, count);
++
++ delta = (s64)(period - hwc->sample_period);
++ delta = (delta + 7) / 8; /* low pass filter */
++
++ sample_period = hwc->sample_period + delta;
++
++ if (!sample_period)
++ sample_period = 1;
++
++ hwc->sample_period = sample_period;
++
++ if (local64_read(&hwc->period_left) > 8*sample_period) {
++ if (disable)
++ event->pmu->stop(event, PERF_EF_UPDATE);
++
++ local64_set(&hwc->period_left, 0);
++
++ if (disable)
++ event->pmu->start(event, PERF_EF_RELOAD);
++ }
++}
++
++/*
++ * combine freq adjustment with unthrottling to avoid two passes over the
++ * events. At the same time, make sure, having freq events does not change
++ * the rate of unthrottling as that would introduce bias.
++ */
++static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
++ int needs_unthr)
++{
++ struct perf_event *event;
++ struct hw_perf_event *hwc;
++ u64 now, period = TICK_NSEC;
++ s64 delta;
++
++ /*
++ * only need to iterate over all events iff:
++ * - the context has events in frequency mode (needs freq adjust)
++ * - there are events to unthrottle on this cpu
++ */
++ if (!(ctx->nr_freq || needs_unthr))
++ return;
++
++ raw_spin_lock(&ctx->lock);
++ perf_pmu_disable(ctx->pmu);
++
++ list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
++ if (event->state != PERF_EVENT_STATE_ACTIVE)
++ continue;
++
++ if (!event_filter_match(event))
++ continue;
++
++ perf_pmu_disable(event->pmu);
++
++ hwc = &event->hw;
++
++ if (hwc->interrupts == MAX_INTERRUPTS) {
++ hwc->interrupts = 0;
++ perf_log_throttle(event, 1);
++ event->pmu->start(event, 0);
++ }
++
++ if (!event->attr.freq || !event->attr.sample_freq)
++ goto next;
++
++ /*
++ * stop the event and update event->count
++ */
++ event->pmu->stop(event, PERF_EF_UPDATE);
++
++ now = local64_read(&event->count);
++ delta = now - hwc->freq_count_stamp;
++ hwc->freq_count_stamp = now;
++
++ /*
++ * restart the event
++ * reload only if value has changed
++ * we have stopped the event so tell that
++ * to perf_adjust_period() to avoid stopping it
++ * twice.
++ */
++ if (delta > 0)
++ perf_adjust_period(event, period, delta, false);
++
++ event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
++ next:
++ perf_pmu_enable(event->pmu);
++ }
++
++ perf_pmu_enable(ctx->pmu);
++ raw_spin_unlock(&ctx->lock);
++}
++
++/*
++ * Round-robin a context's events:
++ */
++static void rotate_ctx(struct perf_event_context *ctx)
++{
++ /*
++ * Rotate the first entry of the non-pinned groups to the last
++ * position. Rotation might be disabled by the inheritance code.
++ */
++ if (!ctx->rotate_disable)
++ list_rotate_left(&ctx->flexible_groups);
++}
++
++/*
++ * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
++ * because they're strictly cpu affine and rotate_start is called with IRQs
++ * disabled, while rotate_context is called from IRQ context.
++ */
++static int perf_rotate_context(struct perf_cpu_context *cpuctx)
++{
++ struct perf_event_context *ctx = NULL;
++ int rotate = 0, remove = 1;
++
++ if (cpuctx->ctx.nr_events) {
++ remove = 0;
++ if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
++ rotate = 1;
++ }
++
++ ctx = cpuctx->task_ctx;
++ if (ctx && ctx->nr_events) {
++ remove = 0;
++ if (ctx->nr_events != ctx->nr_active)
++ rotate = 1;
++ }
++
++ if (!rotate)
++ goto done;
++
++ perf_ctx_lock(cpuctx, cpuctx->task_ctx);
++ perf_pmu_disable(cpuctx->ctx.pmu);
++
++ cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
++ if (ctx)
++ ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
++
++ rotate_ctx(&cpuctx->ctx);
++ if (ctx)
++ rotate_ctx(ctx);
++
++ perf_event_sched_in(cpuctx, ctx, current);
++
++ perf_pmu_enable(cpuctx->ctx.pmu);
++ perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
++done:
++ if (remove)
++ list_del_init(&cpuctx->rotation_list);
++
++ return rotate;
++}
++
++#ifdef CONFIG_NO_HZ_FULL
++bool perf_event_can_stop_tick(void)
++{
++ if (atomic_read(&nr_freq_events) ||
++ __this_cpu_read(perf_throttled_count))
++ return false;
++ else
++ return true;
++}
++#endif
++
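++/*
++ * Per-tick housekeeping: unthrottle throttled events and re-adjust the
++ * period of frequency-based events for every context on this CPU's
++ * rotation list.
++ */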
++void perf_event_task_tick(void)
++{
++ struct list_head *head = this_cpu_ptr(&rotation_list);
++ struct perf_cpu_context *cpuctx, *tmp;
++ struct perf_event_context *ctx;
++ int throttled;
++
++ WARN_ON(!irqs_disabled());
++
++ __this_cpu_inc(perf_throttled_seq);
++ throttled = __this_cpu_xchg(perf_throttled_count, 0);
++
++ list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
++ ctx = &cpuctx->ctx;
++ perf_adjust_freq_unthr_context(ctx, throttled);
++
++ ctx = cpuctx->task_ctx;
++ if (ctx)
++ perf_adjust_freq_unthr_context(ctx, throttled);
++ }
++}
++
++static int event_enable_on_exec(struct perf_event *event,
++ struct perf_event_context *ctx)
++{
++ if (!event->attr.enable_on_exec)
++ return 0;
++
++ event->attr.enable_on_exec = 0;
++ if (event->state >= PERF_EVENT_STATE_INACTIVE)
++ return 0;
++
++ __perf_event_mark_enabled(event);
++
++ return 1;
++}
++
++/*
++ * Enable all of a task's events that have been marked enable-on-exec.
++ * This expects task == current.
++ */
++static void perf_event_enable_on_exec(struct perf_event_context *ctx)
++{
++ struct perf_event_context *clone_ctx = NULL;
++ struct perf_event *event;
++ unsigned long flags;
++ int enabled = 0;
++ int ret;
++
++ local_irq_save(flags);
++ if (!ctx || !ctx->nr_events)
++ goto out;
++
++ /*
++ * We must context-switch out cgroup events to avoid conflict
++ * when invoking perf_event_context_sched_in() later on
++ * in this function. Otherwise we end up trying to
++ * switch in cgroup events that are already scheduled in.
++ */
++ perf_cgroup_sched_out(current, NULL);
++
++ raw_spin_lock(&ctx->lock);
++ task_ctx_sched_out(ctx);
++
++ list_for_each_entry(event, &ctx->event_list, event_entry) {
++ ret = event_enable_on_exec(event, ctx);
++ if (ret)
++ enabled = 1;
++ }
++
++ /*
++ * Unclone this context if we enabled any event.
++ */
++ if (enabled)
++ clone_ctx = unclone_ctx(ctx);
++
++ raw_spin_unlock(&ctx->lock);
++
++ /*
++ * Also switches cgroup events back in, if any:
++ */
++ perf_event_context_sched_in(ctx, ctx->task);
++out:
++ local_irq_restore(flags);
++
++ if (clone_ctx)
++ put_ctx(clone_ctx);
++}
++
++void perf_event_exec(void)
++{
++ struct perf_event_context *ctx;
++ int ctxn;
++
++ rcu_read_lock();
++ for_each_task_context_nr(ctxn) {
++ ctx = current->perf_event_ctxp[ctxn];
++ if (!ctx)
++ continue;
++
++ perf_event_enable_on_exec(ctx);
++ }
++ rcu_read_unlock();
++}
++
++/*
++ * Cross CPU call to read the hardware event
++ */
++static void __perf_event_read(void *info)
++{
++ struct perf_event *event = info;
++ struct perf_event_context *ctx = event->ctx;
++ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
++
++ /*
++ * If this is a task context, we need to check whether it is
++ * the current task context of this cpu. If not it has been
++ * scheduled out before the smp call arrived. In that case
++ * event->count would have been updated to a recent sample
++ * when the event was scheduled out.
++ */
++ if (ctx->task && cpuctx->task_ctx != ctx)
++ return;
++
++ raw_spin_lock(&ctx->lock);
++ if (ctx->is_active) {
++ update_context_time(ctx);
++ update_cgrp_time_from_event(event);
++ }
++ update_event_times(event);
++ if (event->state == PERF_EVENT_STATE_ACTIVE)
++ event->pmu->read(event);
++ raw_spin_unlock(&ctx->lock);
++}
++
++static inline u64 perf_event_count(struct perf_event *event)
++{
++ return local64_read(&event->count) + atomic64_read(&event->child_count);
++}
++
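++/*
++ * Read an event's current value: active events are read on their CPU
++ * via an smp call, inactive events just get their times updated; the
++ * returned value includes the accumulated child counts.
++ */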
++static u64 perf_event_read(struct perf_event *event)
++{
++ /*
++ * If event is enabled and currently active on a CPU, update the
++ * value in the event structure:
++ */
++ if (event->state == PERF_EVENT_STATE_ACTIVE) {
++ smp_call_function_single(event->oncpu,
++ __perf_event_read, event, 1);
++ } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
++ struct perf_event_context *ctx = event->ctx;
++ unsigned long flags;
++
++ raw_spin_lock_irqsave(&ctx->lock, flags);
++ /*
++ * may read while context is not active
++ * (e.g., thread is blocked), in that case
++ * we cannot update context time
++ */
++ if (ctx->is_active) {
++ update_context_time(ctx);
++ update_cgrp_time_from_event(event);
++ }
++ update_event_times(event);
++ raw_spin_unlock_irqrestore(&ctx->lock, flags);
++ }
++
++ return perf_event_count(event);
++}
++
++/*
++ * Initialize the perf_event context in a task_struct:
++ */
++static void __perf_event_init_context(struct perf_event_context *ctx)
++{
++ raw_spin_lock_init(&ctx->lock);
++ mutex_init(&ctx->mutex);
++ INIT_LIST_HEAD(&ctx->pinned_groups);
++ INIT_LIST_HEAD(&ctx->flexible_groups);
++ INIT_LIST_HEAD(&ctx->event_list);
++ atomic_set(&ctx->refcount, 1);
++ INIT_DELAYED_WORK(&ctx->orphans_remove, orphans_remove_work);
++}
++
++static struct perf_event_context *
++alloc_perf_context(struct pmu *pmu, struct task_struct *task)
++{
++ struct perf_event_context *ctx;
++
++ ctx = kzalloc(sizeof(struct perf_event_context), GFP_KERNEL);
++ if (!ctx)
++ return NULL;
++
++ __perf_event_init_context(ctx);
++ if (task) {
++ ctx->task = task;
++ get_task_struct(task);
++ }
++ ctx->pmu = pmu;
++
++ return ctx;
++}
++
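++/*
++ * Resolve a vpid to a task (0 means current), take a reference on it
++ * and apply the ptrace PTRACE_MODE_READ permission check.
++ */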
++static struct task_struct *
++find_lively_task_by_vpid(pid_t vpid)
++{
++ struct task_struct *task;
++ int err;
++
++ rcu_read_lock();
++ if (!vpid)
++ task = current;
++ else
++ task = find_task_by_vpid(vpid);
++ if (task)
++ get_task_struct(task);
++ rcu_read_unlock();
++
++ if (!task)
++ return ERR_PTR(-ESRCH);
++
++ /* Reuse ptrace permission checks for now. */
++ err = -EACCES;
++ if (!ptrace_may_access(task, PTRACE_MODE_READ))
++ goto errout;
++
++ return task;
++errout:
++ put_task_struct(task);
++ return ERR_PTR(err);
++
++}
++
++/*
++ * Returns a matching context with refcount and pincount.
++ */
++static struct perf_event_context *
++find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
++{
++ struct perf_event_context *ctx, *clone_ctx = NULL;
++ struct perf_cpu_context *cpuctx;
++ unsigned long flags;
++ int ctxn, err;
++
++ if (!task) {
++ /* Must be root to operate on a CPU event: */
++ if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
++ return ERR_PTR(-EACCES);
++
++ /*
++ * We could be clever and allow attaching an event to an
++ * offline CPU and activate it when the CPU comes up, but
++ * that's for later.
++ */
++ if (!cpu_online(cpu))
++ return ERR_PTR(-ENODEV);
++
++ cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
++ ctx = &cpuctx->ctx;
++ get_ctx(ctx);
++ ++ctx->pin_count;
++
++ return ctx;
++ }
++
++ err = -EINVAL;
++ ctxn = pmu->task_ctx_nr;
++ if (ctxn < 0)
++ goto errout;
++
++retry:
++ ctx = perf_lock_task_context(task, ctxn, &flags);
++ if (ctx) {
++ clone_ctx = unclone_ctx(ctx);
++ ++ctx->pin_count;
++ raw_spin_unlock_irqrestore(&ctx->lock, flags);
++
++ if (clone_ctx)
++ put_ctx(clone_ctx);
++ } else {
++ ctx = alloc_perf_context(pmu, task);
++ err = -ENOMEM;
++ if (!ctx)
++ goto errout;
++
++ err = 0;
++ mutex_lock(&task->perf_event_mutex);
++ /*
++ * If it has already passed perf_event_exit_task(),
++ * we must see PF_EXITING; it takes this mutex too.
++ */
++ if (task->flags & PF_EXITING)
++ err = -ESRCH;
++ else if (task->perf_event_ctxp[ctxn])
++ err = -EAGAIN;
++ else {
++ get_ctx(ctx);
++ ++ctx->pin_count;
++ rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
++ }
++ mutex_unlock(&task->perf_event_mutex);
++
++ if (unlikely(err)) {
++ put_ctx(ctx);
++
++ if (err == -EAGAIN)
++ goto retry;
++ goto errout;
++ }
++ }
++
++ return ctx;
++
++errout:
++ return ERR_PTR(err);
++}
++
++static void perf_event_free_filter(struct perf_event *event);
++
++static void free_event_rcu(struct rcu_head *head)
++{
++ struct perf_event *event;
++
++ event = container_of(head, struct perf_event, rcu_head);
++ if (event->ns)
++ put_pid_ns(event->ns);
++ perf_event_free_filter(event);
++ kfree(event);
++}
++
++static void ring_buffer_put(struct ring_buffer *rb);
++static void ring_buffer_attach(struct perf_event *event,
++ struct ring_buffer *rb);
++
++static void unaccount_event_cpu(struct perf_event *event, int cpu)
++{
++ if (event->parent)
++ return;
++
++ if (has_branch_stack(event)) {
++ if (!(event->attach_state & PERF_ATTACH_TASK))
++ atomic_dec(&per_cpu(perf_branch_stack_events, cpu));
++ }
++ if (is_cgroup_event(event))
++ atomic_dec(&per_cpu(perf_cgroup_events, cpu));
++}
++
++static void unaccount_event(struct perf_event *event)
++{
++ if (event->parent)
++ return;
++
++ if (event->attach_state & PERF_ATTACH_TASK)
++ static_key_slow_dec_deferred(&perf_sched_events);
++ if (event->attr.mmap || event->attr.mmap_data)
++ atomic_dec(&nr_mmap_events);
++ if (event->attr.comm)
++ atomic_dec(&nr_comm_events);
++ if (event->attr.task)
++ atomic_dec(&nr_task_events);
++ if (event->attr.freq)
++ atomic_dec(&nr_freq_events);
++ if (is_cgroup_event(event))
++ static_key_slow_dec_deferred(&perf_sched_events);
++ if (has_branch_stack(event))
++ static_key_slow_dec_deferred(&perf_sched_events);
++
++ unaccount_event_cpu(event, event->cpu);
++}
++
++static void __free_event(struct perf_event *event)
++{
++ if (!event->parent) {
++ if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
++ put_callchain_buffers();
++ }
++
++ if (event->destroy)
++ event->destroy(event);
++
++ if (event->ctx)
++ put_ctx(event->ctx);
++
++ if (event->pmu)
++ module_put(event->pmu->module);
++
++ call_rcu(&event->rcu_head, free_event_rcu);
++}
++
++static void _free_event(struct perf_event *event)
++{
++ irq_work_sync(&event->pending);
++
++ unaccount_event(event);
++
++ if (event->rb) {
++ /*
++ * Can happen when we close an event with re-directed output.
++ *
++ * Since we have a 0 refcount, perf_mmap_close() will skip
++ * over us; possibly making our ring_buffer_put() the last.
++ */
++ mutex_lock(&event->mmap_mutex);
++ ring_buffer_attach(event, NULL);
++ mutex_unlock(&event->mmap_mutex);
++ }
++
++ if (is_cgroup_event(event))
++ perf_detach_cgroup(event);
++
++ __free_event(event);
++}
++
++/*
++ * Used to free events which have a known refcount of 1, such as in error paths
++ * where the event isn't exposed yet and inherited events.
++ */
++static void free_event(struct perf_event *event)
++{
++ if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1,
++ "unexpected event refcount: %ld; ptr=%p\n",
++ atomic_long_read(&event->refcount), event)) {
++ /* leak to avoid use-after-free */
++ return;
++ }
++
++ _free_event(event);
++}
++
++/*
++ * Remove user event from the owner task.
++ */
++static void perf_remove_from_owner(struct perf_event *event)
++{
++ struct task_struct *owner;
++
++ rcu_read_lock();
++ owner = ACCESS_ONCE(event->owner);
++ /*
++ * Matches the smp_wmb() in perf_event_exit_task(). If we observe
++ * !owner it means the list deletion is complete and we can indeed
++ * free this event, otherwise we need to serialize on
++ * owner->perf_event_mutex.
++ */
++ smp_read_barrier_depends();
++ if (owner) {
++ /*
++ * Since delayed_put_task_struct() also drops the last
++ * task reference we can safely take a new reference
++ * while holding the rcu_read_lock().
++ */
++ get_task_struct(owner);
++ }
++ rcu_read_unlock();
++
++ if (owner) {
++ mutex_lock(&owner->perf_event_mutex);
++ /*
++ * We have to re-check the event->owner field, if it is cleared
++ * we raced with perf_event_exit_task(), acquiring the mutex
++ * ensured they're done, and we can proceed with freeing the
++ * event.
++ */
++ if (event->owner)
++ list_del_init(&event->owner_entry);
++ mutex_unlock(&owner->perf_event_mutex);
++ put_task_struct(owner);
++ }
++}
++
++/*
++ * Called when the last reference to the file is gone.
++ */
++static void put_event(struct perf_event *event)
++{
++ struct perf_event_context *ctx = event->ctx;
++
++ if (!atomic_long_dec_and_test(&event->refcount))
++ return;
++
++ if (!is_kernel_event(event))
++ perf_remove_from_owner(event);
++
++ WARN_ON_ONCE(ctx->parent_ctx);
++ /*
++ * There are two ways this annotation is useful:
++ *
++ * 1) there is a lock recursion from perf_event_exit_task
++ * see the comment there.
++ *
++ * 2) there is a lock-inversion with mmap_sem through
++ * perf_event_read_group(), which takes faults while
++ * holding ctx->mutex, however this is called after
++ * the last filedesc died, so there is no possibility
++ * to trigger the AB-BA case.
++ */
++ mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
++ perf_remove_from_context(event, true);
++ mutex_unlock(&ctx->mutex);
++
++ _free_event(event);
++}
++
++int perf_event_release_kernel(struct perf_event *event)
++{
++ put_event(event);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(perf_event_release_kernel);
++
++static int perf_release(struct inode *inode, struct file *file)
++{
++ put_event(file->private_data);
++ return 0;
++}
++
++/*
++ * Remove all orphaned events from the context.
++ */
++static void orphans_remove_work(struct work_struct *work)
++{
++ struct perf_event_context *ctx;
++ struct perf_event *event, *tmp;
++
++ ctx = container_of(work, struct perf_event_context,
++ orphans_remove.work);
++
++ mutex_lock(&ctx->mutex);
++ list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) {
++ struct perf_event *parent_event = event->parent;
++
++ if (!is_orphaned_child(event))
++ continue;
++
++ perf_remove_from_context(event, true);
++
++ mutex_lock(&parent_event->child_mutex);
++ list_del_init(&event->child_list);
++ mutex_unlock(&parent_event->child_mutex);
++
++ free_event(event);
++ put_event(parent_event);
++ }
++
++ raw_spin_lock_irq(&ctx->lock);
++ ctx->orphans_remove_sched = false;
++ raw_spin_unlock_irq(&ctx->lock);
++ mutex_unlock(&ctx->mutex);
++
++ put_ctx(ctx);
++}
++
++u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
++{
++ struct perf_event *child;
++ u64 total = 0;
++
++ *enabled = 0;
++ *running = 0;
++
++ mutex_lock(&event->child_mutex);
++ total += perf_event_read(event);
++ *enabled += event->total_time_enabled +
++ atomic64_read(&event->child_total_time_enabled);
++ *running += event->total_time_running +
++ atomic64_read(&event->child_total_time_running);
++
++ list_for_each_entry(child, &event->child_list, child_list) {
++ total += perf_event_read(child);
++ *enabled += child->total_time_enabled;
++ *running += child->total_time_running;
++ }
++ mutex_unlock(&event->child_mutex);
++
++ return total;
++}
++EXPORT_SYMBOL_GPL(perf_event_read_value);
++
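++/*
++ * Format a PERF_FORMAT_GROUP read: sibling count, optional
++ * enabled/running times and the leader's value first, then one value
++ * (plus optional ID) per sibling.
++ */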
++static int perf_event_read_group(struct perf_event *event,
++ u64 read_format, char __user *buf)
++{
++ struct perf_event *leader = event->group_leader, *sub;
++ int n = 0, size = 0, ret = -EFAULT;
++ struct perf_event_context *ctx = leader->ctx;
++ u64 values[5];
++ u64 count, enabled, running;
++
++ mutex_lock(&ctx->mutex);
++ count = perf_event_read_value(leader, &enabled, &running);
++
++ values[n++] = 1 + leader->nr_siblings;
++ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
++ values[n++] = enabled;
++ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
++ values[n++] = running;
++ values[n++] = count;
++ if (read_format & PERF_FORMAT_ID)
++ values[n++] = primary_event_id(leader);
++
++ size = n * sizeof(u64);
++
++ if (copy_to_user(buf, values, size))
++ goto unlock;
++
++ ret = size;
++
++ list_for_each_entry(sub, &leader->sibling_list, group_entry) {
++ n = 0;
++
++ values[n++] = perf_event_read_value(sub, &enabled, &running);
++ if (read_format & PERF_FORMAT_ID)
++ values[n++] = primary_event_id(sub);
++
++ size = n * sizeof(u64);
++
++ if (copy_to_user(buf + ret, values, size)) {
++ ret = -EFAULT;
++ goto unlock;
++ }
++
++ ret += size;
++ }
++unlock:
++ mutex_unlock(&ctx->mutex);
++
++ return ret;
++}
++
++static int perf_event_read_one(struct perf_event *event,
++ u64 read_format, char __user *buf)
++{
++ u64 enabled, running;
++ u64 values[4];
++ int n = 0;
++
++ values[n++] = perf_event_read_value(event, &enabled, &running);
++ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
++ values[n++] = enabled;
++ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
++ values[n++] = running;
++ if (read_format & PERF_FORMAT_ID)
++ values[n++] = primary_event_id(event);
++
++ if (copy_to_user(buf, values, n * sizeof(u64)))
++ return -EFAULT;
++
++ return n * sizeof(u64);
++}
++
++static bool is_event_hup(struct perf_event *event)
++{
++ bool no_children;
++
++ if (event->state != PERF_EVENT_STATE_EXIT)
++ return false;
++
++ mutex_lock(&event->child_mutex);
++ no_children = list_empty(&event->child_list);
++ mutex_unlock(&event->child_mutex);
++ return no_children;
++}
++
++/*
++ * Read the performance event - simple non-blocking version for now
++ */
++static ssize_t
++perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
++{
++ u64 read_format = event->attr.read_format;
++ int ret;
++
++ /*
++ * Return end-of-file for a read on an event that is in
++ * error state (i.e. because it was pinned but it couldn't be
++ * scheduled on to the CPU at some point).
++ */
++ if (event->state == PERF_EVENT_STATE_ERROR)
++ return 0;
++
++ if (count < event->read_size)
++ return -ENOSPC;
++
++ WARN_ON_ONCE(event->ctx->parent_ctx);
++ if (read_format & PERF_FORMAT_GROUP)
++ ret = perf_event_read_group(event, read_format, buf);
++ else
++ ret = perf_event_read_one(event, read_format, buf);
++
++ return ret;
++}
++
++static ssize_t
++perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
++{
++ struct perf_event *event = file->private_data;
++
++ return perf_read_hw(event, buf, count);
++}
++
++static unsigned int perf_poll(struct file *file, poll_table *wait)
++{
++ struct perf_event *event = file->private_data;
++ struct ring_buffer *rb;
++ unsigned int events = POLLHUP;
++
++ poll_wait(file, &event->waitq, wait);
++
++ if (is_event_hup(event))
++ return events;
++
++ /*
++ * Pin the event->rb by taking event->mmap_mutex; otherwise
++ * perf_event_set_output() can swizzle our rb and make us miss wakeups.
++ */
++ mutex_lock(&event->mmap_mutex);
++ rb = event->rb;
++ if (rb)
++ events = atomic_xchg(&rb->poll, 0);
++ mutex_unlock(&event->mmap_mutex);
++ return events;
++}
++
++static void perf_event_reset(struct perf_event *event)
++{
++ (void)perf_event_read(event);
++ local64_set(&event->count, 0);
++ perf_event_update_userpage(event);
++}
++
++/*
++ * Holding the top-level event's child_mutex means that any
++ * descendant process that has inherited this event will block
++ * in sync_child_event if it goes to exit, thus satisfying the
++ * task existence requirements of perf_event_enable/disable.
++ */
++static void perf_event_for_each_child(struct perf_event *event,
++ void (*func)(struct perf_event *))
++{
++ struct perf_event *child;
++
++ WARN_ON_ONCE(event->ctx->parent_ctx);
++ mutex_lock(&event->child_mutex);
++ func(event);
++ list_for_each_entry(child, &event->child_list, child_list)
++ func(child);
++ mutex_unlock(&event->child_mutex);
++}
++
++static void perf_event_for_each(struct perf_event *event,
++ void (*func)(struct perf_event *))
++{
++ struct perf_event_context *ctx = event->ctx;
++ struct perf_event *sibling;
++
++ WARN_ON_ONCE(ctx->parent_ctx);
++ mutex_lock(&ctx->mutex);
++ event = event->group_leader;
++
++ perf_event_for_each_child(event, func);
++ list_for_each_entry(sibling, &event->sibling_list, group_entry)
++ perf_event_for_each_child(sibling, func);
++ mutex_unlock(&ctx->mutex);
++}
++
++static int perf_event_period(struct perf_event *event, u64 __user *arg)
++{
++ struct perf_event_context *ctx = event->ctx;
++ int ret = 0, active;
++ u64 value;
++
++ if (!is_sampling_event(event))
++ return -EINVAL;
++
++ if (copy_from_user(&value, arg, sizeof(value)))
++ return -EFAULT;
++
++ if (!value)
++ return -EINVAL;
++
++ raw_spin_lock_irq(&ctx->lock);
++ if (event->attr.freq) {
++ if (value > sysctl_perf_event_sample_rate) {
++ ret = -EINVAL;
++ goto unlock;
++ }
++
++ event->attr.sample_freq = value;
++ } else {
++ event->attr.sample_period = value;
++ event->hw.sample_period = value;
++ }
++
++ active = (event->state == PERF_EVENT_STATE_ACTIVE);
++ if (active) {
++ perf_pmu_disable(ctx->pmu);
++ event->pmu->stop(event, PERF_EF_UPDATE);
++ }
++
++ local64_set(&event->hw.period_left, 0);
++
++ if (active) {
++ event->pmu->start(event, PERF_EF_RELOAD);
++ perf_pmu_enable(ctx->pmu);
++ }
++
++unlock:
++ raw_spin_unlock_irq(&ctx->lock);
++
++ return ret;
++}
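++
++/*
++ * Illustrative sketch, not part of this patch: perf_event_period()
++ * backs the PERF_EVENT_IOC_PERIOD ioctl, which takes a pointer to the
++ * new period (or frequency, when attr.freq is set):
++ *
++ * __u64 period = 4096;
++ * ioctl(fd, PERF_EVENT_IOC_PERIOD, &period);
++ */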
++
++static const struct file_operations perf_fops;
++
++static inline int perf_fget_light(int fd, struct fd *p)
++{
++ struct fd f = fdget(fd);
++ if (!f.file)
++ return -EBADF;
++
++ if (f.file->f_op != &perf_fops) {
++ fdput(f);
++ return -EBADF;
++ }
++ *p = f;
++ return 0;
++}
++
++static int perf_event_set_output(struct perf_event *event,
++ struct perf_event *output_event);
++static int perf_event_set_filter(struct perf_event *event, void __user *arg);
++
++static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
++{
++ struct perf_event *event = file->private_data;
++ void (*func)(struct perf_event *);
++ u32 flags = arg;
++
++ switch (cmd) {
++ case PERF_EVENT_IOC_ENABLE:
++ func = perf_event_enable;
++ break;
++ case PERF_EVENT_IOC_DISABLE:
++ func = perf_event_disable;
++ break;
++ case PERF_EVENT_IOC_RESET:
++ func = perf_event_reset;
++ break;
++
++ case PERF_EVENT_IOC_REFRESH:
++ return perf_event_refresh(event, arg);
++
++ case PERF_EVENT_IOC_PERIOD:
++ return perf_event_period(event, (u64 __user *)arg);
++
++ case PERF_EVENT_IOC_ID:
++ {
++ u64 id = primary_event_id(event);
++
++ if (copy_to_user((void __user *)arg, &id, sizeof(id)))
++ return -EFAULT;
++ return 0;
++ }
++
++ case PERF_EVENT_IOC_SET_OUTPUT:
++ {
++ int ret;
++ if (arg != -1) {
++ struct perf_event *output_event;
++ struct fd output;
++ ret = perf_fget_light(arg, &output);
++ if (ret)
++ return ret;
++ output_event = output.file->private_data;
++ ret = perf_event_set_output(event, output_event);
++ fdput(output);
++ } else {
++ ret = perf_event_set_output(event, NULL);
++ }
++ return ret;
++ }
++
++ case PERF_EVENT_IOC_SET_FILTER:
++ return perf_event_set_filter(event, (void __user *)arg);
++
++ default:
++ return -ENOTTY;
++ }
++
++ if (flags & PERF_IOC_FLAG_GROUP)
++ perf_event_for_each(event, func);
++ else
++ perf_event_for_each_child(event, func);
++
++ return 0;
++}
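++
++/*
++ * Illustrative sketch, not part of this patch: for the ENABLE, DISABLE
++ * and RESET commands above, passing PERF_IOC_FLAG_GROUP in arg applies
++ * the operation to the whole sibling group instead of just this event
++ * and its children:
++ *
++ * ioctl(leader_fd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
++ */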
++
++#ifdef CONFIG_COMPAT
++static long perf_compat_ioctl(struct file *file, unsigned int cmd,
++ unsigned long arg)
++{
++ switch (_IOC_NR(cmd)) {
++ case _IOC_NR(PERF_EVENT_IOC_SET_FILTER):
++ case _IOC_NR(PERF_EVENT_IOC_ID):
++ /* Fix up pointer size (usually 4 -> 8 in the 32-on-64-bit case) */
++ if (_IOC_SIZE(cmd) == sizeof(compat_uptr_t)) {
++ cmd &= ~IOCSIZE_MASK;
++ cmd |= sizeof(void *) << IOCSIZE_SHIFT;
++ }
++ break;
++ }
++ return perf_ioctl(file, cmd, arg);
++}
++#else
++# define perf_compat_ioctl NULL
++#endif
++
++int perf_event_task_enable(void)
++{
++ struct perf_event *event;
++
++ mutex_lock(&current->perf_event_mutex);
++ list_for_each_entry(event, &current->perf_event_list, owner_entry)
++ perf_event_for_each_child(event, perf_event_enable);
++ mutex_unlock(&current->perf_event_mutex);
++
++ return 0;
++}
++
++int perf_event_task_disable(void)
++{
++ struct perf_event *event;
++
++ mutex_lock(&current->perf_event_mutex);
++ list_for_each_entry(event, &current->perf_event_list, owner_entry)
++ perf_event_for_each_child(event, perf_event_disable);
++ mutex_unlock(&current->perf_event_mutex);
++
++ return 0;
++}
++
++static int perf_event_index(struct perf_event *event)
++{
++ if (event->hw.state & PERF_HES_STOPPED)
++ return 0;
++
++ if (event->state != PERF_EVENT_STATE_ACTIVE)
++ return 0;
++
++ return event->pmu->event_idx(event);
++}
++
++static void calc_timer_values(struct perf_event *event,
++ u64 *now,
++ u64 *enabled,
++ u64 *running)
++{
++ u64 ctx_time;
++
++ *now = perf_clock();
++ ctx_time = event->shadow_ctx_time + *now;
++ *enabled = ctx_time - event->tstamp_enabled;
++ *running = ctx_time - event->tstamp_running;
++}
++
++static void perf_event_init_userpage(struct perf_event *event)
++{
++ struct perf_event_mmap_page *userpg;
++ struct ring_buffer *rb;
++
++ rcu_read_lock();
++ rb = rcu_dereference(event->rb);
++ if (!rb)
++ goto unlock;
++
++ userpg = rb->user_page;
++
++ /* Allow new userspace to detect that bit 0 is deprecated */
++ userpg->cap_bit0_is_deprecated = 1;
++ userpg->size = offsetof(struct perf_event_mmap_page, __reserved);
++
++unlock:
++ rcu_read_unlock();
++}
++
++void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
++{
++}
++
++/*
++ * Callers need to ensure there can be no nesting of this function, otherwise
++ * the seqlock logic goes bad. We cannot serialize this because the arch
++ * code calls this from NMI context.
++ */
++void perf_event_update_userpage(struct perf_event *event)
++{
++ struct perf_event_mmap_page *userpg;
++ struct ring_buffer *rb;
++ u64 enabled, running, now;
++
++ rcu_read_lock();
++ rb = rcu_dereference(event->rb);
++ if (!rb)
++ goto unlock;
++
++ /*
++ * compute total_time_enabled, total_time_running
++ * based on snapshot values taken when the event
++ * was last scheduled in.
++ *
++ * We cannot simply call update_context_time()
++ * because of locking issues, as we can be called
++ * in NMI context.
++ */
++ calc_timer_values(event, &now, &enabled, &running);
++
++ userpg = rb->user_page;
++ /*
++ * Disable preemption so as not to let the corresponding user-space
++ * spin too long if we get preempted.
++ */
++ preempt_disable();
++ ++userpg->lock;
++ barrier();
++ userpg->index = perf_event_index(event);
++ userpg->offset = perf_event_count(event);
++ if (userpg->index)
++ userpg->offset -= local64_read(&event->hw.prev_count);
++
++ userpg->time_enabled = enabled +
++ atomic64_read(&event->child_total_time_enabled);
++
++ userpg->time_running = running +
++ atomic64_read(&event->child_total_time_running);
++
++ arch_perf_update_userpage(userpg, now);
++
++ barrier();
++ ++userpg->lock;
++ preempt_enable();
++unlock:
++ rcu_read_unlock();
++}
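++
++/*
++ * Illustrative sketch, not part of this patch: the ++userpg->lock and
++ * barrier() pairs above form a seqcount, so a userspace reader of the
++ * mmap()ed control page is expected to retry until it sees a stable
++ * sequence number:
++ *
++ * do {
++ * seq = pc->lock;
++ * barrier();
++ * idx = pc->index; off = pc->offset; (and the times)
++ * barrier();
++ * } while (pc->lock != seq);
++ */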
++
++static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
++{
++ struct perf_event *event = vma->vm_file->private_data;
++ struct ring_buffer *rb;
++ int ret = VM_FAULT_SIGBUS;
++
++ if (vmf->flags & FAULT_FLAG_MKWRITE) {
++ if (vmf->pgoff == 0)
++ ret = 0;
++ return ret;
++ }
++
++ rcu_read_lock();
++ rb = rcu_dereference(event->rb);
++ if (!rb)
++ goto unlock;
++
++ if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
++ goto unlock;
++
++ vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
++ if (!vmf->page)
++ goto unlock;
++
++ get_page(vmf->page);
++ vmf->page->mapping = vma->vm_file->f_mapping;
++ vmf->page->index = vmf->pgoff;
++
++ ret = 0;
++unlock:
++ rcu_read_unlock();
++
++ return ret;
++}
++
++static void ring_buffer_attach(struct perf_event *event,
++ struct ring_buffer *rb)
++{
++ struct ring_buffer *old_rb = NULL;
++ unsigned long flags;
++
++ if (event->rb) {
++ /*
++ * Should be impossible; we set this when removing
++ * event->rb_entry and wait/clear when adding event->rb_entry.
++ */
++ WARN_ON_ONCE(event->rcu_pending);
++
++ old_rb = event->rb;
++ event->rcu_batches = get_state_synchronize_rcu();
++ event->rcu_pending = 1;
++
++ spin_lock_irqsave(&old_rb->event_lock, flags);
++ list_del_rcu(&event->rb_entry);
++ spin_unlock_irqrestore(&old_rb->event_lock, flags);
++ }
++
++ if (event->rcu_pending && rb) {
++ cond_synchronize_rcu(event->rcu_batches);
++ event->rcu_pending = 0;
++ }
++
++ if (rb) {
++ spin_lock_irqsave(&rb->event_lock, flags);
++ list_add_rcu(&event->rb_entry, &rb->event_list);
++ spin_unlock_irqrestore(&rb->event_lock, flags);
++ }
++
++ rcu_assign_pointer(event->rb, rb);
++
++ if (old_rb) {
++ ring_buffer_put(old_rb);
++ /*
++ * Since we detached before setting the new rb (so that we
++ * could attach the new rb), we could have missed a wakeup.
++ * Provide it now.
++ */
++ wake_up_all(&event->waitq);
++ }
++}
++
++static void ring_buffer_wakeup(struct perf_event *event)
++{
++ struct ring_buffer *rb;
++
++ rcu_read_lock();
++ rb = rcu_dereference(event->rb);
++ if (rb) {
++ list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
++ wake_up_all(&event->waitq);
++ }
++ rcu_read_unlock();
++}
++
++static void rb_free_rcu(struct rcu_head *rcu_head)
++{
++ struct ring_buffer *rb;
++
++ rb = container_of(rcu_head, struct ring_buffer, rcu_head);
++ rb_free(rb);
++}
++
++static struct ring_buffer *ring_buffer_get(struct perf_event *event)
++{
++ struct ring_buffer *rb;
++
++ rcu_read_lock();
++ rb = rcu_dereference(event->rb);
++ if (rb) {
++ if (!atomic_inc_not_zero(&rb->refcount))
++ rb = NULL;
++ }
++ rcu_read_unlock();
++
++ return rb;
++}
++
++static void ring_buffer_put(struct ring_buffer *rb)
++{
++ if (!atomic_dec_and_test(&rb->refcount))
++ return;
++
++ WARN_ON_ONCE(!list_empty(&rb->event_list));
++
++ call_rcu(&rb->rcu_head, rb_free_rcu);
++}
++
++static void perf_mmap_open(struct vm_area_struct *vma)
++{
++ struct perf_event *event = vma->vm_file->private_data;
++
++ atomic_inc(&event->mmap_count);
++ atomic_inc(&event->rb->mmap_count);
++}
++
++/*
++ * A buffer can be mmap()ed multiple times; either directly through the same
++ * event, or through other events by use of perf_event_set_output().
++ *
++ * In order to undo the VM accounting done by perf_mmap() we need to destroy
++ * the buffer here, where we still have a VM context. This means we need
++ * to detach all events redirecting to us.
++ */
++static void perf_mmap_close(struct vm_area_struct *vma)
++{
++ struct perf_event *event = vma->vm_file->private_data;
++
++ struct ring_buffer *rb = ring_buffer_get(event);
++ struct user_struct *mmap_user = rb->mmap_user;
++ int mmap_locked = rb->mmap_locked;
++ unsigned long size = perf_data_size(rb);
++
++ atomic_dec(&rb->mmap_count);
++
++ if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
++ goto out_put;
++
++ ring_buffer_attach(event, NULL);
++ mutex_unlock(&event->mmap_mutex);
++
++ /* If there's still other mmap()s of this buffer, we're done. */
++ if (atomic_read(&rb->mmap_count))
++ goto out_put;
++
++ /*
++ * No other mmap()s, detach from all other events that might redirect
++ * into the now unreachable buffer. Somewhat complicated by the
++ * fact that rb::event_lock otherwise nests inside mmap_mutex.
++ */
++again:
++ rcu_read_lock();
++ list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
++ if (!atomic_long_inc_not_zero(&event->refcount)) {
++ /*
++ * This event is en-route to free_event() which will
++ * detach it and remove it from the list.
++ */
++ continue;
++ }
++ rcu_read_unlock();
++
++ mutex_lock(&event->mmap_mutex);
++ /*
++ * Check we didn't race with perf_event_set_output() which can
++ * swizzle the rb from under us while we were waiting to
++ * acquire mmap_mutex.
++ *
++ * If we find a different rb, ignore this event; a later
++ * iteration will no longer find it on the list. We have to
++ * still restart the iteration to make sure we're not now
++ * iterating the wrong list.
++ */
++ if (event->rb == rb)
++ ring_buffer_attach(event, NULL);
++
++ mutex_unlock(&event->mmap_mutex);
++ put_event(event);
++
++ /*
++ * Restart the iteration; either we're on the wrong list or
++ * we have destroyed its integrity by doing a deletion.
++ */
++ goto again;
++ }
++ rcu_read_unlock();
++
++ /*
++ * It could be that there are still a few 0-ref events on the list; they'll
++ * get cleaned up by free_event() -- they'll also still have their
++ * ref on the rb and will free it whenever they are done with it.
++ *
++ * Aside from that, this buffer is 'fully' detached and unmapped,
++ * undo the VM accounting.
++ */
++
++ atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
++ vma->vm_mm->pinned_vm -= mmap_locked;
++ free_uid(mmap_user);
++
++out_put:
++ ring_buffer_put(rb); /* could be last */
++}
++
++static const struct vm_operations_struct perf_mmap_vmops = {
++ .open = perf_mmap_open,
++ .close = perf_mmap_close,
++ .fault = perf_mmap_fault,
++ .page_mkwrite = perf_mmap_fault,
++};
++
++static int perf_mmap(struct file *file, struct vm_area_struct *vma)
++{
++ struct perf_event *event = file->private_data;
++ unsigned long user_locked, user_lock_limit;
++ struct user_struct *user = current_user();
++ unsigned long locked, lock_limit;
++ struct ring_buffer *rb;
++ unsigned long vma_size;
++ unsigned long nr_pages;
++ long user_extra, extra;
++ int ret = 0, flags = 0;
++
++ /*
++ * Don't allow mmap() of inherited per-task counters. This would
++ * create a performance issue due to all children writing to the
++ * same rb.
++ */
++ if (event->cpu == -1 && event->attr.inherit)
++ return -EINVAL;
++
++ if (!(vma->vm_flags & VM_SHARED))
++ return -EINVAL;
++
++ vma_size = vma->vm_end - vma->vm_start;
++ nr_pages = (vma_size / PAGE_SIZE) - 1;
++
++ /*
++ * If we have rb pages, ensure their count is a power of two, so we
++ * can do bitmasks instead of modulo.
++ */
++ if (nr_pages != 0 && !is_power_of_2(nr_pages))
++ return -EINVAL;
++
++ if (vma_size != PAGE_SIZE * (1 + nr_pages))
++ return -EINVAL;
++
++ if (vma->vm_pgoff != 0)
++ return -EINVAL;
++
++ WARN_ON_ONCE(event->ctx->parent_ctx);
++again:
++ mutex_lock(&event->mmap_mutex);
++ if (event->rb) {
++ if (event->rb->nr_pages != nr_pages) {
++ ret = -EINVAL;
++ goto unlock;
++ }
++
++ if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
++ /*
++ * Raced against perf_mmap_close() through
++ * perf_event_set_output(). Try again, hope for better
++ * luck.
++ */
++ mutex_unlock(&event->mmap_mutex);
++ goto again;
++ }
++
++ goto unlock;
++ }
++
++ user_extra = nr_pages + 1;
++ user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
++
++ /*
++ * Increase the limit linearly with more CPUs:
++ */
++ user_lock_limit *= num_online_cpus();
++
++ user_locked = atomic_long_read(&user->locked_vm) + user_extra;
++
++ extra = 0;
++ if (user_locked > user_lock_limit)
++ extra = user_locked - user_lock_limit;
++
++ lock_limit = rlimit(RLIMIT_MEMLOCK);
++ lock_limit >>= PAGE_SHIFT;
++ locked = vma->vm_mm->pinned_vm + extra;
++
++ if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
++ !capable(CAP_IPC_LOCK)) {
++ ret = -EPERM;
++ goto unlock;
++ }
++
++ WARN_ON(event->rb);
++
++ if (vma->vm_flags & VM_WRITE)
++ flags |= RING_BUFFER_WRITABLE;
++
++ rb = rb_alloc(nr_pages,
++ event->attr.watermark ? event->attr.wakeup_watermark : 0,
++ event->cpu, flags);
++
++ if (!rb) {
++ ret = -ENOMEM;
++ goto unlock;
++ }
++
++ atomic_set(&rb->mmap_count, 1);
++ rb->mmap_locked = extra;
++ rb->mmap_user = get_current_user();
++
++ atomic_long_add(user_extra, &user->locked_vm);
++ vma->vm_mm->pinned_vm += extra;
++
++ ring_buffer_attach(event, rb);
++
++ perf_event_init_userpage(event);
++ perf_event_update_userpage(event);
++
++unlock:
++ if (!ret)
++ atomic_inc(&event->mmap_count);
++ mutex_unlock(&event->mmap_mutex);
++
++ /*
++ * Since pinned accounting is per vm, we cannot allow fork() to copy our
++ * vma.
++ */
++ vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
++ vma->vm_ops = &perf_mmap_vmops;
++
++ return ret;
++}
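++
++/*
++ * Illustrative sketch, not part of this patch: the checks above mean a
++ * valid mapping is always 1 + 2^n pages, one control page followed by
++ * a power-of-two data area. E.g. with 4 KiB pages and 8 data pages:
++ *
++ * size_t len = (1 + 8) * 4096;
++ * void *base = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
++ * fd, 0);
++ */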
++
++static int perf_fasync(int fd, struct file *filp, int on)
++{
++ struct inode *inode = file_inode(filp);
++ struct perf_event *event = filp->private_data;
++ int retval;
++
++ mutex_lock(&inode->i_mutex);
++ retval = fasync_helper(fd, filp, on, &event->fasync);
++ mutex_unlock(&inode->i_mutex);
++
++ if (retval < 0)
++ return retval;
++
++ return 0;
++}
++
++static const struct file_operations perf_fops = {
++ .llseek = no_llseek,
++ .release = perf_release,
++ .read = perf_read,
++ .poll = perf_poll,
++ .unlocked_ioctl = perf_ioctl,
++ .compat_ioctl = perf_compat_ioctl,
++ .mmap = perf_mmap,
++ .fasync = perf_fasync,
++};
++
++/*
++ * Perf event wakeup
++ *
++ * If there's data, ensure we set the poll() state and publish everything
++ * to user-space before waking everybody up.
++ */
++
++void perf_event_wakeup(struct perf_event *event)
++{
++ ring_buffer_wakeup(event);
++
++ if (event->pending_kill) {
++ kill_fasync(&event->fasync, SIGIO, event->pending_kill);
++ event->pending_kill = 0;
++ }
++}
++
++static void perf_pending_event(struct irq_work *entry)
++{
++ struct perf_event *event = container_of(entry,
++ struct perf_event, pending);
++ int rctx;
++
++ rctx = perf_swevent_get_recursion_context();
++ /*
++ * If we 'fail' here, that's OK, it means recursion is already disabled
++ * and we won't recurse 'further'.
++ */
++
++ if (event->pending_disable) {
++ event->pending_disable = 0;
++ __perf_event_disable(event);
++ }
++
++ if (event->pending_wakeup) {
++ event->pending_wakeup = 0;
++ perf_event_wakeup(event);
++ }
++
++ if (rctx >= 0)
++ perf_swevent_put_recursion_context(rctx);
++}
++
++/*
++ * We assume KVM is the only user supporting these callbacks.
++ * Later on, we might change this to a list if another
++ * virtualization implementation starts supporting them.
++ */
++struct perf_guest_info_callbacks *perf_guest_cbs;
++
++int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
++{
++ perf_guest_cbs = cbs;
++ return 0;
++}
++EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
++
++int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
++{
++ perf_guest_cbs = NULL;
++ return 0;
++}
++EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
++
++static void
++perf_output_sample_regs(struct perf_output_handle *handle,
++ struct pt_regs *regs, u64 mask)
++{
++ int bit;
++
++ for_each_set_bit(bit, (const unsigned long *) &mask,
++ sizeof(mask) * BITS_PER_BYTE) {
++ u64 val;
++
++ val = perf_reg_value(regs, bit);
++ perf_output_put(handle, val);
++ }
++}
++
++static void perf_sample_regs_user(struct perf_regs_user *regs_user,
++ struct pt_regs *regs)
++{
++ if (!user_mode(regs)) {
++ if (current->mm)
++ regs = task_pt_regs(current);
++ else
++ regs = NULL;
++ }
++
++ if (regs) {
++ regs_user->regs = regs;
++ regs_user->abi = perf_reg_abi(current);
++ }
++}
++
++/*
++ * Get remaining task size from user stack pointer.
++ *
++ * It'd be better to take the stack VMA into account and limit this
++ * more precisely, but there's no way to get it safely under
++ * interrupt, so we use TASK_SIZE as the limit.
++ */
++static u64 perf_ustack_task_size(struct pt_regs *regs)
++{
++ unsigned long addr = perf_user_stack_pointer(regs);
++
++ if (!addr || addr >= TASK_SIZE)
++ return 0;
++
++ return TASK_SIZE - addr;
++}
++
++static u16
++perf_sample_ustack_size(u16 stack_size, u16 header_size,
++ struct pt_regs *regs)
++{
++ u64 task_size;
++
++ /* No regs, no stack pointer, no dump. */
++ if (!regs)
++ return 0;
++
++ /*
++ * Check whether the requested stack size fits into:
++ * - TASK_SIZE
++ * If it doesn't, we clamp the size to TASK_SIZE.
++ *
++ * - the remaining sample size
++ * If it doesn't, we shrink the stack size to fit
++ * the remaining sample size.
++ */
++
++ task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
++ stack_size = min(stack_size, (u16) task_size);
++
++ /* Current header size plus static size and dynamic size. */
++ header_size += 2 * sizeof(u64);
++
++ /* Does header size plus stack dump size still fit in a u16? */
++ if ((u16) (header_size + stack_size) < header_size) {
++ /*
++ * If we overflow the maximum size for the sample,
++ * we shrink the stack dump size to fit.
++ */
++ stack_size = USHRT_MAX - header_size - sizeof(u64);
++ stack_size = round_up(stack_size, sizeof(u64));
++ }
++
++ return stack_size;
++}
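++
++/*
++ * Worked example (illustrative, not part of this patch): with
++ * header_size = 64, a requested stack_size = 65520 and ample stack
++ * below TASK_SIZE, the u16 sum 64 + 65520 wraps to 48 < 64, so the
++ * overflow branch fires and stack_size becomes
++ * round_up(65535 - 64 - 8, 8) = 65464.
++ */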
++
++static void
++perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
++ struct pt_regs *regs)
++{
++ /* Case of a kernel thread, nothing to dump */
++ if (!regs) {
++ u64 size = 0;
++ perf_output_put(handle, size);
++ } else {
++ unsigned long sp;
++ unsigned int rem;
++ u64 dyn_size;
++
++ /*
++ * We dump:
++ * static size
++ * - the size requested by the user, or the best one we can
++ * fit into the maximum sample size
++ * data
++ * - user stack dump data
++ * dynamic size
++ * - the actual dumped size
++ */
++
++ /* Static size. */
++ perf_output_put(handle, dump_size);
++
++ /* Data. */
++ sp = perf_user_stack_pointer(regs);
++ rem = __output_copy_user(handle, (void *) sp, dump_size);
++ dyn_size = dump_size - rem;
++
++ perf_output_skip(handle, rem);
++
++ /* Dynamic size. */
++ perf_output_put(handle, dyn_size);
++ }
++}
++
++static void __perf_event_header__init_id(struct perf_event_header *header,
++ struct perf_sample_data *data,
++ struct perf_event *event)
++{
++ u64 sample_type = event->attr.sample_type;
++
++ data->type = sample_type;
++ header->size += event->id_header_size;
++
++ if (sample_type & PERF_SAMPLE_TID) {
++ /* namespace issues */
++ data->tid_entry.pid = perf_event_pid(event, current);
++ data->tid_entry.tid = perf_event_tid(event, current);
++ }
++
++ if (sample_type & PERF_SAMPLE_TIME)
++ data->time = perf_clock();
++
++ if (sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
++ data->id = primary_event_id(event);
++
++ if (sample_type & PERF_SAMPLE_STREAM_ID)
++ data->stream_id = event->id;
++
++ if (sample_type & PERF_SAMPLE_CPU) {
++ data->cpu_entry.cpu = raw_smp_processor_id();
++ data->cpu_entry.reserved = 0;
++ }
++}
++
++void perf_event_header__init_id(struct perf_event_header *header,
++ struct perf_sample_data *data,
++ struct perf_event *event)
++{
++ if (event->attr.sample_id_all)
++ __perf_event_header__init_id(header, data, event);
++}
++
++static void __perf_event__output_id_sample(struct perf_output_handle *handle,
++ struct perf_sample_data *data)
++{
++ u64 sample_type = data->type;
++
++ if (sample_type & PERF_SAMPLE_TID)
++ perf_output_put(handle, data->tid_entry);
++
++ if (sample_type & PERF_SAMPLE_TIME)
++ perf_output_put(handle, data->time);
++
++ if (sample_type & PERF_SAMPLE_ID)
++ perf_output_put(handle, data->id);
++
++ if (sample_type & PERF_SAMPLE_STREAM_ID)
++ perf_output_put(handle, data->stream_id);
++
++ if (sample_type & PERF_SAMPLE_CPU)
++ perf_output_put(handle, data->cpu_entry);
++
++ if (sample_type & PERF_SAMPLE_IDENTIFIER)
++ perf_output_put(handle, data->id);
++}
++
++void perf_event__output_id_sample(struct perf_event *event,
++ struct perf_output_handle *handle,
++ struct perf_sample_data *sample)
++{
++ if (event->attr.sample_id_all)
++ __perf_event__output_id_sample(handle, sample);
++}
++
++static void perf_output_read_one(struct perf_output_handle *handle,
++ struct perf_event *event,
++ u64 enabled, u64 running)
++{
++ u64 read_format = event->attr.read_format;
++ u64 values[4];
++ int n = 0;
++
++ values[n++] = perf_event_count(event);
++ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
++ values[n++] = enabled +
++ atomic64_read(&event->child_total_time_enabled);
++ }
++ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
++ values[n++] = running +
++ atomic64_read(&event->child_total_time_running);
++ }
++ if (read_format & PERF_FORMAT_ID)
++ values[n++] = primary_event_id(event);
++
++ __output_copy(handle, values, n * sizeof(u64));
++}
++
++/*
++ * XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
++ */
++static void perf_output_read_group(struct perf_output_handle *handle,
++ struct perf_event *event,
++ u64 enabled, u64 running)
++{
++ struct perf_event *leader = event->group_leader, *sub;
++ u64 read_format = event->attr.read_format;
++ u64 values[5];
++ int n = 0;
++
++ values[n++] = 1 + leader->nr_siblings;
++
++ if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
++ values[n++] = enabled;
++
++ if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
++ values[n++] = running;
++
++ if (leader != event)
++ leader->pmu->read(leader);
++
++ values[n++] = perf_event_count(leader);
++ if (read_format & PERF_FORMAT_ID)
++ values[n++] = primary_event_id(leader);
++
++ __output_copy(handle, values, n * sizeof(u64));
++
++ list_for_each_entry(sub, &leader->sibling_list, group_entry) {
++ n = 0;
++
++ if ((sub != event) &&
++ (sub->state == PERF_EVENT_STATE_ACTIVE))
++ sub->pmu->read(sub);
++
++ values[n++] = perf_event_count(sub);
++ if (read_format & PERF_FORMAT_ID)
++ values[n++] = primary_event_id(sub);
++
++ __output_copy(handle, values, n * sizeof(u64));
++ }
++}
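++
++/*
++ * Illustrative layout, not part of this patch: with PERF_FORMAT_GROUP
++ * the record emitted above reads back as:
++ *
++ * u64 nr;
++ * u64 time_enabled; (if PERF_FORMAT_TOTAL_TIME_ENABLED)
++ * u64 time_running; (if PERF_FORMAT_TOTAL_TIME_RUNNING)
++ * struct { u64 value, id; } cnt[nr]; (id only with PERF_FORMAT_ID)
++ */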
++
++#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
++ PERF_FORMAT_TOTAL_TIME_RUNNING)
++
++static void perf_output_read(struct perf_output_handle *handle,
++ struct perf_event *event)
++{
++ u64 enabled = 0, running = 0, now;
++ u64 read_format = event->attr.read_format;
++
++ /*
++ * compute total_time_enabled, total_time_running
++ * based on snapshot values taken when the event
++ * was last scheduled in.
++ *
++ * We cannot simply call update_context_time()
++ * because of locking issues, as we are called
++ * in NMI context.
++ */
++ if (read_format & PERF_FORMAT_TOTAL_TIMES)
++ calc_timer_values(event, &now, &enabled, &running);
++
++ if (event->attr.read_format & PERF_FORMAT_GROUP)
++ perf_output_read_group(handle, event, enabled, running);
++ else
++ perf_output_read_one(handle, event, enabled, running);
++}
++
++void perf_output_sample(struct perf_output_handle *handle,
++ struct perf_event_header *header,
++ struct perf_sample_data *data,
++ struct perf_event *event)
++{
++ u64 sample_type = data->type;
++
++ perf_output_put(handle, *header);
++
++ if (sample_type & PERF_SAMPLE_IDENTIFIER)
++ perf_output_put(handle, data->id);
++
++ if (sample_type & PERF_SAMPLE_IP)
++ perf_output_put(handle, data->ip);
++
++ if (sample_type & PERF_SAMPLE_TID)
++ perf_output_put(handle, data->tid_entry);
++
++ if (sample_type & PERF_SAMPLE_TIME)
++ perf_output_put(handle, data->time);
++
++ if (sample_type & PERF_SAMPLE_ADDR)
++ perf_output_put(handle, data->addr);
++
++ if (sample_type & PERF_SAMPLE_ID)
++ perf_output_put(handle, data->id);
++
++ if (sample_type & PERF_SAMPLE_STREAM_ID)
++ perf_output_put(handle, data->stream_id);
++
++ if (sample_type & PERF_SAMPLE_CPU)
++ perf_output_put(handle, data->cpu_entry);
++
++ if (sample_type & PERF_SAMPLE_PERIOD)
++ perf_output_put(handle, data->period);
++
++ if (sample_type & PERF_SAMPLE_READ)
++ perf_output_read(handle, event);
++
++ if (sample_type & PERF_SAMPLE_CALLCHAIN) {
++ if (data->callchain) {
++ /* one u64 for nr plus one per callchain entry */
++ int size = 1 + data->callchain->nr;
++
++ size *= sizeof(u64);
++
++ __output_copy(handle, data->callchain, size);
++ } else {
++ u64 nr = 0;
++ perf_output_put(handle, nr);
++ }
++ }
++
++ if (sample_type & PERF_SAMPLE_RAW) {
++ if (data->raw) {
++ perf_output_put(handle, data->raw->size);
++ __output_copy(handle, data->raw->data,
++ data->raw->size);
++ } else {
++ struct {
++ u32 size;
++ u32 data;
++ } raw = {
++ .size = sizeof(u32),
++ .data = 0,
++ };
++ perf_output_put(handle, raw);
++ }
++ }
++
++ if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
++ if (data->br_stack) {
++ size_t size;
++
++ size = data->br_stack->nr
++ * sizeof(struct perf_branch_entry);
++
++ perf_output_put(handle, data->br_stack->nr);
++ perf_output_copy(handle, data->br_stack->entries, size);
++ } else {
++ /*
++ * We always store at least the value of nr.
++ */
++ u64 nr = 0;
++ perf_output_put(handle, nr);
++ }
++ }
++
++ if (sample_type & PERF_SAMPLE_REGS_USER) {
++ u64 abi = data->regs_user.abi;
++
++ /*
++ * If there are no regs to dump, notice it through
++ * the first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
++ */
++ perf_output_put(handle, abi);
++
++ if (abi) {
++ u64 mask = event->attr.sample_regs_user;
++ perf_output_sample_regs(handle,
++ data->regs_user.regs,
++ mask);
++ }
++ }
++
++ if (sample_type & PERF_SAMPLE_STACK_USER) {
++ perf_output_sample_ustack(handle,
++ data->stack_user_size,
++ data->regs_user.regs);
++ }
++
++ if (sample_type & PERF_SAMPLE_WEIGHT)
++ perf_output_put(handle, data->weight);
++
++ if (sample_type & PERF_SAMPLE_DATA_SRC)
++ perf_output_put(handle, data->data_src.val);
++
++ if (sample_type & PERF_SAMPLE_TRANSACTION)
++ perf_output_put(handle, data->txn);
++
++ if (!event->attr.watermark) {
++ int wakeup_events = event->attr.wakeup_events;
++
++ if (wakeup_events) {
++ struct ring_buffer *rb = handle->rb;
++ int events = local_inc_return(&rb->events);
++
++ if (events >= wakeup_events) {
++ local_sub(wakeup_events, &rb->events);
++ local_inc(&rb->wakeup);
++ }
++ }
++ }
++}
++
++void perf_prepare_sample(struct perf_event_header *header,
++ struct perf_sample_data *data,
++ struct perf_event *event,
++ struct pt_regs *regs)
++{
++ u64 sample_type = event->attr.sample_type;
++
++ header->type = PERF_RECORD_SAMPLE;
++ header->size = sizeof(*header) + event->header_size;
++
++ header->misc = 0;
++ header->misc |= perf_misc_flags(regs);
++
++ __perf_event_header__init_id(header, data, event);
++
++ if (sample_type & PERF_SAMPLE_IP)
++ data->ip = perf_instruction_pointer(regs);
++
++ if (sample_type & PERF_SAMPLE_CALLCHAIN) {
++ int size = 1;
++
++ data->callchain = perf_callchain(event, regs);
++
++ if (data->callchain)
++ size += data->callchain->nr;
++
++ header->size += size * sizeof(u64);
++ }
++
++ if (sample_type & PERF_SAMPLE_RAW) {
++ int size = sizeof(u32);
++
++ if (data->raw)
++ size += data->raw->size;
++ else
++ size += sizeof(u32);
++
++ WARN_ON_ONCE(size & (sizeof(u64)-1));
++ header->size += size;
++ }
++
++ if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
++ int size = sizeof(u64); /* nr */
++ if (data->br_stack) {
++ size += data->br_stack->nr
++ * sizeof(struct perf_branch_entry);
++ }
++ header->size += size;
++ }
++
++ if (sample_type & PERF_SAMPLE_REGS_USER) {
++ /* regs dump ABI info */
++ int size = sizeof(u64);
++
++ perf_sample_regs_user(&data->regs_user, regs);
++
++ if (data->regs_user.regs) {
++ u64 mask = event->attr.sample_regs_user;
++ size += hweight64(mask) * sizeof(u64);
++ }
++
++ header->size += size;
++ }
++
++ if (sample_type & PERF_SAMPLE_STACK_USER) {
++ /*
++ * Either the PERF_SAMPLE_STACK_USER bit needs to always be
++ * processed last, or an additional check needs to be added
++ * when a new sample type is introduced, because we could eat
++ * up the rest of the sample size.
++ */
++ struct perf_regs_user *uregs = &data->regs_user;
++ u16 stack_size = event->attr.sample_stack_user;
++ u16 size = sizeof(u64);
++
++ if (!uregs->abi)
++ perf_sample_regs_user(uregs, regs);
++
++ stack_size = perf_sample_ustack_size(stack_size, header->size,
++ uregs->regs);
++
++ /*
++ * If there is something to dump, add space for the dump
++ * itself and for the field that tells the dynamic size,
++ * which is how many bytes were actually dumped.
++ */
++ if (stack_size)
++ size += sizeof(u64) + stack_size;
++
++ data->stack_user_size = stack_size;
++ header->size += size;
++ }
++}
++
++static void perf_event_output(struct perf_event *event,
++ struct perf_sample_data *data,
++ struct pt_regs *regs)
++{
++ struct perf_output_handle handle;
++ struct perf_event_header header;
++
++ /* protect the callchain buffers */
++ rcu_read_lock();
++
++ perf_prepare_sample(&header, data, event, regs);
++
++ if (perf_output_begin(&handle, event, header.size))
++ goto exit;
++
++ perf_output_sample(&handle, &header, data, event);
++
++ perf_output_end(&handle);
++
++exit:
++ rcu_read_unlock();
++}
++
++/*
++ * read event_id
++ */
++
++struct perf_read_event {
++ struct perf_event_header header;
++
++ u32 pid;
++ u32 tid;
++};
++
++static void
++perf_event_read_event(struct perf_event *event,
++ struct task_struct *task)
++{
++ struct perf_output_handle handle;
++ struct perf_sample_data sample;
++ struct perf_read_event read_event = {
++ .header = {
++ .type = PERF_RECORD_READ,
++ .misc = 0,
++ .size = sizeof(read_event) + event->read_size,
++ },
++ .pid = perf_event_pid(event, task),
++ .tid = perf_event_tid(event, task),
++ };
++ int ret;
++
++ perf_event_header__init_id(&read_event.header, &sample, event);
++ ret = perf_output_begin(&handle, event, read_event.header.size);
++ if (ret)
++ return;
++
++ perf_output_put(&handle, read_event);
++ perf_output_read(&handle, event);
++ perf_event__output_id_sample(event, &handle, &sample);
++
++ perf_output_end(&handle);
++}
++
++typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
++
++static void
++perf_event_aux_ctx(struct perf_event_context *ctx,
++ perf_event_aux_output_cb output,
++ void *data)
++{
++ struct perf_event *event;
++
++ list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
++ if (event->state < PERF_EVENT_STATE_INACTIVE)
++ continue;
++ if (!event_filter_match(event))
++ continue;
++ output(event, data);
++ }
++}
++
++static void
++perf_event_aux(perf_event_aux_output_cb output, void *data,
++ struct perf_event_context *task_ctx)
++{
++ struct perf_cpu_context *cpuctx;
++ struct perf_event_context *ctx;
++ struct pmu *pmu;
++ int ctxn;
++
++ rcu_read_lock();
++ list_for_each_entry_rcu(pmu, &pmus, entry) {
++ cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
++ if (cpuctx->unique_pmu != pmu)
++ goto next;
++ perf_event_aux_ctx(&cpuctx->ctx, output, data);
++ if (task_ctx)
++ goto next;
++ ctxn = pmu->task_ctx_nr;
++ if (ctxn < 0)
++ goto next;
++ ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
++ if (ctx)
++ perf_event_aux_ctx(ctx, output, data);
++next:
++ put_cpu_ptr(pmu->pmu_cpu_context);
++ }
++
++ if (task_ctx) {
++ preempt_disable();
++ perf_event_aux_ctx(task_ctx, output, data);
++ preempt_enable();
++ }
++ rcu_read_unlock();
++}
++
++/*
++ * task tracking -- fork/exit
++ *
++ * enabled by: attr.comm | attr.mmap | attr.mmap2 | attr.mmap_data | attr.task
++ */
++
++struct perf_task_event {
++ struct task_struct *task;
++ struct perf_event_context *task_ctx;
++
++ struct {
++ struct perf_event_header header;
++
++ u32 pid;
++ u32 ppid;
++ u32 tid;
++ u32 ptid;
++ u64 time;
++ } event_id;
++};
++
++static int perf_event_task_match(struct perf_event *event)
++{
++ return event->attr.comm || event->attr.mmap ||
++ event->attr.mmap2 || event->attr.mmap_data ||
++ event->attr.task;
++}
++
++static void perf_event_task_output(struct perf_event *event,
++ void *data)
++{
++ struct perf_task_event *task_event = data;
++ struct perf_output_handle handle;
++ struct perf_sample_data sample;
++ struct task_struct *task = task_event->task;
++ int ret, size = task_event->event_id.header.size;
++
++ if (!perf_event_task_match(event))
++ return;
++
++ perf_event_header__init_id(&task_event->event_id.header, &sample, event);
++
++ ret = perf_output_begin(&handle, event,
++ task_event->event_id.header.size);
++ if (ret)
++ goto out;
++
++ task_event->event_id.pid = perf_event_pid(event, task);
++ task_event->event_id.ppid = perf_event_pid(event, current);
++
++ task_event->event_id.tid = perf_event_tid(event, task);
++ task_event->event_id.ptid = perf_event_tid(event, current);
++
++ perf_output_put(&handle, task_event->event_id);
++
++ perf_event__output_id_sample(event, &handle, &sample);
++
++ perf_output_end(&handle);
++out:
++ task_event->event_id.header.size = size;
++}
++
++static void perf_event_task(struct task_struct *task,
++ struct perf_event_context *task_ctx,
++ int new)
++{
++ struct perf_task_event task_event;
++
++ if (!atomic_read(&nr_comm_events) &&
++ !atomic_read(&nr_mmap_events) &&
++ !atomic_read(&nr_task_events))
++ return;
++
++ task_event = (struct perf_task_event){
++ .task = task,
++ .task_ctx = task_ctx,
++ .event_id = {
++ .header = {
++ .type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
++ .misc = 0,
++ .size = sizeof(task_event.event_id),
++ },
++ /* .pid */
++ /* .ppid */
++ /* .tid */
++ /* .ptid */
++ .time = perf_clock(),
++ },
++ };
++
++ perf_event_aux(perf_event_task_output,
++ &task_event,
++ task_ctx);
++}
++
++void perf_event_fork(struct task_struct *task)
++{
++ perf_event_task(task, NULL, 1);
++}
++
++/*
++ * comm tracking
++ */
++
++struct perf_comm_event {
++ struct task_struct *task;
++ char *comm;
++ int comm_size;
++
++ struct {
++ struct perf_event_header header;
++
++ u32 pid;
++ u32 tid;
++ } event_id;
++};
++
++static int perf_event_comm_match(struct perf_event *event)
++{
++ return event->attr.comm;
++}
++
++static void perf_event_comm_output(struct perf_event *event,
++ void *data)
++{
++ struct perf_comm_event *comm_event = data;
++ struct perf_output_handle handle;
++ struct perf_sample_data sample;
++ int size = comm_event->event_id.header.size;
++ int ret;
++
++ if (!perf_event_comm_match(event))
++ return;
++
++ perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
++ ret = perf_output_begin(&handle, event,
++ comm_event->event_id.header.size);
++
++ if (ret)
++ goto out;
++
++ comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
++ comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
++
++ perf_output_put(&handle, comm_event->event_id);
++ __output_copy(&handle, comm_event->comm,
++ comm_event->comm_size);
++
++ perf_event__output_id_sample(event, &handle, &sample);
++
++ perf_output_end(&handle);
++out:
++ comm_event->event_id.header.size = size;
++}
++
++static void perf_event_comm_event(struct perf_comm_event *comm_event)
++{
++ char comm[TASK_COMM_LEN];
++ unsigned int size;
++
++ memset(comm, 0, sizeof(comm));
++ strlcpy(comm, comm_event->task->comm, sizeof(comm));
++ size = ALIGN(strlen(comm)+1, sizeof(u64));
++
++ comm_event->comm = comm;
++ comm_event->comm_size = size;
++
++ comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
++
++ perf_event_aux(perf_event_comm_output,
++ comm_event,
++ NULL);
++}
++
++void perf_event_comm(struct task_struct *task, bool exec)
++{
++ struct perf_comm_event comm_event;
++
++ if (!atomic_read(&nr_comm_events))
++ return;
++
++ comm_event = (struct perf_comm_event){
++ .task = task,
++ /* .comm */
++ /* .comm_size */
++ .event_id = {
++ .header = {
++ .type = PERF_RECORD_COMM,
++ .misc = exec ? PERF_RECORD_MISC_COMM_EXEC : 0,
++ /* .size */
++ },
++ /* .pid */
++ /* .tid */
++ },
++ };
++
++ perf_event_comm_event(&comm_event);
++}
++
++/*
++ * mmap tracking
++ */
++
++struct perf_mmap_event {
++ struct vm_area_struct *vma;
++
++ const char *file_name;
++ int file_size;
++ int maj, min;
++ u64 ino;
++ u64 ino_generation;
++ u32 prot, flags;
++
++ struct {
++ struct perf_event_header header;
++
++ u32 pid;
++ u32 tid;
++ u64 start;
++ u64 len;
++ u64 pgoff;
++ } event_id;
++};
++
++static int perf_event_mmap_match(struct perf_event *event,
++ void *data)
++{
++ struct perf_mmap_event *mmap_event = data;
++ struct vm_area_struct *vma = mmap_event->vma;
++ int executable = vma->vm_flags & VM_EXEC;
++
++ return (!executable && event->attr.mmap_data) ||
++ (executable && (event->attr.mmap || event->attr.mmap2));
++}
++
++static void perf_event_mmap_output(struct perf_event *event,
++ void *data)
++{
++ struct perf_mmap_event *mmap_event = data;
++ struct perf_output_handle handle;
++ struct perf_sample_data sample;
++ int size = mmap_event->event_id.header.size;
++ int ret;
++
++ if (!perf_event_mmap_match(event, data))
++ return;
++
++ if (event->attr.mmap2) {
++ mmap_event->event_id.header.type = PERF_RECORD_MMAP2;
++ mmap_event->event_id.header.size += sizeof(mmap_event->maj);
++ mmap_event->event_id.header.size += sizeof(mmap_event->min);
++ mmap_event->event_id.header.size += sizeof(mmap_event->ino);
++ mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
++ mmap_event->event_id.header.size += sizeof(mmap_event->prot);
++ mmap_event->event_id.header.size += sizeof(mmap_event->flags);
++ }
++
++ perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
++ ret = perf_output_begin(&handle, event,
++ mmap_event->event_id.header.size);
++ if (ret)
++ goto out;
++
++ mmap_event->event_id.pid = perf_event_pid(event, current);
++ mmap_event->event_id.tid = perf_event_tid(event, current);
++
++ perf_output_put(&handle, mmap_event->event_id);
++
++ if (event->attr.mmap2) {
++ perf_output_put(&handle, mmap_event->maj);
++ perf_output_put(&handle, mmap_event->min);
++ perf_output_put(&handle, mmap_event->ino);
++ perf_output_put(&handle, mmap_event->ino_generation);
++ perf_output_put(&handle, mmap_event->prot);
++ perf_output_put(&handle, mmap_event->flags);
++ }
++
++ __output_copy(&handle, mmap_event->file_name,
++ mmap_event->file_size);
++
++ perf_event__output_id_sample(event, &handle, &sample);
++
++ perf_output_end(&handle);
++out:
++ mmap_event->event_id.header.size = size;
++}
++
++static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
++{
++ struct vm_area_struct *vma = mmap_event->vma;
++ struct file *file = vma->vm_file;
++ int maj = 0, min = 0;
++ u64 ino = 0, gen = 0;
++ u32 prot = 0, flags = 0;
++ unsigned int size;
++ char tmp[16];
++ char *buf = NULL;
++ char *name;
++
++ if (file) {
++ struct inode *inode;
++ dev_t dev;
++
++ buf = kmalloc(PATH_MAX, GFP_KERNEL);
++ if (!buf) {
++ name = "//enomem";
++ goto cpy_name;
++ }
++ /*
++ * d_path() works from the end of the supplied buffer backwards, so we
++ * need to add enough zero bytes after the string to handle
++ * the 64bit alignment we do later.
++ */
++ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
++ if (IS_ERR(name)) {
++ name = "//toolong";
++ goto cpy_name;
++ }
++ inode = file_inode(vma->vm_file);
++ dev = inode->i_sb->s_dev;
++ ino = inode->i_ino;
++ gen = inode->i_generation;
++ maj = MAJOR(dev);
++ min = MINOR(dev);
++
++ if (vma->vm_flags & VM_READ)
++ prot |= PROT_READ;
++ if (vma->vm_flags & VM_WRITE)
++ prot |= PROT_WRITE;
++ if (vma->vm_flags & VM_EXEC)
++ prot |= PROT_EXEC;
++
++ if (vma->vm_flags & VM_MAYSHARE)
++ flags = MAP_SHARED;
++ else
++ flags = MAP_PRIVATE;
++
++ if (vma->vm_flags & VM_DENYWRITE)
++ flags |= MAP_DENYWRITE;
++ if (vma->vm_flags & VM_MAYEXEC)
++ flags |= MAP_EXECUTABLE;
++ if (vma->vm_flags & VM_LOCKED)
++ flags |= MAP_LOCKED;
++ if (vma->vm_flags & VM_HUGETLB)
++ flags |= MAP_HUGETLB;
++
++ goto got_name;
++ } else {
++ if (vma->vm_ops && vma->vm_ops->name) {
++ name = (char *) vma->vm_ops->name(vma);
++ if (name)
++ goto cpy_name;
++ }
++
++ name = (char *)arch_vma_name(vma);
++ if (name)
++ goto cpy_name;
++
++ if (vma->vm_start <= vma->vm_mm->start_brk &&
++ vma->vm_end >= vma->vm_mm->brk) {
++ name = "[heap]";
++ goto cpy_name;
++ }
++ if (vma->vm_start <= vma->vm_mm->start_stack &&
++ vma->vm_end >= vma->vm_mm->start_stack) {
++ name = "[stack]";
++ goto cpy_name;
++ }
++
++ name = "//anon";
++ goto cpy_name;
++ }
++
++cpy_name:
++ strlcpy(tmp, name, sizeof(tmp));
++ name = tmp;
++got_name:
++ /*
++ * Since our buffer works in 8-byte units we need to align our string
++ * size to a multiple of 8. However, we must guarantee the tail end is
++ * zeroed out to avoid leaking random bits to userspace.
++ */
++ size = strlen(name)+1;
++ while (!IS_ALIGNED(size, sizeof(u64)))
++ name[size++] = '\0';
++
++ mmap_event->file_name = name;
++ mmap_event->file_size = size;
++ mmap_event->maj = maj;
++ mmap_event->min = min;
++ mmap_event->ino = ino;
++ mmap_event->ino_generation = gen;
++ mmap_event->prot = prot;
++ mmap_event->flags = flags;
++
++ if (!(vma->vm_flags & VM_EXEC))
++ mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
++
++ mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
++
++ perf_event_aux(perf_event_mmap_output,
++ mmap_event,
++ NULL);
++
++ kfree(buf);
++}
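++
++/*
++ * Worked example (illustrative, not part of this patch): the alignment
++ * loop above pads "/lib/libc.so" (strlen 12, so size 13 including the
++ * NUL) with three more '\0' bytes to size 16, keeping the record in
++ * whole u64 units without leaking stale buffer contents.
++ */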
++
++void perf_event_mmap(struct vm_area_struct *vma)
++{
++ struct perf_mmap_event mmap_event;
++
++ if (!atomic_read(&nr_mmap_events))
++ return;
++
++ mmap_event = (struct perf_mmap_event){
++ .vma = vma,
++ /* .file_name */
++ /* .file_size */
++ .event_id = {
++ .header = {
++ .type = PERF_RECORD_MMAP,
++ .misc = PERF_RECORD_MISC_USER,
++ /* .size */
++ },
++ /* .pid */
++ /* .tid */
++ .start = vma->vm_start,
++ .len = vma->vm_end - vma->vm_start,
++ .pgoff = (u64)vma->vm_pgoff << PAGE_SHIFT,
++ },
++ /* .maj (attr_mmap2 only) */
++ /* .min (attr_mmap2 only) */
++ /* .ino (attr_mmap2 only) */
++ /* .ino_generation (attr_mmap2 only) */
++ /* .prot (attr_mmap2 only) */
++ /* .flags (attr_mmap2 only) */
++ };
++
++ perf_event_mmap_event(&mmap_event);
++}
++
++/*
++ * IRQ throttle logging
++ */
++
++static void perf_log_throttle(struct perf_event *event, int enable)
++{
++ struct perf_output_handle handle;
++ struct perf_sample_data sample;
++ int ret;
++
++ struct {
++ struct perf_event_header header;
++ u64 time;
++ u64 id;
++ u64 stream_id;
++ } throttle_event = {
++ .header = {
++ .type = PERF_RECORD_THROTTLE,
++ .misc = 0,
++ .size = sizeof(throttle_event),
++ },
++ .time = perf_clock(),
++ .id = primary_event_id(event),
++ .stream_id = event->id,
++ };
++
++ if (enable)
++ throttle_event.header.type = PERF_RECORD_UNTHROTTLE;
++
++ perf_event_header__init_id(&throttle_event.header, &sample, event);
++
++ ret = perf_output_begin(&handle, event,
++ throttle_event.header.size);
++ if (ret)
++ return;
++
++ perf_output_put(&handle, throttle_event);
++ perf_event__output_id_sample(event, &handle, &sample);
++ perf_output_end(&handle);
++}
++
++/*
++ * Generic event overflow handling, sampling.
++ */
++
++static int __perf_event_overflow(struct perf_event *event,
++ int throttle, struct perf_sample_data *data,
++ struct pt_regs *regs)
++{
++ int events = atomic_read(&event->event_limit);
++ struct hw_perf_event *hwc = &event->hw;
++ u64 seq;
++ int ret = 0;
++
++ /*
++ * Non-sampling counters might still use the PMI to fold short
++ * hardware counters; ignore those.
++ */
++ if (unlikely(!is_sampling_event(event)))
++ return 0;
++
++ seq = __this_cpu_read(perf_throttled_seq);
++ if (seq != hwc->interrupts_seq) {
++ hwc->interrupts_seq = seq;
++ hwc->interrupts = 1;
++ } else {
++ hwc->interrupts++;
++ if (unlikely(throttle
++ && hwc->interrupts >= max_samples_per_tick)) {
++ __this_cpu_inc(perf_throttled_count);
++ hwc->interrupts = MAX_INTERRUPTS;
++ perf_log_throttle(event, 0);
++ tick_nohz_full_kick();
++ ret = 1;
++ }
++ }
++
++ if (event->attr.freq) {
++ u64 now = perf_clock();
++ s64 delta = now - hwc->freq_time_stamp;
++
++ hwc->freq_time_stamp = now;
++
++ if (delta > 0 && delta < 2*TICK_NSEC)
++ perf_adjust_period(event, delta, hwc->last_period, true);
++ }
++
++ /*
++ * XXX event_limit might not quite work as expected on inherited
++ * events
++ */
++
++ event->pending_kill = POLL_IN;
++ if (events && atomic_dec_and_test(&event->event_limit)) {
++ ret = 1;
++ event->pending_kill = POLL_HUP;
++ event->pending_disable = 1;
++ irq_work_queue(&event->pending);
++ }
++
++ if (event->overflow_handler)
++ event->overflow_handler(event, data, regs);
++ else
++ perf_event_output(event, data, regs);
++
++ if (event->fasync && event->pending_kill) {
++ event->pending_wakeup = 1;
++ irq_work_queue(&event->pending);
++ }
++
++ return ret;
++}
++
++int perf_event_overflow(struct perf_event *event,
++ struct perf_sample_data *data,
++ struct pt_regs *regs)
++{
++ return __perf_event_overflow(event, 1, data, regs);
++}
++
++/*
++ * Generic software event infrastructure
++ */
++
++struct swevent_htable {
++ struct swevent_hlist *swevent_hlist;
++ struct mutex hlist_mutex;
++ int hlist_refcount;
++
++ /* Recursion avoidance in each context */
++ int recursion[PERF_NR_CONTEXTS];
++
++ /* Keeps track of cpu being initialized/exited */
++ bool online;
++};
++
++static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
++
++/*
++ * We directly increment event->count and keep a second value in
++ * event->hw.period_left to count intervals. This period value
++ * is kept in the range [-sample_period, 0] so that we can use
++ * the sign as the trigger.
++ */
++
++u64 perf_swevent_set_period(struct perf_event *event)
++{
++ struct hw_perf_event *hwc = &event->hw;
++ u64 period = hwc->last_period;
++ u64 nr, offset;
++ s64 old, val;
++
++ hwc->last_period = hwc->sample_period;
++
++again:
++ old = val = local64_read(&hwc->period_left);
++ if (val < 0)
++ return 0;
++
++ nr = div64_u64(period + val, period);
++ offset = nr * period;
++ val -= offset;
++ if (local64_cmpxchg(&hwc->period_left, old, val) != old)
++ goto again;
++
++ return nr;
++}
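++
++/*
++ * Worked example (illustrative, not part of this patch): with
++ * period = 100 and period_left = 30, nr = (100 + 30) / 100 = 1
++ * overflow is reported and period_left is rewound to 30 - 100 = -70,
++ * from where it counts back up towards zero as events arrive.
++ */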
++
++static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
++ struct perf_sample_data *data,
++ struct pt_regs *regs)
++{
++ struct hw_perf_event *hwc = &event->hw;
++ int throttle = 0;
++
++ if (!overflow)
++ overflow = perf_swevent_set_period(event);
++
++ if (hwc->interrupts == MAX_INTERRUPTS)
++ return;
++
++ for (; overflow; overflow--) {
++ if (__perf_event_overflow(event, throttle,
++ data, regs)) {
++ /*
++ * We inhibit the overflow from happening when
++ * hwc->interrupts == MAX_INTERRUPTS.
++ */
++ break;
++ }
++ throttle = 1;
++ }
++}
++
++static void perf_swevent_event(struct perf_event *event, u64 nr,
++ struct perf_sample_data *data,
++ struct pt_regs *regs)
++{
++ struct hw_perf_event *hwc = &event->hw;
++
++ local64_add(nr, &event->count);
++
++ if (!regs)
++ return;
++
++ if (!is_sampling_event(event))
++ return;
++
++ if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
++ data->period = nr;
++ return perf_swevent_overflow(event, 1, data, regs);
++ } else
++ data->period = event->hw.last_period;
++
++ if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
++ return perf_swevent_overflow(event, 1, data, regs);
++
++ if (local64_add_negative(nr, &hwc->period_left))
++ return;
++
++ perf_swevent_overflow(event, 0, data, regs);
++}
++
++static int perf_exclude_event(struct perf_event *event,
++ struct pt_regs *regs)
++{
++ if (event->hw.state & PERF_HES_STOPPED)
++ return 1;
++
++ if (regs) {
++ if (event->attr.exclude_user && user_mode(regs))
++ return 1;
++
++ if (event->attr.exclude_kernel && !user_mode(regs))
++ return 1;
++ }
++
++ return 0;
++}
++
++static int perf_swevent_match(struct perf_event *event,
++ enum perf_type_id type,
++ u32 event_id,
++ struct perf_sample_data *data,
++ struct pt_regs *regs)
++{
++ if (event->attr.type != type)
++ return 0;
++
++ if (event->attr.config != event_id)
++ return 0;
++
++ if (perf_exclude_event(event, regs))
++ return 0;
++
++ return 1;
++}
++
++static inline u64 swevent_hash(u64 type, u32 event_id)
++{
++ u64 val = event_id | (type << 32);
++
++ return hash_64(val, SWEVENT_HLIST_BITS);
++}
++
++static inline struct hlist_head *
++__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
++{
++ u64 hash = swevent_hash(type, event_id);
++
++ return &hlist->heads[hash];
++}
++
++/* For the read side: events when they trigger */
++static inline struct hlist_head *
++find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
++{
++ struct swevent_hlist *hlist;
++
++ hlist = rcu_dereference(swhash->swevent_hlist);
++ if (!hlist)
++ return NULL;
++
++ return __find_swevent_head(hlist, type, event_id);
++}
++
++/* For the event head insertion and removal in the hlist */
++static inline struct hlist_head *
++find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
++{
++ struct swevent_hlist *hlist;
++ u32 event_id = event->attr.config;
++ u64 type = event->attr.type;
++
++ /*
++ * Event scheduling is always serialized against hlist allocation
++ * and release, which makes the protected version suitable here.
++ * The context lock guarantees that.
++ */
++ hlist = rcu_dereference_protected(swhash->swevent_hlist,
++ lockdep_is_held(&event->ctx->lock));
++ if (!hlist)
++ return NULL;
++
++ return __find_swevent_head(hlist, type, event_id);
++}
++
++static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
++ u64 nr,
++ struct perf_sample_data *data,
++ struct pt_regs *regs)
++{
++ struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
++ struct perf_event *event;
++ struct hlist_head *head;
++
++ rcu_read_lock();
++ head = find_swevent_head_rcu(swhash, type, event_id);
++ if (!head)
++ goto end;
++
++ hlist_for_each_entry_rcu(event, head, hlist_entry) {
++ if (perf_swevent_match(event, type, event_id, data, regs))
++ perf_swevent_event(event, nr, data, regs);
++ }
++end:
++ rcu_read_unlock();
++}
++
++int perf_swevent_get_recursion_context(void)
++{
++ struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
++
++ return get_recursion_context(swhash->recursion);
++}
++EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
++
++inline void perf_swevent_put_recursion_context(int rctx)
++{
++ struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
++
++ put_recursion_context(swhash->recursion, rctx);
++}
++
++void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
++{
++ struct perf_sample_data data;
++ int rctx;
++
++ preempt_disable_notrace();
++ rctx = perf_swevent_get_recursion_context();
++ if (rctx < 0)
++ return;
++
++ perf_sample_data_init(&data, addr, 0);
++
++ do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);
++
++ perf_swevent_put_recursion_context(rctx);
++ preempt_enable_notrace();
++}
++
++static void perf_swevent_read(struct perf_event *event)
++{
++}
++
++static int perf_swevent_add(struct perf_event *event, int flags)
++{
++ struct swevent_htable *swhash = this_cpu_ptr(&swevent_htable);
++ struct hw_perf_event *hwc = &event->hw;
++ struct hlist_head *head;
++
++ if (is_sampling_event(event)) {
++ hwc->last_period = hwc->sample_period;
++ perf_swevent_set_period(event);
++ }
++
++ hwc->state = !(flags & PERF_EF_START);
++
++ head = find_swevent_head(swhash, event);
++ if (!head) {
++ /*
++ * We can race with cpu hotplug code. Do not
++ * WARN if the cpu just got unplugged.
++ */
++ WARN_ON_ONCE(swhash->online);
++ return -EINVAL;
++ }
++
++ hlist_add_head_rcu(&event->hlist_entry, head);
++
++ return 0;
++}
++
++static void perf_swevent_del(struct perf_event *event, int flags)
++{
++ hlist_del_rcu(&event->hlist_entry);
++}
++
++static void perf_swevent_start(struct perf_event *event, int flags)
++{
++ event->hw.state = 0;
++}
++
++static void perf_swevent_stop(struct perf_event *event, int flags)
++{
++ event->hw.state = PERF_HES_STOPPED;
++}
++
++/* Deref the hlist from the update side */
++static inline struct swevent_hlist *
++swevent_hlist_deref(struct swevent_htable *swhash)
++{
++ return rcu_dereference_protected(swhash->swevent_hlist,
++ lockdep_is_held(&swhash->hlist_mutex));
++}
++
++static void swevent_hlist_release(struct swevent_htable *swhash)
++{
++ struct swevent_hlist *hlist = swevent_hlist_deref(swhash);
++
++ if (!hlist)
++ return;
++
++ RCU_INIT_POINTER(swhash->swevent_hlist, NULL);
++ kfree_rcu(hlist, rcu_head);
++}
++
++static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
++{
++ struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
++
++ mutex_lock(&swhash->hlist_mutex);
++
++ if (!--swhash->hlist_refcount)
++ swevent_hlist_release(swhash);
++
++ mutex_unlock(&swhash->hlist_mutex);
++}
++
++static void swevent_hlist_put(struct perf_event *event)
++{
++ int cpu;
++
++ for_each_possible_cpu(cpu)
++ swevent_hlist_put_cpu(event, cpu);
++}
++
++static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
++{
++ struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
++ int err = 0;
++
++ mutex_lock(&swhash->hlist_mutex);
++
++ if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
++ struct swevent_hlist *hlist;
++
++ hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
++ if (!hlist) {
++ err = -ENOMEM;
++ goto exit;
++ }
++ rcu_assign_pointer(swhash->swevent_hlist, hlist);
++ }
++ swhash->hlist_refcount++;
++exit:
++ mutex_unlock(&swhash->hlist_mutex);
++
++ return err;
++}
++
++static int swevent_hlist_get(struct perf_event *event)
++{
++ int err;
++ int cpu, failed_cpu;
++
++ get_online_cpus();
++ for_each_possible_cpu(cpu) {
++ err = swevent_hlist_get_cpu(event, cpu);
++ if (err) {
++ failed_cpu = cpu;
++ goto fail;
++ }
++ }
++ put_online_cpus();
++
++ return 0;
++fail:
++ for_each_possible_cpu(cpu) {
++ if (cpu == failed_cpu)
++ break;
++ swevent_hlist_put_cpu(event, cpu);
++ }
++
++ put_online_cpus();
++ return err;
++}
++
++struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
++
++static void sw_perf_event_destroy(struct perf_event *event)
++{
++ u64 event_id = event->attr.config;
++
++ WARN_ON(event->parent);
++
++ static_key_slow_dec(&perf_swevent_enabled[event_id]);
++ swevent_hlist_put(event);
++}
++
++static int perf_swevent_init(struct perf_event *event)
++{
++ u64 event_id = event->attr.config;
++
++ if (event->attr.type != PERF_TYPE_SOFTWARE)
++ return -ENOENT;
++
++ /*
++ * no branch sampling for software events
++ */
++ if (has_branch_stack(event))
++ return -EOPNOTSUPP;
++
++ switch (event_id) {
++ case PERF_COUNT_SW_CPU_CLOCK:
++ case PERF_COUNT_SW_TASK_CLOCK:
++ return -ENOENT;
++
++ default:
++ break;
++ }
++
++ if (event_id >= PERF_COUNT_SW_MAX)
++ return -ENOENT;
++
++ if (!event->parent) {
++ int err;
++
++ err = swevent_hlist_get(event);
++ if (err)
++ return err;
++
++ static_key_slow_inc(&perf_swevent_enabled[event_id]);
++ event->destroy = sw_perf_event_destroy;
++ }
++
++ return 0;
++}
++
++static struct pmu perf_swevent = {
++ .task_ctx_nr = perf_sw_context,
++
++ .event_init = perf_swevent_init,
++ .add = perf_swevent_add,
++ .del = perf_swevent_del,
++ .start = perf_swevent_start,
++ .stop = perf_swevent_stop,
++ .read = perf_swevent_read,
++};
++
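++/*
++ * Caller-side sketch (illustrative; the call sites live elsewhere in
++ * the kernel): core code counts a software event through the
++ * perf_sw_event() helper, e.g. from a fault handler:
++ *
++ *   perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
++ *
++ * That call is a near no-op unless perf_swevent_init() above has bumped
++ * the static key in perf_swevent_enabled[] for the event id.
++ */
++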
++#ifdef CONFIG_EVENT_TRACING
++
++static int perf_tp_filter_match(struct perf_event *event,
++ struct perf_sample_data *data)
++{
++ void *record = data->raw->data;
++
++ if (likely(!event->filter) || filter_match_preds(event->filter, record))
++ return 1;
++ return 0;
++}
++
++static int perf_tp_event_match(struct perf_event *event,
++ struct perf_sample_data *data,
++ struct pt_regs *regs)
++{
++ if (event->hw.state & PERF_HES_STOPPED)
++ return 0;
++ /*
++ * All tracepoints are from kernel-space.
++ */
++ if (event->attr.exclude_kernel)
++ return 0;
++
++ if (!perf_tp_filter_match(event, data))
++ return 0;
++
++ return 1;
++}
++
++void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
++ struct pt_regs *regs, struct hlist_head *head, int rctx,
++ struct task_struct *task)
++{
++ struct perf_sample_data data;
++ struct perf_event *event;
++
++ struct perf_raw_record raw = {
++ .size = entry_size,
++ .data = record,
++ };
++
++ perf_sample_data_init(&data, addr, 0);
++ data.raw = &raw;
++
++ hlist_for_each_entry_rcu(event, head, hlist_entry) {
++ if (perf_tp_event_match(event, &data, regs))
++ perf_swevent_event(event, count, &data, regs);
++ }
++
++ /*
++ * If a target task was specified, also iterate its context and
++ * deliver this event there too.
++ */
++ if (task && task != current) {
++ struct perf_event_context *ctx;
++ struct trace_entry *entry = record;
++
++ rcu_read_lock();
++ ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
++ if (!ctx)
++ goto unlock;
++
++ list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
++ if (event->attr.type != PERF_TYPE_TRACEPOINT)
++ continue;
++ if (event->attr.config != entry->type)
++ continue;
++ if (perf_tp_event_match(event, &data, regs))
++ perf_swevent_event(event, count, &data, regs);
++ }
++unlock:
++ rcu_read_unlock();
++ }
++
++ perf_swevent_put_recursion_context(rctx);
++}
++EXPORT_SYMBOL_GPL(perf_tp_event);
++
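++/*
++ * Producer-side sketch (an assumption based on the tracing glue, which
++ * is not shown here): tracepoint probes hand their payload to perf
++ * through helpers such as perf_trace_buf_submit(), which lands in
++ * perf_tp_event() above with the per-event hlist and the recursion
++ * context obtained earlier.
++ */
++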
++static void tp_perf_event_destroy(struct perf_event *event)
++{
++ perf_trace_destroy(event);
++}
++
++static int perf_tp_event_init(struct perf_event *event)
++{
++ int err;
++
++ if (event->attr.type != PERF_TYPE_TRACEPOINT)
++ return -ENOENT;
++
++ /*
++ * no branch sampling for tracepoint events
++ */
++ if (has_branch_stack(event))
++ return -EOPNOTSUPP;
++
++ err = perf_trace_init(event);
++ if (err)
++ return err;
++
++ event->destroy = tp_perf_event_destroy;
++
++ return 0;
++}
++
++static struct pmu perf_tracepoint = {
++ .task_ctx_nr = perf_sw_context,
++
++ .event_init = perf_tp_event_init,
++ .add = perf_trace_add,
++ .del = perf_trace_del,
++ .start = perf_swevent_start,
++ .stop = perf_swevent_stop,
++ .read = perf_swevent_read,
++};
++
++static inline void perf_tp_register(void)
++{
++ perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
++}
++
++static int perf_event_set_filter(struct perf_event *event, void __user *arg)
++{
++ char *filter_str;
++ int ret;
++
++ if (event->attr.type != PERF_TYPE_TRACEPOINT)
++ return -EINVAL;
++
++ filter_str = strndup_user(arg, PAGE_SIZE);
++ if (IS_ERR(filter_str))
++ return PTR_ERR(filter_str);
++
++ ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);
++
++ kfree(filter_str);
++ return ret;
++}
++
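++/*
++ * Reached from userspace via the event fd (illustrative sketch):
++ *
++ *   ioctl(fd, PERF_EVENT_IOC_SET_FILTER, "common_pid != 0");
++ *
++ * The string is parsed against the tracepoint's fields by
++ * ftrace_profile_set_filter() above; non-tracepoint events get -EINVAL.
++ */
++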
++static void perf_event_free_filter(struct perf_event *event)
++{
++ ftrace_profile_free_filter(event);
++}
++
++#else
++
++static inline void perf_tp_register(void)
++{
++}
++
++static int perf_event_set_filter(struct perf_event *event, void __user *arg)
++{
++ return -ENOENT;
++}
++
++static void perf_event_free_filter(struct perf_event *event)
++{
++}
++
++#endif /* CONFIG_EVENT_TRACING */
++
++#ifdef CONFIG_HAVE_HW_BREAKPOINT
++void perf_bp_event(struct perf_event *bp, void *data)
++{
++ struct perf_sample_data sample;
++ struct pt_regs *regs = data;
++
++ perf_sample_data_init(&sample, bp->attr.bp_addr, 0);
++
++ if (!bp->hw.state && !perf_exclude_event(bp, regs))
++ perf_swevent_event(bp, 1, &sample, regs);
++}
++#endif
++
++/*
++ * hrtimer based swevent callback
++ */
++
++static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
++{
++ enum hrtimer_restart ret = HRTIMER_RESTART;
++ struct perf_sample_data data;
++ struct pt_regs *regs;
++ struct perf_event *event;
++ u64 period;
++
++ event = container_of(hrtimer, struct perf_event, hw.hrtimer);
++
++ if (event->state != PERF_EVENT_STATE_ACTIVE)
++ return HRTIMER_NORESTART;
++
++ event->pmu->read(event);
++
++ perf_sample_data_init(&data, 0, event->hw.last_period);
++ regs = get_irq_regs();
++
++ if (regs && !perf_exclude_event(event, regs)) {
++ if (!(event->attr.exclude_idle && is_idle_task(current)))
++ if (__perf_event_overflow(event, 1, &data, regs))
++ ret = HRTIMER_NORESTART;
++ }
++
++ period = max_t(u64, 10000, event->hw.sample_period);
++ hrtimer_forward_now(hrtimer, ns_to_ktime(period));
++
++ return ret;
++}
++
++static void perf_swevent_start_hrtimer(struct perf_event *event)
++{
++ struct hw_perf_event *hwc = &event->hw;
++ s64 period;
++
++ if (!is_sampling_event(event))
++ return;
++
++ period = local64_read(&hwc->period_left);
++ if (period) {
++ if (period < 0)
++ period = 10000;
++
++ local64_set(&hwc->period_left, 0);
++ } else {
++ period = max_t(u64, 10000, hwc->sample_period);
++ }
++ __hrtimer_start_range_ns(&hwc->hrtimer,
++ ns_to_ktime(period), 0,
++ HRTIMER_MODE_REL_PINNED, 0);
++}
++
++static void perf_swevent_cancel_hrtimer(struct perf_event *event)
++{
++ struct hw_perf_event *hwc = &event->hw;
++
++ if (is_sampling_event(event)) {
++ ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
++ local64_set(&hwc->period_left, ktime_to_ns(remaining));
++
++ hrtimer_cancel(&hwc->hrtimer);
++ }
++}
++
++static void perf_swevent_init_hrtimer(struct perf_event *event)
++{
++ struct hw_perf_event *hwc = &event->hw;
++
++ if (!is_sampling_event(event))
++ return;
++
++ hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
++ hwc->hrtimer.function = perf_swevent_hrtimer;
++
++ /*
++ * Since hrtimers have a fixed rate, we can do a static freq->period
++ * mapping and avoid the whole period-adjustment feedback logic.
++ */
++ if (event->attr.freq) {
++ long freq = event->attr.sample_freq;
++
++ event->attr.sample_period = NSEC_PER_SEC / freq;
++ hwc->sample_period = event->attr.sample_period;
++ local64_set(&hwc->period_left, hwc->sample_period);
++ hwc->last_period = hwc->sample_period;
++ event->attr.freq = 0;
++ }
++}
++
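++/*
++ * Worked example of the static mapping above: with attr.freq == 1 and
++ * attr.sample_freq == 4000, the period becomes NSEC_PER_SEC / 4000 =
++ * 250000 ns, so the hrtimer fires every 250 us for the lifetime of the
++ * event, with no dynamic period adjustment.
++ */
++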
++/*
++ * Software event: cpu wall time clock
++ */
++
++static void cpu_clock_event_update(struct perf_event *event)
++{
++ s64 prev;
++ u64 now;
++
++ now = local_clock();
++ prev = local64_xchg(&event->hw.prev_count, now);
++ local64_add(now - prev, &event->count);
++}
++
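++/*
++ * The xchg above charges each [prev, now) interval exactly once, so the
++ * read and stop paths cannot double-count time: if prev_count held t0
++ * and local_clock() returns t1, the count grows by t1 - t0 and the next
++ * update starts from t1.
++ */
++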
++static void cpu_clock_event_start(struct perf_event *event, int flags)
++{
++ local64_set(&event->hw.prev_count, local_clock());
++ perf_swevent_start_hrtimer(event);
++}
++
++static void cpu_clock_event_stop(struct perf_event *event, int flags)
++{
++ perf_swevent_cancel_hrtimer(event);
++ cpu_clock_event_update(event);
++}
++
++static int cpu_clock_event_add(struct perf_event *event, int flags)
++{
++ if (flags & PERF_EF_START)
++ cpu_clock_event_start(event, flags);
++
++ return 0;
++}
++
++static void cpu_clock_event_del(struct perf_event *event, int flags)
++{
++ cpu_clock_event_stop(event, flags);
++}
++
++static void cpu_clock_event_read(struct perf_event *event)
++{
++ cpu_clock_event_update(event);
++}
++
++static int cpu_clock_event_init(struct perf_event *event)
++{
++ if (event->attr.type != PERF_TYPE_SOFTWARE)
++ return -ENOENT;
++
++ if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
++ return -ENOENT;
++
++ /*
++ * no branch sampling for software events
++ */
++ if (has_branch_stack(event))
++ return -EOPNOTSUPP;
++
++ perf_swevent_init_hrtimer(event);
++
++ return 0;
++}
++
++static struct pmu perf_cpu_clock = {
++ .task_ctx_nr = perf_sw_context,
++
++ .event_init = cpu_clock_event_init,
++ .add = cpu_clock_event_add,
++ .del = cpu_clock_event_del,
++ .start = cpu_clock_event_start,
++ .stop = cpu_clock_event_stop,
++ .read = cpu_clock_event_read,
++};
++
++/*
++ * Software event: task time clock
++ */
++
++static void task_clock_event_update(struct perf_event *event, u64 now)
++{
++ u64 prev;
++ s64 delta;
++
++ prev = local64_xchg(&event->hw.prev_count, now);
++ delta = now - prev;
++ local64_add(delta, &event->count);
++}
++
++static void task_clock_event_start(struct perf_event *event, int flags)
++{
++ local64_set(&event->hw.prev_count, event->ctx->time);
++ perf_swevent_start_hrtimer(event);
++}
++
++static void task_clock_event_stop(struct perf_event *event, int flags)
++{
++ perf_swevent_cancel_hrtimer(event);
++ task_clock_event_update(event, event->ctx->time);
++}
++
++static int task_clock_event_add(struct perf_event *event, int flags)
++{
++ if (flags & PERF_EF_START)
++ task_clock_event_start(event, flags);
++
++ return 0;
++}
++
++static void task_clock_event_del(struct perf_event *event, int flags)
++{
++ task_clock_event_stop(event, PERF_EF_UPDATE);
++}
++
++static void task_clock_event_read(struct perf_event *event)
++{
++ u64 now = perf_clock();
++ u64 delta = now - event->ctx->timestamp;
++ u64 time = event->ctx->time + delta;
++
++ task_clock_event_update(event, time);
++}
++
++static int task_clock_event_init(struct perf_event *event)
++{
++ if (event->attr.type != PERF_TYPE_SOFTWARE)
++ return -ENOENT;
++
++ if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
++ return -ENOENT;
++
++ /*
++ * no branch sampling for software events
++ */
++ if (has_branch_stack(event))
++ return -EOPNOTSUPP;
++
++ perf_swevent_init_hrtimer(event);
++
++ return 0;
++}
++
++static struct pmu perf_task_clock = {
++ .task_ctx_nr = perf_sw_context,
++
++ .event_init = task_clock_event_init,
++ .add = task_clock_event_add,
++ .del = task_clock_event_del,
++ .start = task_clock_event_start,
++ .stop = task_clock_event_stop,
++ .read = task_clock_event_read,
++};
++
++static void perf_pmu_nop_void(struct pmu *pmu)
++{
++}
++
++static int perf_pmu_nop_int(struct pmu *pmu)
++{
++ return 0;
++}
++
++static void perf_pmu_start_txn(struct pmu *pmu)
++{
++ perf_pmu_disable(pmu);
++}
++
++static int perf_pmu_commit_txn(struct pmu *pmu)
++{
++ perf_pmu_enable(pmu);
++ return 0;
++}
++
++static void perf_pmu_cancel_txn(struct pmu *pmu)
++{
++ perf_pmu_enable(pmu);
++}
++
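++/*
++ * With the stubs above, a group add on a PMU lacking native transaction
++ * support degenerates to roughly (a sketch of group_sched_in()):
++ *
++ *   pmu->start_txn(pmu);                  - perf_pmu_disable()
++ *   ... event_sched_in() for the leader and each sibling ...
++ *   if all succeeded and !pmu->commit_txn(pmu)  - perf_pmu_enable()
++ *           return 0;
++ *   pmu->cancel_txn(pmu);                 - perf_pmu_enable(), no undo
++ */
++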
++static int perf_event_idx_default(struct perf_event *event)
++{
++ return 0;
++}
++
++/*
++ * Ensures all contexts with the same task_ctx_nr have the same
++ * pmu_cpu_context too.
++ */
++static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
++{
++ struct pmu *pmu;
++
++ if (ctxn < 0)
++ return NULL;
++
++ list_for_each_entry(pmu, &pmus, entry) {
++ if (pmu->task_ctx_nr == ctxn)
++ return pmu->pmu_cpu_context;
++ }
++
++ return NULL;
++}
++
++static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
++{
++ int cpu;
++
++ for_each_possible_cpu(cpu) {
++ struct perf_cpu_context *cpuctx;
++
++ cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
++
++ if (cpuctx->unique_pmu == old_pmu)
++ cpuctx->unique_pmu = pmu;
++ }
++}
++
++static void free_pmu_context(struct pmu *pmu)
++{
++ struct pmu *i;
++
++ mutex_lock(&pmus_lock);
++ /*
++ * Acts as a crude, open-coded refcount.
++ */
++ list_for_each_entry(i, &pmus, entry) {
++ if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
++ update_pmu_context(i, pmu);
++ goto out;
++ }
++ }
++
++ free_percpu(pmu->pmu_cpu_context);
++out:
++ mutex_unlock(&pmus_lock);
++}
++static struct idr pmu_idr;
++
++static ssize_t
++type_show(struct device *dev, struct device_attribute *attr, char *page)
++{
++ struct pmu *pmu = dev_get_drvdata(dev);
++
++ return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
++}
++static DEVICE_ATTR_RO(type);
++
++static ssize_t
++perf_event_mux_interval_ms_show(struct device *dev,
++ struct device_attribute *attr,
++ char *page)
++{
++ struct pmu *pmu = dev_get_drvdata(dev);
++
++ return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
++}
++
++static ssize_t
++perf_event_mux_interval_ms_store(struct device *dev,
++ struct device_attribute *attr,
++ const char *buf, size_t count)
++{
++ struct pmu *pmu = dev_get_drvdata(dev);
++ int timer, cpu, ret;
++
++ ret = kstrtoint(buf, 0, &timer);
++ if (ret)
++ return ret;
++
++ if (timer < 1)
++ return -EINVAL;
++
++ /* same value, nothing to do */
++ if (timer == pmu->hrtimer_interval_ms)
++ return count;
++
++ pmu->hrtimer_interval_ms = timer;
++
++ /* update all cpuctx for this PMU */
++ for_each_possible_cpu(cpu) {
++ struct perf_cpu_context *cpuctx;
++ cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
++ cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
++
++ if (hrtimer_active(&cpuctx->hrtimer))
++ hrtimer_forward_now(&cpuctx->hrtimer, cpuctx->hrtimer_interval);
++ }
++
++ return count;
++}
++static DEVICE_ATTR_RW(perf_event_mux_interval_ms);
++
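++/*
++ * Assuming the usual event_source bus layout (registered below), this
++ * attribute surfaces as e.g.
++ *
++ *   /sys/bus/event_source/devices/cpu/perf_event_mux_interval_ms
++ *
++ * and writing a value >= 1 (milliseconds) retunes the multiplexing
++ * hrtimer on every CPU, as done in the store handler above.
++ */
++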
++static struct attribute *pmu_dev_attrs[] = {
++ &dev_attr_type.attr,
++ &dev_attr_perf_event_mux_interval_ms.attr,
++ NULL,
++};
++ATTRIBUTE_GROUPS(pmu_dev);
++
++static int pmu_bus_running;
++static struct bus_type pmu_bus = {
++ .name = "event_source",
++ .dev_groups = pmu_dev_groups,
++};
++
++static void pmu_dev_release(struct device *dev)
++{
++ kfree(dev);
++}
++
++static int pmu_dev_alloc(struct pmu *pmu)
++{
++ int ret = -ENOMEM;
++
++ pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
++ if (!pmu->dev)
++ goto out;
++
++ pmu->dev->groups = pmu->attr_groups;
++ device_initialize(pmu->dev);
++ ret = dev_set_name(pmu->dev, "%s", pmu->name);
++ if (ret)
++ goto free_dev;
++
++ dev_set_drvdata(pmu->dev, pmu);
++ pmu->dev->bus = &pmu_bus;
++ pmu->dev->release = pmu_dev_release;
++ ret = device_add(pmu->dev);
++ if (ret)
++ goto free_dev;
++
++out:
++ return ret;
++
++free_dev:
++ put_device(pmu->dev);
++ goto out;
++}
++
++static struct lock_class_key cpuctx_mutex;
++static struct lock_class_key cpuctx_lock;
++
++int perf_pmu_register(struct pmu *pmu, const char *name, int type)
++{
++ int cpu, ret;
++
++ mutex_lock(&pmus_lock);
++ ret = -ENOMEM;
++ pmu->pmu_disable_count = alloc_percpu(int);
++ if (!pmu->pmu_disable_count)
++ goto unlock;
++
++ pmu->type = -1;
++ if (!name)
++ goto skip_type;
++ pmu->name = name;
++
++ if (type < 0) {
++ type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
++ if (type < 0) {
++ ret = type;
++ goto free_pdc;
++ }
++ }
++ pmu->type = type;
++
++ if (pmu_bus_running) {
++ ret = pmu_dev_alloc(pmu);
++ if (ret)
++ goto free_idr;
++ }
++
++skip_type:
++ pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
++ if (pmu->pmu_cpu_context)
++ goto got_cpu_context;
++
++ ret = -ENOMEM;
++ pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
++ if (!pmu->pmu_cpu_context)
++ goto free_dev;
++
++ for_each_possible_cpu(cpu) {
++ struct perf_cpu_context *cpuctx;
++
++ cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
++ __perf_event_init_context(&cpuctx->ctx);
++ lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
++ lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
++ cpuctx->ctx.type = cpu_context;
++ cpuctx->ctx.pmu = pmu;
++
++ __perf_cpu_hrtimer_init(cpuctx, cpu);
++
++ INIT_LIST_HEAD(&cpuctx->rotation_list);
++ cpuctx->unique_pmu = pmu;
++ }
++
++got_cpu_context:
++ if (!pmu->start_txn) {
++ if (pmu->pmu_enable) {
++ /*
++ * If we have pmu_enable/pmu_disable calls, install
++ * transaction stubs that use them to try to batch
++ * hardware accesses.
++ */
++ pmu->start_txn = perf_pmu_start_txn;
++ pmu->commit_txn = perf_pmu_commit_txn;
++ pmu->cancel_txn = perf_pmu_cancel_txn;
++ } else {
++ pmu->start_txn = perf_pmu_nop_void;
++ pmu->commit_txn = perf_pmu_nop_int;
++ pmu->cancel_txn = perf_pmu_nop_void;
++ }
++ }
++
++ if (!pmu->pmu_enable) {
++ pmu->pmu_enable = perf_pmu_nop_void;
++ pmu->pmu_disable = perf_pmu_nop_void;
++ }
++
++ if (!pmu->event_idx)
++ pmu->event_idx = perf_event_idx_default;
++
++ list_add_rcu(&pmu->entry, &pmus);
++ ret = 0;
++unlock:
++ mutex_unlock(&pmus_lock);
++
++ return ret;
++
++free_dev:
++ device_del(pmu->dev);
++ put_device(pmu->dev);
++
++free_idr:
++ if (pmu->type >= PERF_TYPE_MAX)
++ idr_remove(&pmu_idr, pmu->type);
++
++free_pdc:
++ free_percpu(pmu->pmu_disable_count);
++ goto unlock;
++}
++EXPORT_SYMBOL_GPL(perf_pmu_register);
++
++void perf_pmu_unregister(struct pmu *pmu)
++{
++ mutex_lock(&pmus_lock);
++ list_del_rcu(&pmu->entry);
++ mutex_unlock(&pmus_lock);
++
++ /*
++ * We dereference the pmu list under both SRCU and regular RCU, so
++ * synchronize against both of those.
++ */
++ synchronize_srcu(&pmus_srcu);
++ synchronize_rcu();
++
++ free_percpu(pmu->pmu_disable_count);
++ if (pmu->type >= PERF_TYPE_MAX)
++ idr_remove(&pmu_idr, pmu->type);
++ device_del(pmu->dev);
++ put_device(pmu->dev);
++ free_pmu_context(pmu);
++}
++EXPORT_SYMBOL_GPL(perf_pmu_unregister);
++
++struct pmu *perf_init_event(struct perf_event *event)
++{
++ struct pmu *pmu = NULL;
++ int idx;
++ int ret;
++
++ idx = srcu_read_lock(&pmus_srcu);
++
++ rcu_read_lock();
++ pmu = idr_find(&pmu_idr, event->attr.type);
++ rcu_read_unlock();
++ if (pmu) {
++ if (!try_module_get(pmu->module)) {
++ pmu = ERR_PTR(-ENODEV);
++ goto unlock;
++ }
++ event->pmu = pmu;
++ ret = pmu->event_init(event);
++ if (ret)
++ pmu = ERR_PTR(ret);
++ goto unlock;
++ }
++
++ list_for_each_entry_rcu(pmu, &pmus, entry) {
++ if (!try_module_get(pmu->module)) {
++ pmu = ERR_PTR(-ENODEV);
++ goto unlock;
++ }
++ event->pmu = pmu;
++ ret = pmu->event_init(event);
++ if (!ret)
++ goto unlock;
++
++ if (ret != -ENOENT) {
++ pmu = ERR_PTR(ret);
++ goto unlock;
++ }
++ }
++ pmu = ERR_PTR(-ENOENT);
++unlock:
++ srcu_read_unlock(&pmus_srcu, idx);
++
++ return pmu;
++}
++
++static void account_event_cpu(struct perf_event *event, int cpu)
++{
++ if (event->parent)
++ return;
++
++ if (has_branch_stack(event)) {
++ if (!(event->attach_state & PERF_ATTACH_TASK))
++ atomic_inc(&per_cpu(perf_branch_stack_events, cpu));
++ }
++ if (is_cgroup_event(event))
++ atomic_inc(&per_cpu(perf_cgroup_events, cpu));
++}
++
++static void account_event(struct perf_event *event)
++{
++ if (event->parent)
++ return;
++
++ if (event->attach_state & PERF_ATTACH_TASK)
++ static_key_slow_inc(&perf_sched_events.key);
++ if (event->attr.mmap || event->attr.mmap_data)
++ atomic_inc(&nr_mmap_events);
++ if (event->attr.comm)
++ atomic_inc(&nr_comm_events);
++ if (event->attr.task)
++ atomic_inc(&nr_task_events);
++ if (event->attr.freq) {
++ if (atomic_inc_return(&nr_freq_events) == 1)
++ tick_nohz_full_kick_all();
++ }
++ if (has_branch_stack(event))
++ static_key_slow_inc(&perf_sched_events.key);
++ if (is_cgroup_event(event))
++ static_key_slow_inc(&perf_sched_events.key);
++
++ account_event_cpu(event, event->cpu);
++}
++
++/*
++ * Allocate and initialize an event structure
++ */
++static struct perf_event *
++perf_event_alloc(struct perf_event_attr *attr, int cpu,
++ struct task_struct *task,
++ struct perf_event *group_leader,
++ struct perf_event *parent_event,
++ perf_overflow_handler_t overflow_handler,
++ void *context)
++{
++ struct pmu *pmu;
++ struct perf_event *event;
++ struct hw_perf_event *hwc;
++ long err = -EINVAL;
++
++ if ((unsigned)cpu >= nr_cpu_ids) {
++ if (!task || cpu != -1)
++ return ERR_PTR(-EINVAL);
++ }
++
++ event = kzalloc(sizeof(*event), GFP_KERNEL);
++ if (!event)
++ return ERR_PTR(-ENOMEM);
++
++ /*
++ * Single events are their own group leaders, with an
++ * empty sibling list:
++ */
++ if (!group_leader)
++ group_leader = event;
++
++ mutex_init(&event->child_mutex);
++ INIT_LIST_HEAD(&event->child_list);
++
++ INIT_LIST_HEAD(&event->group_entry);
++ INIT_LIST_HEAD(&event->event_entry);
++ INIT_LIST_HEAD(&event->sibling_list);
++ INIT_LIST_HEAD(&event->rb_entry);
++ INIT_LIST_HEAD(&event->active_entry);
++ INIT_HLIST_NODE(&event->hlist_entry);
++
++ init_waitqueue_head(&event->waitq);
++ init_irq_work(&event->pending, perf_pending_event);
++
++ mutex_init(&event->mmap_mutex);
++
++ atomic_long_set(&event->refcount, 1);
++ event->cpu = cpu;
++ event->attr = *attr;
++ event->group_leader = group_leader;
++ event->pmu = NULL;
++ event->oncpu = -1;
++
++ event->parent = parent_event;
++
++ event->ns = get_pid_ns(task_active_pid_ns(current));
++ event->id = atomic64_inc_return(&perf_event_id);
++
++ event->state = PERF_EVENT_STATE_INACTIVE;
++
++ if (task) {
++ event->attach_state = PERF_ATTACH_TASK;
++
++ if (attr->type == PERF_TYPE_TRACEPOINT)
++ event->hw.tp_target = task;
++#ifdef CONFIG_HAVE_HW_BREAKPOINT
++ /*
++ * hw_breakpoint is a bit difficult here..
++ */
++ else if (attr->type == PERF_TYPE_BREAKPOINT)
++ event->hw.bp_target = task;
++#endif
++ }
++
++ if (!overflow_handler && parent_event) {
++ overflow_handler = parent_event->overflow_handler;
++ context = parent_event->overflow_handler_context;
++ }
++
++ event->overflow_handler = overflow_handler;
++ event->overflow_handler_context = context;
++
++ perf_event__state_init(event);
++
++ pmu = NULL;
++
++ hwc = &event->hw;
++ hwc->sample_period = attr->sample_period;
++ if (attr->freq && attr->sample_freq)
++ hwc->sample_period = 1;
++ hwc->last_period = hwc->sample_period;
++
++ local64_set(&hwc->period_left, hwc->sample_period);
++
++ /*
++ * we currently do not support PERF_FORMAT_GROUP on inherited events
++ */
++ if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
++ goto err_ns;
++
++ pmu = perf_init_event(event);
++ if (!pmu)
++ goto err_ns;
++ else if (IS_ERR(pmu)) {
++ err = PTR_ERR(pmu);
++ goto err_ns;
++ }
++
++ if (!event->parent) {
++ if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
++ err = get_callchain_buffers();
++ if (err)
++ goto err_pmu;
++ }
++ }
++
++ return event;
++
++err_pmu:
++ if (event->destroy)
++ event->destroy(event);
++ module_put(pmu->module);
++err_ns:
++ if (event->ns)
++ put_pid_ns(event->ns);
++ kfree(event);
++
++ return ERR_PTR(err);
++}
++
++static int perf_copy_attr(struct perf_event_attr __user *uattr,
++ struct perf_event_attr *attr)
++{
++ u32 size;
++ int ret;
++
++ if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
++ return -EFAULT;
++
++ /*
++ * Zero the full structure, so that a short copy is harmless.
++ */
++ memset(attr, 0, sizeof(*attr));
++
++ ret = get_user(size, &uattr->size);
++ if (ret)
++ return ret;
++
++ if (size > PAGE_SIZE) /* silly large */
++ goto err_size;
++
++ if (!size) /* abi compat */
++ size = PERF_ATTR_SIZE_VER0;
++
++ if (size < PERF_ATTR_SIZE_VER0)
++ goto err_size;
++
++ /*
++ * If we're handed a bigger struct than we know of,
++ * ensure all the unknown bits are 0 - i.e. new
++ * user-space does not rely on any kernel feature
++ * extensions we don't know about yet.
++ */
++ if (size > sizeof(*attr)) {
++ unsigned char __user *addr;
++ unsigned char __user *end;
++ unsigned char val;
++
++ addr = (void __user *)uattr + sizeof(*attr);
++ end = (void __user *)uattr + size;
++
++ for (; addr < end; addr++) {
++ ret = get_user(val, addr);
++ if (ret)
++ return ret;
++ if (val)
++ goto err_size;
++ }
++ size = sizeof(*attr);
++ }
++
++ ret = copy_from_user(attr, uattr, size);
++ if (ret)
++ return -EFAULT;
++
++ if (attr->__reserved_1)
++ return -EINVAL;
++
++ if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
++ return -EINVAL;
++
++ if (attr->read_format & ~(PERF_FORMAT_MAX-1))
++ return -EINVAL;
++
++ if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
++ u64 mask = attr->branch_sample_type;
++
++ /* only using defined bits */
++ if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
++ return -EINVAL;
++
++ /* at least one branch bit must be set */
++ if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
++ return -EINVAL;
++
++ /* propagate priv level, when not set for branch */
++ if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {
++
++ /* exclude_kernel checked on syscall entry */
++ if (!attr->exclude_kernel)
++ mask |= PERF_SAMPLE_BRANCH_KERNEL;
++
++ if (!attr->exclude_user)
++ mask |= PERF_SAMPLE_BRANCH_USER;
++
++ if (!attr->exclude_hv)
++ mask |= PERF_SAMPLE_BRANCH_HV;
++ /*
++ * adjust user setting (for HW filter setup)
++ */
++ attr->branch_sample_type = mask;
++ }
++ /* privileged levels capture (kernel, hv): check permissions */
++ if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
++ && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
++ return -EACCES;
++ }
++
++ if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
++ ret = perf_reg_validate(attr->sample_regs_user);
++ if (ret)
++ return ret;
++ }
++
++ if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
++ if (!arch_perf_have_user_stack_dump())
++ return -ENOSYS;
++
++ /*
++ * We have __u32 type for the size, but so far
++ * we can only use __u16 as maximum due to the
++ * __u16 sample size limit.
++ */
++ if (attr->sample_stack_user >= USHRT_MAX)
++ ret = -EINVAL;
++ else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
++ ret = -EINVAL;
++ }
++
++out:
++ return ret;
++
++err_size:
++ put_user(sizeof(*attr), &uattr->size);
++ ret = -E2BIG;
++ goto out;
++}
++
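++/*
++ * Example of the size handshake above: an old binary passing
++ * size == PERF_ATTR_SIZE_VER0 gets the tail of *attr zeroed by the
++ * memset; a newer binary passing a larger struct must keep the bytes
++ * this kernel does not know about zeroed, otherwise the syscall fails
++ * with E2BIG and writes the kernel's sizeof(*attr) back to uattr->size
++ * as a hint.
++ */
++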
++static int
++perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
++{
++ struct ring_buffer *rb = NULL;
++ int ret = -EINVAL;
++
++ if (!output_event)
++ goto set;
++
++ /* don't allow circular references */
++ if (event == output_event)
++ goto out;
++
++ /*
++ * Don't allow cross-cpu buffers
++ */
++ if (output_event->cpu != event->cpu)
++ goto out;
++
++ /*
++ * If it's not a per-cpu rb, it must be the same task.
++ */
++ if (output_event->cpu == -1 && output_event->ctx != event->ctx)
++ goto out;
++
++set:
++ mutex_lock(&event->mmap_mutex);
++ /* Can't redirect output if we've got an active mmap() */
++ if (atomic_read(&event->mmap_count))
++ goto unlock;
++
++ if (output_event) {
++ /* get the rb we want to redirect to */
++ rb = ring_buffer_get(output_event);
++ if (!rb)
++ goto unlock;
++ }
++
++ ring_buffer_attach(event, rb);
++
++ ret = 0;
++unlock:
++ mutex_unlock(&event->mmap_mutex);
++
++out:
++ return ret;
++}
++
++/**
++ * sys_perf_event_open - open a performance event, associate it to a task/cpu
++ *
++ * @attr_uptr: event_id type attributes for monitoring/sampling
++ * @pid: target pid
++ * @cpu: target cpu
++ * @group_fd: group leader event fd
++ */
++SYSCALL_DEFINE5(perf_event_open,
++ struct perf_event_attr __user *, attr_uptr,
++ pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
++{
++ struct perf_event *group_leader = NULL, *output_event = NULL;
++ struct perf_event *event, *sibling;
++ struct perf_event_attr attr;
++ struct perf_event_context *ctx;
++ struct file *event_file = NULL;
++ struct fd group = {NULL, 0};
++ struct task_struct *task = NULL;
++ struct pmu *pmu;
++ int event_fd;
++ int move_group = 0;
++ int err;
++ int f_flags = O_RDWR;
++
++ /* for future expandability... */
++ if (flags & ~PERF_FLAG_ALL)
++ return -EINVAL;
++
++ err = perf_copy_attr(attr_uptr, &attr);
++ if (err)
++ return err;
++
++ if (!attr.exclude_kernel) {
++ if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
++ return -EACCES;
++ }
++
++ if (attr.freq) {
++ if (attr.sample_freq > sysctl_perf_event_sample_rate)
++ return -EINVAL;
++ } else {
++ if (attr.sample_period & (1ULL << 63))
++ return -EINVAL;
++ }
++
++ /*
++ * In cgroup mode, the pid argument is used to pass the fd
++ * opened to the cgroup directory in cgroupfs. The cpu argument
++ * designates the cpu on which to monitor threads from that
++ * cgroup.
++ */
++ if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
++ return -EINVAL;
++
++ if (flags & PERF_FLAG_FD_CLOEXEC)
++ f_flags |= O_CLOEXEC;
++
++ event_fd = get_unused_fd_flags(f_flags);
++ if (event_fd < 0)
++ return event_fd;
++
++ if (group_fd != -1) {
++ err = perf_fget_light(group_fd, &group);
++ if (err)
++ goto err_fd;
++ group_leader = group.file->private_data;
++ if (flags & PERF_FLAG_FD_OUTPUT)
++ output_event = group_leader;
++ if (flags & PERF_FLAG_FD_NO_GROUP)
++ group_leader = NULL;
++ }
++
++ if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
++ task = find_lively_task_by_vpid(pid);
++ if (IS_ERR(task)) {
++ err = PTR_ERR(task);
++ goto err_group_fd;
++ }
++ }
++
++ if (task && group_leader &&
++ group_leader->attr.inherit != attr.inherit) {
++ err = -EINVAL;
++ goto err_task;
++ }
++
++ get_online_cpus();
++
++ event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
++ NULL, NULL);
++ if (IS_ERR(event)) {
++ err = PTR_ERR(event);
++ goto err_cpus;
++ }
++
++ if (flags & PERF_FLAG_PID_CGROUP) {
++ err = perf_cgroup_connect(pid, event, &attr, group_leader);
++ if (err) {
++ __free_event(event);
++ goto err_cpus;
++ }
++ }
++
++ if (is_sampling_event(event)) {
++ if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) {
++ err = -ENOTSUPP;
++ goto err_alloc;
++ }
++ }
++
++ account_event(event);
++
++ /*
++ * Special case software events and allow them to be part of
++ * any hardware group.
++ */
++ pmu = event->pmu;
++
++ if (group_leader &&
++ (is_software_event(event) != is_software_event(group_leader))) {
++ if (is_software_event(event)) {
++ /*
++ * The event and the group leader differ in type here;
++ * since the event is a software event, the leader is not.
++ *
++ * Allow the addition of software events to !software
++ * groups, this is safe because software events never
++ * fail to schedule.
++ */
++ pmu = group_leader->pmu;
++ } else if (is_software_event(group_leader) &&
++ (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
++ /*
++ * In case the group is a pure software group, and we
++ * try to add a hardware event, move the whole group to
++ * the hardware context.
++ */
++ move_group = 1;
++ }
++ }
++
++ /*
++ * Get the target context (task or percpu):
++ */
++ ctx = find_get_context(pmu, task, event->cpu);
++ if (IS_ERR(ctx)) {
++ err = PTR_ERR(ctx);
++ goto err_alloc;
++ }
++
++ if (task) {
++ put_task_struct(task);
++ task = NULL;
++ }
++
++ /*
++ * Look up the group leader (we will attach this event to it):
++ */
++ if (group_leader) {
++ err = -EINVAL;
++
++ /*
++ * Do not allow a recursive hierarchy (this new sibling
++ * becoming part of another group-sibling):
++ */
++ if (group_leader->group_leader != group_leader)
++ goto err_context;
++ /*
++ * Do not allow attaching to a group in a different
++ * task or CPU context:
++ */
++ if (move_group) {
++ if (group_leader->ctx->type != ctx->type)
++ goto err_context;
++ } else {
++ if (group_leader->ctx != ctx)
++ goto err_context;
++ }
++
++ /*
++ * Only a group leader can be exclusive or pinned
++ */
++ if (attr.exclusive || attr.pinned)
++ goto err_context;
++ }
++
++ if (output_event) {
++ err = perf_event_set_output(event, output_event);
++ if (err)
++ goto err_context;
++ }
++
++ event_file = anon_inode_getfile("[perf_event]", &perf_fops, event,
++ f_flags);
++ if (IS_ERR(event_file)) {
++ err = PTR_ERR(event_file);
++ goto err_context;
++ }
++
++ if (move_group) {
++ struct perf_event_context *gctx = group_leader->ctx;
++
++ mutex_lock(&gctx->mutex);
++ perf_remove_from_context(group_leader, false);
++
++ /*
++ * Removing from the context ends up with a disabled
++ * event. What we want here is the event in its initial
++ * startup state, ready to be added into the new context.
++ */
++ perf_event__state_init(group_leader);
++ list_for_each_entry(sibling, &group_leader->sibling_list,
++ group_entry) {
++ perf_remove_from_context(sibling, false);
++ perf_event__state_init(sibling);
++ put_ctx(gctx);
++ }
++ mutex_unlock(&gctx->mutex);
++ put_ctx(gctx);
++ }
++
++ WARN_ON_ONCE(ctx->parent_ctx);
++ mutex_lock(&ctx->mutex);
++
++ if (move_group) {
++ synchronize_rcu();
++ perf_install_in_context(ctx, group_leader, group_leader->cpu);
++ get_ctx(ctx);
++ list_for_each_entry(sibling, &group_leader->sibling_list,
++ group_entry) {
++ perf_install_in_context(ctx, sibling, sibling->cpu);
++ get_ctx(ctx);
++ }
++ }
++
++ perf_install_in_context(ctx, event, event->cpu);
++ perf_unpin_context(ctx);
++ mutex_unlock(&ctx->mutex);
++
++ put_online_cpus();
++
++ event->owner = current;
++
++ mutex_lock(&current->perf_event_mutex);
++ list_add_tail(&event->owner_entry, &current->perf_event_list);
++ mutex_unlock(&current->perf_event_mutex);
++
++ /*
++ * Precalculate sample_data sizes
++ */
++ perf_event__header_size(event);
++ perf_event__id_header_size(event);
++
++ /*
++ * Drop the reference on the group_event after placing the
++ * new event on the sibling_list. This ensures destruction
++ * of the group leader will find the pointer to itself in
++ * perf_group_detach().
++ */
++ fdput(group);
++ fd_install(event_fd, event_file);
++ return event_fd;
++
++err_context:
++ perf_unpin_context(ctx);
++ put_ctx(ctx);
++err_alloc:
++ free_event(event);
++err_cpus:
++ put_online_cpus();
++err_task:
++ if (task)
++ put_task_struct(task);
++err_group_fd:
++ fdput(group);
++err_fd:
++ put_unused_fd(event_fd);
++ return err;
++}
++
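++/*
++ * Minimal userspace sketch of the syscall above (illustrative only;
++ * there is no glibc wrapper, so syscall(2) is used directly):
++ *
++ *   struct perf_event_attr attr = {
++ *           .type   = PERF_TYPE_SOFTWARE,
++ *           .size   = sizeof(attr),
++ *           .config = PERF_COUNT_SW_CONTEXT_SWITCHES,
++ *   };
++ *   int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
++ *
++ * pid == 0 and cpu == -1 measure the calling task on any CPU; the
++ * returned fd is read(2) for the count and mmap(2)ed for samples.
++ */
++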
++/**
++ * perf_event_create_kernel_counter
++ *
++ * @attr: attributes of the counter to create
++ * @cpu: cpu in which the counter is bound
++ * @task: task to profile (NULL for percpu)
++ */
++struct perf_event *
++perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
++ struct task_struct *task,
++ perf_overflow_handler_t overflow_handler,
++ void *context)
++{
++ struct perf_event_context *ctx;
++ struct perf_event *event;
++ int err;
++
++ /*
++ * Get the target context (task or percpu):
++ */
++
++ event = perf_event_alloc(attr, cpu, task, NULL, NULL,
++ overflow_handler, context);
++ if (IS_ERR(event)) {
++ err = PTR_ERR(event);
++ goto err;
++ }
++
++ /* Mark owner so we could distinguish it from user events. */
++ event->owner = EVENT_OWNER_KERNEL;
++
++ account_event(event);
++
++ ctx = find_get_context(event->pmu, task, cpu);
++ if (IS_ERR(ctx)) {
++ err = PTR_ERR(ctx);
++ goto err_free;
++ }
++
++ WARN_ON_ONCE(ctx->parent_ctx);
++ mutex_lock(&ctx->mutex);
++ perf_install_in_context(ctx, event, cpu);
++ perf_unpin_context(ctx);
++ mutex_unlock(&ctx->mutex);
++
++ return event;
++
++err_free:
++ free_event(event);
++err:
++ return ERR_PTR(err);
++}
++EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
++
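++/*
++ * In-kernel usage sketch (assuming a per-cpu counter with an overflow
++ * callback, in the style of the hardlockup watchdog):
++ *
++ *   event = perf_event_create_kernel_counter(&attr, cpu, NULL,
++ *                                            overflow_handler, NULL);
++ *   if (IS_ERR(event))
++ *           return PTR_ERR(event);
++ */
++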
++void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
++{
++ struct perf_event_context *src_ctx;
++ struct perf_event_context *dst_ctx;
++ struct perf_event *event, *tmp;
++ LIST_HEAD(events);
++
++ src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
++ dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;
++
++ mutex_lock(&src_ctx->mutex);
++ list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
++ event_entry) {
++ perf_remove_from_context(event, false);
++ unaccount_event_cpu(event, src_cpu);
++ put_ctx(src_ctx);
++ list_add(&event->migrate_entry, &events);
++ }
++ mutex_unlock(&src_ctx->mutex);
++
++ synchronize_rcu();
++
++ mutex_lock(&dst_ctx->mutex);
++ list_for_each_entry_safe(event, tmp, &events, migrate_entry) {
++ list_del(&event->migrate_entry);
++ if (event->state >= PERF_EVENT_STATE_OFF)
++ event->state = PERF_EVENT_STATE_INACTIVE;
++ account_event_cpu(event, dst_cpu);
++ perf_install_in_context(dst_ctx, event, dst_cpu);
++ get_ctx(dst_ctx);
++ }
++ mutex_unlock(&dst_ctx->mutex);
++}
++EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
++
++static void sync_child_event(struct perf_event *child_event,
++ struct task_struct *child)
++{
++ struct perf_event *parent_event = child_event->parent;
++ u64 child_val;
++
++ if (child_event->attr.inherit_stat)
++ perf_event_read_event(child_event, child);
++
++ child_val = perf_event_count(child_event);
++
++ /*
++ * Add back the child's count to the parent's count:
++ */
++ atomic64_add(child_val, &parent_event->child_count);
++ atomic64_add(child_event->total_time_enabled,
++ &parent_event->child_total_time_enabled);
++ atomic64_add(child_event->total_time_running,
++ &parent_event->child_total_time_running);
++
++ /*
++ * Remove this event from the parent's list
++ */
++ WARN_ON_ONCE(parent_event->ctx->parent_ctx);
++ mutex_lock(&parent_event->child_mutex);
++ list_del_init(&child_event->child_list);
++ mutex_unlock(&parent_event->child_mutex);
++
++ /*
++ * Make sure the user/parent gets notified that we just
++ * lost one event.
++ */
++ perf_event_wakeup(parent_event);
++
++ /*
++ * Release the parent event, if this was the last
++ * reference to it.
++ */
++ put_event(parent_event);
++}
++
++static void
++__perf_event_exit_task(struct perf_event *child_event,
++ struct perf_event_context *child_ctx,
++ struct task_struct *child)
++{
++ /*
++ * Do not destroy the 'original' grouping; because of the context
++ * switch optimization the original events could've ended up in a
++ * random child task.
++ *
++ * If we were to destroy the original group, all group related
++ * operations would cease to function properly after this random
++ * child dies.
++ *
++ * Do destroy all inherited groups; we don't care about those
++ * and being thorough is better.
++ */
++ perf_remove_from_context(child_event, !!child_event->parent);
++
++ /*
++ * It can happen that the parent exits first, and has events
++ * that are still around due to the child reference. These
++ * events need to be zapped.
++ */
++ if (child_event->parent) {
++ sync_child_event(child_event, child);
++ free_event(child_event);
++ } else {
++ child_event->state = PERF_EVENT_STATE_EXIT;
++ perf_event_wakeup(child_event);
++ }
++}
++
++static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
++{
++ struct perf_event *child_event, *next;
++ struct perf_event_context *child_ctx, *clone_ctx = NULL;
++ unsigned long flags;
++
++ if (likely(!child->perf_event_ctxp[ctxn])) {
++ perf_event_task(child, NULL, 0);
++ return;
++ }
++
++ local_irq_save(flags);
++ /*
++ * We can't reschedule here because interrupts are disabled,
++ * and either child is current or it is a task that can't be
++ * scheduled, so we are now safe from rescheduling changing
++ * our context.
++ */
++ child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);
++
++ /*
++ * Take the context lock here so that if find_get_context is
++ * reading child->perf_event_ctxp, we wait until it has
++ * incremented the context's refcount before we do put_ctx below.
++ */
++ raw_spin_lock(&child_ctx->lock);
++ task_ctx_sched_out(child_ctx);
++ child->perf_event_ctxp[ctxn] = NULL;
++
++ /*
++ * If this context is a clone; unclone it so it can't get
++ * swapped to another process while we're removing all
++ * the events from it.
++ */
++ clone_ctx = unclone_ctx(child_ctx);
++ update_context_time(child_ctx);
++ raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
++
++ if (clone_ctx)
++ put_ctx(clone_ctx);
++
++ /*
++ * Report the task dead after unscheduling the events so that we
++ * won't get any samples after PERF_RECORD_EXIT. We can however still
++ * get a few PERF_RECORD_READ events.
++ */
++ perf_event_task(child, child_ctx, 0);
++
++ /*
++ * We can recurse on the same lock type through:
++ *
++ * __perf_event_exit_task()
++ * sync_child_event()
++ * put_event()
++ * mutex_lock(&ctx->mutex)
++ *
++ * But since it's the parent context it won't be the same instance.
++ */
++ mutex_lock(&child_ctx->mutex);
++
++ list_for_each_entry_safe(child_event, next, &child_ctx->event_list, event_entry)
++ __perf_event_exit_task(child_event, child_ctx, child);
++
++ mutex_unlock(&child_ctx->mutex);
++
++ put_ctx(child_ctx);
++}
++
++/*
++ * When a child task exits, feed back event values to parent events.
++ */
++void perf_event_exit_task(struct task_struct *child)
++{
++ struct perf_event *event, *tmp;
++ int ctxn;
++
++ mutex_lock(&child->perf_event_mutex);
++ list_for_each_entry_safe(event, tmp, &child->perf_event_list,
++ owner_entry) {
++ list_del_init(&event->owner_entry);
++
++ /*
++ * Ensure the list deletion is visible before we clear
++ * the owner; this closes a race against perf_release() where
++ * we need to serialize on the owner->perf_event_mutex.
++ */
++ smp_wmb();
++ event->owner = NULL;
++ }
++ mutex_unlock(&child->perf_event_mutex);
++
++ for_each_task_context_nr(ctxn)
++ perf_event_exit_task_context(child, ctxn);
++}
++
++static void perf_free_event(struct perf_event *event,
++ struct perf_event_context *ctx)
++{
++ struct perf_event *parent = event->parent;
++
++ if (WARN_ON_ONCE(!parent))
++ return;
++
++ mutex_lock(&parent->child_mutex);
++ list_del_init(&event->child_list);
++ mutex_unlock(&parent->child_mutex);
++
++ put_event(parent);
++
++ perf_group_detach(event);
++ list_del_event(event, ctx);
++ free_event(event);
++}
++
++/*
++ * free an unexposed, unused context as created by inheritance by
++ * perf_event_init_task below, used by fork() in case of failure.
++ */
++void perf_event_free_task(struct task_struct *task)
++{
++ struct perf_event_context *ctx;
++ struct perf_event *event, *tmp;
++ int ctxn;
++
++ for_each_task_context_nr(ctxn) {
++ ctx = task->perf_event_ctxp[ctxn];
++ if (!ctx)
++ continue;
++
++ mutex_lock(&ctx->mutex);
++again:
++ list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
++ group_entry)
++ perf_free_event(event, ctx);
++
++ list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
++ group_entry)
++ perf_free_event(event, ctx);
++
++ if (!list_empty(&ctx->pinned_groups) ||
++ !list_empty(&ctx->flexible_groups))
++ goto again;
++
++ mutex_unlock(&ctx->mutex);
++
++ put_ctx(ctx);
++ }
++}
++
++void perf_event_delayed_put(struct task_struct *task)
++{
++ int ctxn;
++
++ for_each_task_context_nr(ctxn)
++ WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
++}
++
++/*
++ * inherit an event from parent task to child task:
++ */
++static struct perf_event *
++inherit_event(struct perf_event *parent_event,
++ struct task_struct *parent,
++ struct perf_event_context *parent_ctx,
++ struct task_struct *child,
++ struct perf_event *group_leader,
++ struct perf_event_context *child_ctx)
++{
++ enum perf_event_active_state parent_state = parent_event->state;
++ struct perf_event *child_event;
++ unsigned long flags;
++
++ /*
++ * Instead of creating recursive hierarchies of events,
++ * we link inherited events back to the original parent,
++ * which is guaranteed to have a filp, and which we use
++ * as the reference count:
++ */
++ if (parent_event->parent)
++ parent_event = parent_event->parent;
++
++ child_event = perf_event_alloc(&parent_event->attr,
++ parent_event->cpu,
++ child,
++ group_leader, parent_event,
++ NULL, NULL);
++ if (IS_ERR(child_event))
++ return child_event;
++
++ if (is_orphaned_event(parent_event) ||
++ !atomic_long_inc_not_zero(&parent_event->refcount)) {
++ free_event(child_event);
++ return NULL;
++ }
++
++ get_ctx(child_ctx);
++
++ /*
++ * Make the child state follow the state of the parent event,
++ * not its attr.disabled bit. We hold the parent's mutex,
++ * so we won't race with perf_event_{en, dis}able_family.
++ */
++ if (parent_state >= PERF_EVENT_STATE_INACTIVE)
++ child_event->state = PERF_EVENT_STATE_INACTIVE;
++ else
++ child_event->state = PERF_EVENT_STATE_OFF;
++
++ if (parent_event->attr.freq) {
++ u64 sample_period = parent_event->hw.sample_period;
++ struct hw_perf_event *hwc = &child_event->hw;
++
++ hwc->sample_period = sample_period;
++ hwc->last_period = sample_period;
++
++ local64_set(&hwc->period_left, sample_period);
++ }
++
++ child_event->ctx = child_ctx;
++ child_event->overflow_handler = parent_event->overflow_handler;
++ child_event->overflow_handler_context
++ = parent_event->overflow_handler_context;
++
++ /*
++ * Precalculate sample_data sizes
++ */
++ perf_event__header_size(child_event);
++ perf_event__id_header_size(child_event);
++
++ /*
++ * Link it up in the child's context:
++ */
++ raw_spin_lock_irqsave(&child_ctx->lock, flags);
++ add_event_to_ctx(child_event, child_ctx);
++ raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
++
++ /*
++ * Link this into the parent event's child list
++ */
++ WARN_ON_ONCE(parent_event->ctx->parent_ctx);
++ mutex_lock(&parent_event->child_mutex);
++ list_add_tail(&child_event->child_list, &parent_event->child_list);
++ mutex_unlock(&parent_event->child_mutex);
++
++ return child_event;
++}
++
++static int inherit_group(struct perf_event *parent_event,
++ struct task_struct *parent,
++ struct perf_event_context *parent_ctx,
++ struct task_struct *child,
++ struct perf_event_context *child_ctx)
++{
++ struct perf_event *leader;
++ struct perf_event *sub;
++ struct perf_event *child_ctr;
++
++ leader = inherit_event(parent_event, parent, parent_ctx,
++ child, NULL, child_ctx);
++ if (IS_ERR(leader))
++ return PTR_ERR(leader);
++ list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
++ child_ctr = inherit_event(sub, parent, parent_ctx,
++ child, leader, child_ctx);
++ if (IS_ERR(child_ctr))
++ return PTR_ERR(child_ctr);
++ }
++ return 0;
++}
++
++static int
++inherit_task_group(struct perf_event *event, struct task_struct *parent,
++ struct perf_event_context *parent_ctx,
++ struct task_struct *child, int ctxn,
++ int *inherited_all)
++{
++ int ret;
++ struct perf_event_context *child_ctx;
++
++ if (!event->attr.inherit) {
++ *inherited_all = 0;
++ return 0;
++ }
++
++ child_ctx = child->perf_event_ctxp[ctxn];
++ if (!child_ctx) {
++ /*
++ * This is executed from the parent task context, so
++ * inherit events that have been marked for cloning.
++ * First allocate and initialize a context for the
++ * child.
++ */
++
++ child_ctx = alloc_perf_context(parent_ctx->pmu, child);
++ if (!child_ctx)
++ return -ENOMEM;
++
++ child->perf_event_ctxp[ctxn] = child_ctx;
++ }
++
++ ret = inherit_group(event, parent, parent_ctx,
++ child, child_ctx);
++
++ if (ret)
++ *inherited_all = 0;
++
++ return ret;
++}
++
++/*
++ * Initialize the perf_event context in task_struct
++ */
++static int perf_event_init_context(struct task_struct *child, int ctxn)
++{
++ struct perf_event_context *child_ctx, *parent_ctx;
++ struct perf_event_context *cloned_ctx;
++ struct perf_event *event;
++ struct task_struct *parent = current;
++ int inherited_all = 1;
++ unsigned long flags;
++ int ret = 0;
++
++ if (likely(!parent->perf_event_ctxp[ctxn]))
++ return 0;
++
++ /*
++ * If the parent's context is a clone, pin it so it won't get
++ * swapped under us.
++ */
++ parent_ctx = perf_pin_task_context(parent, ctxn);
++ if (!parent_ctx)
++ return 0;
++
++ /*
++ * No need to check if parent_ctx != NULL here; since we saw
++ * it non-NULL earlier, the only reason for it to become NULL
++ * is if we exit, and since we're currently in the middle of
++ * a fork we can't be exiting at the same time.
++ */
++
++ /*
++ * Lock the parent list. No need to lock the child - not PID
++ * hashed yet and not running, so nobody can access it.
++ */
++ mutex_lock(&parent_ctx->mutex);
++
++ /*
++ * We don't have to disable NMIs - we are only looking at
++ * the list, not manipulating it:
++ */
++ list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
++ ret = inherit_task_group(event, parent, parent_ctx,
++ child, ctxn, &inherited_all);
++ if (ret)
++ break;
++ }
++
++ /*
++ * We can't hold ctx->lock when iterating the ->flexible_groups list due
++ * to allocations, but we need to prevent rotation because
++ * rotate_ctx() will change the list from interrupt context.
++ */
++ raw_spin_lock_irqsave(&parent_ctx->lock, flags);
++ parent_ctx->rotate_disable = 1;
++ raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
++
++ list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
++ ret = inherit_task_group(event, parent, parent_ctx,
++ child, ctxn, &inherited_all);
++ if (ret)
++ break;
++ }
++
++ raw_spin_lock_irqsave(&parent_ctx->lock, flags);
++ parent_ctx->rotate_disable = 0;
++
++ child_ctx = child->perf_event_ctxp[ctxn];
++
++ if (child_ctx && inherited_all) {
++ /*
++ * Mark the child context as a clone of the parent
++ * context, or of whatever the parent is a clone of.
++ *
++ * Note that if the parent is a clone, the holding of
++ * parent_ctx->lock prevents it from being uncloned.
++ */
++ cloned_ctx = parent_ctx->parent_ctx;
++ if (cloned_ctx) {
++ child_ctx->parent_ctx = cloned_ctx;
++ child_ctx->parent_gen = parent_ctx->parent_gen;
++ } else {
++ child_ctx->parent_ctx = parent_ctx;
++ child_ctx->parent_gen = parent_ctx->generation;
++ }
++ get_ctx(child_ctx->parent_ctx);
++ }
++
++ raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
++ mutex_unlock(&parent_ctx->mutex);
++
++ perf_unpin_context(parent_ctx);
++ put_ctx(parent_ctx);
++
++ return ret;
++}
++
++/*
++ * Initialize the perf_event context in task_struct
++ */
++int perf_event_init_task(struct task_struct *child)
++{
++ int ctxn, ret;
++
++ memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
++ mutex_init(&child->perf_event_mutex);
++ INIT_LIST_HEAD(&child->perf_event_list);
++
++ for_each_task_context_nr(ctxn) {
++ ret = perf_event_init_context(child, ctxn);
++ if (ret) {
++ perf_event_free_task(child);
++ return ret;
++ }
++ }
++
++ return 0;
++}
++
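++/*
++ * Called from copy_process() at fork time (assumed call site): every
++ * new task has its contexts set up here, and any failure unwinds
++ * through perf_event_free_task() above before the fork is aborted.
++ */
++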
++static void __init perf_event_init_all_cpus(void)
++{
++ struct swevent_htable *swhash;
++ int cpu;
++
++ for_each_possible_cpu(cpu) {
++ swhash = &per_cpu(swevent_htable, cpu);
++ mutex_init(&swhash->hlist_mutex);
++ INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
++ }
++}
++
++static void perf_event_init_cpu(int cpu)
++{
++ struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
++
++ mutex_lock(&swhash->hlist_mutex);
++ swhash->online = true;
++ if (swhash->hlist_refcount > 0) {
++ struct swevent_hlist *hlist;
++
++ hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
++ WARN_ON(!hlist);
++ rcu_assign_pointer(swhash->swevent_hlist, hlist);
++ }
++ mutex_unlock(&swhash->hlist_mutex);
++}
++
++#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
++static void perf_pmu_rotate_stop(struct pmu *pmu)
++{
++ struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
++
++ WARN_ON(!irqs_disabled());
++
++ list_del_init(&cpuctx->rotation_list);
++}
++
++static void __perf_event_exit_context(void *__info)
++{
++ struct remove_event re = { .detach_group = true };
++ struct perf_event_context *ctx = __info;
++
++ perf_pmu_rotate_stop(ctx->pmu);
++
++ rcu_read_lock();
++ list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
++ __perf_remove_from_context(&re);
++ rcu_read_unlock();
++}
++
++static void perf_event_exit_cpu_context(int cpu)
++{
++ struct perf_event_context *ctx;
++ struct pmu *pmu;
++ int idx;
++
++ idx = srcu_read_lock(&pmus_srcu);
++ list_for_each_entry_rcu(pmu, &pmus, entry) {
++ ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
++
++ mutex_lock(&ctx->mutex);
++ smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);
++ mutex_unlock(&ctx->mutex);
++ }
++ srcu_read_unlock(&pmus_srcu, idx);
++}
++
++static void perf_event_exit_cpu(int cpu)
++{
++ struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
++
++ perf_event_exit_cpu_context(cpu);
++
++ mutex_lock(&swhash->hlist_mutex);
++ swhash->online = false;
++ swevent_hlist_release(swhash);
++ mutex_unlock(&swhash->hlist_mutex);
++}
++#else
++static inline void perf_event_exit_cpu(int cpu) { }
++#endif
++
++static int
++perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
++{
++ int cpu;
++
++ for_each_online_cpu(cpu)
++ perf_event_exit_cpu(cpu);
++
++ return NOTIFY_OK;
++}
++
++/*
++ * Run the perf reboot notifier at the very last possible moment so that
++ * the generic watchdog code runs as long as possible.
++ */
++static struct notifier_block perf_reboot_notifier = {
++ .notifier_call = perf_reboot,
++ .priority = INT_MIN,
++};
++
++static int
++perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
++{
++ unsigned int cpu = (long)hcpu;
++
++ switch (action & ~CPU_TASKS_FROZEN) {
++
++ case CPU_UP_PREPARE:
++ case CPU_DOWN_FAILED:
++ perf_event_init_cpu(cpu);
++ break;
++
++ case CPU_UP_CANCELED:
++ case CPU_DOWN_PREPARE:
++ perf_event_exit_cpu(cpu);
++ break;
++ default:
++ break;
++ }
++
++ return NOTIFY_OK;
++}
++
++void __init perf_event_init(void)
++{
++ int ret;
++
++ idr_init(&pmu_idr);
++
++ perf_event_init_all_cpus();
++ init_srcu_struct(&pmus_srcu);
++ perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
++ perf_pmu_register(&perf_cpu_clock, NULL, -1);
++ perf_pmu_register(&perf_task_clock, NULL, -1);
++ perf_tp_register();
++ perf_cpu_notifier(perf_cpu_notify);
++ register_reboot_notifier(&perf_reboot_notifier);
++
++ ret = init_hw_breakpoint();
++ WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
++
++ /* do not patch jump label more than once per second */
++ jump_label_rate_limit(&perf_sched_events, HZ);
++
++ /*
++ * Build-time assertion that we keep the data_head at the intended
++ * location. IOW, validation that we got the __reserved[] size right.
++ */
++ BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
++ != 1024);
++}
++
++static int __init perf_event_sysfs_init(void)
++{
++ struct pmu *pmu;
++ int ret;
++
++ mutex_lock(&pmus_lock);
++
++ ret = bus_register(&pmu_bus);
++ if (ret)
++ goto unlock;
++
++ list_for_each_entry(pmu, &pmus, entry) {
++ if (!pmu->name || pmu->type < 0)
++ continue;
++
++ ret = pmu_dev_alloc(pmu);
++ WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
++ }
++ pmu_bus_running = 1;
++ ret = 0;
++
++unlock:
++ mutex_unlock(&pmus_lock);
++
++ return ret;
++}
++device_initcall(perf_event_sysfs_init);
++
++#ifdef CONFIG_CGROUP_PERF
++static struct cgroup_subsys_state *
++perf_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
++{
++ struct perf_cgroup *jc;
++
++ jc = kzalloc(sizeof(*jc), GFP_KERNEL);
++ if (!jc)
++ return ERR_PTR(-ENOMEM);
++
++ jc->info = alloc_percpu(struct perf_cgroup_info);
++ if (!jc->info) {
++ kfree(jc);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ return &jc->css;
++}
++
++static void perf_cgroup_css_free(struct cgroup_subsys_state *css)
++{
++ struct perf_cgroup *jc = container_of(css, struct perf_cgroup, css);
++
++ free_percpu(jc->info);
++ kfree(jc);
++}
++
++static int __perf_cgroup_move(void *info)
++{
++ struct task_struct *task = info;
++ perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
++ return 0;
++}
++
++static void perf_cgroup_attach(struct cgroup_subsys_state *css,
++ struct cgroup_taskset *tset)
++{
++ struct task_struct *task;
++
++ cgroup_taskset_for_each(task, tset)
++ task_function_call(task, __perf_cgroup_move, task);
++}
++
++static void perf_cgroup_exit(struct cgroup_subsys_state *css,
++ struct cgroup_subsys_state *old_css,
++ struct task_struct *task)
++{
++ /*
++ * cgroup_exit() is called in the copy_process() failure path.
++ * Ignore this case since the task hasn't run yet; this avoids
++ * trying to poke a half-freed task state from generic code.
++ */
++ if (!(task->flags & PF_EXITING))
++ return;
++
++ task_function_call(task, __perf_cgroup_move, task);
++}
++
++struct cgroup_subsys perf_event_cgrp_subsys = {
++ .css_alloc = perf_cgroup_css_alloc,
++ .css_free = perf_cgroup_css_free,
++ .exit = perf_cgroup_exit,
++ .attach = perf_cgroup_attach,
++};
++#endif /* CONFIG_CGROUP_PERF */
+diff -Nur linux-3.18.12.orig/kernel/exit.c linux-3.18.12/kernel/exit.c
+--- linux-3.18.12.orig/kernel/exit.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/exit.c 2015-04-26 13:32:22.431684003 -0500
@@ -147,7 +147,7 @@
* Do this under ->siglock, we can race with another thread
* doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
@@ -11725,9 +20169,9 @@ diff -Nur linux-3.18.10.orig/kernel/exit.c linux-3.18.10/kernel/exit.c
tsk->sighand = NULL;
spin_unlock(&sighand->siglock);
-diff -Nur linux-3.18.10.orig/kernel/fork.c linux-3.18.10/kernel/fork.c
---- linux-3.18.10.orig/kernel/fork.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/fork.c 2015-03-26 12:42:18.675588336 +0100
+diff -Nur linux-3.18.12.orig/kernel/fork.c linux-3.18.12/kernel/fork.c
+--- linux-3.18.12.orig/kernel/fork.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/fork.c 2015-04-26 13:32:22.435684003 -0500
@@ -97,7 +97,7 @@
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
@@ -11825,9 +20269,9 @@ diff -Nur linux-3.18.10.orig/kernel/fork.c linux-3.18.10/kernel/fork.c
#ifdef CONFIG_LOCKDEP
p->lockdep_depth = 0; /* no locks held yet */
p->curr_chain_key = 0;
-diff -Nur linux-3.18.10.orig/kernel/futex.c linux-3.18.10/kernel/futex.c
---- linux-3.18.10.orig/kernel/futex.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/futex.c 2015-03-26 12:42:18.675588336 +0100
+diff -Nur linux-3.18.12.orig/kernel/futex.c linux-3.18.12/kernel/futex.c
+--- linux-3.18.12.orig/kernel/futex.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/futex.c 2015-04-26 13:32:22.435684003 -0500
@@ -738,7 +738,9 @@
* task still owns the PI-state:
*/
@@ -11966,9 +20410,9 @@ diff -Nur linux-3.18.10.orig/kernel/futex.c linux-3.18.10/kernel/futex.c
/*
* Fixup the pi_state owner and possibly acquire the lock if we
* haven't already.
-diff -Nur linux-3.18.10.orig/kernel/irq/handle.c linux-3.18.10/kernel/irq/handle.c
---- linux-3.18.10.orig/kernel/irq/handle.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/irq/handle.c 2015-03-26 12:42:18.675588336 +0100
+diff -Nur linux-3.18.12.orig/kernel/irq/handle.c linux-3.18.12/kernel/irq/handle.c
+--- linux-3.18.12.orig/kernel/irq/handle.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/irq/handle.c 2015-04-26 13:32:22.435684003 -0500
@@ -133,6 +133,8 @@
irqreturn_t
handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
@@ -11991,9 +20435,9 @@ diff -Nur linux-3.18.10.orig/kernel/irq/handle.c linux-3.18.10/kernel/irq/handle
if (!noirqdebug)
note_interrupt(irq, desc, retval);
-diff -Nur linux-3.18.10.orig/kernel/irq/manage.c linux-3.18.10/kernel/irq/manage.c
---- linux-3.18.10.orig/kernel/irq/manage.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/irq/manage.c 2015-03-26 12:42:18.675588336 +0100
+diff -Nur linux-3.18.12.orig/kernel/irq/manage.c linux-3.18.12/kernel/irq/manage.c
+--- linux-3.18.12.orig/kernel/irq/manage.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/irq/manage.c 2015-04-26 13:32:22.435684003 -0500
@@ -22,6 +22,7 @@
#include "internals.h"
@@ -12166,9 +20610,9 @@ diff -Nur linux-3.18.10.orig/kernel/irq/manage.c linux-3.18.10/kernel/irq/manage
/* Set default affinity mask once everything is setup */
setup_affinity(irq, desc, mask);
-diff -Nur linux-3.18.10.orig/kernel/irq/settings.h linux-3.18.10/kernel/irq/settings.h
---- linux-3.18.10.orig/kernel/irq/settings.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/irq/settings.h 2015-03-26 12:42:18.675588336 +0100
+diff -Nur linux-3.18.12.orig/kernel/irq/settings.h linux-3.18.12/kernel/irq/settings.h
+--- linux-3.18.12.orig/kernel/irq/settings.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/irq/settings.h 2015-04-26 13:32:22.435684003 -0500
@@ -15,6 +15,7 @@
_IRQ_NESTED_THREAD = IRQ_NESTED_THREAD,
_IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID,
@@ -12202,9 +20646,9 @@ diff -Nur linux-3.18.10.orig/kernel/irq/settings.h linux-3.18.10/kernel/irq/sett
static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
{
return desc->status_use_accessors & _IRQ_PER_CPU;
-diff -Nur linux-3.18.10.orig/kernel/irq/spurious.c linux-3.18.10/kernel/irq/spurious.c
---- linux-3.18.10.orig/kernel/irq/spurious.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/irq/spurious.c 2015-03-26 12:42:18.675588336 +0100
+diff -Nur linux-3.18.12.orig/kernel/irq/spurious.c linux-3.18.12/kernel/irq/spurious.c
+--- linux-3.18.12.orig/kernel/irq/spurious.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/irq/spurious.c 2015-04-26 13:32:22.435684003 -0500
@@ -444,6 +444,10 @@
static int __init irqfixup_setup(char *str)
@@ -12227,10 +20671,16 @@ diff -Nur linux-3.18.10.orig/kernel/irq/spurious.c linux-3.18.10/kernel/irq/spur
irqfixup = 2;
printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
"enabled\n");
-diff -Nur linux-3.18.10.orig/kernel/irq_work.c linux-3.18.10/kernel/irq_work.c
---- linux-3.18.10.orig/kernel/irq_work.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/irq_work.c 2015-03-26 12:42:18.675588336 +0100
-@@ -22,7 +22,9 @@
+diff -Nur linux-3.18.12.orig/kernel/irq_work.c linux-3.18.12/kernel/irq_work.c
+--- linux-3.18.12.orig/kernel/irq_work.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/irq_work.c 2015-04-26 13:32:22.435684003 -0500
+@@ -17,12 +17,15 @@
+ #include <linux/cpu.h>
+ #include <linux/notifier.h>
+ #include <linux/smp.h>
++#include <linux/interrupt.h>
+ #include <asm/processor.h>
+
static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);
@@ -12241,19 +20691,7 @@ diff -Nur linux-3.18.10.orig/kernel/irq_work.c linux-3.18.10/kernel/irq_work.c
/*
* Claim the entry so that no one else will poke at it.
*/
-@@ -49,7 +51,11 @@
- return true;
- }
-
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+void arch_irq_work_raise(void)
-+#else
- void __weak arch_irq_work_raise(void)
-+#endif
- {
- /*
- * Lame architectures will get the timer tick callback
-@@ -65,6 +71,8 @@
+@@ -65,6 +68,8 @@
*/
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
@@ -12262,7 +20700,7 @@ diff -Nur linux-3.18.10.orig/kernel/irq_work.c linux-3.18.10/kernel/irq_work.c
/* All work should have been flushed before going offline */
WARN_ON_ONCE(cpu_is_offline(cpu));
-@@ -75,7 +83,19 @@
+@@ -75,7 +80,19 @@
if (!irq_work_claim(work))
return false;
@@ -12283,7 +20721,7 @@ diff -Nur linux-3.18.10.orig/kernel/irq_work.c linux-3.18.10/kernel/irq_work.c
arch_send_call_function_single_ipi(cpu);
return true;
-@@ -93,7 +113,15 @@
+@@ -93,7 +110,16 @@
/* Queue the entry and raise the IPI if needed. */
preempt_disable();
@@ -12293,14 +20731,15 @@ diff -Nur linux-3.18.10.orig/kernel/irq_work.c linux-3.18.10/kernel/irq_work.c
+ if (llist_add(&work->llnode, this_cpu_ptr(&hirq_work_list)))
+ arch_irq_work_raise();
+ } else {
-+ if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)))
-+ arch_irq_work_raise();
++ if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
++ tick_nohz_tick_stopped())
++ raise_softirq(TIMER_SOFTIRQ);
+ }
+#else
if (work->flags & IRQ_WORK_LAZY) {
if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
tick_nohz_tick_stopped())
-@@ -102,6 +130,7 @@
+@@ -102,6 +128,7 @@
if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
arch_irq_work_raise();
}
@@ -12308,7 +20747,7 @@ diff -Nur linux-3.18.10.orig/kernel/irq_work.c linux-3.18.10/kernel/irq_work.c
preempt_enable();
-@@ -116,9 +145,12 @@
+@@ -116,9 +143,12 @@
raised = this_cpu_ptr(&raised_list);
lazy = this_cpu_ptr(&lazy_list);
@@ -12323,7 +20762,7 @@ diff -Nur linux-3.18.10.orig/kernel/irq_work.c linux-3.18.10/kernel/irq_work.c
/* All work should have been flushed before going offline */
WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
-@@ -132,7 +164,9 @@
+@@ -132,7 +162,9 @@
struct irq_work *work;
struct llist_node *llnode;
@@ -12333,41 +20772,36 @@ diff -Nur linux-3.18.10.orig/kernel/irq_work.c linux-3.18.10/kernel/irq_work.c
if (llist_empty(list))
return;
-@@ -168,6 +202,12 @@
+@@ -168,18 +200,26 @@
*/
void irq_work_run(void)
{
+#ifdef CONFIG_PREEMPT_RT_FULL
-+ if (in_irq()) {
-+ irq_work_run_list(this_cpu_ptr(&hirq_work_list));
-+ return;
-+ }
-+#endif
++ irq_work_run_list(this_cpu_ptr(&hirq_work_list));
++#else
irq_work_run_list(this_cpu_ptr(&raised_list));
irq_work_run_list(this_cpu_ptr(&lazy_list));
++#endif
}
-@@ -175,9 +215,16 @@
+ EXPORT_SYMBOL_GPL(irq_work_run);
void irq_work_tick(void)
{
-- struct llist_head *raised = &__get_cpu_var(raised_list);
-+ struct llist_head *raised;
-
-- if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
+#ifdef CONFIG_PREEMPT_RT_FULL
-+ if (in_irq()) {
-+ irq_work_run_list(this_cpu_ptr(&hirq_work_list));
-+ return;
-+ }
-+#endif
-+ raised = &__get_cpu_var(raised_list);
-+ if (!llist_empty(raised))
++ irq_work_run_list(this_cpu_ptr(&lazy_list));
++#else
+ struct llist_head *raised = &__get_cpu_var(raised_list);
+
+ if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
irq_work_run_list(raised);
irq_work_run_list(&__get_cpu_var(lazy_list));
++#endif
}
-diff -Nur linux-3.18.10.orig/kernel/Kconfig.locks linux-3.18.10/kernel/Kconfig.locks
---- linux-3.18.10.orig/kernel/Kconfig.locks 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/Kconfig.locks 2015-03-26 12:42:18.671588332 +0100
+
+ /*
+diff -Nur linux-3.18.12.orig/kernel/Kconfig.locks linux-3.18.12/kernel/Kconfig.locks
+--- linux-3.18.12.orig/kernel/Kconfig.locks 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/Kconfig.locks 2015-04-26 13:32:22.431684003 -0500
@@ -225,11 +225,11 @@
config MUTEX_SPIN_ON_OWNER
@@ -12382,9 +20816,9 @@ diff -Nur linux-3.18.10.orig/kernel/Kconfig.locks linux-3.18.10/kernel/Kconfig.l
config ARCH_USE_QUEUE_RWLOCK
bool
-diff -Nur linux-3.18.10.orig/kernel/Kconfig.preempt linux-3.18.10/kernel/Kconfig.preempt
---- linux-3.18.10.orig/kernel/Kconfig.preempt 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/Kconfig.preempt 2015-03-26 12:42:18.671588332 +0100
+diff -Nur linux-3.18.12.orig/kernel/Kconfig.preempt linux-3.18.12/kernel/Kconfig.preempt
+--- linux-3.18.12.orig/kernel/Kconfig.preempt 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/Kconfig.preempt 2015-04-26 13:32:22.431684003 -0500
@@ -1,3 +1,16 @@
+config PREEMPT
+ bool
@@ -12437,9 +20871,9 @@ diff -Nur linux-3.18.10.orig/kernel/Kconfig.preempt linux-3.18.10/kernel/Kconfig
endchoice
config PREEMPT_COUNT
-diff -Nur linux-3.18.10.orig/kernel/ksysfs.c linux-3.18.10/kernel/ksysfs.c
---- linux-3.18.10.orig/kernel/ksysfs.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/ksysfs.c 2015-03-26 12:42:18.675588336 +0100
+diff -Nur linux-3.18.12.orig/kernel/ksysfs.c linux-3.18.12/kernel/ksysfs.c
+--- linux-3.18.12.orig/kernel/ksysfs.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/ksysfs.c 2015-04-26 13:32:22.435684003 -0500
@@ -136,6 +136,15 @@
#endif /* CONFIG_KEXEC */
@@ -12466,9 +20900,9 @@ diff -Nur linux-3.18.10.orig/kernel/ksysfs.c linux-3.18.10/kernel/ksysfs.c
NULL
};
-diff -Nur linux-3.18.10.orig/kernel/locking/lglock.c linux-3.18.10/kernel/locking/lglock.c
---- linux-3.18.10.orig/kernel/locking/lglock.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/locking/lglock.c 2015-03-26 12:42:18.675588336 +0100
+diff -Nur linux-3.18.12.orig/kernel/locking/lglock.c linux-3.18.12/kernel/locking/lglock.c
+--- linux-3.18.12.orig/kernel/locking/lglock.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/locking/lglock.c 2015-04-26 13:32:22.435684003 -0500
@@ -4,6 +4,15 @@
#include <linux/cpu.h>
#include <linux/string.h>
@@ -12613,9 +21047,9 @@ diff -Nur linux-3.18.10.orig/kernel/locking/lglock.c linux-3.18.10/kernel/lockin
+ }
+}
+#endif
-diff -Nur linux-3.18.10.orig/kernel/locking/lockdep.c linux-3.18.10/kernel/locking/lockdep.c
---- linux-3.18.10.orig/kernel/locking/lockdep.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/locking/lockdep.c 2015-03-26 12:42:18.675588336 +0100
+diff -Nur linux-3.18.12.orig/kernel/locking/lockdep.c linux-3.18.12/kernel/locking/lockdep.c
+--- linux-3.18.12.orig/kernel/locking/lockdep.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/locking/lockdep.c 2015-04-26 13:32:22.435684003 -0500
@@ -3542,6 +3542,7 @@
}
}
@@ -12632,9 +21066,9 @@ diff -Nur linux-3.18.10.orig/kernel/locking/lockdep.c linux-3.18.10/kernel/locki
if (!debug_locks)
print_irqtrace_events(current);
-diff -Nur linux-3.18.10.orig/kernel/locking/Makefile linux-3.18.10/kernel/locking/Makefile
---- linux-3.18.10.orig/kernel/locking/Makefile 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/locking/Makefile 2015-03-26 12:42:18.675588336 +0100
+diff -Nur linux-3.18.12.orig/kernel/locking/Makefile linux-3.18.12/kernel/locking/Makefile
+--- linux-3.18.12.orig/kernel/locking/Makefile 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/locking/Makefile 2015-04-26 13:32:22.435684003 -0500
@@ -1,5 +1,5 @@
-obj-y += mutex.o semaphore.o rwsem.o mcs_spinlock.o
@@ -12666,9 +21100,9 @@ diff -Nur linux-3.18.10.orig/kernel/locking/Makefile linux-3.18.10/kernel/lockin
+obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o
obj-$(CONFIG_QUEUE_RWLOCK) += qrwlock.o
obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
-diff -Nur linux-3.18.10.orig/kernel/locking/percpu-rwsem.c linux-3.18.10/kernel/locking/percpu-rwsem.c
---- linux-3.18.10.orig/kernel/locking/percpu-rwsem.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/locking/percpu-rwsem.c 2015-03-26 12:42:18.675588336 +0100
+diff -Nur linux-3.18.12.orig/kernel/locking/percpu-rwsem.c linux-3.18.12/kernel/locking/percpu-rwsem.c
+--- linux-3.18.12.orig/kernel/locking/percpu-rwsem.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/locking/percpu-rwsem.c 2015-04-26 13:32:22.435684003 -0500
@@ -84,8 +84,12 @@
down_read(&brw->rw_sem);
@@ -12682,9 +21116,9 @@ diff -Nur linux-3.18.10.orig/kernel/locking/percpu-rwsem.c linux-3.18.10/kernel/
}
void percpu_up_read(struct percpu_rw_semaphore *brw)
-diff -Nur linux-3.18.10.orig/kernel/locking/rt.c linux-3.18.10/kernel/locking/rt.c
---- linux-3.18.10.orig/kernel/locking/rt.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.10/kernel/locking/rt.c 2015-03-26 12:42:18.675588336 +0100
+diff -Nur linux-3.18.12.orig/kernel/locking/rt.c linux-3.18.12/kernel/locking/rt.c
+--- linux-3.18.12.orig/kernel/locking/rt.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.12/kernel/locking/rt.c 2015-04-26 13:32:22.435684003 -0500
@@ -0,0 +1,456 @@
+/*
+ * kernel/rt.c
@@ -13142,9 +21576,9 @@ diff -Nur linux-3.18.10.orig/kernel/locking/rt.c linux-3.18.10/kernel/locking/rt
+ return 1;
+}
+EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
-diff -Nur linux-3.18.10.orig/kernel/locking/rtmutex.c linux-3.18.10/kernel/locking/rtmutex.c
---- linux-3.18.10.orig/kernel/locking/rtmutex.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/locking/rtmutex.c 2015-03-26 12:45:24.871805156 +0100
+diff -Nur linux-3.18.12.orig/kernel/locking/rtmutex.c linux-3.18.12/kernel/locking/rtmutex.c
+--- linux-3.18.12.orig/kernel/locking/rtmutex.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/locking/rtmutex.c 2015-04-26 13:32:22.439684003 -0500
@@ -7,6 +7,11 @@
* Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
* Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
@@ -14174,9 +22608,9 @@ diff -Nur linux-3.18.10.orig/kernel/locking/rtmutex.c linux-3.18.10/kernel/locki
+}
+EXPORT_SYMBOL(ww_mutex_unlock);
+#endif
-diff -Nur linux-3.18.10.orig/kernel/locking/rtmutex_common.h linux-3.18.10/kernel/locking/rtmutex_common.h
---- linux-3.18.10.orig/kernel/locking/rtmutex_common.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/locking/rtmutex_common.h 2015-03-26 12:42:18.675588336 +0100
+diff -Nur linux-3.18.12.orig/kernel/locking/rtmutex_common.h linux-3.18.12/kernel/locking/rtmutex_common.h
+--- linux-3.18.12.orig/kernel/locking/rtmutex_common.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/locking/rtmutex_common.h 2015-04-26 13:32:22.439684003 -0500
@@ -49,6 +49,7 @@
struct rb_node pi_tree_entry;
struct task_struct *task;
@@ -14210,9 +22644,9 @@ diff -Nur linux-3.18.10.orig/kernel/locking/rtmutex_common.h linux-3.18.10/kerne
+}
+
#endif
-diff -Nur linux-3.18.10.orig/kernel/locking/spinlock.c linux-3.18.10/kernel/locking/spinlock.c
---- linux-3.18.10.orig/kernel/locking/spinlock.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/locking/spinlock.c 2015-03-26 12:42:18.675588336 +0100
+diff -Nur linux-3.18.12.orig/kernel/locking/spinlock.c linux-3.18.12/kernel/locking/spinlock.c
+--- linux-3.18.12.orig/kernel/locking/spinlock.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/locking/spinlock.c 2015-04-26 13:32:22.439684003 -0500
@@ -124,8 +124,11 @@
* __[spin|read|write]_lock_bh()
*/
@@ -14243,9 +22677,9 @@ diff -Nur linux-3.18.10.orig/kernel/locking/spinlock.c linux-3.18.10/kernel/lock
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
-diff -Nur linux-3.18.10.orig/kernel/locking/spinlock_debug.c linux-3.18.10/kernel/locking/spinlock_debug.c
---- linux-3.18.10.orig/kernel/locking/spinlock_debug.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/locking/spinlock_debug.c 2015-03-26 12:42:18.675588336 +0100
+diff -Nur linux-3.18.12.orig/kernel/locking/spinlock_debug.c linux-3.18.12/kernel/locking/spinlock_debug.c
+--- linux-3.18.12.orig/kernel/locking/spinlock_debug.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/locking/spinlock_debug.c 2015-04-26 13:32:22.439684003 -0500
@@ -31,6 +31,7 @@
EXPORT_SYMBOL(__raw_spin_lock_init);
@@ -14276,9 +22710,9 @@ diff -Nur linux-3.18.10.orig/kernel/locking/spinlock_debug.c linux-3.18.10/kerne
}
+
+#endif
-diff -Nur linux-3.18.10.orig/kernel/panic.c linux-3.18.10/kernel/panic.c
---- linux-3.18.10.orig/kernel/panic.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/panic.c 2015-03-26 12:42:18.675588336 +0100
+diff -Nur linux-3.18.12.orig/kernel/panic.c linux-3.18.12/kernel/panic.c
+--- linux-3.18.12.orig/kernel/panic.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/panic.c 2015-04-26 13:32:22.439684003 -0500
@@ -384,9 +384,11 @@
static int init_oops_id(void)
@@ -14291,9 +22725,9 @@ diff -Nur linux-3.18.10.orig/kernel/panic.c linux-3.18.10/kernel/panic.c
oops_id++;
return 0;
-diff -Nur linux-3.18.10.orig/kernel/power/hibernate.c linux-3.18.10/kernel/power/hibernate.c
---- linux-3.18.10.orig/kernel/power/hibernate.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/power/hibernate.c 2015-03-26 12:42:18.675588336 +0100
+diff -Nur linux-3.18.12.orig/kernel/power/hibernate.c linux-3.18.12/kernel/power/hibernate.c
+--- linux-3.18.12.orig/kernel/power/hibernate.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/power/hibernate.c 2015-04-26 13:32:22.439684003 -0500
@@ -287,6 +287,8 @@
local_irq_disable();
@@ -14343,9 +22777,9 @@ diff -Nur linux-3.18.10.orig/kernel/power/hibernate.c linux-3.18.10/kernel/power
local_irq_enable();
enable_nonboot_cpus();
-diff -Nur linux-3.18.10.orig/kernel/power/suspend.c linux-3.18.10/kernel/power/suspend.c
---- linux-3.18.10.orig/kernel/power/suspend.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/power/suspend.c 2015-03-26 12:42:18.675588336 +0100
+diff -Nur linux-3.18.12.orig/kernel/power/suspend.c linux-3.18.12/kernel/power/suspend.c
+--- linux-3.18.12.orig/kernel/power/suspend.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/power/suspend.c 2015-04-26 13:32:22.439684003 -0500
@@ -318,6 +318,8 @@
arch_suspend_disable_irqs();
BUG_ON(!irqs_disabled());
@@ -14364,9 +22798,9 @@ diff -Nur linux-3.18.10.orig/kernel/power/suspend.c linux-3.18.10/kernel/power/s
arch_suspend_enable_irqs();
BUG_ON(irqs_disabled());
-diff -Nur linux-3.18.10.orig/kernel/printk/printk.c linux-3.18.10/kernel/printk/printk.c
---- linux-3.18.10.orig/kernel/printk/printk.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/printk/printk.c 2015-03-26 12:42:18.675588336 +0100
+diff -Nur linux-3.18.12.orig/kernel/printk/printk.c linux-3.18.12/kernel/printk/printk.c
+--- linux-3.18.12.orig/kernel/printk/printk.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/printk/printk.c 2015-04-26 13:32:22.439684003 -0500
@@ -1165,6 +1165,7 @@
{
char *text;
@@ -14622,9 +23056,9 @@ diff -Nur linux-3.18.10.orig/kernel/printk/printk.c linux-3.18.10/kernel/printk/
}
console_locked = 0;
-diff -Nur linux-3.18.10.orig/kernel/ptrace.c linux-3.18.10/kernel/ptrace.c
---- linux-3.18.10.orig/kernel/ptrace.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/ptrace.c 2015-03-26 12:42:18.675588336 +0100
+diff -Nur linux-3.18.12.orig/kernel/ptrace.c linux-3.18.12/kernel/ptrace.c
+--- linux-3.18.12.orig/kernel/ptrace.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/ptrace.c 2015-04-26 13:32:22.439684003 -0500
@@ -129,7 +129,12 @@
spin_lock_irq(&task->sighand->siglock);
@@ -14639,9 +23073,9 @@ diff -Nur linux-3.18.10.orig/kernel/ptrace.c linux-3.18.10/kernel/ptrace.c
ret = true;
}
spin_unlock_irq(&task->sighand->siglock);
-diff -Nur linux-3.18.10.orig/kernel/rcu/tiny.c linux-3.18.10/kernel/rcu/tiny.c
---- linux-3.18.10.orig/kernel/rcu/tiny.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/rcu/tiny.c 2015-03-26 12:42:18.675588336 +0100
+diff -Nur linux-3.18.12.orig/kernel/rcu/tiny.c linux-3.18.12/kernel/rcu/tiny.c
+--- linux-3.18.12.orig/kernel/rcu/tiny.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/rcu/tiny.c 2015-04-26 13:32:22.439684003 -0500
@@ -370,6 +370,7 @@
}
EXPORT_SYMBOL_GPL(call_rcu_sched);
@@ -14658,9 +23092,9 @@ diff -Nur linux-3.18.10.orig/kernel/rcu/tiny.c linux-3.18.10/kernel/rcu/tiny.c
void rcu_init(void)
{
-diff -Nur linux-3.18.10.orig/kernel/rcu/tree.c linux-3.18.10/kernel/rcu/tree.c
---- linux-3.18.10.orig/kernel/rcu/tree.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/rcu/tree.c 2015-03-26 12:42:18.679588341 +0100
+diff -Nur linux-3.18.12.orig/kernel/rcu/tree.c linux-3.18.12/kernel/rcu/tree.c
+--- linux-3.18.12.orig/kernel/rcu/tree.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/rcu/tree.c 2015-04-26 13:32:22.439684003 -0500
@@ -56,6 +56,11 @@
#include <linux/random.h>
#include <linux/ftrace_event.h>
@@ -14963,9 +23397,9 @@ diff -Nur linux-3.18.10.orig/kernel/rcu/tree.c linux-3.18.10/kernel/rcu/tree.c
/*
* We don't need protection against CPU-hotplug here because
-diff -Nur linux-3.18.10.orig/kernel/rcu/tree.h linux-3.18.10/kernel/rcu/tree.h
---- linux-3.18.10.orig/kernel/rcu/tree.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/rcu/tree.h 2015-03-26 12:42:18.679588341 +0100
+diff -Nur linux-3.18.12.orig/kernel/rcu/tree.h linux-3.18.12/kernel/rcu/tree.h
+--- linux-3.18.12.orig/kernel/rcu/tree.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/rcu/tree.h 2015-04-26 13:32:22.443684003 -0500
@@ -28,6 +28,7 @@
#include <linux/cpumask.h>
#include <linux/seqlock.h>
@@ -15025,9 +23459,9 @@ diff -Nur linux-3.18.10.orig/kernel/rcu/tree.h linux-3.18.10/kernel/rcu/tree.h
static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
struct rcu_node *rnp);
#endif /* #ifdef CONFIG_RCU_BOOST */
-diff -Nur linux-3.18.10.orig/kernel/rcu/tree_plugin.h linux-3.18.10/kernel/rcu/tree_plugin.h
---- linux-3.18.10.orig/kernel/rcu/tree_plugin.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/rcu/tree_plugin.h 2015-03-26 12:42:18.679588341 +0100
+diff -Nur linux-3.18.12.orig/kernel/rcu/tree_plugin.h linux-3.18.12/kernel/rcu/tree_plugin.h
+--- linux-3.18.12.orig/kernel/rcu/tree_plugin.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/rcu/tree_plugin.h 2015-04-26 13:32:22.443684003 -0500
@@ -24,12 +24,6 @@
* Paul E. McKenney <paulmck@linux.vnet.ibm.com>
*/
@@ -15373,9 +23807,9 @@ diff -Nur linux-3.18.10.orig/kernel/rcu/tree_plugin.h linux-3.18.10/kernel/rcu/t
rdp->nocb_follower_tail = &rdp->nocb_follower_head;
}
-diff -Nur linux-3.18.10.orig/kernel/rcu/update.c linux-3.18.10/kernel/rcu/update.c
---- linux-3.18.10.orig/kernel/rcu/update.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/rcu/update.c 2015-03-26 12:42:18.679588341 +0100
+diff -Nur linux-3.18.12.orig/kernel/rcu/update.c linux-3.18.12/kernel/rcu/update.c
+--- linux-3.18.12.orig/kernel/rcu/update.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/rcu/update.c 2015-04-26 13:32:22.443684003 -0500
@@ -170,6 +170,7 @@
}
EXPORT_SYMBOL_GPL(rcu_read_lock_held);
@@ -15392,9 +23826,9 @@ diff -Nur linux-3.18.10.orig/kernel/rcu/update.c linux-3.18.10/kernel/rcu/update
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-diff -Nur linux-3.18.10.orig/kernel/relay.c linux-3.18.10/kernel/relay.c
---- linux-3.18.10.orig/kernel/relay.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/relay.c 2015-03-26 12:42:18.679588341 +0100
+diff -Nur linux-3.18.12.orig/kernel/relay.c linux-3.18.12/kernel/relay.c
+--- linux-3.18.12.orig/kernel/relay.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/relay.c 2015-04-26 13:32:22.443684003 -0500
@@ -339,6 +339,10 @@
{
struct rchan_buf *buf = (struct rchan_buf *)data;
@@ -15430,9 +23864,9 @@ diff -Nur linux-3.18.10.orig/kernel/relay.c linux-3.18.10/kernel/relay.c
}
old = buf->data;
-diff -Nur linux-3.18.10.orig/kernel/res_counter.c linux-3.18.10/kernel/res_counter.c
---- linux-3.18.10.orig/kernel/res_counter.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/res_counter.c 2015-03-26 12:42:18.679588341 +0100
+diff -Nur linux-3.18.12.orig/kernel/res_counter.c linux-3.18.12/kernel/res_counter.c
+--- linux-3.18.12.orig/kernel/res_counter.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/res_counter.c 2015-04-26 13:32:22.443684003 -0500
@@ -59,7 +59,7 @@
r = ret = 0;
@@ -15469,9 +23903,9 @@ diff -Nur linux-3.18.10.orig/kernel/res_counter.c linux-3.18.10/kernel/res_count
return ret;
}
-diff -Nur linux-3.18.10.orig/kernel/sched/completion.c linux-3.18.10/kernel/sched/completion.c
---- linux-3.18.10.orig/kernel/sched/completion.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/sched/completion.c 2015-03-26 12:42:18.679588341 +0100
+diff -Nur linux-3.18.12.orig/kernel/sched/completion.c linux-3.18.12/kernel/sched/completion.c
+--- linux-3.18.12.orig/kernel/sched/completion.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/sched/completion.c 2015-04-26 13:32:22.443684003 -0500
@@ -30,10 +30,10 @@
{
unsigned long flags;
@@ -15566,9 +24000,9 @@ diff -Nur linux-3.18.10.orig/kernel/sched/completion.c linux-3.18.10/kernel/sche
return ret;
}
EXPORT_SYMBOL(completion_done);
-diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core.c
---- linux-3.18.10.orig/kernel/sched/core.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/sched/core.c 2015-03-26 12:42:18.679588341 +0100
+diff -Nur linux-3.18.12.orig/kernel/sched/core.c linux-3.18.12/kernel/sched/core.c
+--- linux-3.18.12.orig/kernel/sched/core.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/sched/core.c 2015-04-26 13:32:22.443684003 -0500
@@ -280,7 +280,11 @@
* Number of tasks to iterate in a single balance run.
* Limited because this is done with IRQs disabled.
@@ -15654,7 +24088,38 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
return cpu;
}
/*
-@@ -1198,6 +1239,18 @@
+@@ -745,14 +786,29 @@
+ #endif /* CONFIG_NO_HZ_COMMON */
+
+ #ifdef CONFIG_NO_HZ_FULL
++
++static int ksoftirqd_running(void)
++{
++ struct task_struct *softirqd;
++
++ if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL))
++ return 0;
++ softirqd = this_cpu_ksoftirqd();
++ if (softirqd && softirqd->on_rq)
++ return 1;
++ return 0;
++}
++
+ bool sched_can_stop_tick(void)
+ {
+ /*
+ * More than one running task need preemption.
+ * nr_running update is assumed to be visible
+ * after IPI is sent from wakers.
++ *
++ * NOTE, RT: if ksoftirqd is awake, subtract it.
+ */
+- if (this_rq()->nr_running > 1)
++ if (this_rq()->nr_running - ksoftirqd_running() > 1)
+ return false;
+
+ return true;
+@@ -1198,6 +1254,18 @@
static int migration_cpu_stop(void *data);
@@ -15673,7 +24138,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
/*
* wait_task_inactive - wait for a thread to unschedule.
*
-@@ -1242,7 +1295,7 @@
+@@ -1242,7 +1310,7 @@
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
@@ -15682,7 +24147,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
return 0;
cpu_relax();
}
-@@ -1257,7 +1310,8 @@
+@@ -1257,7 +1325,8 @@
running = task_running(rq, p);
queued = task_on_rq_queued(p);
ncsw = 0;
@@ -15692,7 +24157,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
task_rq_unlock(rq, p, &flags);
-@@ -1482,10 +1536,6 @@
+@@ -1482,10 +1551,6 @@
{
activate_task(rq, p, en_flags);
p->on_rq = TASK_ON_RQ_QUEUED;
@@ -15703,7 +24168,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
}
/*
-@@ -1699,8 +1749,27 @@
+@@ -1699,8 +1764,27 @@
*/
smp_mb__before_spinlock();
raw_spin_lock_irqsave(&p->pi_lock, flags);
@@ -15732,7 +24197,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
success = 1; /* we're going to change ->state */
cpu = task_cpu(p);
-@@ -1743,42 +1812,6 @@
+@@ -1743,42 +1827,6 @@
}
/**
@@ -15775,7 +24240,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
* wake_up_process - Wake up a specific process
* @p: The process to be woken up.
*
-@@ -1792,11 +1825,23 @@
+@@ -1792,11 +1840,23 @@
*/
int wake_up_process(struct task_struct *p)
{
@@ -15800,7 +24265,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
int wake_up_state(struct task_struct *p, unsigned int state)
{
return try_to_wake_up(p, state, 0);
-@@ -1987,6 +2032,9 @@
+@@ -1987,6 +2047,9 @@
p->on_cpu = 0;
#endif
init_task_preempt_count(p);
@@ -15810,7 +24275,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
#ifdef CONFIG_SMP
plist_node_init(&p->pushable_tasks, MAX_PRIO);
RB_CLEAR_NODE(&p->pushable_dl_tasks);
-@@ -2270,8 +2318,12 @@
+@@ -2270,8 +2333,12 @@
finish_arch_post_lock_switch();
fire_sched_in_preempt_notifiers(current);
@@ -15824,7 +24289,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
if (unlikely(prev_state == TASK_DEAD)) {
if (prev->sched_class->task_dead)
prev->sched_class->task_dead(prev);
-@@ -2696,6 +2748,133 @@
+@@ -2696,6 +2763,133 @@
schedstat_inc(this_rq(), sched_count);
}
@@ -15958,7 +24423,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
/*
* Pick up the highest-prio task:
*/
-@@ -2799,6 +2978,8 @@
+@@ -2799,6 +2993,8 @@
smp_mb__before_spinlock();
raw_spin_lock_irq(&rq->lock);
@@ -15967,7 +24432,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
switch_count = &prev->nivcsw;
if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
if (unlikely(signal_pending_state(prev->state, prev))) {
-@@ -2806,19 +2987,6 @@
+@@ -2806,19 +3002,6 @@
} else {
deactivate_task(rq, prev, DEQUEUE_SLEEP);
prev->on_rq = 0;
@@ -15987,7 +24452,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
}
switch_count = &prev->nvcsw;
}
-@@ -2828,6 +2996,7 @@
+@@ -2828,6 +3011,7 @@
next = pick_next_task(rq, prev);
clear_tsk_need_resched(prev);
@@ -15995,7 +24460,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
clear_preempt_need_resched();
rq->skip_clock_update = 0;
-@@ -2857,9 +3026,20 @@
+@@ -2857,9 +3041,20 @@
static inline void sched_submit_work(struct task_struct *tsk)
{
@@ -16017,7 +24482,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
* If we are going to sleep and we have plugged IO queued,
* make sure to submit it to avoid deadlocks.
*/
-@@ -2867,12 +3047,19 @@
+@@ -2867,12 +3062,19 @@
blk_schedule_flush_plug(tsk);
}
@@ -16037,7 +24502,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
}
EXPORT_SYMBOL(schedule);
-@@ -2922,9 +3109,26 @@
+@@ -2922,9 +3124,26 @@
if (likely(!preemptible()))
return;
@@ -16064,7 +24529,16 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
__preempt_count_sub(PREEMPT_ACTIVE);
/*
-@@ -4234,9 +4438,16 @@
+@@ -3097,6 +3316,8 @@
+ } else {
+ if (dl_prio(oldprio))
+ p->dl.dl_boosted = 0;
++ if (rt_prio(oldprio))
++ p->rt.timeout = 0;
+ p->sched_class = &fair_sched_class;
+ }
+
+@@ -4234,9 +4455,16 @@
static void __cond_resched(void)
{
@@ -16084,7 +24558,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
}
int __sched _cond_resched(void)
-@@ -4277,6 +4488,7 @@
+@@ -4277,6 +4505,7 @@
}
EXPORT_SYMBOL(__cond_resched_lock);
@@ -16092,7 +24566,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
int __sched __cond_resched_softirq(void)
{
BUG_ON(!in_softirq());
-@@ -4290,6 +4502,7 @@
+@@ -4290,6 +4519,7 @@
return 0;
}
EXPORT_SYMBOL(__cond_resched_softirq);
@@ -16100,7 +24574,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
/**
* yield - yield the current processor to other threads.
-@@ -4651,7 +4864,9 @@
+@@ -4651,7 +4881,9 @@
/* Set the preempt count _outside_ the spinlocks! */
init_idle_preempt_count(idle, cpu);
@@ -16111,7 +24585,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
/*
* The idle tasks have their own, simple scheduling class:
*/
-@@ -4693,11 +4908,91 @@
+@@ -4693,11 +4925,91 @@
void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
{
@@ -16206,7 +24680,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
}
/*
-@@ -4743,7 +5038,7 @@
+@@ -4743,7 +5055,7 @@
do_set_cpus_allowed(p, new_mask);
/* Can the task run on the task's current CPU? If so, we're done */
@@ -16215,7 +24689,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
goto out;
dest_cpu = cpumask_any_and(cpu_active_mask, new_mask);
-@@ -4883,6 +5178,8 @@
+@@ -4883,6 +5195,8 @@
#ifdef CONFIG_HOTPLUG_CPU
@@ -16224,7 +24698,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
/*
* Ensures that the idle task is using init_mm right before its cpu goes
* offline.
-@@ -4897,7 +5194,11 @@
+@@ -4897,7 +5211,11 @@
switch_mm(mm, &init_mm, current);
finish_arch_post_lock_switch();
}
@@ -16237,7 +24711,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
}
/*
-@@ -5240,6 +5541,10 @@
+@@ -5240,6 +5558,10 @@
case CPU_DEAD:
calc_load_migrate(rq);
@@ -16248,7 +24722,7 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
break;
#endif
}
-@@ -7181,7 +7486,8 @@
+@@ -7181,7 +7503,8 @@
#ifdef CONFIG_DEBUG_ATOMIC_SLEEP
static inline int preempt_count_equals(int preempt_offset)
{
@@ -16258,9 +24732,9 @@ diff -Nur linux-3.18.10.orig/kernel/sched/core.c linux-3.18.10/kernel/sched/core
return (nested == preempt_offset);
}
-diff -Nur linux-3.18.10.orig/kernel/sched/cputime.c linux-3.18.10/kernel/sched/cputime.c
---- linux-3.18.10.orig/kernel/sched/cputime.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/sched/cputime.c 2015-03-26 12:42:18.679588341 +0100
+diff -Nur linux-3.18.12.orig/kernel/sched/cputime.c linux-3.18.12/kernel/sched/cputime.c
+--- linux-3.18.12.orig/kernel/sched/cputime.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/sched/cputime.c 2015-04-26 13:32:22.443684003 -0500
@@ -675,37 +675,45 @@
void vtime_account_system(struct task_struct *tsk)
@@ -16414,9 +24888,9 @@ diff -Nur linux-3.18.10.orig/kernel/sched/cputime.c linux-3.18.10/kernel/sched/c
}
-diff -Nur linux-3.18.10.orig/kernel/sched/deadline.c linux-3.18.10/kernel/sched/deadline.c
---- linux-3.18.10.orig/kernel/sched/deadline.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/sched/deadline.c 2015-03-26 12:42:18.679588341 +0100
+diff -Nur linux-3.18.12.orig/kernel/sched/deadline.c linux-3.18.12/kernel/sched/deadline.c
+--- linux-3.18.12.orig/kernel/sched/deadline.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/sched/deadline.c 2015-04-26 13:32:22.447684003 -0500
@@ -570,6 +570,7 @@
hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -16425,9 +24899,9 @@ diff -Nur linux-3.18.10.orig/kernel/sched/deadline.c linux-3.18.10/kernel/sched/
}
static
-diff -Nur linux-3.18.10.orig/kernel/sched/debug.c linux-3.18.10/kernel/sched/debug.c
---- linux-3.18.10.orig/kernel/sched/debug.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/sched/debug.c 2015-03-26 12:42:18.679588341 +0100
+diff -Nur linux-3.18.12.orig/kernel/sched/debug.c linux-3.18.12/kernel/sched/debug.c
+--- linux-3.18.12.orig/kernel/sched/debug.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/sched/debug.c 2015-04-26 13:32:22.447684003 -0500
@@ -256,6 +256,9 @@
P(rt_throttled);
PN(rt_time);
@@ -16449,9 +24923,9 @@ diff -Nur linux-3.18.10.orig/kernel/sched/debug.c linux-3.18.10/kernel/sched/deb
#undef PN
#undef __PN
#undef P
-diff -Nur linux-3.18.10.orig/kernel/sched/fair.c linux-3.18.10/kernel/sched/fair.c
---- linux-3.18.10.orig/kernel/sched/fair.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/sched/fair.c 2015-03-26 12:42:18.679588341 +0100
+diff -Nur linux-3.18.12.orig/kernel/sched/fair.c linux-3.18.12/kernel/sched/fair.c
+--- linux-3.18.12.orig/kernel/sched/fair.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/sched/fair.c 2015-04-26 13:32:22.447684003 -0500
@@ -2951,7 +2951,7 @@
ideal_runtime = sched_slice(cfs_rq, curr);
delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
@@ -16524,9 +24998,9 @@ diff -Nur linux-3.18.10.orig/kernel/sched/fair.c linux-3.18.10/kernel/sched/fair
} else
check_preempt_curr(rq, p, 0);
}
-diff -Nur linux-3.18.10.orig/kernel/sched/features.h linux-3.18.10/kernel/sched/features.h
---- linux-3.18.10.orig/kernel/sched/features.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/sched/features.h 2015-03-26 12:42:18.679588341 +0100
+diff -Nur linux-3.18.12.orig/kernel/sched/features.h linux-3.18.12/kernel/sched/features.h
+--- linux-3.18.12.orig/kernel/sched/features.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/sched/features.h 2015-04-26 13:32:22.447684003 -0500
@@ -50,12 +50,18 @@
*/
SCHED_FEAT(NONTASK_CAPACITY, true)
@@ -16547,9 +25021,9 @@ diff -Nur linux-3.18.10.orig/kernel/sched/features.h linux-3.18.10/kernel/sched/
SCHED_FEAT(FORCE_SD_OVERLAP, false)
SCHED_FEAT(RT_RUNTIME_SHARE, true)
SCHED_FEAT(LB_MIN, false)
-diff -Nur linux-3.18.10.orig/kernel/sched/Makefile linux-3.18.10/kernel/sched/Makefile
---- linux-3.18.10.orig/kernel/sched/Makefile 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/sched/Makefile 2015-03-26 12:42:18.679588341 +0100
+diff -Nur linux-3.18.12.orig/kernel/sched/Makefile linux-3.18.12/kernel/sched/Makefile
+--- linux-3.18.12.orig/kernel/sched/Makefile 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/sched/Makefile 2015-04-26 13:32:22.443684003 -0500
@@ -13,7 +13,7 @@
obj-y += core.o proc.o clock.o cputime.o
@@ -16559,9 +25033,9 @@ diff -Nur linux-3.18.10.orig/kernel/sched/Makefile linux-3.18.10/kernel/sched/Ma
obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
obj-$(CONFIG_SCHEDSTATS) += stats.o
-diff -Nur linux-3.18.10.orig/kernel/sched/rt.c linux-3.18.10/kernel/sched/rt.c
---- linux-3.18.10.orig/kernel/sched/rt.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/sched/rt.c 2015-03-26 12:42:18.679588341 +0100
+diff -Nur linux-3.18.12.orig/kernel/sched/rt.c linux-3.18.12/kernel/sched/rt.c
+--- linux-3.18.12.orig/kernel/sched/rt.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/sched/rt.c 2015-04-26 13:32:22.447684003 -0500
@@ -43,6 +43,7 @@
hrtimer_init(&rt_b->rt_period_timer,
@@ -16570,9 +25044,9 @@ diff -Nur linux-3.18.10.orig/kernel/sched/rt.c linux-3.18.10/kernel/sched/rt.c
rt_b->rt_period_timer.function = sched_rt_period_timer;
}
-diff -Nur linux-3.18.10.orig/kernel/sched/sched.h linux-3.18.10/kernel/sched/sched.h
---- linux-3.18.10.orig/kernel/sched/sched.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/sched/sched.h 2015-03-26 12:42:18.679588341 +0100
+diff -Nur linux-3.18.12.orig/kernel/sched/sched.h linux-3.18.12/kernel/sched/sched.h
+--- linux-3.18.12.orig/kernel/sched/sched.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/sched/sched.h 2015-04-26 13:32:22.447684003 -0500
@@ -1018,6 +1018,7 @@
#define WF_SYNC 0x01 /* waker goes to sleep after wakeup */
#define WF_FORK 0x02 /* child wakeup after fork */
@@ -16597,9 +25071,9 @@ diff -Nur linux-3.18.10.orig/kernel/sched/sched.h linux-3.18.10/kernel/sched/sch
extern struct rt_bandwidth def_rt_bandwidth;
extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime);
-diff -Nur linux-3.18.10.orig/kernel/sched/wait-simple.c linux-3.18.10/kernel/sched/wait-simple.c
---- linux-3.18.10.orig/kernel/sched/wait-simple.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.10/kernel/sched/wait-simple.c 2015-03-26 12:42:18.679588341 +0100
+diff -Nur linux-3.18.12.orig/kernel/sched/wait-simple.c linux-3.18.12/kernel/sched/wait-simple.c
+--- linux-3.18.12.orig/kernel/sched/wait-simple.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.12/kernel/sched/wait-simple.c 2015-04-26 13:32:22.447684003 -0500
@@ -0,0 +1,115 @@
+/*
+ * Simple waitqueues without fancy flags and callbacks
@@ -16716,9 +25190,9 @@ diff -Nur linux-3.18.10.orig/kernel/sched/wait-simple.c linux-3.18.10/kernel/sch
+ return woken;
+}
+EXPORT_SYMBOL(__swait_wake);
-diff -Nur linux-3.18.10.orig/kernel/sched/work-simple.c linux-3.18.10/kernel/sched/work-simple.c
---- linux-3.18.10.orig/kernel/sched/work-simple.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.10/kernel/sched/work-simple.c 2015-03-26 12:42:18.683588345 +0100
+diff -Nur linux-3.18.12.orig/kernel/sched/work-simple.c linux-3.18.12/kernel/sched/work-simple.c
+--- linux-3.18.12.orig/kernel/sched/work-simple.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.12/kernel/sched/work-simple.c 2015-04-26 13:32:22.447684003 -0500
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
@@ -16892,9 +25366,9 @@ diff -Nur linux-3.18.10.orig/kernel/sched/work-simple.c linux-3.18.10/kernel/sch
+ mutex_unlock(&worker_mutex);
+}
+EXPORT_SYMBOL_GPL(swork_put);
-diff -Nur linux-3.18.10.orig/kernel/signal.c linux-3.18.10/kernel/signal.c
---- linux-3.18.10.orig/kernel/signal.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/signal.c 2015-03-26 12:42:18.683588345 +0100
+diff -Nur linux-3.18.12.orig/kernel/signal.c linux-3.18.12/kernel/signal.c
+--- linux-3.18.12.orig/kernel/signal.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/signal.c 2015-04-26 13:32:22.447684003 -0500
@@ -14,6 +14,7 @@
#include <linux/export.h>
#include <linux/init.h>
@@ -17139,9 +25613,9 @@ diff -Nur linux-3.18.10.orig/kernel/signal.c linux-3.18.10/kernel/signal.c
freezable_schedule();
} else {
/*
-diff -Nur linux-3.18.10.orig/kernel/softirq.c linux-3.18.10/kernel/softirq.c
---- linux-3.18.10.orig/kernel/softirq.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/softirq.c 2015-03-26 12:42:18.683588345 +0100
+diff -Nur linux-3.18.12.orig/kernel/softirq.c linux-3.18.12/kernel/softirq.c
+--- linux-3.18.12.orig/kernel/softirq.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/softirq.c 2015-04-26 13:32:22.451684003 -0500
@@ -21,10 +21,12 @@
#include <linux/freezer.h>
#include <linux/kthread.h>
@@ -18033,9 +26507,9 @@ diff -Nur linux-3.18.10.orig/kernel/softirq.c linux-3.18.10/kernel/softirq.c
.thread_should_run = ksoftirqd_should_run,
.thread_fn = run_ksoftirqd,
.thread_comm = "ksoftirqd/%u",
-diff -Nur linux-3.18.10.orig/kernel/stop_machine.c linux-3.18.10/kernel/stop_machine.c
---- linux-3.18.10.orig/kernel/stop_machine.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/stop_machine.c 2015-03-26 12:42:18.683588345 +0100
+diff -Nur linux-3.18.12.orig/kernel/stop_machine.c linux-3.18.12/kernel/stop_machine.c
+--- linux-3.18.12.orig/kernel/stop_machine.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/stop_machine.c 2015-04-26 13:32:22.451684003 -0500
@@ -30,12 +30,12 @@
atomic_t nr_todo; /* nr left to execute */
bool executed; /* actually executed? */
@@ -18296,9 +26770,9 @@ diff -Nur linux-3.18.10.orig/kernel/stop_machine.c linux-3.18.10/kernel/stop_mac
cpu_relax();
mutex_unlock(&stop_cpus_mutex);
-diff -Nur linux-3.18.10.orig/kernel/time/hrtimer.c linux-3.18.10/kernel/time/hrtimer.c
---- linux-3.18.10.orig/kernel/time/hrtimer.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/time/hrtimer.c 2015-03-26 12:42:18.683588345 +0100
+diff -Nur linux-3.18.12.orig/kernel/time/hrtimer.c linux-3.18.12/kernel/time/hrtimer.c
+--- linux-3.18.12.orig/kernel/time/hrtimer.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/time/hrtimer.c 2015-04-26 13:32:22.451684003 -0500
@@ -48,11 +48,13 @@
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
@@ -18903,9 +27377,9 @@ diff -Nur linux-3.18.10.orig/kernel/time/hrtimer.c linux-3.18.10/kernel/time/hrt
}
/**
-diff -Nur linux-3.18.10.orig/kernel/time/itimer.c linux-3.18.10/kernel/time/itimer.c
---- linux-3.18.10.orig/kernel/time/itimer.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/time/itimer.c 2015-03-26 12:42:18.683588345 +0100
+diff -Nur linux-3.18.12.orig/kernel/time/itimer.c linux-3.18.12/kernel/time/itimer.c
+--- linux-3.18.12.orig/kernel/time/itimer.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/time/itimer.c 2015-04-26 13:32:22.451684003 -0500
@@ -213,6 +213,7 @@
/* We are sharing ->siglock with it_real_fn() */
if (hrtimer_try_to_cancel(timer) < 0) {
@@ -18914,9 +27388,9 @@ diff -Nur linux-3.18.10.orig/kernel/time/itimer.c linux-3.18.10/kernel/time/itim
goto again;
}
expires = timeval_to_ktime(value->it_value);
-diff -Nur linux-3.18.10.orig/kernel/time/jiffies.c linux-3.18.10/kernel/time/jiffies.c
---- linux-3.18.10.orig/kernel/time/jiffies.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/time/jiffies.c 2015-03-26 12:42:18.683588345 +0100
+diff -Nur linux-3.18.12.orig/kernel/time/jiffies.c linux-3.18.12/kernel/time/jiffies.c
+--- linux-3.18.12.orig/kernel/time/jiffies.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/time/jiffies.c 2015-04-26 13:32:22.451684003 -0500
@@ -73,7 +73,8 @@
.shift = JIFFIES_SHIFT,
};
@@ -18939,9 +27413,9 @@ diff -Nur linux-3.18.10.orig/kernel/time/jiffies.c linux-3.18.10/kernel/time/jif
return ret;
}
EXPORT_SYMBOL(get_jiffies_64);
-diff -Nur linux-3.18.10.orig/kernel/time/ntp.c linux-3.18.10/kernel/time/ntp.c
---- linux-3.18.10.orig/kernel/time/ntp.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/time/ntp.c 2015-03-26 12:42:18.683588345 +0100
+diff -Nur linux-3.18.12.orig/kernel/time/ntp.c linux-3.18.12/kernel/time/ntp.c
+--- linux-3.18.12.orig/kernel/time/ntp.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/time/ntp.c 2015-04-26 13:32:22.451684003 -0500
@@ -10,6 +10,7 @@
#include <linux/workqueue.h>
#include <linux/hrtimer.h>
@@ -19003,9 +27477,9 @@ diff -Nur linux-3.18.10.orig/kernel/time/ntp.c linux-3.18.10/kernel/time/ntp.c
#else
void ntp_notify_cmos_timer(void) { }
-diff -Nur linux-3.18.10.orig/kernel/time/posix-cpu-timers.c linux-3.18.10/kernel/time/posix-cpu-timers.c
---- linux-3.18.10.orig/kernel/time/posix-cpu-timers.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/time/posix-cpu-timers.c 2015-03-26 12:42:18.683588345 +0100
+diff -Nur linux-3.18.12.orig/kernel/time/posix-cpu-timers.c linux-3.18.12/kernel/time/posix-cpu-timers.c
+--- linux-3.18.12.orig/kernel/time/posix-cpu-timers.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/time/posix-cpu-timers.c 2015-04-26 13:32:22.451684003 -0500
@@ -3,6 +3,7 @@
*/
@@ -19253,9 +27727,9 @@ diff -Nur linux-3.18.10.orig/kernel/time/posix-cpu-timers.c linux-3.18.10/kernel
/*
* Set one of the process-wide special case CPU timers or RLIMIT_CPU.
* The tsk->sighand->siglock must be held by the caller.
-diff -Nur linux-3.18.10.orig/kernel/time/posix-timers.c linux-3.18.10/kernel/time/posix-timers.c
---- linux-3.18.10.orig/kernel/time/posix-timers.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/time/posix-timers.c 2015-03-26 12:42:18.683588345 +0100
+diff -Nur linux-3.18.12.orig/kernel/time/posix-timers.c linux-3.18.12/kernel/time/posix-timers.c
+--- linux-3.18.12.orig/kernel/time/posix-timers.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/time/posix-timers.c 2015-04-26 13:32:22.451684003 -0500
@@ -499,6 +499,7 @@
static struct pid *good_sigevent(sigevent_t * event)
{
@@ -19351,9 +27825,9 @@ diff -Nur linux-3.18.10.orig/kernel/time/posix-timers.c linux-3.18.10/kernel/tim
goto retry_delete;
}
list_del(&timer->list);
-diff -Nur linux-3.18.10.orig/kernel/time/tick-common.c linux-3.18.10/kernel/time/tick-common.c
---- linux-3.18.10.orig/kernel/time/tick-common.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/time/tick-common.c 2015-03-26 12:42:18.683588345 +0100
+diff -Nur linux-3.18.12.orig/kernel/time/tick-common.c linux-3.18.12/kernel/time/tick-common.c
+--- linux-3.18.12.orig/kernel/time/tick-common.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/time/tick-common.c 2015-04-26 13:32:22.451684003 -0500
@@ -78,13 +78,15 @@
static void tick_periodic(int cpu)
{
@@ -19384,9 +27858,9 @@ diff -Nur linux-3.18.10.orig/kernel/time/tick-common.c linux-3.18.10/kernel/time
clockevents_set_mode(dev, CLOCK_EVT_MODE_ONESHOT);
-diff -Nur linux-3.18.10.orig/kernel/time/tick-internal.h linux-3.18.10/kernel/time/tick-internal.h
---- linux-3.18.10.orig/kernel/time/tick-internal.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/time/tick-internal.h 2015-03-26 12:42:18.683588345 +0100
+diff -Nur linux-3.18.12.orig/kernel/time/tick-internal.h linux-3.18.12/kernel/time/tick-internal.h
+--- linux-3.18.12.orig/kernel/time/tick-internal.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/time/tick-internal.h 2015-04-26 13:32:22.451684003 -0500
@@ -6,7 +6,8 @@
#include "timekeeping.h"
@@ -19397,9 +27871,9 @@ diff -Nur linux-3.18.10.orig/kernel/time/tick-internal.h linux-3.18.10/kernel/ti
#define CS_NAME_LEN 32
-diff -Nur linux-3.18.10.orig/kernel/time/tick-sched.c linux-3.18.10/kernel/time/tick-sched.c
---- linux-3.18.10.orig/kernel/time/tick-sched.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/time/tick-sched.c 2015-03-26 12:42:18.683588345 +0100
+diff -Nur linux-3.18.12.orig/kernel/time/tick-sched.c linux-3.18.12/kernel/time/tick-sched.c
+--- linux-3.18.12.orig/kernel/time/tick-sched.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/time/tick-sched.c 2015-04-26 13:32:22.451684003 -0500
@@ -62,7 +62,8 @@
return;
@@ -19442,7 +27916,29 @@ diff -Nur linux-3.18.10.orig/kernel/time/tick-sched.c linux-3.18.10/kernel/time/
return period;
}
-@@ -222,6 +227,7 @@
+@@ -176,6 +181,11 @@
+ return false;
+ }
+
++ if (!arch_irq_work_has_interrupt()) {
++ trace_tick_stop(0, "missing irq work interrupt\n");
++ return false;
++ }
++
+ /* sched_clock_tick() needs us? */
+ #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
+ /*
+@@ -217,11 +227,17 @@
+
+ static void nohz_full_kick_work_func(struct irq_work *work)
+ {
++ unsigned long flags;
++
++ /* ksoftirqd processes sirqs with interrupts enabled */
++ local_irq_save(flags);
+ __tick_nohz_full_check();
++ local_irq_restore(flags);
+ }
static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = {
.func = nohz_full_kick_work_func,
@@ -19450,7 +27946,7 @@ diff -Nur linux-3.18.10.orig/kernel/time/tick-sched.c linux-3.18.10/kernel/time/
};
/*
-@@ -580,10 +586,10 @@
+@@ -580,10 +596,10 @@
/* Read jiffies and the time when jiffies were updated last */
do {
@@ -19463,7 +27959,7 @@ diff -Nur linux-3.18.10.orig/kernel/time/tick-sched.c linux-3.18.10/kernel/time/
if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) ||
arch_needs_cpu() || irq_work_needs_cpu()) {
-@@ -761,14 +767,7 @@
+@@ -761,14 +777,7 @@
return false;
if (unlikely(local_softirq_pending() && cpu_online(cpu))) {
@@ -19479,7 +27975,7 @@ diff -Nur linux-3.18.10.orig/kernel/time/tick-sched.c linux-3.18.10/kernel/time/
return false;
}
-@@ -1156,6 +1155,7 @@
+@@ -1156,6 +1165,7 @@
* Emulate tick processing via per-CPU hrtimers:
*/
hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
@@ -19487,9 +27983,9 @@ diff -Nur linux-3.18.10.orig/kernel/time/tick-sched.c linux-3.18.10/kernel/time/
ts->sched_timer.function = tick_sched_timer;
/* Get the next period (per cpu) */
-diff -Nur linux-3.18.10.orig/kernel/time/timekeeping.c linux-3.18.10/kernel/time/timekeeping.c
---- linux-3.18.10.orig/kernel/time/timekeeping.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/time/timekeeping.c 2015-03-26 12:42:18.683588345 +0100
+diff -Nur linux-3.18.12.orig/kernel/time/timekeeping.c linux-3.18.12/kernel/time/timekeeping.c
+--- linux-3.18.12.orig/kernel/time/timekeeping.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/time/timekeeping.c 2015-04-26 13:32:22.451684003 -0500
@@ -1814,8 +1814,10 @@
*/
void xtime_update(unsigned long ticks)
@@ -19503,9 +27999,9 @@ diff -Nur linux-3.18.10.orig/kernel/time/timekeeping.c linux-3.18.10/kernel/time
+ raw_spin_unlock(&jiffies_lock);
update_wall_time();
}
-diff -Nur linux-3.18.10.orig/kernel/time/timer.c linux-3.18.10/kernel/time/timer.c
---- linux-3.18.10.orig/kernel/time/timer.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/time/timer.c 2015-03-26 12:42:18.683588345 +0100
+diff -Nur linux-3.18.12.orig/kernel/time/timer.c linux-3.18.12/kernel/time/timer.c
+--- linux-3.18.12.orig/kernel/time/timer.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/time/timer.c 2015-04-26 13:32:22.455684003 -0500
@@ -78,6 +78,9 @@
struct tvec_base {
spinlock_t lock;
@@ -19677,9 +28173,12 @@ diff -Nur linux-3.18.10.orig/kernel/time/timer.c linux-3.18.10/kernel/time/timer
+ scheduler_tick();
run_local_timers();
rcu_check_callbacks(cpu, user_tick);
- #ifdef CONFIG_IRQ_WORK
- if (in_irq())
- irq_work_tick();
+-#ifdef CONFIG_IRQ_WORK
+- if (in_irq())
+- irq_work_tick();
++
++#if defined(CONFIG_IRQ_WORK) && !defined(CONFIG_PREEMPT_RT_FULL)
++ irq_work_tick();
#endif
- scheduler_tick();
run_posix_cpu_timers(p);
@@ -19724,9 +28223,9 @@ diff -Nur linux-3.18.10.orig/kernel/time/timer.c linux-3.18.10/kernel/time/timer
}
#endif /* CONFIG_HOTPLUG_CPU */
-diff -Nur linux-3.18.10.orig/kernel/trace/Kconfig linux-3.18.10/kernel/trace/Kconfig
---- linux-3.18.10.orig/kernel/trace/Kconfig 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/trace/Kconfig 2015-03-26 12:42:18.683588345 +0100
+diff -Nur linux-3.18.12.orig/kernel/trace/Kconfig linux-3.18.12/kernel/trace/Kconfig
+--- linux-3.18.12.orig/kernel/trace/Kconfig 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/trace/Kconfig 2015-04-26 13:32:22.455684003 -0500
@@ -187,6 +187,24 @@
enabled. This option and the preempt-off timing option can be
used together or separately.)
@@ -19852,9 +28351,9 @@ diff -Nur linux-3.18.10.orig/kernel/trace/Kconfig linux-3.18.10/kernel/trace/Kco
config ENABLE_DEFAULT_TRACERS
bool "Trace process context switches and events"
depends on !GENERIC_TRACER
-diff -Nur linux-3.18.10.orig/kernel/trace/latency_hist.c linux-3.18.10/kernel/trace/latency_hist.c
---- linux-3.18.10.orig/kernel/trace/latency_hist.c 1970-01-01 01:00:00.000000000 +0100
-+++ linux-3.18.10/kernel/trace/latency_hist.c 2015-03-26 12:42:18.683588345 +0100
+diff -Nur linux-3.18.12.orig/kernel/trace/latency_hist.c linux-3.18.12/kernel/trace/latency_hist.c
+--- linux-3.18.12.orig/kernel/trace/latency_hist.c 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.12/kernel/trace/latency_hist.c 2015-04-26 13:32:22.455684003 -0500
@@ -0,0 +1,1178 @@
+/*
+ * kernel/trace/latency_hist.c
@@ -21034,9 +29533,9 @@ diff -Nur linux-3.18.10.orig/kernel/trace/latency_hist.c linux-3.18.10/kernel/tr
+}
+
+device_initcall(latency_hist_init);
-diff -Nur linux-3.18.10.orig/kernel/trace/Makefile linux-3.18.10/kernel/trace/Makefile
---- linux-3.18.10.orig/kernel/trace/Makefile 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/trace/Makefile 2015-03-26 12:42:18.683588345 +0100
+diff -Nur linux-3.18.12.orig/kernel/trace/Makefile linux-3.18.12/kernel/trace/Makefile
+--- linux-3.18.12.orig/kernel/trace/Makefile 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/trace/Makefile 2015-04-26 13:32:22.455684003 -0500
@@ -36,6 +36,10 @@
obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
@@ -21048,9 +29547,9 @@ diff -Nur linux-3.18.10.orig/kernel/trace/Makefile linux-3.18.10/kernel/trace/Ma
obj-$(CONFIG_NOP_TRACER) += trace_nop.o
obj-$(CONFIG_STACK_TRACER) += trace_stack.o
obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
-diff -Nur linux-3.18.10.orig/kernel/trace/trace.c linux-3.18.10/kernel/trace/trace.c
---- linux-3.18.10.orig/kernel/trace/trace.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/trace/trace.c 2015-03-26 12:42:18.687588350 +0100
+diff -Nur linux-3.18.12.orig/kernel/trace/trace.c linux-3.18.12/kernel/trace/trace.c
+--- linux-3.18.12.orig/kernel/trace/trace.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/trace/trace.c 2015-04-26 13:32:22.455684003 -0500
@@ -1579,6 +1579,7 @@
struct task_struct *tsk = current;
@@ -21122,9 +29621,9 @@ diff -Nur linux-3.18.10.orig/kernel/trace/trace.c linux-3.18.10/kernel/trace/tra
}
void
-diff -Nur linux-3.18.10.orig/kernel/trace/trace_events.c linux-3.18.10/kernel/trace/trace_events.c
---- linux-3.18.10.orig/kernel/trace/trace_events.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/trace/trace_events.c 2015-03-26 12:42:18.687588350 +0100
+diff -Nur linux-3.18.12.orig/kernel/trace/trace_events.c linux-3.18.12/kernel/trace/trace_events.c
+--- linux-3.18.12.orig/kernel/trace/trace_events.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/trace/trace_events.c 2015-04-26 13:32:22.455684003 -0500
@@ -162,6 +162,8 @@
__common_field(unsigned char, flags);
__common_field(unsigned char, preempt_count);
@@ -21134,9 +29633,9 @@ diff -Nur linux-3.18.10.orig/kernel/trace/trace_events.c linux-3.18.10/kernel/tr
return ret;
}
-diff -Nur linux-3.18.10.orig/kernel/trace/trace.h linux-3.18.10/kernel/trace/trace.h
---- linux-3.18.10.orig/kernel/trace/trace.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/trace/trace.h 2015-03-26 12:42:18.687588350 +0100
+diff -Nur linux-3.18.12.orig/kernel/trace/trace.h linux-3.18.12/kernel/trace/trace.h
+--- linux-3.18.12.orig/kernel/trace/trace.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/trace/trace.h 2015-04-26 13:32:22.455684003 -0500
@@ -119,6 +119,7 @@
* NEED_RESCHED - reschedule is requested
* HARDIRQ - inside an interrupt handler
@@ -21153,9 +29652,9 @@ diff -Nur linux-3.18.10.orig/kernel/trace/trace.h linux-3.18.10/kernel/trace/tra
};
#define TRACE_BUF_SIZE 1024
-diff -Nur linux-3.18.10.orig/kernel/trace/trace_irqsoff.c linux-3.18.10/kernel/trace/trace_irqsoff.c
---- linux-3.18.10.orig/kernel/trace/trace_irqsoff.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/trace/trace_irqsoff.c 2015-03-26 12:42:18.687588350 +0100
+diff -Nur linux-3.18.12.orig/kernel/trace/trace_irqsoff.c linux-3.18.12/kernel/trace/trace_irqsoff.c
+--- linux-3.18.12.orig/kernel/trace/trace_irqsoff.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/trace/trace_irqsoff.c 2015-04-26 13:32:22.455684003 -0500
@@ -17,6 +17,7 @@
#include <linux/fs.h>
@@ -21239,9 +29738,9 @@ diff -Nur linux-3.18.10.orig/kernel/trace/trace_irqsoff.c linux-3.18.10/kernel/t
if (preempt_trace() && !irq_trace())
start_critical_timing(a0, a1);
}
-diff -Nur linux-3.18.10.orig/kernel/trace/trace_output.c linux-3.18.10/kernel/trace/trace_output.c
---- linux-3.18.10.orig/kernel/trace/trace_output.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/trace/trace_output.c 2015-03-26 12:42:18.687588350 +0100
+diff -Nur linux-3.18.12.orig/kernel/trace/trace_output.c linux-3.18.12/kernel/trace/trace_output.c
+--- linux-3.18.12.orig/kernel/trace/trace_output.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/trace/trace_output.c 2015-04-26 13:32:22.455684003 -0500
@@ -410,6 +410,7 @@
{
char hardsoft_irq;
@@ -21288,9 +29787,9 @@ diff -Nur linux-3.18.10.orig/kernel/trace/trace_output.c linux-3.18.10/kernel/tr
return ret;
}
-diff -Nur linux-3.18.10.orig/kernel/user.c linux-3.18.10/kernel/user.c
---- linux-3.18.10.orig/kernel/user.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/user.c 2015-03-26 12:42:18.687588350 +0100
+diff -Nur linux-3.18.12.orig/kernel/user.c linux-3.18.12/kernel/user.c
+--- linux-3.18.12.orig/kernel/user.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/user.c 2015-04-26 13:32:22.455684003 -0500
@@ -158,11 +158,11 @@
if (!up)
return;
@@ -21305,9 +29804,9 @@ diff -Nur linux-3.18.10.orig/kernel/user.c linux-3.18.10/kernel/user.c
}
struct user_struct *alloc_uid(kuid_t uid)
-diff -Nur linux-3.18.10.orig/kernel/watchdog.c linux-3.18.10/kernel/watchdog.c
---- linux-3.18.10.orig/kernel/watchdog.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/watchdog.c 2015-03-26 12:42:18.687588350 +0100
+diff -Nur linux-3.18.12.orig/kernel/watchdog.c linux-3.18.12/kernel/watchdog.c
+--- linux-3.18.12.orig/kernel/watchdog.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/watchdog.c 2015-04-26 13:32:22.459684003 -0500
@@ -248,6 +248,8 @@
#ifdef CONFIG_HARDLOCKUP_DETECTOR
@@ -21349,9 +29848,9 @@ diff -Nur linux-3.18.10.orig/kernel/watchdog.c linux-3.18.10/kernel/watchdog.c
/* Enable the perf event */
watchdog_nmi_enable(cpu);
-diff -Nur linux-3.18.10.orig/kernel/workqueue.c linux-3.18.10/kernel/workqueue.c
---- linux-3.18.10.orig/kernel/workqueue.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/workqueue.c 2015-03-26 12:42:18.687588350 +0100
+diff -Nur linux-3.18.12.orig/kernel/workqueue.c linux-3.18.12/kernel/workqueue.c
+--- linux-3.18.12.orig/kernel/workqueue.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/workqueue.c 2015-04-26 13:32:22.459684003 -0500
@@ -48,6 +48,8 @@
#include <linux/nodemask.h>
#include <linux/moduleparam.h>
@@ -21808,7 +30307,7 @@ diff -Nur linux-3.18.10.orig/kernel/workqueue.c linux-3.18.10/kernel/workqueue.c
return false;
}
-@@ -2727,7 +2763,7 @@
+@@ -2765,7 +2801,7 @@
/* tell other tasks trying to grab @work to back off */
mark_work_canceling(work);
@@ -21817,7 +30316,7 @@ diff -Nur linux-3.18.10.orig/kernel/workqueue.c linux-3.18.10/kernel/workqueue.c
flush_work(work);
clear_work_data(work);
-@@ -2772,10 +2808,10 @@
+@@ -2820,10 +2856,10 @@
*/
bool flush_delayed_work(struct delayed_work *dwork)
{
@@ -21830,7 +30329,7 @@ diff -Nur linux-3.18.10.orig/kernel/workqueue.c linux-3.18.10/kernel/workqueue.c
return flush_work(&dwork->work);
}
EXPORT_SYMBOL(flush_delayed_work);
-@@ -2810,7 +2846,7 @@
+@@ -2858,7 +2894,7 @@
set_work_pool_and_clear_pending(&dwork->work,
get_work_pool_id(&dwork->work));
@@ -21839,7 +30338,7 @@ diff -Nur linux-3.18.10.orig/kernel/workqueue.c linux-3.18.10/kernel/workqueue.c
return ret;
}
EXPORT_SYMBOL(cancel_delayed_work);
-@@ -2996,7 +3032,8 @@
+@@ -3044,7 +3080,8 @@
const char *delim = "";
int node, written = 0;
@@ -21849,7 +30348,7 @@ diff -Nur linux-3.18.10.orig/kernel/workqueue.c linux-3.18.10/kernel/workqueue.c
for_each_node(node) {
written += scnprintf(buf + written, PAGE_SIZE - written,
"%s%d:%d", delim, node,
-@@ -3004,7 +3041,8 @@
+@@ -3052,7 +3089,8 @@
delim = " ";
}
written += scnprintf(buf + written, PAGE_SIZE - written, "\n");
@@ -21859,7 +30358,7 @@ diff -Nur linux-3.18.10.orig/kernel/workqueue.c linux-3.18.10/kernel/workqueue.c
return written;
}
-@@ -3372,7 +3410,7 @@
+@@ -3420,7 +3458,7 @@
* put_unbound_pool - put a worker_pool
* @pool: worker_pool to put
*
@@ -21868,7 +30367,7 @@ diff -Nur linux-3.18.10.orig/kernel/workqueue.c linux-3.18.10/kernel/workqueue.c
* safe manner. get_unbound_pool() calls this function on its failure path
* and this function should be able to release pools which went through,
* successfully or not, init_worker_pool().
-@@ -3426,8 +3464,8 @@
+@@ -3474,8 +3512,8 @@
del_timer_sync(&pool->idle_timer);
del_timer_sync(&pool->mayday_timer);
@@ -21879,7 +30378,7 @@ diff -Nur linux-3.18.10.orig/kernel/workqueue.c linux-3.18.10/kernel/workqueue.c
}
/**
-@@ -3532,7 +3570,7 @@
+@@ -3580,7 +3618,7 @@
put_unbound_pool(pool);
mutex_unlock(&wq_pool_mutex);
@@ -21888,7 +30387,7 @@ diff -Nur linux-3.18.10.orig/kernel/workqueue.c linux-3.18.10/kernel/workqueue.c
/*
* If we're the last pwq going away, @wq is already dead and no one
-@@ -4244,7 +4282,8 @@
+@@ -4292,7 +4330,8 @@
struct pool_workqueue *pwq;
bool ret;
@@ -21898,7 +30397,7 @@ diff -Nur linux-3.18.10.orig/kernel/workqueue.c linux-3.18.10/kernel/workqueue.c
if (cpu == WORK_CPU_UNBOUND)
cpu = smp_processor_id();
-@@ -4255,7 +4294,8 @@
+@@ -4303,7 +4342,8 @@
pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu));
ret = !list_empty(&pwq->delayed_works);
@@ -21908,7 +30407,7 @@ diff -Nur linux-3.18.10.orig/kernel/workqueue.c linux-3.18.10/kernel/workqueue.c
return ret;
}
-@@ -4281,16 +4321,15 @@
+@@ -4329,16 +4369,15 @@
if (work_pending(work))
ret |= WORK_BUSY_PENDING;
@@ -21929,7 +30428,7 @@ diff -Nur linux-3.18.10.orig/kernel/workqueue.c linux-3.18.10/kernel/workqueue.c
return ret;
}
EXPORT_SYMBOL_GPL(work_busy);
-@@ -4719,16 +4758,16 @@
+@@ -4767,16 +4806,16 @@
* nr_active is monotonically decreasing. It's safe
* to peek without lock.
*/
@@ -21949,9 +30448,9 @@ diff -Nur linux-3.18.10.orig/kernel/workqueue.c linux-3.18.10/kernel/workqueue.c
}
out_unlock:
mutex_unlock(&wq_pool_mutex);
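
The workqueue hunks above, like the icmp, swap and vmstat hunks further down, mostly apply one recurring RT conversion: sections that used local_irq_save()/local_irq_disable() as a cheap guard for per-CPU state take a per-CPU lock instead, so they stay preemptible on RT. A minimal user-space model of the idea, one pthread mutex per CPU slot; the names and the sched_getcpu() indexing are illustrative only, and unlike the kernel's local locks this sketch does not keep the task from migrating:

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

#define NR_CPU_SLOTS 64

/* One lock per CPU slot: exclusion for per-CPU data without
 * disabling interrupts or preemption, which is the point on RT. */
static pthread_mutex_t cpu_lock[NR_CPU_SLOTS];
static long per_cpu_counter[NR_CPU_SLOTS];

static int local_lock_model(void)
{
    int cpu = sched_getcpu();

    if (cpu < 0)
        cpu = 0;
    cpu %= NR_CPU_SLOTS;
    pthread_mutex_lock(&cpu_lock[cpu]);
    return cpu;
}

static void local_unlock_model(int cpu)
{
    pthread_mutex_unlock(&cpu_lock[cpu]);
}

int main(void)
{
    for (int i = 0; i < NR_CPU_SLOTS; i++)
        pthread_mutex_init(&cpu_lock[i], NULL);

    int cpu = local_lock_model();   /* was: local_irq_save(flags) */
    per_cpu_counter[cpu]++;         /* touch per-CPU state */
    local_unlock_model(cpu);        /* was: local_irq_restore(flags) */

    printf("slot %d count %ld\n", cpu, per_cpu_counter[cpu]);
    return 0;
}
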
-diff -Nur linux-3.18.10.orig/kernel/workqueue_internal.h linux-3.18.10/kernel/workqueue_internal.h
---- linux-3.18.10.orig/kernel/workqueue_internal.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/kernel/workqueue_internal.h 2015-03-26 12:42:18.687588350 +0100
+diff -Nur linux-3.18.12.orig/kernel/workqueue_internal.h linux-3.18.12/kernel/workqueue_internal.h
+--- linux-3.18.12.orig/kernel/workqueue_internal.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/kernel/workqueue_internal.h 2015-04-26 13:32:22.459684003 -0500
@@ -43,6 +43,7 @@
unsigned long last_active; /* L: last active timestamp */
unsigned int flags; /* X: flags */
@@ -21970,9 +30469,9 @@ diff -Nur linux-3.18.10.orig/kernel/workqueue_internal.h linux-3.18.10/kernel/wo
+void wq_worker_sleeping(struct task_struct *task);
#endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
-diff -Nur linux-3.18.10.orig/lib/debugobjects.c linux-3.18.10/lib/debugobjects.c
---- linux-3.18.10.orig/lib/debugobjects.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/lib/debugobjects.c 2015-03-26 12:42:18.687588350 +0100
+diff -Nur linux-3.18.12.orig/lib/debugobjects.c linux-3.18.12/lib/debugobjects.c
+--- linux-3.18.12.orig/lib/debugobjects.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/lib/debugobjects.c 2015-04-26 13:32:22.459684003 -0500
@@ -309,7 +309,10 @@
struct debug_obj *obj;
unsigned long flags;
@@ -21985,9 +30484,9 @@ diff -Nur linux-3.18.10.orig/lib/debugobjects.c linux-3.18.10/lib/debugobjects.c
db = get_bucket((unsigned long) addr);
-diff -Nur linux-3.18.10.orig/lib/idr.c linux-3.18.10/lib/idr.c
---- linux-3.18.10.orig/lib/idr.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/lib/idr.c 2015-03-26 12:42:18.687588350 +0100
+diff -Nur linux-3.18.12.orig/lib/idr.c linux-3.18.12/lib/idr.c
+--- linux-3.18.12.orig/lib/idr.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/lib/idr.c 2015-04-26 13:32:22.459684003 -0500
@@ -31,6 +31,7 @@
#include <linux/spinlock.h>
#include <linux/percpu.h>
@@ -22053,9 +30552,9 @@ diff -Nur linux-3.18.10.orig/lib/idr.c linux-3.18.10/lib/idr.c
if (!new)
break;
-diff -Nur linux-3.18.10.orig/lib/Kconfig linux-3.18.10/lib/Kconfig
---- linux-3.18.10.orig/lib/Kconfig 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/lib/Kconfig 2015-03-26 12:42:18.687588350 +0100
+diff -Nur linux-3.18.12.orig/lib/Kconfig linux-3.18.12/lib/Kconfig
+--- linux-3.18.12.orig/lib/Kconfig 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/lib/Kconfig 2015-04-26 13:32:22.459684003 -0500
@@ -383,6 +383,7 @@
config CPUMASK_OFFSTACK
@@ -22064,9 +30563,9 @@ diff -Nur linux-3.18.10.orig/lib/Kconfig linux-3.18.10/lib/Kconfig
help
Use dynamic allocation for cpumask_var_t, instead of putting
them on the stack. This is a bit more expensive, but avoids
-diff -Nur linux-3.18.10.orig/lib/Kconfig.debug linux-3.18.10/lib/Kconfig.debug
---- linux-3.18.10.orig/lib/Kconfig.debug 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/lib/Kconfig.debug 2015-03-26 12:42:18.687588350 +0100
+diff -Nur linux-3.18.12.orig/lib/Kconfig.debug linux-3.18.12/lib/Kconfig.debug
+--- linux-3.18.12.orig/lib/Kconfig.debug 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/lib/Kconfig.debug 2015-04-26 13:32:22.459684003 -0500
@@ -639,7 +639,7 @@
config DEBUG_SHIRQ
@@ -22076,9 +30575,9 @@ diff -Nur linux-3.18.10.orig/lib/Kconfig.debug linux-3.18.10/lib/Kconfig.debug
help
Enable this to generate a spurious interrupt as soon as a shared
interrupt handler is registered, and just before one is deregistered.
-diff -Nur linux-3.18.10.orig/lib/locking-selftest.c linux-3.18.10/lib/locking-selftest.c
---- linux-3.18.10.orig/lib/locking-selftest.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/lib/locking-selftest.c 2015-03-26 12:42:18.687588350 +0100
+diff -Nur linux-3.18.12.orig/lib/locking-selftest.c linux-3.18.12/lib/locking-selftest.c
+--- linux-3.18.12.orig/lib/locking-selftest.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/lib/locking-selftest.c 2015-04-26 13:32:22.459684003 -0500
@@ -590,6 +590,8 @@
#include "locking-selftest-spin-hardirq.h"
GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin)
@@ -22227,9 +30726,9 @@ diff -Nur linux-3.18.10.orig/lib/locking-selftest.c linux-3.18.10/lib/locking-se
ww_tests();
-diff -Nur linux-3.18.10.orig/lib/percpu_ida.c linux-3.18.10/lib/percpu_ida.c
---- linux-3.18.10.orig/lib/percpu_ida.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/lib/percpu_ida.c 2015-03-26 12:42:18.687588350 +0100
+diff -Nur linux-3.18.12.orig/lib/percpu_ida.c linux-3.18.12/lib/percpu_ida.c
+--- linux-3.18.12.orig/lib/percpu_ida.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/lib/percpu_ida.c 2015-04-26 13:32:22.459684003 -0500
@@ -29,6 +29,9 @@
#include <linux/string.h>
#include <linux/spinlock.h>
@@ -22318,9 +30817,9 @@ diff -Nur linux-3.18.10.orig/lib/percpu_ida.c linux-3.18.10/lib/percpu_ida.c
return err;
}
EXPORT_SYMBOL_GPL(percpu_ida_for_each_free);
-diff -Nur linux-3.18.10.orig/lib/radix-tree.c linux-3.18.10/lib/radix-tree.c
---- linux-3.18.10.orig/lib/radix-tree.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/lib/radix-tree.c 2015-03-26 12:42:18.687588350 +0100
+diff -Nur linux-3.18.12.orig/lib/radix-tree.c linux-3.18.12/lib/radix-tree.c
+--- linux-3.18.12.orig/lib/radix-tree.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/lib/radix-tree.c 2015-04-26 13:32:22.459684003 -0500
@@ -195,12 +195,13 @@
* succeed in getting a node here (and never reach
* kmem_cache_alloc)
@@ -22352,9 +30851,9 @@ diff -Nur linux-3.18.10.orig/lib/radix-tree.c linux-3.18.10/lib/radix-tree.c
/*
* Return the maximum key which can be store into a
-diff -Nur linux-3.18.10.orig/lib/scatterlist.c linux-3.18.10/lib/scatterlist.c
---- linux-3.18.10.orig/lib/scatterlist.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/lib/scatterlist.c 2015-03-26 12:42:18.687588350 +0100
+diff -Nur linux-3.18.12.orig/lib/scatterlist.c linux-3.18.12/lib/scatterlist.c
+--- linux-3.18.12.orig/lib/scatterlist.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/lib/scatterlist.c 2015-04-26 13:32:22.459684003 -0500
@@ -592,7 +592,7 @@
flush_kernel_dcache_page(miter->page);
@@ -22382,9 +30881,9 @@ diff -Nur linux-3.18.10.orig/lib/scatterlist.c linux-3.18.10/lib/scatterlist.c
return offset;
}
-diff -Nur linux-3.18.10.orig/lib/smp_processor_id.c linux-3.18.10/lib/smp_processor_id.c
---- linux-3.18.10.orig/lib/smp_processor_id.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/lib/smp_processor_id.c 2015-03-26 12:42:18.687588350 +0100
+diff -Nur linux-3.18.12.orig/lib/smp_processor_id.c linux-3.18.12/lib/smp_processor_id.c
+--- linux-3.18.12.orig/lib/smp_processor_id.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/lib/smp_processor_id.c 2015-04-26 13:32:22.459684003 -0500
@@ -39,8 +39,9 @@
if (!printk_ratelimit())
goto out_enable;
@@ -22397,9 +30896,9 @@ diff -Nur linux-3.18.10.orig/lib/smp_processor_id.c linux-3.18.10/lib/smp_proces
print_symbol("caller is %s\n", (long)__builtin_return_address(0));
dump_stack();
-diff -Nur linux-3.18.10.orig/mm/filemap.c linux-3.18.10/mm/filemap.c
---- linux-3.18.10.orig/mm/filemap.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/mm/filemap.c 2015-03-26 12:42:18.687588350 +0100
+diff -Nur linux-3.18.12.orig/mm/filemap.c linux-3.18.12/mm/filemap.c
+--- linux-3.18.12.orig/mm/filemap.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/mm/filemap.c 2015-04-26 13:32:22.463684003 -0500
@@ -168,7 +168,9 @@
if (!workingset_node_pages(node) &&
list_empty(&node->private_list)) {
@@ -22426,9 +30925,9 @@ diff -Nur linux-3.18.10.orig/mm/filemap.c linux-3.18.10/mm/filemap.c
}
return 0;
}
-diff -Nur linux-3.18.10.orig/mm/highmem.c linux-3.18.10/mm/highmem.c
---- linux-3.18.10.orig/mm/highmem.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/mm/highmem.c 2015-03-26 12:42:18.687588350 +0100
+diff -Nur linux-3.18.12.orig/mm/highmem.c linux-3.18.12/mm/highmem.c
+--- linux-3.18.12.orig/mm/highmem.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/mm/highmem.c 2015-04-26 13:32:22.463684003 -0500
@@ -29,10 +29,11 @@
#include <linux/kgdb.h>
#include <asm/tlbflush.h>
@@ -22453,9 +30952,9 @@ diff -Nur linux-3.18.10.orig/mm/highmem.c linux-3.18.10/mm/highmem.c
unsigned int nr_free_highpages (void)
{
-diff -Nur linux-3.18.10.orig/mm/Kconfig linux-3.18.10/mm/Kconfig
---- linux-3.18.10.orig/mm/Kconfig 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/mm/Kconfig 2015-03-26 12:42:18.687588350 +0100
+diff -Nur linux-3.18.12.orig/mm/Kconfig linux-3.18.12/mm/Kconfig
+--- linux-3.18.12.orig/mm/Kconfig 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/mm/Kconfig 2015-04-26 13:32:22.463684003 -0500
@@ -408,7 +408,7 @@
config TRANSPARENT_HUGEPAGE
@@ -22465,9 +30964,9 @@ diff -Nur linux-3.18.10.orig/mm/Kconfig linux-3.18.10/mm/Kconfig
select COMPACTION
help
Transparent Hugepages allows the kernel to use huge pages and
-diff -Nur linux-3.18.10.orig/mm/memcontrol.c linux-3.18.10/mm/memcontrol.c
---- linux-3.18.10.orig/mm/memcontrol.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/mm/memcontrol.c 2015-03-26 12:42:18.687588350 +0100
+diff -Nur linux-3.18.12.orig/mm/memcontrol.c linux-3.18.12/mm/memcontrol.c
+--- linux-3.18.12.orig/mm/memcontrol.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/mm/memcontrol.c 2015-04-26 13:32:22.463684003 -0500
@@ -60,6 +60,8 @@
#include <net/sock.h>
#include <net/ip.h>
@@ -22568,10 +31067,10 @@ diff -Nur linux-3.18.10.orig/mm/memcontrol.c linux-3.18.10/mm/memcontrol.c
}
static void uncharge_list(struct list_head *page_list)
-diff -Nur linux-3.18.10.orig/mm/memory.c linux-3.18.10/mm/memory.c
---- linux-3.18.10.orig/mm/memory.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/mm/memory.c 2015-03-26 12:42:18.691588355 +0100
-@@ -3258,6 +3258,32 @@
+diff -Nur linux-3.18.12.orig/mm/memory.c linux-3.18.12/mm/memory.c
+--- linux-3.18.12.orig/mm/memory.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/mm/memory.c 2015-04-26 13:32:22.463684003 -0500
+@@ -3244,6 +3244,32 @@
return 0;
}
@@ -22604,9 +31103,9 @@ diff -Nur linux-3.18.10.orig/mm/memory.c linux-3.18.10/mm/memory.c
/*
* By the time we get here, we already hold the mm semaphore
*
-diff -Nur linux-3.18.10.orig/mm/mmu_context.c linux-3.18.10/mm/mmu_context.c
---- linux-3.18.10.orig/mm/mmu_context.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/mm/mmu_context.c 2015-03-26 12:42:18.691588355 +0100
+diff -Nur linux-3.18.12.orig/mm/mmu_context.c linux-3.18.12/mm/mmu_context.c
+--- linux-3.18.12.orig/mm/mmu_context.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/mm/mmu_context.c 2015-04-26 13:32:22.463684003 -0500
@@ -23,6 +23,7 @@
struct task_struct *tsk = current;
@@ -22623,9 +31122,9 @@ diff -Nur linux-3.18.10.orig/mm/mmu_context.c linux-3.18.10/mm/mmu_context.c
task_unlock(tsk);
#ifdef finish_arch_post_lock_switch
finish_arch_post_lock_switch();
-diff -Nur linux-3.18.10.orig/mm/page_alloc.c linux-3.18.10/mm/page_alloc.c
---- linux-3.18.10.orig/mm/page_alloc.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/mm/page_alloc.c 2015-03-26 12:42:18.691588355 +0100
+diff -Nur linux-3.18.12.orig/mm/page_alloc.c linux-3.18.12/mm/page_alloc.c
+--- linux-3.18.12.orig/mm/page_alloc.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/mm/page_alloc.c 2015-04-26 13:32:22.463684003 -0500
@@ -59,6 +59,7 @@
#include <linux/page-debug-flags.h>
#include <linux/hugetlb.h>
@@ -22958,9 +31457,9 @@ diff -Nur linux-3.18.10.orig/mm/page_alloc.c linux-3.18.10/mm/page_alloc.c
}
#ifdef CONFIG_MEMORY_HOTREMOVE
-diff -Nur linux-3.18.10.orig/mm/slab.h linux-3.18.10/mm/slab.h
---- linux-3.18.10.orig/mm/slab.h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/mm/slab.h 2015-03-26 12:42:18.691588355 +0100
+diff -Nur linux-3.18.12.orig/mm/slab.h linux-3.18.12/mm/slab.h
+--- linux-3.18.12.orig/mm/slab.h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/mm/slab.h 2015-04-26 13:32:22.467684003 -0500
@@ -315,7 +315,11 @@
* The slab lists for all objects.
*/
@@ -22973,9 +31472,9 @@ diff -Nur linux-3.18.10.orig/mm/slab.h linux-3.18.10/mm/slab.h
#ifdef CONFIG_SLAB
struct list_head slabs_partial; /* partial list first, better asm code */
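
The slab.h hunk (its added lines are elided above) applies the usual RT type split to the node's list_lock: under SLUB it becomes a raw_spinlock_t, which keeps spinning on RT, while a plain spinlock_t turns into a sleeping lock there. A compile-time sketch of that selection, with empty stub typedefs since the kernel's lock types are not available outside the tree:

/* Stub lock types standing in for the kernel's; on an RT kernel,
 * spinlock_t is a sleeping rtmutex while raw_spinlock_t still spins. */
typedef struct { int dummy; } raw_spinlock_model_t;
typedef struct { int dummy; } spinlock_model_t;

#define CONFIG_SLUB 1

struct kmem_cache_node_model {
#ifdef CONFIG_SLUB
    raw_spinlock_model_t list_lock; /* taken from atomic context: must not sleep */
#else
    spinlock_model_t list_lock;     /* may sleep on RT, fine for SLAB's usage */
#endif
    unsigned long nr_partial;
};

int main(void)
{
    struct kmem_cache_node_model n = { {0}, 0 };
    (void)n;
    return 0;
}
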
-diff -Nur linux-3.18.10.orig/mm/slub.c linux-3.18.10/mm/slub.c
---- linux-3.18.10.orig/mm/slub.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/mm/slub.c 2015-03-26 12:42:18.691588355 +0100
+diff -Nur linux-3.18.12.orig/mm/slub.c linux-3.18.12/mm/slub.c
+--- linux-3.18.12.orig/mm/slub.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/mm/slub.c 2015-04-26 13:32:22.467684003 -0500
@@ -1044,7 +1044,7 @@
{
struct kmem_cache_node *n = get_node(s, page_to_nid(page));
@@ -23365,9 +31864,9 @@ diff -Nur linux-3.18.10.orig/mm/slub.c linux-3.18.10/mm/slub.c
}
for (i = 0; i < t.count; i++) {
-diff -Nur linux-3.18.10.orig/mm/swap.c linux-3.18.10/mm/swap.c
---- linux-3.18.10.orig/mm/swap.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/mm/swap.c 2015-03-26 12:42:18.691588355 +0100
+diff -Nur linux-3.18.12.orig/mm/swap.c linux-3.18.12/mm/swap.c
+--- linux-3.18.12.orig/mm/swap.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/mm/swap.c 2015-04-26 13:32:22.467684003 -0500
@@ -31,6 +31,7 @@
#include <linux/memcontrol.h>
#include <linux/gfp.h>
@@ -23486,9 +31985,9 @@ diff -Nur linux-3.18.10.orig/mm/swap.c linux-3.18.10/mm/swap.c
}
static void lru_add_drain_per_cpu(struct work_struct *dummy)
-diff -Nur linux-3.18.10.orig/mm/truncate.c linux-3.18.10/mm/truncate.c
---- linux-3.18.10.orig/mm/truncate.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/mm/truncate.c 2015-03-26 12:42:18.691588355 +0100
+diff -Nur linux-3.18.12.orig/mm/truncate.c linux-3.18.12/mm/truncate.c
+--- linux-3.18.12.orig/mm/truncate.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/mm/truncate.c 2015-04-26 13:32:22.467684003 -0500
@@ -56,8 +56,11 @@
* protected by mapping->tree_lock.
*/
@@ -23503,9 +32002,9 @@ diff -Nur linux-3.18.10.orig/mm/truncate.c linux-3.18.10/mm/truncate.c
__radix_tree_delete_node(&mapping->page_tree, node);
unlock:
spin_unlock_irq(&mapping->tree_lock);
-diff -Nur linux-3.18.10.orig/mm/vmalloc.c linux-3.18.10/mm/vmalloc.c
---- linux-3.18.10.orig/mm/vmalloc.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/mm/vmalloc.c 2015-03-26 12:42:18.691588355 +0100
+diff -Nur linux-3.18.12.orig/mm/vmalloc.c linux-3.18.12/mm/vmalloc.c
+--- linux-3.18.12.orig/mm/vmalloc.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/mm/vmalloc.c 2015-04-26 13:32:22.467684003 -0500
@@ -798,7 +798,7 @@
struct vmap_block *vb;
struct vmap_area *va;
@@ -23557,9 +32056,9 @@ diff -Nur linux-3.18.10.orig/mm/vmalloc.c linux-3.18.10/mm/vmalloc.c
rcu_read_unlock();
if (!addr) {
-diff -Nur linux-3.18.10.orig/mm/vmstat.c linux-3.18.10/mm/vmstat.c
---- linux-3.18.10.orig/mm/vmstat.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/mm/vmstat.c 2015-03-26 12:42:18.691588355 +0100
+diff -Nur linux-3.18.12.orig/mm/vmstat.c linux-3.18.12/mm/vmstat.c
+--- linux-3.18.12.orig/mm/vmstat.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/mm/vmstat.c 2015-04-26 13:32:22.467684003 -0500
@@ -221,6 +221,7 @@
long x;
long t;
@@ -23608,9 +32107,9 @@ diff -Nur linux-3.18.10.orig/mm/vmstat.c linux-3.18.10/mm/vmstat.c
}
void __dec_zone_page_state(struct page *page, enum zone_stat_item item)
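
The vmstat counters touched here accumulate a per-CPU delta (the local x) and fold it into the zone-wide counter only once it crosses the per-zone threshold t; the elided RT lines wrap that read-modify-write in explicit preemption protection (preempt_disable_rt() in this series). A user-space model of the threshold-folding scheme itself, with a thread-local delta and a C11 atomic as the global counter (structure and names are illustrative):

#include <stdatomic.h>
#include <stdio.h>

#define STAT_THRESHOLD 32               /* stand-in for the per-zone threshold */

static atomic_long global_count;        /* zone-wide counter */
static _Thread_local long cpu_delta;    /* per-CPU diff in the kernel */

/* Cheap local update, occasional global fold. In the kernel this runs
 * with preemption disabled so delta and threshold stay consistent. */
static void mod_state(long delta)
{
    long x = cpu_delta + delta;

    if (x > STAT_THRESHOLD || x < -STAT_THRESHOLD) {
        atomic_fetch_add(&global_count, x);
        x = 0;
    }
    cpu_delta = x;
}

int main(void)
{
    for (int i = 0; i < 100; i++)
        mod_state(1);
    atomic_fetch_add(&global_count, cpu_delta);  /* final fold */
    printf("global = %ld\n", atomic_load(&global_count));
    return 0;
}
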
-diff -Nur linux-3.18.10.orig/mm/workingset.c linux-3.18.10/mm/workingset.c
---- linux-3.18.10.orig/mm/workingset.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/mm/workingset.c 2015-03-26 12:42:18.691588355 +0100
+diff -Nur linux-3.18.12.orig/mm/workingset.c linux-3.18.12/mm/workingset.c
+--- linux-3.18.12.orig/mm/workingset.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/mm/workingset.c 2015-04-26 13:32:22.467684003 -0500
@@ -264,7 +264,8 @@
* point where they would still be useful.
*/
@@ -23678,9 +32177,9 @@ diff -Nur linux-3.18.10.orig/mm/workingset.c linux-3.18.10/mm/workingset.c
err:
return ret;
}
-diff -Nur linux-3.18.10.orig/net/core/dev.c linux-3.18.10/net/core/dev.c
---- linux-3.18.10.orig/net/core/dev.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/net/core/dev.c 2015-03-26 12:42:18.691588355 +0100
+diff -Nur linux-3.18.12.orig/net/core/dev.c linux-3.18.12/net/core/dev.c
+--- linux-3.18.12.orig/net/core/dev.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/net/core/dev.c 2015-04-26 13:32:22.471684003 -0500
@@ -182,6 +182,7 @@
static DEFINE_HASHTABLE(napi_hash, 8);
@@ -23977,9 +32476,9 @@ diff -Nur linux-3.18.10.orig/net/core/dev.c linux-3.18.10/net/core/dev.c
INIT_LIST_HEAD(&sd->poll_list);
sd->output_queue_tailp = &sd->output_queue;
#ifdef CONFIG_RPS
-diff -Nur linux-3.18.10.orig/net/core/skbuff.c linux-3.18.10/net/core/skbuff.c
---- linux-3.18.10.orig/net/core/skbuff.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/net/core/skbuff.c 2015-03-26 12:42:18.691588355 +0100
+diff -Nur linux-3.18.12.orig/net/core/skbuff.c linux-3.18.12/net/core/skbuff.c
+--- linux-3.18.12.orig/net/core/skbuff.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/net/core/skbuff.c 2015-04-26 13:32:22.471684003 -0500
@@ -63,6 +63,7 @@
#include <linux/errqueue.h>
#include <linux/prefetch.h>
@@ -24014,9 +32513,9 @@ diff -Nur linux-3.18.10.orig/net/core/skbuff.c linux-3.18.10/net/core/skbuff.c
return data;
}
-diff -Nur linux-3.18.10.orig/net/core/sock.c linux-3.18.10/net/core/sock.c
---- linux-3.18.10.orig/net/core/sock.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/net/core/sock.c 2015-03-26 12:42:18.691588355 +0100
+diff -Nur linux-3.18.12.orig/net/core/sock.c linux-3.18.12/net/core/sock.c
+--- linux-3.18.12.orig/net/core/sock.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/net/core/sock.c 2015-04-26 13:32:22.471684003 -0500
@@ -2326,12 +2326,11 @@
if (sk->sk_lock.owned)
__lock_sock(sk);
@@ -24031,9 +32530,9 @@ diff -Nur linux-3.18.10.orig/net/core/sock.c linux-3.18.10/net/core/sock.c
}
EXPORT_SYMBOL(lock_sock_nested);
-diff -Nur linux-3.18.10.orig/net/ipv4/icmp.c linux-3.18.10/net/ipv4/icmp.c
---- linux-3.18.10.orig/net/ipv4/icmp.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/net/ipv4/icmp.c 2015-03-26 12:42:18.691588355 +0100
+diff -Nur linux-3.18.12.orig/net/ipv4/icmp.c linux-3.18.12/net/ipv4/icmp.c
+--- linux-3.18.12.orig/net/ipv4/icmp.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/net/ipv4/icmp.c 2015-04-26 13:32:22.471684003 -0500
@@ -69,6 +69,7 @@
#include <linux/jiffies.h>
#include <linux/kernel.h>
@@ -24085,9 +32584,9 @@ diff -Nur linux-3.18.10.orig/net/ipv4/icmp.c linux-3.18.10/net/ipv4/icmp.c
}
}
-diff -Nur linux-3.18.10.orig/net/ipv4/sysctl_net_ipv4.c linux-3.18.10/net/ipv4/sysctl_net_ipv4.c
---- linux-3.18.10.orig/net/ipv4/sysctl_net_ipv4.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/net/ipv4/sysctl_net_ipv4.c 2015-03-26 12:42:18.691588355 +0100
+diff -Nur linux-3.18.12.orig/net/ipv4/sysctl_net_ipv4.c linux-3.18.12/net/ipv4/sysctl_net_ipv4.c
+--- linux-3.18.12.orig/net/ipv4/sysctl_net_ipv4.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/net/ipv4/sysctl_net_ipv4.c 2015-04-26 13:32:22.471684003 -0500
@@ -779,6 +779,13 @@
.proc_handler = proc_dointvec
},
@@ -24102,10 +32601,10 @@ diff -Nur linux-3.18.10.orig/net/ipv4/sysctl_net_ipv4.c linux-3.18.10/net/ipv4/s
.procname = "icmp_ignore_bogus_error_responses",
.data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses,
.maxlen = sizeof(int),
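
The elided lines above splice one more entry into this ipv4 sysctl table (the RT series adds its icmp_echo_sysrq switch at this spot). For reference, the shape of such an entry, modeled with a stub struct because the kernel's ctl_table and proc_dointvec are not available here; the entry name below is hypothetical:

#include <stdio.h>

/* Stub mirroring the layout of the kernel's struct ctl_table fields
 * visible in the context lines above. */
struct ctl_table_model {
    const char *procname;   /* file name under /proc/sys/... */
    void *data;             /* variable the sysctl reads and writes */
    int maxlen;             /* size of that variable */
    int mode;               /* permission bits, umode_t in the kernel */
    int (*proc_handler)(struct ctl_table_model *);
};

static int sysctl_example_value;

static int proc_dointvec_model(struct ctl_table_model *t)
{
    printf("%s -> %d\n", t->procname, *(int *)t->data);
    return 0;
}

static struct ctl_table_model example_entry = {
    .procname     = "icmp_example_switch",   /* hypothetical name */
    .data         = &sysctl_example_value,
    .maxlen       = sizeof(int),
    .mode         = 0644,
    .proc_handler = proc_dointvec_model,
};

int main(void)
{
    sysctl_example_value = 1;
    return example_entry.proc_handler(&example_entry);
}
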
-diff -Nur linux-3.18.10.orig/net/mac80211/rx.c linux-3.18.10/net/mac80211/rx.c
---- linux-3.18.10.orig/net/mac80211/rx.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/net/mac80211/rx.c 2015-03-26 12:42:18.695588359 +0100
-@@ -3356,7 +3356,7 @@
+diff -Nur linux-3.18.12.orig/net/mac80211/rx.c linux-3.18.12/net/mac80211/rx.c
+--- linux-3.18.12.orig/net/mac80211/rx.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/net/mac80211/rx.c 2015-04-26 13:32:22.471684003 -0500
+@@ -3359,7 +3359,7 @@
struct ieee80211_supported_band *sband;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
@@ -24114,9 +32613,3489 @@ diff -Nur linux-3.18.10.orig/net/mac80211/rx.c linux-3.18.10/net/mac80211/rx.c
if (WARN_ON(status->band >= IEEE80211_NUM_BANDS))
goto drop;
-diff -Nur linux-3.18.10.orig/net/netfilter/core.c linux-3.18.10/net/netfilter/core.c
---- linux-3.18.10.orig/net/netfilter/core.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/net/netfilter/core.c 2015-03-26 12:42:18.695588359 +0100
+diff -Nur linux-3.18.12.orig/net/mac80211/rx.c.orig linux-3.18.12/net/mac80211/rx.c.orig
+--- linux-3.18.12.orig/net/mac80211/rx.c.orig 1969-12-31 18:00:00.000000000 -0600
++++ linux-3.18.12/net/mac80211/rx.c.orig 2015-04-20 14:48:02.000000000 -0500
+@@ -0,0 +1,3476 @@
++/*
++ * Copyright 2002-2005, Instant802 Networks, Inc.
++ * Copyright 2005-2006, Devicescape Software, Inc.
++ * Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
++ * Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
++ * Copyright 2013-2014 Intel Mobile Communications GmbH
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/jiffies.h>
++#include <linux/slab.h>
++#include <linux/kernel.h>
++#include <linux/skbuff.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/rcupdate.h>
++#include <linux/export.h>
++#include <net/mac80211.h>
++#include <net/ieee80211_radiotap.h>
++#include <asm/unaligned.h>
++
++#include "ieee80211_i.h"
++#include "driver-ops.h"
++#include "led.h"
++#include "mesh.h"
++#include "wep.h"
++#include "wpa.h"
++#include "tkip.h"
++#include "wme.h"
++#include "rate.h"
++
++/*
++ * monitor mode reception
++ *
++ * This function cleans up the SKB, i.e. it removes all the stuff
++ * only useful for monitoring.
++ */
++static struct sk_buff *remove_monitor_info(struct ieee80211_local *local,
++ struct sk_buff *skb)
++{
++ if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS) {
++ if (likely(skb->len > FCS_LEN))
++ __pskb_trim(skb, skb->len - FCS_LEN);
++ else {
++ /* driver bug */
++ WARN_ON(1);
++ dev_kfree_skb(skb);
++ return NULL;
++ }
++ }
++
++ return skb;
++}
++
++static inline bool should_drop_frame(struct sk_buff *skb, int present_fcs_len)
++{
++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
++ struct ieee80211_hdr *hdr = (void *)skb->data;
++
++ if (status->flag & (RX_FLAG_FAILED_FCS_CRC |
++ RX_FLAG_FAILED_PLCP_CRC |
++ RX_FLAG_AMPDU_IS_ZEROLEN))
++ return true;
++
++ if (unlikely(skb->len < 16 + present_fcs_len))
++ return true;
++
++ if (ieee80211_is_ctl(hdr->frame_control) &&
++ !ieee80211_is_pspoll(hdr->frame_control) &&
++ !ieee80211_is_back_req(hdr->frame_control))
++ return true;
++
++ return false;
++}
++
++static int
++ieee80211_rx_radiotap_space(struct ieee80211_local *local,
++ struct ieee80211_rx_status *status)
++{
++ int len;
++
++ /* always present fields */
++ len = sizeof(struct ieee80211_radiotap_header) + 8;
++
++ /* allocate extra bitmaps */
++ if (status->chains)
++ len += 4 * hweight8(status->chains);
++
++ if (ieee80211_have_rx_timestamp(status)) {
++ len = ALIGN(len, 8);
++ len += 8;
++ }
++ if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
++ len += 1;
++
++ /* antenna field, if we don't have per-chain info */
++ if (!status->chains)
++ len += 1;
++
++ /* padding for RX_FLAGS if necessary */
++ len = ALIGN(len, 2);
++
++ if (status->flag & RX_FLAG_HT) /* HT info */
++ len += 3;
++
++ if (status->flag & RX_FLAG_AMPDU_DETAILS) {
++ len = ALIGN(len, 4);
++ len += 8;
++ }
++
++ if (status->flag & RX_FLAG_VHT) {
++ len = ALIGN(len, 2);
++ len += 12;
++ }
++
++ if (status->chains) {
++ /* antenna and antenna signal fields */
++ len += 2 * hweight8(status->chains);
++ }
++
++ return len;
++}
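
ieee80211_rx_radiotap_space() above sizes the radiotap header by repeatedly rounding the running length up to each field's natural alignment before adding the field, using the kernel's power-of-two ALIGN() round-up. A stand-alone check of that arithmetic:

#include <assert.h>
#include <stdio.h>

/* Power-of-two round-up, same arithmetic as the kernel's ALIGN(). */
#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
    int len = 13;               /* some running header length */

    len = ALIGN_UP(len, 8);     /* pad for an 8-byte field (TSFT) */
    assert(len == 16);
    len += 8;                   /* the field itself */

    len = ALIGN_UP(len, 2);     /* already even, unchanged */
    assert(len == 24);
    printf("padded length: %d\n", len);
    return 0;
}
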
++
++/*
++ * ieee80211_add_rx_radiotap_header - add radiotap header
++ *
++ * add a radiotap header containing all the fields which the hardware provided.
++ */
++static void
++ieee80211_add_rx_radiotap_header(struct ieee80211_local *local,
++ struct sk_buff *skb,
++ struct ieee80211_rate *rate,
++ int rtap_len, bool has_fcs)
++{
++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
++ struct ieee80211_radiotap_header *rthdr;
++ unsigned char *pos;
++ __le32 *it_present;
++ u32 it_present_val;
++ u16 rx_flags = 0;
++ u16 channel_flags = 0;
++ int mpdulen, chain;
++ unsigned long chains = status->chains;
++
++ mpdulen = skb->len;
++ if (!(has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)))
++ mpdulen += FCS_LEN;
++
++ rthdr = (struct ieee80211_radiotap_header *)skb_push(skb, rtap_len);
++ memset(rthdr, 0, rtap_len);
++ it_present = &rthdr->it_present;
++
++ /* radiotap header, set always present flags */
++ rthdr->it_len = cpu_to_le16(rtap_len);
++ it_present_val = BIT(IEEE80211_RADIOTAP_FLAGS) |
++ BIT(IEEE80211_RADIOTAP_CHANNEL) |
++ BIT(IEEE80211_RADIOTAP_RX_FLAGS);
++
++ if (!status->chains)
++ it_present_val |= BIT(IEEE80211_RADIOTAP_ANTENNA);
++
++ for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
++ it_present_val |=
++ BIT(IEEE80211_RADIOTAP_EXT) |
++ BIT(IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE);
++ put_unaligned_le32(it_present_val, it_present);
++ it_present++;
++ it_present_val = BIT(IEEE80211_RADIOTAP_ANTENNA) |
++ BIT(IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
++ }
++
++ put_unaligned_le32(it_present_val, it_present);
++
++ pos = (void *)(it_present + 1);
++
++ /* the order of the following fields is important */
++
++ /* IEEE80211_RADIOTAP_TSFT */
++ if (ieee80211_have_rx_timestamp(status)) {
++ /* padding */
++ while ((pos - (u8 *)rthdr) & 7)
++ *pos++ = 0;
++ put_unaligned_le64(
++ ieee80211_calculate_rx_timestamp(local, status,
++ mpdulen, 0),
++ pos);
++ rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_TSFT);
++ pos += 8;
++ }
++
++ /* IEEE80211_RADIOTAP_FLAGS */
++ if (has_fcs && (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS))
++ *pos |= IEEE80211_RADIOTAP_F_FCS;
++ if (status->flag & (RX_FLAG_FAILED_FCS_CRC | RX_FLAG_FAILED_PLCP_CRC))
++ *pos |= IEEE80211_RADIOTAP_F_BADFCS;
++ if (status->flag & RX_FLAG_SHORTPRE)
++ *pos |= IEEE80211_RADIOTAP_F_SHORTPRE;
++ pos++;
++
++ /* IEEE80211_RADIOTAP_RATE */
++ if (!rate || status->flag & (RX_FLAG_HT | RX_FLAG_VHT)) {
++ /*
++ * Without rate information don't add it. If we have,
++ * MCS information is a separate field in radiotap,
++ * added below. The byte here is needed as padding
++ * for the channel though, so initialise it to 0.
++ */
++ *pos = 0;
++ } else {
++ int shift = 0;
++ rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_RATE);
++ if (status->flag & RX_FLAG_10MHZ)
++ shift = 1;
++ else if (status->flag & RX_FLAG_5MHZ)
++ shift = 2;
++ *pos = DIV_ROUND_UP(rate->bitrate, 5 * (1 << shift));
++ }
++ pos++;
++
++ /* IEEE80211_RADIOTAP_CHANNEL */
++ put_unaligned_le16(status->freq, pos);
++ pos += 2;
++ if (status->flag & RX_FLAG_10MHZ)
++ channel_flags |= IEEE80211_CHAN_HALF;
++ else if (status->flag & RX_FLAG_5MHZ)
++ channel_flags |= IEEE80211_CHAN_QUARTER;
++
++ if (status->band == IEEE80211_BAND_5GHZ)
++ channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ;
++ else if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT))
++ channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ;
++ else if (rate && rate->flags & IEEE80211_RATE_ERP_G)
++ channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_2GHZ;
++ else if (rate)
++ channel_flags |= IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ;
++ else
++ channel_flags |= IEEE80211_CHAN_2GHZ;
++ put_unaligned_le16(channel_flags, pos);
++ pos += 2;
++
++ /* IEEE80211_RADIOTAP_DBM_ANTSIGNAL */
++ if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM &&
++ !(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
++ *pos = status->signal;
++ rthdr->it_present |=
++ cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);
++ pos++;
++ }
++
++ /* IEEE80211_RADIOTAP_LOCK_QUALITY is missing */
++
++ if (!status->chains) {
++ /* IEEE80211_RADIOTAP_ANTENNA */
++ *pos = status->antenna;
++ pos++;
++ }
++
++ /* IEEE80211_RADIOTAP_DB_ANTNOISE is not used */
++
++ /* IEEE80211_RADIOTAP_RX_FLAGS */
++ /* ensure 2 byte alignment for the 2 byte field as required */
++ if ((pos - (u8 *)rthdr) & 1)
++ *pos++ = 0;
++ if (status->flag & RX_FLAG_FAILED_PLCP_CRC)
++ rx_flags |= IEEE80211_RADIOTAP_F_RX_BADPLCP;
++ put_unaligned_le16(rx_flags, pos);
++ pos += 2;
++
++ if (status->flag & RX_FLAG_HT) {
++ unsigned int stbc;
++
++ rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_MCS);
++ *pos++ = local->hw.radiotap_mcs_details;
++ *pos = 0;
++ if (status->flag & RX_FLAG_SHORT_GI)
++ *pos |= IEEE80211_RADIOTAP_MCS_SGI;
++ if (status->flag & RX_FLAG_40MHZ)
++ *pos |= IEEE80211_RADIOTAP_MCS_BW_40;
++ if (status->flag & RX_FLAG_HT_GF)
++ *pos |= IEEE80211_RADIOTAP_MCS_FMT_GF;
++ if (status->flag & RX_FLAG_LDPC)
++ *pos |= IEEE80211_RADIOTAP_MCS_FEC_LDPC;
++ stbc = (status->flag & RX_FLAG_STBC_MASK) >> RX_FLAG_STBC_SHIFT;
++ *pos |= stbc << IEEE80211_RADIOTAP_MCS_STBC_SHIFT;
++ pos++;
++ *pos++ = status->rate_idx;
++ }
++
++ if (status->flag & RX_FLAG_AMPDU_DETAILS) {
++ u16 flags = 0;
++
++ /* ensure 4 byte alignment */
++ while ((pos - (u8 *)rthdr) & 3)
++ pos++;
++ rthdr->it_present |=
++ cpu_to_le32(1 << IEEE80211_RADIOTAP_AMPDU_STATUS);
++ put_unaligned_le32(status->ampdu_reference, pos);
++ pos += 4;
++ if (status->flag & RX_FLAG_AMPDU_REPORT_ZEROLEN)
++ flags |= IEEE80211_RADIOTAP_AMPDU_REPORT_ZEROLEN;
++ if (status->flag & RX_FLAG_AMPDU_IS_ZEROLEN)
++ flags |= IEEE80211_RADIOTAP_AMPDU_IS_ZEROLEN;
++ if (status->flag & RX_FLAG_AMPDU_LAST_KNOWN)
++ flags |= IEEE80211_RADIOTAP_AMPDU_LAST_KNOWN;
++ if (status->flag & RX_FLAG_AMPDU_IS_LAST)
++ flags |= IEEE80211_RADIOTAP_AMPDU_IS_LAST;
++ if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_ERROR)
++ flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_ERR;
++ if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
++ flags |= IEEE80211_RADIOTAP_AMPDU_DELIM_CRC_KNOWN;
++ put_unaligned_le16(flags, pos);
++ pos += 2;
++ if (status->flag & RX_FLAG_AMPDU_DELIM_CRC_KNOWN)
++ *pos++ = status->ampdu_delimiter_crc;
++ else
++ *pos++ = 0;
++ *pos++ = 0;
++ }
++
++ if (status->flag & RX_FLAG_VHT) {
++ u16 known = local->hw.radiotap_vht_details;
++
++ rthdr->it_present |= cpu_to_le32(1 << IEEE80211_RADIOTAP_VHT);
++ /* known field - how to handle 80+80? */
++ if (status->vht_flag & RX_VHT_FLAG_80P80MHZ)
++ known &= ~IEEE80211_RADIOTAP_VHT_KNOWN_BANDWIDTH;
++ put_unaligned_le16(known, pos);
++ pos += 2;
++ /* flags */
++ if (status->flag & RX_FLAG_SHORT_GI)
++ *pos |= IEEE80211_RADIOTAP_VHT_FLAG_SGI;
++ /* in VHT, STBC is binary */
++ if (status->flag & RX_FLAG_STBC_MASK)
++ *pos |= IEEE80211_RADIOTAP_VHT_FLAG_STBC;
++ if (status->vht_flag & RX_VHT_FLAG_BF)
++ *pos |= IEEE80211_RADIOTAP_VHT_FLAG_BEAMFORMED;
++ pos++;
++ /* bandwidth */
++ if (status->vht_flag & RX_VHT_FLAG_80MHZ)
++ *pos++ = 4;
++ else if (status->vht_flag & RX_VHT_FLAG_80P80MHZ)
++ *pos++ = 0; /* marked not known above */
++ else if (status->vht_flag & RX_VHT_FLAG_160MHZ)
++ *pos++ = 11;
++ else if (status->flag & RX_FLAG_40MHZ)
++ *pos++ = 1;
++ else /* 20 MHz */
++ *pos++ = 0;
++ /* MCS/NSS */
++ *pos = (status->rate_idx << 4) | status->vht_nss;
++ pos += 4;
++ /* coding field */
++ if (status->flag & RX_FLAG_LDPC)
++ *pos |= IEEE80211_RADIOTAP_CODING_LDPC_USER0;
++ pos++;
++ /* group ID */
++ pos++;
++ /* partial_aid */
++ pos += 2;
++ }
++
++ for_each_set_bit(chain, &chains, IEEE80211_MAX_CHAINS) {
++ *pos++ = status->chain_signal[chain];
++ *pos++ = chain;
++ }
++}
++
++/*
++ * This function copies a received frame to all monitor interfaces and
++ * returns a cleaned-up SKB that includes neither the FCS nor the
++ * radiotap header the driver might have added.
++ */
++static struct sk_buff *
++ieee80211_rx_monitor(struct ieee80211_local *local, struct sk_buff *origskb,
++ struct ieee80211_rate *rate)
++{
++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(origskb);
++ struct ieee80211_sub_if_data *sdata;
++ int needed_headroom;
++ struct sk_buff *skb, *skb2;
++ struct net_device *prev_dev = NULL;
++ int present_fcs_len = 0;
++
++ /*
++ * First, we may need to make a copy of the skb because
++ * (1) we need to modify it for radiotap (if not present), and
++ * (2) the other RX handlers will modify the skb we got.
++ *
++ * We don't need to, of course, if we aren't going to return
++ * the SKB because it has a bad FCS/PLCP checksum.
++ */
++
++ if (local->hw.flags & IEEE80211_HW_RX_INCLUDES_FCS)
++ present_fcs_len = FCS_LEN;
++
++ /* ensure hdr->frame_control is in skb head */
++ if (!pskb_may_pull(origskb, 2)) {
++ dev_kfree_skb(origskb);
++ return NULL;
++ }
++
++ if (!local->monitors) {
++ if (should_drop_frame(origskb, present_fcs_len)) {
++ dev_kfree_skb(origskb);
++ return NULL;
++ }
++
++ return remove_monitor_info(local, origskb);
++ }
++
++ /* room for the radiotap header based on driver features */
++ needed_headroom = ieee80211_rx_radiotap_space(local, status);
++
++ if (should_drop_frame(origskb, present_fcs_len)) {
++ /* only need to expand headroom if necessary */
++ skb = origskb;
++ origskb = NULL;
++
++ /*
++ * This shouldn't trigger often because most devices have an
++ * RX header they pull before we get here, and that should
++ * be big enough for our radiotap information. We should
++ * probably export the length to drivers so that we can have
++ * them allocate enough headroom to start with.
++ */
++ if (skb_headroom(skb) < needed_headroom &&
++ pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC)) {
++ dev_kfree_skb(skb);
++ return NULL;
++ }
++ } else {
++ /*
++ * Need to make a copy and possibly remove radiotap header
++ * and FCS from the original.
++ */
++ skb = skb_copy_expand(origskb, needed_headroom, 0, GFP_ATOMIC);
++
++ origskb = remove_monitor_info(local, origskb);
++
++ if (!skb)
++ return origskb;
++ }
++
++ /* prepend radiotap information */
++ ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
++ true);
++
++ skb_reset_mac_header(skb);
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ skb->pkt_type = PACKET_OTHERHOST;
++ skb->protocol = htons(ETH_P_802_2);
++
++ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
++ if (sdata->vif.type != NL80211_IFTYPE_MONITOR)
++ continue;
++
++ if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES)
++ continue;
++
++ if (!ieee80211_sdata_running(sdata))
++ continue;
++
++ if (prev_dev) {
++ skb2 = skb_clone(skb, GFP_ATOMIC);
++ if (skb2) {
++ skb2->dev = prev_dev;
++ netif_receive_skb(skb2);
++ }
++ }
++
++ prev_dev = sdata->dev;
++ sdata->dev->stats.rx_packets++;
++ sdata->dev->stats.rx_bytes += skb->len;
++ }
++
++ if (prev_dev) {
++ skb->dev = prev_dev;
++ netif_receive_skb(skb);
++ } else
++ dev_kfree_skb(skb);
++
++ return origskb;
++}
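
The loop above uses the prev_dev pattern: every monitor interface except the last receives a clone, and the last one gets the real skb, saving one copy per delivery round. The same flow in a trivial stand-alone form, with plain strings in place of skbs and devices:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    const char *consumers[] = { "mon0", "mon1", "mon2" };
    char *skb = strdup("frame");        /* the one real buffer */
    const char *prev = NULL;

    for (size_t i = 0; i < 3; i++) {
        if (prev)       /* clone for everyone but the last consumer */
            printf("deliver clone of '%s' to %s\n", skb, prev);
        prev = consumers[i];
    }
    if (prev)           /* the last consumer gets the original */
        printf("deliver original '%s' to %s\n", skb, prev);
    free(skb);
    return 0;
}
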
++
++static void ieee80211_parse_qos(struct ieee80211_rx_data *rx)
++{
++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
++ int tid, seqno_idx, security_idx;
++
++ /* does the frame have a qos control field? */
++ if (ieee80211_is_data_qos(hdr->frame_control)) {
++ u8 *qc = ieee80211_get_qos_ctl(hdr);
++ /* frame has qos control */
++ tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
++ if (*qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT)
++ status->rx_flags |= IEEE80211_RX_AMSDU;
++
++ seqno_idx = tid;
++ security_idx = tid;
++ } else {
++ /*
++ * IEEE 802.11-2007, 7.1.3.4.1 ("Sequence Number field"):
++ *
++ * Sequence numbers for management frames, QoS data
++ * frames with a broadcast/multicast address in the
++ * Address 1 field, and all non-QoS data frames sent
++ * by QoS STAs are assigned using an additional single
++ * modulo-4096 counter, [...]
++ *
++ * We also use that counter for non-QoS STAs.
++ */
++ seqno_idx = IEEE80211_NUM_TIDS;
++ security_idx = 0;
++ if (ieee80211_is_mgmt(hdr->frame_control))
++ security_idx = IEEE80211_NUM_TIDS;
++ tid = 0;
++ }
++
++ rx->seqno_idx = seqno_idx;
++ rx->security_idx = security_idx;
++ /* Set skb->priority to the 802.1d tag if the highest-order bit of the TID is not set.
++ * For now, set skb->priority to 0 for other cases. */
++ rx->skb->priority = (tid > 7) ? 0 : tid;
++}
++
++/**
++ * DOC: Packet alignment
++ *
++ * Drivers always need to pass packets that are aligned to two-byte boundaries
++ * to the stack.
++ *
++ * Additionally, drivers should, if possible, align the payload data in a way that
++ * guarantees that the contained IP header is aligned to a four-byte
++ * boundary. In the case of regular frames, this simply means aligning the
++ * payload to a four-byte boundary (because either the IP header is directly
++ * contained, or IV/RFC1042 headers that have a length divisible by four are
++ * in front of it). If the payload data is not properly aligned and the
++ * architecture doesn't support efficient unaligned operations, mac80211
++ * will align the data.
++ *
++ * With A-MSDU frames, however, the payload data address must be two modulo
++ * four because there are 14-byte 802.3 headers within the A-MSDU frames that
++ * push the IP header further back to a multiple of four again. Thankfully, the
++ * specs were sane enough this time around to require padding each A-MSDU
++ * subframe to a length that is a multiple of four.
++ *
++ * Padding like Atheros hardware adds which is between the 802.11 header and
++ * the payload is not supported, the driver is required to move the 802.11
++ * header to be directly in front of the payload in that case.
++ */
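
The DOC block above reduces to a simple invariant: for A-MSDU frames the payload address must be 2 modulo 4, so that the 14-byte 802.3 subframe header puts the IP header back on a 4-byte boundary. A small stand-alone check of that arithmetic (plain C, not part of the patch):

#include <stdint.h>
#include <stdio.h>

/* 14-byte 802.3 header inside each A-MSDU subframe */
#define ETH_HLEN_8023 14

static int amsdu_payload_ok(const void *payload)
{
    /* payload at 2 (mod 4) => payload + 14 is 0 (mod 4) */
    return ((uintptr_t)payload & 3) == 2;
}

int main(void)
{
    static unsigned char buf[32] __attribute__((aligned(4)));
    const void *payload = buf + 2;      /* 2 mod 4 */

    printf("payload ok: %d, ip header aligned: %d\n",
           amsdu_payload_ok(payload),
           (int)(((uintptr_t)payload + ETH_HLEN_8023) % 4 == 0));
    return 0;
}
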
++static void ieee80211_verify_alignment(struct ieee80211_rx_data *rx)
++{
++#ifdef CONFIG_MAC80211_VERBOSE_DEBUG
++ WARN_ONCE((unsigned long)rx->skb->data & 1,
++ "unaligned packet at 0x%p\n", rx->skb->data);
++#endif
++}
++
++
++/* rx handlers */
++
++static int ieee80211_is_unicast_robust_mgmt_frame(struct sk_buff *skb)
++{
++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
++
++ if (is_multicast_ether_addr(hdr->addr1))
++ return 0;
++
++ return ieee80211_is_robust_mgmt_frame(skb);
++}
++
++
++static int ieee80211_is_multicast_robust_mgmt_frame(struct sk_buff *skb)
++{
++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
++
++ if (!is_multicast_ether_addr(hdr->addr1))
++ return 0;
++
++ return ieee80211_is_robust_mgmt_frame(skb);
++}
++
++
++/* Get the BIP key index from MMIE; return -1 if this is not a BIP frame */
++static int ieee80211_get_mmie_keyidx(struct sk_buff *skb)
++{
++ struct ieee80211_mgmt *hdr = (struct ieee80211_mgmt *) skb->data;
++ struct ieee80211_mmie *mmie;
++
++ if (skb->len < 24 + sizeof(*mmie) || !is_multicast_ether_addr(hdr->da))
++ return -1;
++
++ if (!ieee80211_is_robust_mgmt_frame(skb))
++ return -1; /* not a robust management frame */
++
++ mmie = (struct ieee80211_mmie *)
++ (skb->data + skb->len - sizeof(*mmie));
++ if (mmie->element_id != WLAN_EID_MMIE ||
++ mmie->length != sizeof(*mmie) - 2)
++ return -1;
++
++ return le16_to_cpu(mmie->key_id);
++}
++
++static int iwl80211_get_cs_keyid(const struct ieee80211_cipher_scheme *cs,
++ struct sk_buff *skb)
++{
++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
++ __le16 fc;
++ int hdrlen;
++ u8 keyid;
++
++ fc = hdr->frame_control;
++ hdrlen = ieee80211_hdrlen(fc);
++
++ if (skb->len < hdrlen + cs->hdr_len)
++ return -EINVAL;
++
++ skb_copy_bits(skb, hdrlen + cs->key_idx_off, &keyid, 1);
++ keyid &= cs->key_idx_mask;
++ keyid >>= cs->key_idx_shift;
++
++ return keyid;
++}
++
++static ieee80211_rx_result ieee80211_rx_mesh_check(struct ieee80211_rx_data *rx)
++{
++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
++ char *dev_addr = rx->sdata->vif.addr;
++
++ if (ieee80211_is_data(hdr->frame_control)) {
++ if (is_multicast_ether_addr(hdr->addr1)) {
++ if (ieee80211_has_tods(hdr->frame_control) ||
++ !ieee80211_has_fromds(hdr->frame_control))
++ return RX_DROP_MONITOR;
++ if (ether_addr_equal(hdr->addr3, dev_addr))
++ return RX_DROP_MONITOR;
++ } else {
++ if (!ieee80211_has_a4(hdr->frame_control))
++ return RX_DROP_MONITOR;
++ if (ether_addr_equal(hdr->addr4, dev_addr))
++ return RX_DROP_MONITOR;
++ }
++ }
++
++ /* If there is not an established peer link and this is not a peer link
++ * establishment frame, beacon or probe, drop the frame.
++ */
++
++ if (!rx->sta || sta_plink_state(rx->sta) != NL80211_PLINK_ESTAB) {
++ struct ieee80211_mgmt *mgmt;
++
++ if (!ieee80211_is_mgmt(hdr->frame_control))
++ return RX_DROP_MONITOR;
++
++ if (ieee80211_is_action(hdr->frame_control)) {
++ u8 category;
++
++ /* make sure category field is present */
++ if (rx->skb->len < IEEE80211_MIN_ACTION_SIZE)
++ return RX_DROP_MONITOR;
++
++ mgmt = (struct ieee80211_mgmt *)hdr;
++ category = mgmt->u.action.category;
++ if (category != WLAN_CATEGORY_MESH_ACTION &&
++ category != WLAN_CATEGORY_SELF_PROTECTED)
++ return RX_DROP_MONITOR;
++ return RX_CONTINUE;
++ }
++
++ if (ieee80211_is_probe_req(hdr->frame_control) ||
++ ieee80211_is_probe_resp(hdr->frame_control) ||
++ ieee80211_is_beacon(hdr->frame_control) ||
++ ieee80211_is_auth(hdr->frame_control))
++ return RX_CONTINUE;
++
++ return RX_DROP_MONITOR;
++ }
++
++ return RX_CONTINUE;
++}
++
++static void ieee80211_release_reorder_frame(struct ieee80211_sub_if_data *sdata,
++ struct tid_ampdu_rx *tid_agg_rx,
++ int index,
++ struct sk_buff_head *frames)
++{
++ struct sk_buff_head *skb_list = &tid_agg_rx->reorder_buf[index];
++ struct sk_buff *skb;
++ struct ieee80211_rx_status *status;
++
++ lockdep_assert_held(&tid_agg_rx->reorder_lock);
++
++ if (skb_queue_empty(skb_list))
++ goto no_frame;
++
++ if (!ieee80211_rx_reorder_ready(skb_list)) {
++ __skb_queue_purge(skb_list);
++ goto no_frame;
++ }
++
++ /* release frames from the reorder ring buffer */
++ tid_agg_rx->stored_mpdu_num--;
++ while ((skb = __skb_dequeue(skb_list))) {
++ status = IEEE80211_SKB_RXCB(skb);
++ status->rx_flags |= IEEE80211_RX_DEFERRED_RELEASE;
++ __skb_queue_tail(frames, skb);
++ }
++
++no_frame:
++ tid_agg_rx->head_seq_num = ieee80211_sn_inc(tid_agg_rx->head_seq_num);
++}
++
++static void ieee80211_release_reorder_frames(struct ieee80211_sub_if_data *sdata,
++ struct tid_ampdu_rx *tid_agg_rx,
++ u16 head_seq_num,
++ struct sk_buff_head *frames)
++{
++ int index;
++
++ lockdep_assert_held(&tid_agg_rx->reorder_lock);
++
++ while (ieee80211_sn_less(tid_agg_rx->head_seq_num, head_seq_num)) {
++ index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
++ ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
++ frames);
++ }
++}
++
++/*
++ * Timeout (in jiffies) for skb's that are waiting in the RX reorder buffer. If
++ * the skb was added to the buffer longer than this time ago, the earlier
++ * frames that have not yet been received are assumed to be lost and the skb
++ * can be released for processing. This may also release other skb's from the
++ * reorder buffer if there are no additional gaps between the frames.
++ *
++ * Callers must hold tid_agg_rx->reorder_lock.
++ */
++#define HT_RX_REORDER_BUF_TIMEOUT (HZ / 10)
++
++static void ieee80211_sta_reorder_release(struct ieee80211_sub_if_data *sdata,
++ struct tid_ampdu_rx *tid_agg_rx,
++ struct sk_buff_head *frames)
++{
++ int index, i, j;
++
++ lockdep_assert_held(&tid_agg_rx->reorder_lock);
++
++ /* release the buffer until next missing frame */
++ index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
++ if (!ieee80211_rx_reorder_ready(&tid_agg_rx->reorder_buf[index]) &&
++ tid_agg_rx->stored_mpdu_num) {
++ /*
++ * No buffers ready to be released, but check whether any
++ * frames in the reorder buffer have timed out.
++ */
++ int skipped = 1;
++ for (j = (index + 1) % tid_agg_rx->buf_size; j != index;
++ j = (j + 1) % tid_agg_rx->buf_size) {
++ if (!ieee80211_rx_reorder_ready(
++ &tid_agg_rx->reorder_buf[j])) {
++ skipped++;
++ continue;
++ }
++ if (skipped &&
++ !time_after(jiffies, tid_agg_rx->reorder_time[j] +
++ HT_RX_REORDER_BUF_TIMEOUT))
++ goto set_release_timer;
++
++ /* don't leave incomplete A-MSDUs around */
++ for (i = (index + 1) % tid_agg_rx->buf_size; i != j;
++ i = (i + 1) % tid_agg_rx->buf_size)
++ __skb_queue_purge(&tid_agg_rx->reorder_buf[i]);
++
++ ht_dbg_ratelimited(sdata,
++ "release an RX reorder frame due to timeout on earlier frames\n");
++ ieee80211_release_reorder_frame(sdata, tid_agg_rx, j,
++ frames);
++
++ /*
++ * Increment the head seq# also for the skipped slots.
++ */
++ tid_agg_rx->head_seq_num =
++ (tid_agg_rx->head_seq_num +
++ skipped) & IEEE80211_SN_MASK;
++ skipped = 0;
++ }
++ } else while (ieee80211_rx_reorder_ready(
++ &tid_agg_rx->reorder_buf[index])) {
++ ieee80211_release_reorder_frame(sdata, tid_agg_rx, index,
++ frames);
++ index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
++ }
++
++ if (tid_agg_rx->stored_mpdu_num) {
++ j = index = tid_agg_rx->head_seq_num % tid_agg_rx->buf_size;
++
++ for (; j != (index - 1) % tid_agg_rx->buf_size;
++ j = (j + 1) % tid_agg_rx->buf_size) {
++ if (ieee80211_rx_reorder_ready(
++ &tid_agg_rx->reorder_buf[j]))
++ break;
++ }
++
++ set_release_timer:
++
++ mod_timer(&tid_agg_rx->reorder_timer,
++ tid_agg_rx->reorder_time[j] + 1 +
++ HT_RX_REORDER_BUF_TIMEOUT);
++ } else {
++ del_timer(&tid_agg_rx->reorder_timer);
++ }
++}
++
++/*
++ * As this function belongs to the RX path it must be under
++ * rcu_read_lock protection. It returns false if the frame
++ * can be processed immediately, true if it was consumed.
++ */
++static bool ieee80211_sta_manage_reorder_buf(struct ieee80211_sub_if_data *sdata,
++ struct tid_ampdu_rx *tid_agg_rx,
++ struct sk_buff *skb,
++ struct sk_buff_head *frames)
++{
++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
++ u16 sc = le16_to_cpu(hdr->seq_ctrl);
++ u16 mpdu_seq_num = (sc & IEEE80211_SCTL_SEQ) >> 4;
++ u16 head_seq_num, buf_size;
++ int index;
++ bool ret = true;
++
++ spin_lock(&tid_agg_rx->reorder_lock);
++
++ /*
++ * Offloaded BA sessions have no known starting sequence number so pick
++ * one from the first Rxed frame for this tid after BA was started.
++ */
++ if (unlikely(tid_agg_rx->auto_seq)) {
++ tid_agg_rx->auto_seq = false;
++ tid_agg_rx->ssn = mpdu_seq_num;
++ tid_agg_rx->head_seq_num = mpdu_seq_num;
++ }
++
++ buf_size = tid_agg_rx->buf_size;
++ head_seq_num = tid_agg_rx->head_seq_num;
++
++ /* frame with out of date sequence number */
++ if (ieee80211_sn_less(mpdu_seq_num, head_seq_num)) {
++ dev_kfree_skb(skb);
++ goto out;
++ }
++
++ /*
++ * If the frame's sequence number exceeds our buffering window
++ * size, release some previous frames to make room for this one.
++ */
++ if (!ieee80211_sn_less(mpdu_seq_num, head_seq_num + buf_size)) {
++ head_seq_num = ieee80211_sn_inc(
++ ieee80211_sn_sub(mpdu_seq_num, buf_size));
++ /* release stored frames up to new head to stack */
++ ieee80211_release_reorder_frames(sdata, tid_agg_rx,
++ head_seq_num, frames);
++ }
++
++ /* Now the new frame is always in the range of the reordering buffer */
++
++ index = mpdu_seq_num % tid_agg_rx->buf_size;
++
++ /* check if we already stored this frame */
++ if (ieee80211_rx_reorder_ready(&tid_agg_rx->reorder_buf[index])) {
++ dev_kfree_skb(skb);
++ goto out;
++ }
++
++ /*
++ * If the current MPDU is in the right order and nothing else
++ * is stored we can process it directly, no need to buffer it.
++ * If it is first but there's something stored, we may be able
++ * to release frames after this one.
++ */
++ if (mpdu_seq_num == tid_agg_rx->head_seq_num &&
++ tid_agg_rx->stored_mpdu_num == 0) {
++ if (!(status->flag & RX_FLAG_AMSDU_MORE))
++ tid_agg_rx->head_seq_num =
++ ieee80211_sn_inc(tid_agg_rx->head_seq_num);
++ ret = false;
++ goto out;
++ }
++
++ /* put the frame in the reordering buffer */
++ __skb_queue_tail(&tid_agg_rx->reorder_buf[index], skb);
++ if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
++ tid_agg_rx->reorder_time[index] = jiffies;
++ tid_agg_rx->stored_mpdu_num++;
++ ieee80211_sta_reorder_release(sdata, tid_agg_rx, frames);
++ }
++
++ out:
++ spin_unlock(&tid_agg_rx->reorder_lock);
++ return ret;
++}
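
Everything in the reorder path above rests on modulo-4096 sequence-number arithmetic: a frame counts as older when the 12-bit difference lands in the upper half of the space, and ring slots are found with seq % buf_size. A stand-alone sketch assuming the standard mac80211 helper definitions (SN space 0..4095):

#include <stdio.h>

#define SN_MASK 0xfff   /* 12-bit sequence number space, 0..4095 */

typedef unsigned short u16;

static u16 sn_inc(u16 sn)        { return (sn + 1) & SN_MASK; }
static u16 sn_sub(u16 a, u16 b)  { return (a - b) & SN_MASK; }

/* "a is older than b" across the 4095 -> 0 wrap-around */
static int sn_less(u16 a, u16 b)
{
    return ((a - b) & SN_MASK) > (SN_MASK >> 1);
}

int main(void)
{
    u16 head = 4094, buf_size = 64;

    printf("4094 < 2 (wrapped): %d\n", sn_less(4094, 2));   /* 1 */
    printf("2 < 4094: %d\n", sn_less(2, 4094));             /* 0 */
    printf("next head: %u\n", sn_inc(head));                /* 4095 */
    printf("distance 4094 -> 2: %u\n", sn_sub(2, 4094));    /* 4 */
    /* ring slot for an incoming sequence number */
    printf("slot for sn 70: %u\n", 70 % buf_size);          /* 6 */
    return 0;
}
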
++
++/*
++ * Reorder MPDUs from A-MPDUs, keeping them on a buffer. Returns
++ * true if the MPDU was buffered, false if it should be processed.
++ */
++static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
++ struct sk_buff_head *frames)
++{
++ struct sk_buff *skb = rx->skb;
++ struct ieee80211_local *local = rx->local;
++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
++ struct sta_info *sta = rx->sta;
++ struct tid_ampdu_rx *tid_agg_rx;
++ u16 sc;
++ u8 tid, ack_policy;
++
++ if (!ieee80211_is_data_qos(hdr->frame_control) ||
++ is_multicast_ether_addr(hdr->addr1))
++ goto dont_reorder;
++
++ /*
++ * filter the QoS data rx stream according to
++ * STA/TID and check if this STA/TID is on aggregation
++ */
++
++ if (!sta)
++ goto dont_reorder;
++
++ ack_policy = *ieee80211_get_qos_ctl(hdr) &
++ IEEE80211_QOS_CTL_ACK_POLICY_MASK;
++ tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
++
++ tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
++ if (!tid_agg_rx)
++ goto dont_reorder;
++
++ /* qos null data frames are excluded */
++ if (unlikely(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)))
++ goto dont_reorder;
++
++ /* not part of a BA session */
++ if (ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_BLOCKACK &&
++ ack_policy != IEEE80211_QOS_CTL_ACK_POLICY_NORMAL)
++ goto dont_reorder;
++
++ /* not actually part of this BA session */
++ if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
++ goto dont_reorder;
++
++ /* new, potentially un-ordered, ampdu frame - process it */
++
++ /* reset session timer */
++ if (tid_agg_rx->timeout)
++ tid_agg_rx->last_rx = jiffies;
++
++ /* if this mpdu is fragmented - terminate rx aggregation session */
++ sc = le16_to_cpu(hdr->seq_ctrl);
++ if (sc & IEEE80211_SCTL_FRAG) {
++ skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
++ skb_queue_tail(&rx->sdata->skb_queue, skb);
++ ieee80211_queue_work(&local->hw, &rx->sdata->work);
++ return;
++ }
++
++ /*
++ * No locking needed -- we will only ever process one
++ * RX packet at a time, and thus own tid_agg_rx. All
++ * other code manipulating it needs to (and does) make
++ * sure that we cannot get to it any more before doing
++ * anything with it.
++ */
++ if (ieee80211_sta_manage_reorder_buf(rx->sdata, tid_agg_rx, skb,
++ frames))
++ return;
++
++ dont_reorder:
++ __skb_queue_tail(frames, skb);
++}
++
++static ieee80211_rx_result debug_noinline
++ieee80211_rx_h_check(struct ieee80211_rx_data *rx)
++{
++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
++
++ /*
++ * Drop duplicate 802.11 retransmissions
++ * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery")
++ */
++ if (rx->skb->len >= 24 && rx->sta &&
++ !ieee80211_is_ctl(hdr->frame_control) &&
++ !ieee80211_is_qos_nullfunc(hdr->frame_control) &&
++ !is_multicast_ether_addr(hdr->addr1)) {
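++ /*
++ * seq_ctrl covers both the sequence and the fragment number and
++ * is compared in its raw little-endian form; a simple equality
++ * test needs no byte swap.
++ */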
++ if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
++ rx->sta->last_seq_ctrl[rx->seqno_idx] ==
++ hdr->seq_ctrl)) {
++ if (status->rx_flags & IEEE80211_RX_RA_MATCH) {
++ rx->local->dot11FrameDuplicateCount++;
++ rx->sta->num_duplicates++;
++ }
++ return RX_DROP_UNUSABLE;
++ } else if (!(status->flag & RX_FLAG_AMSDU_MORE)) {
++ rx->sta->last_seq_ctrl[rx->seqno_idx] = hdr->seq_ctrl;
++ }
++ }
++
++ if (unlikely(rx->skb->len < 16)) {
++ I802_DEBUG_INC(rx->local->rx_handlers_drop_short);
++ return RX_DROP_MONITOR;
++ }
++
++ /* Drop disallowed frame classes based on STA auth/assoc state;
++ * IEEE 802.11, Chap 5.5.
++ *
++ * mac80211 filters only based on association state, i.e. it drops
++ * Class 3 frames from stations that are not associated. hostapd sends
++ * deauth/disassoc frames when needed. In addition, hostapd is
++ * responsible for filtering on both auth and assoc states.
++ */
++
++ if (ieee80211_vif_is_mesh(&rx->sdata->vif))
++ return ieee80211_rx_mesh_check(rx);
++
++ if (unlikely((ieee80211_is_data(hdr->frame_control) ||
++ ieee80211_is_pspoll(hdr->frame_control)) &&
++ rx->sdata->vif.type != NL80211_IFTYPE_ADHOC &&
++ rx->sdata->vif.type != NL80211_IFTYPE_WDS &&
++ (!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_ASSOC)))) {
++ /*
++ * accept port control frames from the AP even when it's not
++ * yet marked ASSOC to prevent a race where we don't set the
++ * assoc bit quickly enough before it sends the first frame
++ */
++ if (rx->sta && rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
++ ieee80211_is_data_present(hdr->frame_control)) {
++ unsigned int hdrlen;
++ __be16 ethertype;
++
++ hdrlen = ieee80211_hdrlen(hdr->frame_control);
++
++ if (rx->skb->len < hdrlen + 8)
++ return RX_DROP_MONITOR;
++
++ skb_copy_bits(rx->skb, hdrlen + 6, &ethertype, 2);
++ if (ethertype == rx->sdata->control_port_protocol)
++ return RX_CONTINUE;
++ }
++
++ if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
++ cfg80211_rx_spurious_frame(rx->sdata->dev,
++ hdr->addr2,
++ GFP_ATOMIC))
++ return RX_DROP_UNUSABLE;
++
++ return RX_DROP_MONITOR;
++ }
++
++ return RX_CONTINUE;
++}
++
++
++static ieee80211_rx_result debug_noinline
++ieee80211_rx_h_check_more_data(struct ieee80211_rx_data *rx)
++{
++ struct ieee80211_local *local;
++ struct ieee80211_hdr *hdr;
++ struct sk_buff *skb;
++
++ local = rx->local;
++ skb = rx->skb;
++ hdr = (struct ieee80211_hdr *) skb->data;
++
++ if (!local->pspolling)
++ return RX_CONTINUE;
++
++ if (!ieee80211_has_fromds(hdr->frame_control))
++ /* this is not from AP */
++ return RX_CONTINUE;
++
++ if (!ieee80211_is_data(hdr->frame_control))
++ return RX_CONTINUE;
++
++ if (!ieee80211_has_moredata(hdr->frame_control)) {
++ /* AP has no more frames buffered for us */
++ local->pspolling = false;
++ return RX_CONTINUE;
++ }
++
++ /* more data bit is set, let's request a new frame from the AP */
++ ieee80211_send_pspoll(local, rx->sdata);
++
++ return RX_CONTINUE;
++}
++
++static void sta_ps_start(struct sta_info *sta)
++{
++ struct ieee80211_sub_if_data *sdata = sta->sdata;
++ struct ieee80211_local *local = sdata->local;
++ struct ps_data *ps;
++
++ if (sta->sdata->vif.type == NL80211_IFTYPE_AP ||
++ sta->sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
++ ps = &sdata->bss->ps;
++ else
++ return;
++
++ atomic_inc(&ps->num_sta_ps);
++ set_sta_flag(sta, WLAN_STA_PS_STA);
++ if (!(local->hw.flags & IEEE80211_HW_AP_LINK_PS))
++ drv_sta_notify(local, sdata, STA_NOTIFY_SLEEP, &sta->sta);
++ ps_dbg(sdata, "STA %pM aid %d enters power save mode\n",
++ sta->sta.addr, sta->sta.aid);
++}
++
++static void sta_ps_end(struct sta_info *sta)
++{
++ ps_dbg(sta->sdata, "STA %pM aid %d exits power save mode\n",
++ sta->sta.addr, sta->sta.aid);
++
++ if (test_sta_flag(sta, WLAN_STA_PS_DRIVER)) {
++ /*
++ * Clear the flag only if the other one is still set
++ * so that the TX path won't start TX'ing new frames
++ * directly ... In the case that the driver flag isn't
++ * set ieee80211_sta_ps_deliver_wakeup() will clear it.
++ */
++ clear_sta_flag(sta, WLAN_STA_PS_STA);
++ ps_dbg(sta->sdata, "STA %pM aid %d driver-ps-blocked\n",
++ sta->sta.addr, sta->sta.aid);
++ return;
++ }
++
++ set_sta_flag(sta, WLAN_STA_PS_DELIVER);
++ clear_sta_flag(sta, WLAN_STA_PS_STA);
++ ieee80211_sta_ps_deliver_wakeup(sta);
++}
++
++int ieee80211_sta_ps_transition(struct ieee80211_sta *sta, bool start)
++{
++ struct sta_info *sta_inf = container_of(sta, struct sta_info, sta);
++ bool in_ps;
++
++ WARN_ON(!(sta_inf->local->hw.flags & IEEE80211_HW_AP_LINK_PS));
++
++ /* Don't let the same PS state be set twice */
++ in_ps = test_sta_flag(sta_inf, WLAN_STA_PS_STA);
++ if ((start && in_ps) || (!start && !in_ps))
++ return -EINVAL;
++
++ if (start)
++ sta_ps_start(sta_inf);
++ else
++ sta_ps_end(sta_inf);
++
++ return 0;
++}
++EXPORT_SYMBOL(ieee80211_sta_ps_transition);
++
++static ieee80211_rx_result debug_noinline
++ieee80211_rx_h_uapsd_and_pspoll(struct ieee80211_rx_data *rx)
++{
++ struct ieee80211_sub_if_data *sdata = rx->sdata;
++ struct ieee80211_hdr *hdr = (void *)rx->skb->data;
++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
++ int tid, ac;
++
++ if (!rx->sta || !(status->rx_flags & IEEE80211_RX_RA_MATCH))
++ return RX_CONTINUE;
++
++ if (sdata->vif.type != NL80211_IFTYPE_AP &&
++ sdata->vif.type != NL80211_IFTYPE_AP_VLAN)
++ return RX_CONTINUE;
++
++ /*
++ * The device handles station powersave, so don't do anything about
++ * uAPSD and PS-Poll frames (the latter shouldn't even be passed
++ * up to mac80211 since the device already handles them.)
++ */
++ if (sdata->local->hw.flags & IEEE80211_HW_AP_LINK_PS)
++ return RX_CONTINUE;
++
++ /*
++ * Don't do anything if the station isn't already asleep. In
++ * the uAPSD case the station will probably be marked asleep;
++ * in the PS-Poll case the station must be confused ...
++ */
++ if (!test_sta_flag(rx->sta, WLAN_STA_PS_STA))
++ return RX_CONTINUE;
++
++ if (unlikely(ieee80211_is_pspoll(hdr->frame_control))) {
++ if (!test_sta_flag(rx->sta, WLAN_STA_SP)) {
++ if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
++ ieee80211_sta_ps_deliver_poll_response(rx->sta);
++ else
++ set_sta_flag(rx->sta, WLAN_STA_PSPOLL);
++ }
++
++ /* Free the PS-Poll skb here instead of returning RX_DROP, which
++ * would count as a dropped frame. */
++ dev_kfree_skb(rx->skb);
++
++ return RX_QUEUED;
++ } else if (!ieee80211_has_morefrags(hdr->frame_control) &&
++ !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
++ ieee80211_has_pm(hdr->frame_control) &&
++ (ieee80211_is_data_qos(hdr->frame_control) ||
++ ieee80211_is_qos_nullfunc(hdr->frame_control))) {
++ tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
++ ac = ieee802_1d_to_ac[tid & 7];
++
++ /*
++ * If this AC is not trigger-enabled do nothing.
++ *
++ * NB: This could/should check a separate bitmap of trigger-
++ * enabled queues, but for now we only implement uAPSD w/o
++ * TSPEC changes to the ACs, so they're always the same.
++ */
++ if (!(rx->sta->sta.uapsd_queues & BIT(ac)))
++ return RX_CONTINUE;
++
++ /* if we are in a service period, do nothing */
++ if (test_sta_flag(rx->sta, WLAN_STA_SP))
++ return RX_CONTINUE;
++
++ if (!test_sta_flag(rx->sta, WLAN_STA_PS_DRIVER))
++ ieee80211_sta_ps_deliver_uapsd(rx->sta);
++ else
++ set_sta_flag(rx->sta, WLAN_STA_UAPSD);
++ }
++
++ return RX_CONTINUE;
++}
++
++static ieee80211_rx_result debug_noinline
++ieee80211_rx_h_sta_process(struct ieee80211_rx_data *rx)
++{
++ struct sta_info *sta = rx->sta;
++ struct sk_buff *skb = rx->skb;
++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
++ int i;
++
++ if (!sta)
++ return RX_CONTINUE;
++
++ /*
++ * Update last_rx only for IBSS packets which are for the current
++ * BSSID and for station already AUTHORIZED to avoid keeping the
++ * current IBSS network alive in cases where other STAs start
++ * using different BSSID. This will also give the station another
++ * chance to restart the authentication/authorization in case
++ * something went wrong the first time.
++ */
++ if (rx->sdata->vif.type == NL80211_IFTYPE_ADHOC) {
++ u8 *bssid = ieee80211_get_bssid(hdr, rx->skb->len,
++ NL80211_IFTYPE_ADHOC);
++ if (ether_addr_equal(bssid, rx->sdata->u.ibss.bssid) &&
++ test_sta_flag(sta, WLAN_STA_AUTHORIZED)) {
++ sta->last_rx = jiffies;
++ if (ieee80211_is_data(hdr->frame_control) &&
++ !is_multicast_ether_addr(hdr->addr1)) {
++ sta->last_rx_rate_idx = status->rate_idx;
++ sta->last_rx_rate_flag = status->flag;
++ sta->last_rx_rate_vht_flag = status->vht_flag;
++ sta->last_rx_rate_vht_nss = status->vht_nss;
++ }
++ }
++ } else if (!is_multicast_ether_addr(hdr->addr1)) {
++ /*
++ * Mesh beacons will update last_rx if they are found to match
++ * the current local configuration when they are processed.
++ */
++ sta->last_rx = jiffies;
++ if (ieee80211_is_data(hdr->frame_control)) {
++ sta->last_rx_rate_idx = status->rate_idx;
++ sta->last_rx_rate_flag = status->flag;
++ sta->last_rx_rate_vht_flag = status->vht_flag;
++ sta->last_rx_rate_vht_nss = status->vht_nss;
++ }
++ }
++
++ if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
++ return RX_CONTINUE;
++
++ if (rx->sdata->vif.type == NL80211_IFTYPE_STATION)
++ ieee80211_sta_rx_notify(rx->sdata, hdr);
++
++ sta->rx_fragments++;
++ sta->rx_bytes += rx->skb->len;
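++ /*
++ * signal is in dBm and thus normally negative; store the negated
++ * value since the EWMA helpers average unsigned quantities
++ */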
++ if (!(status->flag & RX_FLAG_NO_SIGNAL_VAL)) {
++ sta->last_signal = status->signal;
++ ewma_add(&sta->avg_signal, -status->signal);
++ }
++
++ if (status->chains) {
++ sta->chains = status->chains;
++ for (i = 0; i < ARRAY_SIZE(status->chain_signal); i++) {
++ int signal = status->chain_signal[i];
++
++ if (!(status->chains & BIT(i)))
++ continue;
++
++ sta->chain_signal_last[i] = signal;
++ ewma_add(&sta->chain_signal_avg[i], -signal);
++ }
++ }
++
++ /*
++ * Change STA power saving mode only at the end of a frame
++ * exchange sequence.
++ */
++ if (!(sta->local->hw.flags & IEEE80211_HW_AP_LINK_PS) &&
++ !ieee80211_has_morefrags(hdr->frame_control) &&
++ !(status->rx_flags & IEEE80211_RX_DEFERRED_RELEASE) &&
++ (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
++ rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
++ /* PM bit is only checked in frames where it isn't reserved,
++ * in AP mode it's reserved in non-bufferable management frames
++ * (cf. IEEE 802.11-2012 8.2.4.1.7 Power Management field)
++ */
++ (!ieee80211_is_mgmt(hdr->frame_control) ||
++ ieee80211_is_bufferable_mmpdu(hdr->frame_control))) {
++ if (test_sta_flag(sta, WLAN_STA_PS_STA)) {
++ if (!ieee80211_has_pm(hdr->frame_control))
++ sta_ps_end(sta);
++ } else {
++ if (ieee80211_has_pm(hdr->frame_control))
++ sta_ps_start(sta);
++ }
++ }
++
++ /* mesh power save support */
++ if (ieee80211_vif_is_mesh(&rx->sdata->vif))
++ ieee80211_mps_rx_h_sta_process(sta, hdr);
++
++ /*
++ * Drop (qos-)data::nullfunc frames silently, since they
++ * are used only to control station power saving mode.
++ */
++ if (ieee80211_is_nullfunc(hdr->frame_control) ||
++ ieee80211_is_qos_nullfunc(hdr->frame_control)) {
++ I802_DEBUG_INC(rx->local->rx_handlers_drop_nullfunc);
++
++ /*
++ * If we receive a 4-addr nullfunc frame from a STA that
++ * has not yet been moved to a 4-addr STA VLAN, send the
++ * event to userspace; for older hostapd, also drop the
++ * frame to the monitor interface.
++ */
++ if (ieee80211_has_a4(hdr->frame_control) &&
++ (rx->sdata->vif.type == NL80211_IFTYPE_AP ||
++ (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
++ !rx->sdata->u.vlan.sta))) {
++ if (!test_and_set_sta_flag(sta, WLAN_STA_4ADDR_EVENT))
++ cfg80211_rx_unexpected_4addr_frame(
++ rx->sdata->dev, sta->sta.addr,
++ GFP_ATOMIC);
++ return RX_DROP_MONITOR;
++ }
++ /*
++ * Update counter and free packet here to avoid
++ * counting this as a dropped packet.
++ */
++ sta->rx_packets++;
++ dev_kfree_skb(rx->skb);
++ return RX_QUEUED;
++ }
++
++ return RX_CONTINUE;
++} /* ieee80211_rx_h_sta_process */
++
++static ieee80211_rx_result debug_noinline
++ieee80211_rx_h_decrypt(struct ieee80211_rx_data *rx)
++{
++ struct sk_buff *skb = rx->skb;
++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
++ int keyidx;
++ int hdrlen;
++ ieee80211_rx_result result = RX_DROP_UNUSABLE;
++ struct ieee80211_key *sta_ptk = NULL;
++ int mmie_keyidx = -1;
++ __le16 fc;
++ const struct ieee80211_cipher_scheme *cs = NULL;
++
++ /*
++ * Key selection 101
++ *
++ * There are four types of keys:
++ * - GTK (group keys)
++ * - IGTK (group keys for management frames)
++ * - PTK (pairwise keys)
++ * - STK (station-to-station pairwise keys)
++ *
++ * When selecting a key, we have to distinguish between multicast
++ * (including broadcast) and unicast frames, the latter can only
++ * use PTKs and STKs while the former always use GTKs and IGTKs.
++ * Unless, of course, actual WEP keys ("pre-RSNA") are used, then
++ * unicast frames can also use key indices like GTKs. Hence, if we
++ * don't have a PTK/STK we check the key index for a WEP key.
++ *
++ * Note that in a regular BSS, multicast frames are sent by the
++ * AP only, associated stations unicast the frame to the AP first
++ * which then multicasts it on their behalf.
++ *
++ * There is also a slight problem in IBSS mode: GTKs are negotiated
++ * with each station; that is something we don't currently handle.
++ * The spec seems to expect that one negotiates the same key with
++ * every station but there's no such requirement; VLANs could be
++ * possible.
++ */
++
++ /*
++ * No point in finding a key and decrypting if the frame is neither
++ * addressed to us nor a multicast frame.
++ */
++ if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
++ return RX_CONTINUE;
++
++ /* start without a key */
++ rx->key = NULL;
++ fc = hdr->frame_control;
++
++ if (rx->sta) {
++ int keyid = rx->sta->ptk_idx;
++
++ if (ieee80211_has_protected(fc) && rx->sta->cipher_scheme) {
++ cs = rx->sta->cipher_scheme;
++ keyid = iwl80211_get_cs_keyid(cs, rx->skb);
++ if (unlikely(keyid < 0))
++ return RX_DROP_UNUSABLE;
++ }
++ sta_ptk = rcu_dereference(rx->sta->ptk[keyid]);
++ }
++
++ if (!ieee80211_has_protected(fc))
++ mmie_keyidx = ieee80211_get_mmie_keyidx(rx->skb);
++
++ if (!is_multicast_ether_addr(hdr->addr1) && sta_ptk) {
++ rx->key = sta_ptk;
++ if ((status->flag & RX_FLAG_DECRYPTED) &&
++ (status->flag & RX_FLAG_IV_STRIPPED))
++ return RX_CONTINUE;
++ /* Skip decryption if the frame is not protected. */
++ if (!ieee80211_has_protected(fc))
++ return RX_CONTINUE;
++ } else if (mmie_keyidx >= 0) {
++ /* Broadcast/multicast robust management frame / BIP */
++ if ((status->flag & RX_FLAG_DECRYPTED) &&
++ (status->flag & RX_FLAG_IV_STRIPPED))
++ return RX_CONTINUE;
++
++ if (mmie_keyidx < NUM_DEFAULT_KEYS ||
++ mmie_keyidx >= NUM_DEFAULT_KEYS + NUM_DEFAULT_MGMT_KEYS)
++ return RX_DROP_MONITOR; /* unexpected BIP keyidx */
++ if (rx->sta)
++ rx->key = rcu_dereference(rx->sta->gtk[mmie_keyidx]);
++ if (!rx->key)
++ rx->key = rcu_dereference(rx->sdata->keys[mmie_keyidx]);
++ } else if (!ieee80211_has_protected(fc)) {
++ /*
++ * The frame was not protected, so skip decryption. However, we
++ * need to set rx->key if there is a key that could have been
++ * used so that the frame may be dropped if encryption would
++ * have been expected.
++ */
++ struct ieee80211_key *key = NULL;
++ struct ieee80211_sub_if_data *sdata = rx->sdata;
++ int i;
++
++ if (ieee80211_is_mgmt(fc) &&
++ is_multicast_ether_addr(hdr->addr1) &&
++ (key = rcu_dereference(rx->sdata->default_mgmt_key)))
++ rx->key = key;
++ else {
++ if (rx->sta) {
++ for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
++ key = rcu_dereference(rx->sta->gtk[i]);
++ if (key)
++ break;
++ }
++ }
++ if (!key) {
++ for (i = 0; i < NUM_DEFAULT_KEYS; i++) {
++ key = rcu_dereference(sdata->keys[i]);
++ if (key)
++ break;
++ }
++ }
++ if (key)
++ rx->key = key;
++ }
++ return RX_CONTINUE;
++ } else {
++ u8 keyid;
++
++ /*
++ * The device doesn't give us the IV so we won't be
++ * able to look up the key. That's ok though, we
++ * don't need to decrypt the frame, we just won't
++ * be able to keep statistics accurate.
++ * Except for key threshold notifications: should we
++ * somehow allow the driver to tell us which key the
++ * hardware used if this flag is set?
++ */
++ if ((status->flag & RX_FLAG_DECRYPTED) &&
++ (status->flag & RX_FLAG_IV_STRIPPED))
++ return RX_CONTINUE;
++
++ hdrlen = ieee80211_hdrlen(fc);
++
++ if (cs) {
++ keyidx = iwl80211_get_cs_keyid(cs, rx->skb);
++
++ if (unlikely(keyidx < 0))
++ return RX_DROP_UNUSABLE;
++ } else {
++ if (rx->skb->len < 8 + hdrlen)
++ return RX_DROP_UNUSABLE; /* TODO: count this? */
++ /*
++ * no need to call ieee80211_wep_get_keyidx,
++ * it verifies a bunch of things we've done already
++ */
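++ /*
++ * the key index occupies the two most significant bits of
++ * the last IV byte (at offset hdrlen + 3), hence the shift
++ */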
++ skb_copy_bits(rx->skb, hdrlen + 3, &keyid, 1);
++ keyidx = keyid >> 6;
++ }
++
++ /* check per-station GTK first, if multicast packet */
++ if (is_multicast_ether_addr(hdr->addr1) && rx->sta)
++ rx->key = rcu_dereference(rx->sta->gtk[keyidx]);
++
++ /* if not found, try default key */
++ if (!rx->key) {
++ rx->key = rcu_dereference(rx->sdata->keys[keyidx]);
++
++ /*
++ * RSNA-protected unicast frames should always be
++ * sent with pairwise or station-to-station keys,
++ * but for WEP we allow using a key index as well.
++ */
++ if (rx->key &&
++ rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP40 &&
++ rx->key->conf.cipher != WLAN_CIPHER_SUITE_WEP104 &&
++ !is_multicast_ether_addr(hdr->addr1))
++ rx->key = NULL;
++ }
++ }
++
++ if (rx->key) {
++ if (unlikely(rx->key->flags & KEY_FLAG_TAINTED))
++ return RX_DROP_MONITOR;
++
++ rx->key->tx_rx_count++;
++ /* TODO: add threshold stuff again */
++ } else {
++ return RX_DROP_MONITOR;
++ }
++
++ switch (rx->key->conf.cipher) {
++ case WLAN_CIPHER_SUITE_WEP40:
++ case WLAN_CIPHER_SUITE_WEP104:
++ result = ieee80211_crypto_wep_decrypt(rx);
++ break;
++ case WLAN_CIPHER_SUITE_TKIP:
++ result = ieee80211_crypto_tkip_decrypt(rx);
++ break;
++ case WLAN_CIPHER_SUITE_CCMP:
++ result = ieee80211_crypto_ccmp_decrypt(rx);
++ break;
++ case WLAN_CIPHER_SUITE_AES_CMAC:
++ result = ieee80211_crypto_aes_cmac_decrypt(rx);
++ break;
++ default:
++ result = ieee80211_crypto_hw_decrypt(rx);
++ }
++
++ /* the hdr variable is invalid after the decrypt handlers */
++
++ /* either the frame has been decrypted or will be dropped */
++ status->flag |= RX_FLAG_DECRYPTED;
++
++ return result;
++}
++
++static inline struct ieee80211_fragment_entry *
++ieee80211_reassemble_add(struct ieee80211_sub_if_data *sdata,
++ unsigned int frag, unsigned int seq, int rx_queue,
++ struct sk_buff **skb)
++{
++ struct ieee80211_fragment_entry *entry;
++
++ entry = &sdata->fragments[sdata->fragment_next++];
++ if (sdata->fragment_next >= IEEE80211_FRAGMENT_MAX)
++ sdata->fragment_next = 0;
++
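++ /*
++ * The fragment cache is a small ring buffer: claiming the next
++ * slot (and purging whatever it still holds) implicitly expires
++ * the oldest pending reassembly.
++ */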
++ if (!skb_queue_empty(&entry->skb_list))
++ __skb_queue_purge(&entry->skb_list);
++
++ __skb_queue_tail(&entry->skb_list, *skb); /* no need for locking */
++ *skb = NULL;
++ entry->first_frag_time = jiffies;
++ entry->seq = seq;
++ entry->rx_queue = rx_queue;
++ entry->last_frag = frag;
++ entry->ccmp = 0;
++ entry->extra_len = 0;
++
++ return entry;
++}
++
++static inline struct ieee80211_fragment_entry *
++ieee80211_reassemble_find(struct ieee80211_sub_if_data *sdata,
++ unsigned int frag, unsigned int seq,
++ int rx_queue, struct ieee80211_hdr *hdr)
++{
++ struct ieee80211_fragment_entry *entry;
++ int i, idx;
++
++ idx = sdata->fragment_next;
++ for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) {
++ struct ieee80211_hdr *f_hdr;
++
++ idx--;
++ if (idx < 0)
++ idx = IEEE80211_FRAGMENT_MAX - 1;
++
++ entry = &sdata->fragments[idx];
++ if (skb_queue_empty(&entry->skb_list) || entry->seq != seq ||
++ entry->rx_queue != rx_queue ||
++ entry->last_frag + 1 != frag)
++ continue;
++
++ f_hdr = (struct ieee80211_hdr *)entry->skb_list.next->data;
++
++ /*
++ * Check ftype and addresses are equal, else check next fragment
++ */
++ if (((hdr->frame_control ^ f_hdr->frame_control) &
++ cpu_to_le16(IEEE80211_FCTL_FTYPE)) ||
++ !ether_addr_equal(hdr->addr1, f_hdr->addr1) ||
++ !ether_addr_equal(hdr->addr2, f_hdr->addr2))
++ continue;
++
++ if (time_after(jiffies, entry->first_frag_time + 2 * HZ)) {
++ __skb_queue_purge(&entry->skb_list);
++ continue;
++ }
++ return entry;
++ }
++
++ return NULL;
++}
++
++static ieee80211_rx_result debug_noinline
++ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
++{
++ struct ieee80211_hdr *hdr;
++ u16 sc;
++ __le16 fc;
++ unsigned int frag, seq;
++ struct ieee80211_fragment_entry *entry;
++ struct sk_buff *skb;
++ struct ieee80211_rx_status *status;
++
++ hdr = (struct ieee80211_hdr *)rx->skb->data;
++ fc = hdr->frame_control;
++
++ if (ieee80211_is_ctl(fc))
++ return RX_CONTINUE;
++
++ sc = le16_to_cpu(hdr->seq_ctrl);
++ frag = sc & IEEE80211_SCTL_FRAG;
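++ /* the low four bits of the sequence control field carry the
++ * fragment number, the upper twelve the sequence number */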
++
++ if (is_multicast_ether_addr(hdr->addr1)) {
++ rx->local->dot11MulticastReceivedFrameCount++;
++ goto out_no_led;
++ }
++
++ if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
++ goto out;
++
++ I802_DEBUG_INC(rx->local->rx_handlers_fragments);
++
++ if (skb_linearize(rx->skb))
++ return RX_DROP_UNUSABLE;
++
++ /*
++ * skb_linearize() might change the skb->data and
++ * previously cached variables (in this case, hdr) need to
++ * be refreshed with the new data.
++ */
++ hdr = (struct ieee80211_hdr *)rx->skb->data;
++ seq = (sc & IEEE80211_SCTL_SEQ) >> 4;
++
++ if (frag == 0) {
++ /* This is the first fragment of a new frame. */
++ entry = ieee80211_reassemble_add(rx->sdata, frag, seq,
++ rx->seqno_idx, &(rx->skb));
++ if (rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_CCMP &&
++ ieee80211_has_protected(fc)) {
++ int queue = rx->security_idx;
++ /* Store CCMP PN so that we can verify that the next
++ * fragment has a sequential PN value. */
++ entry->ccmp = 1;
++ memcpy(entry->last_pn,
++ rx->key->u.ccmp.rx_pn[queue],
++ IEEE80211_CCMP_PN_LEN);
++ }
++ return RX_QUEUED;
++ }
++
++ /* This is a fragment for a frame that should already be pending in
++ * fragment cache. Add this fragment to the end of the pending entry.
++ */
++ entry = ieee80211_reassemble_find(rx->sdata, frag, seq,
++ rx->seqno_idx, hdr);
++ if (!entry) {
++ I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
++ return RX_DROP_MONITOR;
++ }
++
++ /* Verify that MPDUs within one MSDU have sequential PN values.
++ * (IEEE 802.11i, 8.3.3.4.5) */
++ if (entry->ccmp) {
++ int i;
++ u8 pn[IEEE80211_CCMP_PN_LEN], *rpn;
++ int queue;
++ if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_CCMP)
++ return RX_DROP_UNUSABLE;
++ memcpy(pn, entry->last_pn, IEEE80211_CCMP_PN_LEN);
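++ /*
++ * Increment the stored 48-bit packet number by one, carrying
++ * from the least significant byte upwards; the fragment's PN
++ * must match this expected value exactly.
++ */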
++ for (i = IEEE80211_CCMP_PN_LEN - 1; i >= 0; i--) {
++ pn[i]++;
++ if (pn[i])
++ break;
++ }
++ queue = rx->security_idx;
++ rpn = rx->key->u.ccmp.rx_pn[queue];
++ if (memcmp(pn, rpn, IEEE80211_CCMP_PN_LEN))
++ return RX_DROP_UNUSABLE;
++ memcpy(entry->last_pn, pn, IEEE80211_CCMP_PN_LEN);
++ }
++
++ skb_pull(rx->skb, ieee80211_hdrlen(fc));
++ __skb_queue_tail(&entry->skb_list, rx->skb);
++ entry->last_frag = frag;
++ entry->extra_len += rx->skb->len;
++ if (ieee80211_has_morefrags(fc)) {
++ rx->skb = NULL;
++ return RX_QUEUED;
++ }
++
++ rx->skb = __skb_dequeue(&entry->skb_list);
++ if (skb_tailroom(rx->skb) < entry->extra_len) {
++ I802_DEBUG_INC(rx->local->rx_expand_skb_head2);
++ if (unlikely(pskb_expand_head(rx->skb, 0, entry->extra_len,
++ GFP_ATOMIC))) {
++ I802_DEBUG_INC(rx->local->rx_handlers_drop_defrag);
++ __skb_queue_purge(&entry->skb_list);
++ return RX_DROP_UNUSABLE;
++ }
++ }
++ while ((skb = __skb_dequeue(&entry->skb_list))) {
++ memcpy(skb_put(rx->skb, skb->len), skb->data, skb->len);
++ dev_kfree_skb(skb);
++ }
++
++ /* Complete frame has been reassembled - process it now */
++ status = IEEE80211_SKB_RXCB(rx->skb);
++ status->rx_flags |= IEEE80211_RX_FRAGMENTED;
++
++ out:
++ ieee80211_led_rx(rx->local);
++ out_no_led:
++ if (rx->sta)
++ rx->sta->rx_packets++;
++ return RX_CONTINUE;
++}
++
++static int ieee80211_802_1x_port_control(struct ieee80211_rx_data *rx)
++{
++ if (unlikely(!rx->sta || !test_sta_flag(rx->sta, WLAN_STA_AUTHORIZED)))
++ return -EACCES;
++
++ return 0;
++}
++
++static int ieee80211_drop_unencrypted(struct ieee80211_rx_data *rx, __le16 fc)
++{
++ struct sk_buff *skb = rx->skb;
++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
++
++ /*
++ * Pass through unencrypted frames if the hardware has
++ * decrypted them already.
++ */
++ if (status->flag & RX_FLAG_DECRYPTED)
++ return 0;
++
++ /* Drop unencrypted frames if key is set. */
++ if (unlikely(!ieee80211_has_protected(fc) &&
++ !ieee80211_is_nullfunc(fc) &&
++ ieee80211_is_data(fc) &&
++ (rx->key || rx->sdata->drop_unencrypted)))
++ return -EACCES;
++
++ return 0;
++}
++
++static int ieee80211_drop_unencrypted_mgmt(struct ieee80211_rx_data *rx)
++{
++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
++ __le16 fc = hdr->frame_control;
++
++ /*
++ * Pass through unencrypted frames if the hardware has
++ * decrypted them already.
++ */
++ if (status->flag & RX_FLAG_DECRYPTED)
++ return 0;
++
++ if (rx->sta && test_sta_flag(rx->sta, WLAN_STA_MFP)) {
++ if (unlikely(!ieee80211_has_protected(fc) &&
++ ieee80211_is_unicast_robust_mgmt_frame(rx->skb) &&
++ rx->key)) {
++ if (ieee80211_is_deauth(fc) ||
++ ieee80211_is_disassoc(fc))
++ cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
++ rx->skb->data,
++ rx->skb->len);
++ return -EACCES;
++ }
++ /* BIP does not use Protected field, so need to check MMIE */
++ if (unlikely(ieee80211_is_multicast_robust_mgmt_frame(rx->skb) &&
++ ieee80211_get_mmie_keyidx(rx->skb) < 0)) {
++ if (ieee80211_is_deauth(fc) ||
++ ieee80211_is_disassoc(fc))
++ cfg80211_rx_unprot_mlme_mgmt(rx->sdata->dev,
++ rx->skb->data,
++ rx->skb->len);
++ return -EACCES;
++ }
++ /*
++ * When using MFP, Action frames are not allowed prior to
++ * having configured keys.
++ */
++ if (unlikely(ieee80211_is_action(fc) && !rx->key &&
++ ieee80211_is_robust_mgmt_frame(rx->skb)))
++ return -EACCES;
++ }
++
++ return 0;
++}
++
++static int
++__ieee80211_data_to_8023(struct ieee80211_rx_data *rx, bool *port_control)
++{
++ struct ieee80211_sub_if_data *sdata = rx->sdata;
++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
++ bool check_port_control = false;
++ struct ethhdr *ehdr;
++ int ret;
++
++ *port_control = false;
++ if (ieee80211_has_a4(hdr->frame_control) &&
++ sdata->vif.type == NL80211_IFTYPE_AP_VLAN && !sdata->u.vlan.sta)
++ return -1;
++
++ if (sdata->vif.type == NL80211_IFTYPE_STATION &&
++ !!sdata->u.mgd.use_4addr != !!ieee80211_has_a4(hdr->frame_control)) {
++
++ if (!sdata->u.mgd.use_4addr)
++ return -1;
++ else
++ check_port_control = true;
++ }
++
++ if (is_multicast_ether_addr(hdr->addr1) &&
++ sdata->vif.type == NL80211_IFTYPE_AP_VLAN && sdata->u.vlan.sta)
++ return -1;
++
++ ret = ieee80211_data_to_8023(rx->skb, sdata->vif.addr, sdata->vif.type);
++ if (ret < 0)
++ return ret;
++
++ ehdr = (struct ethhdr *) rx->skb->data;
++ if (ehdr->h_proto == rx->sdata->control_port_protocol)
++ *port_control = true;
++ else if (check_port_control)
++ return -1;
++
++ return 0;
++}
++
++/*
++ * requires that rx->skb is a frame with an ethernet header
++ */
++static bool ieee80211_frame_allowed(struct ieee80211_rx_data *rx, __le16 fc)
++{
++ static const u8 pae_group_addr[ETH_ALEN] __aligned(2)
++ = { 0x01, 0x80, 0xC2, 0x00, 0x00, 0x03 };
++ struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
++
++ /*
++ * Allow EAPOL frames to us/the PAE group address regardless
++ * of whether the frame was encrypted or not.
++ */
++ if (ehdr->h_proto == rx->sdata->control_port_protocol &&
++ (ether_addr_equal(ehdr->h_dest, rx->sdata->vif.addr) ||
++ ether_addr_equal(ehdr->h_dest, pae_group_addr)))
++ return true;
++
++ if (ieee80211_802_1x_port_control(rx) ||
++ ieee80211_drop_unencrypted(rx, fc))
++ return false;
++
++ return true;
++}
++
++/*
++ * requires that rx->skb is a frame with an ethernet header
++ */
++static void
++ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
++{
++ struct ieee80211_sub_if_data *sdata = rx->sdata;
++ struct net_device *dev = sdata->dev;
++ struct sk_buff *skb, *xmit_skb;
++ struct ethhdr *ehdr = (struct ethhdr *) rx->skb->data;
++ struct sta_info *dsta;
++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
++
++ skb = rx->skb;
++ xmit_skb = NULL;
++
++ if ((sdata->vif.type == NL80211_IFTYPE_AP ||
++ sdata->vif.type == NL80211_IFTYPE_AP_VLAN) &&
++ !(sdata->flags & IEEE80211_SDATA_DONT_BRIDGE_PACKETS) &&
++ (status->rx_flags & IEEE80211_RX_RA_MATCH) &&
++ (sdata->vif.type != NL80211_IFTYPE_AP_VLAN || !sdata->u.vlan.sta)) {
++ if (is_multicast_ether_addr(ehdr->h_dest)) {
++ /*
++ * send multicast frames both to higher layers in
++ * local net stack and back to the wireless medium
++ */
++ xmit_skb = skb_copy(skb, GFP_ATOMIC);
++ if (!xmit_skb)
++ net_info_ratelimited("%s: failed to clone multicast frame\n",
++ dev->name);
++ } else {
++ dsta = sta_info_get(sdata, skb->data);
++ if (dsta) {
++ /*
++ * The destination station is associated to
++ * this AP (in this VLAN), so send the frame
++ * directly to it and do not pass it to local
++ * net stack.
++ */
++ xmit_skb = skb;
++ skb = NULL;
++ }
++ }
++ }
++
++#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
++ if (skb) {
++ /* 'align' will only take the values 0 or 2 here since all
++ * frames are required to be aligned to 2-byte boundaries
++ * when being passed to mac80211; the code here works just
++ * as well if that isn't true, but mac80211 assumes it can
++ * access fields as 2-byte aligned (e.g. for ether_addr_equal)
++ */
++ int align;
++
++ align = (unsigned long)(skb->data + sizeof(struct ethhdr)) & 3;
++ if (align) {
++ if (WARN_ON(skb_headroom(skb) < 3)) {
++ dev_kfree_skb(skb);
++ skb = NULL;
++ } else {
++ u8 *data = skb->data;
++ size_t len = skb_headlen(skb);
++ skb->data -= align;
++ memmove(skb->data, data, len);
++ skb_set_tail_pointer(skb, len);
++ }
++ }
++ }
++#endif
++
++ if (skb) {
++ /* deliver to local stack */
++ skb->protocol = eth_type_trans(skb, dev);
++ memset(skb->cb, 0, sizeof(skb->cb));
++ if (rx->local->napi)
++ napi_gro_receive(rx->local->napi, skb);
++ else
++ netif_receive_skb(skb);
++ }
++
++ if (xmit_skb) {
++ /*
++ * Send to wireless media and increase priority by 256 to
++ * keep the received priority instead of reclassifying
++ * the frame (see cfg80211_classify8021d).
++ */
++ xmit_skb->priority += 256;
++ xmit_skb->protocol = htons(ETH_P_802_3);
++ skb_reset_network_header(xmit_skb);
++ skb_reset_mac_header(xmit_skb);
++ dev_queue_xmit(xmit_skb);
++ }
++}
++
++static ieee80211_rx_result debug_noinline
++ieee80211_rx_h_amsdu(struct ieee80211_rx_data *rx)
++{
++ struct net_device *dev = rx->sdata->dev;
++ struct sk_buff *skb = rx->skb;
++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
++ __le16 fc = hdr->frame_control;
++ struct sk_buff_head frame_list;
++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
++
++ if (unlikely(!ieee80211_is_data(fc)))
++ return RX_CONTINUE;
++
++ if (unlikely(!ieee80211_is_data_present(fc)))
++ return RX_DROP_MONITOR;
++
++ if (!(status->rx_flags & IEEE80211_RX_AMSDU))
++ return RX_CONTINUE;
++
++ if (ieee80211_has_a4(hdr->frame_control) &&
++ rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
++ !rx->sdata->u.vlan.sta)
++ return RX_DROP_UNUSABLE;
++
++ if (is_multicast_ether_addr(hdr->addr1) &&
++ ((rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
++ rx->sdata->u.vlan.sta) ||
++ (rx->sdata->vif.type == NL80211_IFTYPE_STATION &&
++ rx->sdata->u.mgd.use_4addr)))
++ return RX_DROP_UNUSABLE;
++
++ skb->dev = dev;
++ __skb_queue_head_init(&frame_list);
++
++ if (skb_linearize(skb))
++ return RX_DROP_UNUSABLE;
++
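++ /* split the A-MSDU into individual 802.3 subframes on frame_list */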
++ ieee80211_amsdu_to_8023s(skb, &frame_list, dev->dev_addr,
++ rx->sdata->vif.type,
++ rx->local->hw.extra_tx_headroom, true);
++
++ while (!skb_queue_empty(&frame_list)) {
++ rx->skb = __skb_dequeue(&frame_list);
++
++ if (!ieee80211_frame_allowed(rx, fc)) {
++ dev_kfree_skb(rx->skb);
++ continue;
++ }
++ dev->stats.rx_packets++;
++ dev->stats.rx_bytes += rx->skb->len;
++
++ ieee80211_deliver_skb(rx);
++ }
++
++ return RX_QUEUED;
++}
++
++#ifdef CONFIG_MAC80211_MESH
++static ieee80211_rx_result
++ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
++{
++ struct ieee80211_hdr *fwd_hdr, *hdr;
++ struct ieee80211_tx_info *info;
++ struct ieee80211s_hdr *mesh_hdr;
++ struct sk_buff *skb = rx->skb, *fwd_skb;
++ struct ieee80211_local *local = rx->local;
++ struct ieee80211_sub_if_data *sdata = rx->sdata;
++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
++ struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
++ u16 q, hdrlen;
++
++ hdr = (struct ieee80211_hdr *) skb->data;
++ hdrlen = ieee80211_hdrlen(hdr->frame_control);
++
++ /* make sure fixed part of mesh header is there, also checks skb len */
++ if (!pskb_may_pull(rx->skb, hdrlen + 6))
++ return RX_DROP_MONITOR;
++
++ mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
++
++ /* make sure full mesh header is there, also checks skb len */
++ if (!pskb_may_pull(rx->skb,
++ hdrlen + ieee80211_get_mesh_hdrlen(mesh_hdr)))
++ return RX_DROP_MONITOR;
++
++ /* reload pointers */
++ hdr = (struct ieee80211_hdr *) skb->data;
++ mesh_hdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
++
++ if (ieee80211_drop_unencrypted(rx, hdr->frame_control))
++ return RX_DROP_MONITOR;
++
++ /* frame is in RMC, don't forward */
++ if (ieee80211_is_data(hdr->frame_control) &&
++ is_multicast_ether_addr(hdr->addr1) &&
++ mesh_rmc_check(rx->sdata, hdr->addr3, mesh_hdr))
++ return RX_DROP_MONITOR;
++
++ if (!ieee80211_is_data(hdr->frame_control) ||
++ !(status->rx_flags & IEEE80211_RX_RA_MATCH))
++ return RX_CONTINUE;
++
++ if (!mesh_hdr->ttl)
++ return RX_DROP_MONITOR;
++
++ if (mesh_hdr->flags & MESH_FLAGS_AE) {
++ struct mesh_path *mppath;
++ char *proxied_addr;
++ char *mpp_addr;
++
++ if (is_multicast_ether_addr(hdr->addr1)) {
++ mpp_addr = hdr->addr3;
++ proxied_addr = mesh_hdr->eaddr1;
++ } else if (mesh_hdr->flags & MESH_FLAGS_AE_A5_A6) {
++ /* has_a4 already checked in ieee80211_rx_mesh_check */
++ mpp_addr = hdr->addr4;
++ proxied_addr = mesh_hdr->eaddr2;
++ } else {
++ return RX_DROP_MONITOR;
++ }
++
++ rcu_read_lock();
++ mppath = mpp_path_lookup(sdata, proxied_addr);
++ if (!mppath) {
++ mpp_path_add(sdata, proxied_addr, mpp_addr);
++ } else {
++ spin_lock_bh(&mppath->state_lock);
++ if (!ether_addr_equal(mppath->mpp, mpp_addr))
++ memcpy(mppath->mpp, mpp_addr, ETH_ALEN);
++ spin_unlock_bh(&mppath->state_lock);
++ }
++ rcu_read_unlock();
++ }
++
++ /* Frame has reached destination. Don't forward */
++ if (!is_multicast_ether_addr(hdr->addr1) &&
++ ether_addr_equal(sdata->vif.addr, hdr->addr3))
++ return RX_CONTINUE;
++
++ q = ieee80211_select_queue_80211(sdata, skb, hdr);
++ if (ieee80211_queue_stopped(&local->hw, q)) {
++ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_congestion);
++ return RX_DROP_MONITOR;
++ }
++ skb_set_queue_mapping(skb, q);
++
++ if (!--mesh_hdr->ttl) {
++ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_ttl);
++ goto out;
++ }
++
++ if (!ifmsh->mshcfg.dot11MeshForwarding)
++ goto out;
++
++ fwd_skb = skb_copy(skb, GFP_ATOMIC);
++ if (!fwd_skb) {
++ net_info_ratelimited("%s: failed to clone mesh frame\n",
++ sdata->name);
++ goto out;
++ }
++
++ fwd_hdr = (struct ieee80211_hdr *) fwd_skb->data;
++ fwd_hdr->frame_control &= ~cpu_to_le16(IEEE80211_FCTL_RETRY);
++ info = IEEE80211_SKB_CB(fwd_skb);
++ memset(info, 0, sizeof(*info));
++ info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
++ info->control.vif = &rx->sdata->vif;
++ info->control.jiffies = jiffies;
++ if (is_multicast_ether_addr(fwd_hdr->addr1)) {
++ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_mcast);
++ memcpy(fwd_hdr->addr2, sdata->vif.addr, ETH_ALEN);
++ /* update power mode indication when forwarding */
++ ieee80211_mps_set_frame_flags(sdata, NULL, fwd_hdr);
++ } else if (!mesh_nexthop_lookup(sdata, fwd_skb)) {
++ /* mesh power mode flags updated in mesh_nexthop_lookup */
++ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_unicast);
++ } else {
++ /* unable to resolve next hop */
++ mesh_path_error_tx(sdata, ifmsh->mshcfg.element_ttl,
++ fwd_hdr->addr3, 0,
++ WLAN_REASON_MESH_PATH_NOFORWARD,
++ fwd_hdr->addr2);
++ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, dropped_frames_no_route);
++ kfree_skb(fwd_skb);
++ return RX_DROP_MONITOR;
++ }
++
++ IEEE80211_IFSTA_MESH_CTR_INC(ifmsh, fwded_frames);
++ ieee80211_add_pending_skb(local, fwd_skb);
++ out:
++ if (is_multicast_ether_addr(hdr->addr1) ||
++ sdata->dev->flags & IFF_PROMISC)
++ return RX_CONTINUE;
++ else
++ return RX_DROP_MONITOR;
++}
++#endif
++
++static ieee80211_rx_result debug_noinline
++ieee80211_rx_h_data(struct ieee80211_rx_data *rx)
++{
++ struct ieee80211_sub_if_data *sdata = rx->sdata;
++ struct ieee80211_local *local = rx->local;
++ struct net_device *dev = sdata->dev;
++ struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data;
++ __le16 fc = hdr->frame_control;
++ bool port_control;
++ int err;
++
++ if (unlikely(!ieee80211_is_data(hdr->frame_control)))
++ return RX_CONTINUE;
++
++ if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
++ return RX_DROP_MONITOR;
++
++ /*
++ * Send unexpected-4addr-frame event to hostapd. For older versions,
++ * also drop the frame to cooked monitor interfaces.
++ */
++ if (ieee80211_has_a4(hdr->frame_control) &&
++ sdata->vif.type == NL80211_IFTYPE_AP) {
++ if (rx->sta &&
++ !test_and_set_sta_flag(rx->sta, WLAN_STA_4ADDR_EVENT))
++ cfg80211_rx_unexpected_4addr_frame(
++ rx->sdata->dev, rx->sta->sta.addr, GFP_ATOMIC);
++ return RX_DROP_MONITOR;
++ }
++
++ err = __ieee80211_data_to_8023(rx, &port_control);
++ if (unlikely(err))
++ return RX_DROP_UNUSABLE;
++
++ if (!ieee80211_frame_allowed(rx, fc))
++ return RX_DROP_MONITOR;
++
++ if (rx->sdata->vif.type == NL80211_IFTYPE_AP_VLAN &&
++ unlikely(port_control) && sdata->bss) {
++ sdata = container_of(sdata->bss, struct ieee80211_sub_if_data,
++ u.ap);
++ dev = sdata->dev;
++ rx->sdata = sdata;
++ }
++
++ rx->skb->dev = dev;
++
++ dev->stats.rx_packets++;
++ dev->stats.rx_bytes += rx->skb->len;
++
++ if (local->ps_sdata && local->hw.conf.dynamic_ps_timeout > 0 &&
++ !is_multicast_ether_addr(
++ ((struct ethhdr *)rx->skb->data)->h_dest) &&
++ (!local->scanning &&
++ !test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state))) {
++ mod_timer(&local->dynamic_ps_timer, jiffies +
++ msecs_to_jiffies(local->hw.conf.dynamic_ps_timeout));
++ }
++
++ ieee80211_deliver_skb(rx);
++
++ return RX_QUEUED;
++}
++
++static ieee80211_rx_result debug_noinline
++ieee80211_rx_h_ctrl(struct ieee80211_rx_data *rx, struct sk_buff_head *frames)
++{
++ struct sk_buff *skb = rx->skb;
++ struct ieee80211_bar *bar = (struct ieee80211_bar *)skb->data;
++ struct tid_ampdu_rx *tid_agg_rx;
++ u16 start_seq_num;
++ u16 tid;
++
++ if (likely(!ieee80211_is_ctl(bar->frame_control)))
++ return RX_CONTINUE;
++
++ if (ieee80211_is_back_req(bar->frame_control)) {
++ struct {
++ __le16 control, start_seq_num;
++ } __packed bar_data;
++
++ if (!rx->sta)
++ return RX_DROP_MONITOR;
++
++ if (skb_copy_bits(skb, offsetof(struct ieee80211_bar, control),
++ &bar_data, sizeof(bar_data)))
++ return RX_DROP_MONITOR;
++
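++ /* the TID sits in the top four bits of the BAR control field;
++ * the starting sequence number, below, in the upper twelve bits
++ * of the SSC field */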
++ tid = le16_to_cpu(bar_data.control) >> 12;
++
++ tid_agg_rx = rcu_dereference(rx->sta->ampdu_mlme.tid_rx[tid]);
++ if (!tid_agg_rx)
++ return RX_DROP_MONITOR;
++
++ start_seq_num = le16_to_cpu(bar_data.start_seq_num) >> 4;
++
++ /* reset session timer */
++ if (tid_agg_rx->timeout)
++ mod_timer(&tid_agg_rx->session_timer,
++ TU_TO_EXP_TIME(tid_agg_rx->timeout));
++
++ spin_lock(&tid_agg_rx->reorder_lock);
++ /* release stored frames up to start of BAR */
++ ieee80211_release_reorder_frames(rx->sdata, tid_agg_rx,
++ start_seq_num, frames);
++ spin_unlock(&tid_agg_rx->reorder_lock);
++
++ kfree_skb(skb);
++ return RX_QUEUED;
++ }
++
++ /*
++ * After this point, we only want management frames,
++ * so we can drop all remaining control frames to
++ * cooked monitor interfaces.
++ */
++ return RX_DROP_MONITOR;
++}
++
++static void ieee80211_process_sa_query_req(struct ieee80211_sub_if_data *sdata,
++ struct ieee80211_mgmt *mgmt,
++ size_t len)
++{
++ struct ieee80211_local *local = sdata->local;
++ struct sk_buff *skb;
++ struct ieee80211_mgmt *resp;
++
++ if (!ether_addr_equal(mgmt->da, sdata->vif.addr)) {
++ /* Not to own unicast address */
++ return;
++ }
++
++ if (!ether_addr_equal(mgmt->sa, sdata->u.mgd.bssid) ||
++ !ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid)) {
++ /* Not from the current AP or not associated yet. */
++ return;
++ }
++
++ if (len < 24 + 1 + sizeof(resp->u.action.u.sa_query)) {
++ /* Too short SA Query request frame */
++ return;
++ }
++
++ skb = dev_alloc_skb(sizeof(*resp) + local->hw.extra_tx_headroom);
++ if (skb == NULL)
++ return;
++
++ skb_reserve(skb, local->hw.extra_tx_headroom);
++ resp = (struct ieee80211_mgmt *) skb_put(skb, 24);
++ memset(resp, 0, 24);
++ memcpy(resp->da, mgmt->sa, ETH_ALEN);
++ memcpy(resp->sa, sdata->vif.addr, ETH_ALEN);
++ memcpy(resp->bssid, sdata->u.mgd.bssid, ETH_ALEN);
++ resp->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
++ IEEE80211_STYPE_ACTION);
++ skb_put(skb, 1 + sizeof(resp->u.action.u.sa_query));
++ resp->u.action.category = WLAN_CATEGORY_SA_QUERY;
++ resp->u.action.u.sa_query.action = WLAN_ACTION_SA_QUERY_RESPONSE;
++ memcpy(resp->u.action.u.sa_query.trans_id,
++ mgmt->u.action.u.sa_query.trans_id,
++ WLAN_SA_QUERY_TR_ID_LEN);
++
++ ieee80211_tx_skb(sdata, skb);
++}
++
++static ieee80211_rx_result debug_noinline
++ieee80211_rx_h_mgmt_check(struct ieee80211_rx_data *rx)
++{
++ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
++
++ /*
++ * From here on, look only at management frames.
++ * Data and control frames are already handled,
++ * and unknown (reserved) frames are useless.
++ */
++ if (rx->skb->len < 24)
++ return RX_DROP_MONITOR;
++
++ if (!ieee80211_is_mgmt(mgmt->frame_control))
++ return RX_DROP_MONITOR;
++
++ if (rx->sdata->vif.type == NL80211_IFTYPE_AP &&
++ ieee80211_is_beacon(mgmt->frame_control) &&
++ !(rx->flags & IEEE80211_RX_BEACON_REPORTED)) {
++ int sig = 0;
++
++ if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
++ sig = status->signal;
++
++ cfg80211_report_obss_beacon(rx->local->hw.wiphy,
++ rx->skb->data, rx->skb->len,
++ status->freq, sig);
++ rx->flags |= IEEE80211_RX_BEACON_REPORTED;
++ }
++
++ if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
++ return RX_DROP_MONITOR;
++
++ if (ieee80211_drop_unencrypted_mgmt(rx))
++ return RX_DROP_UNUSABLE;
++
++ return RX_CONTINUE;
++}
++
++static ieee80211_rx_result debug_noinline
++ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
++{
++ struct ieee80211_local *local = rx->local;
++ struct ieee80211_sub_if_data *sdata = rx->sdata;
++ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
++ int len = rx->skb->len;
++
++ if (!ieee80211_is_action(mgmt->frame_control))
++ return RX_CONTINUE;
++
++ /* drop too small frames */
++ if (len < IEEE80211_MIN_ACTION_SIZE)
++ return RX_DROP_UNUSABLE;
++
++ if (!rx->sta && mgmt->u.action.category != WLAN_CATEGORY_PUBLIC &&
++ mgmt->u.action.category != WLAN_CATEGORY_SELF_PROTECTED &&
++ mgmt->u.action.category != WLAN_CATEGORY_SPECTRUM_MGMT)
++ return RX_DROP_UNUSABLE;
++
++ if (!(status->rx_flags & IEEE80211_RX_RA_MATCH))
++ return RX_DROP_UNUSABLE;
++
++ switch (mgmt->u.action.category) {
++ case WLAN_CATEGORY_HT:
++ /* reject HT action frames from stations not supporting HT */
++ if (!rx->sta->sta.ht_cap.ht_supported)
++ goto invalid;
++
++ if (sdata->vif.type != NL80211_IFTYPE_STATION &&
++ sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
++ sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
++ sdata->vif.type != NL80211_IFTYPE_AP &&
++ sdata->vif.type != NL80211_IFTYPE_ADHOC)
++ break;
++
++ /* verify action & smps_control/chanwidth are present */
++ if (len < IEEE80211_MIN_ACTION_SIZE + 2)
++ goto invalid;
++
++ switch (mgmt->u.action.u.ht_smps.action) {
++ case WLAN_HT_ACTION_SMPS: {
++ struct ieee80211_supported_band *sband;
++ enum ieee80211_smps_mode smps_mode;
++
++ /* convert to HT capability */
++ switch (mgmt->u.action.u.ht_smps.smps_control) {
++ case WLAN_HT_SMPS_CONTROL_DISABLED:
++ smps_mode = IEEE80211_SMPS_OFF;
++ break;
++ case WLAN_HT_SMPS_CONTROL_STATIC:
++ smps_mode = IEEE80211_SMPS_STATIC;
++ break;
++ case WLAN_HT_SMPS_CONTROL_DYNAMIC:
++ smps_mode = IEEE80211_SMPS_DYNAMIC;
++ break;
++ default:
++ goto invalid;
++ }
++
++ /* if no change do nothing */
++ if (rx->sta->sta.smps_mode == smps_mode)
++ goto handled;
++ rx->sta->sta.smps_mode = smps_mode;
++
++ sband = rx->local->hw.wiphy->bands[status->band];
++
++ rate_control_rate_update(local, sband, rx->sta,
++ IEEE80211_RC_SMPS_CHANGED);
++ goto handled;
++ }
++ case WLAN_HT_ACTION_NOTIFY_CHANWIDTH: {
++ struct ieee80211_supported_band *sband;
++ u8 chanwidth = mgmt->u.action.u.ht_notify_cw.chanwidth;
++ enum ieee80211_sta_rx_bandwidth new_bw;
++
++ /* If it doesn't support 40 MHz it can't change ... */
++ if (!(rx->sta->sta.ht_cap.cap &
++ IEEE80211_HT_CAP_SUP_WIDTH_20_40))
++ goto handled;
++
++ if (chanwidth == IEEE80211_HT_CHANWIDTH_20MHZ)
++ new_bw = IEEE80211_STA_RX_BW_20;
++ else
++ new_bw = ieee80211_sta_cur_vht_bw(rx->sta);
++
++ if (rx->sta->sta.bandwidth == new_bw)
++ goto handled;
++
++ sband = rx->local->hw.wiphy->bands[status->band];
++
++ rate_control_rate_update(local, sband, rx->sta,
++ IEEE80211_RC_BW_CHANGED);
++ goto handled;
++ }
++ default:
++ goto invalid;
++ }
++
++ break;
++ case WLAN_CATEGORY_PUBLIC:
++ if (len < IEEE80211_MIN_ACTION_SIZE + 1)
++ goto invalid;
++ if (sdata->vif.type != NL80211_IFTYPE_STATION)
++ break;
++ if (!rx->sta)
++ break;
++ if (!ether_addr_equal(mgmt->bssid, sdata->u.mgd.bssid))
++ break;
++ if (mgmt->u.action.u.ext_chan_switch.action_code !=
++ WLAN_PUB_ACTION_EXT_CHANSW_ANN)
++ break;
++ if (len < offsetof(struct ieee80211_mgmt,
++ u.action.u.ext_chan_switch.variable))
++ goto invalid;
++ goto queue;
++ case WLAN_CATEGORY_VHT:
++ if (sdata->vif.type != NL80211_IFTYPE_STATION &&
++ sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
++ sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
++ sdata->vif.type != NL80211_IFTYPE_AP &&
++ sdata->vif.type != NL80211_IFTYPE_ADHOC)
++ break;
++
++ /* verify action code is present */
++ if (len < IEEE80211_MIN_ACTION_SIZE + 1)
++ goto invalid;
++
++ switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
++ case WLAN_VHT_ACTION_OPMODE_NOTIF: {
++ u8 opmode;
++
++ /* verify opmode is present */
++ if (len < IEEE80211_MIN_ACTION_SIZE + 2)
++ goto invalid;
++
++ opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
++
++ ieee80211_vht_handle_opmode(rx->sdata, rx->sta,
++ opmode, status->band,
++ false);
++ goto handled;
++ }
++ default:
++ break;
++ }
++ break;
++ case WLAN_CATEGORY_BACK:
++ if (sdata->vif.type != NL80211_IFTYPE_STATION &&
++ sdata->vif.type != NL80211_IFTYPE_MESH_POINT &&
++ sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
++ sdata->vif.type != NL80211_IFTYPE_AP &&
++ sdata->vif.type != NL80211_IFTYPE_ADHOC)
++ break;
++
++ /* verify action_code is present */
++ if (len < IEEE80211_MIN_ACTION_SIZE + 1)
++ break;
++
++ switch (mgmt->u.action.u.addba_req.action_code) {
++ case WLAN_ACTION_ADDBA_REQ:
++ if (len < (IEEE80211_MIN_ACTION_SIZE +
++ sizeof(mgmt->u.action.u.addba_req)))
++ goto invalid;
++ break;
++ case WLAN_ACTION_ADDBA_RESP:
++ if (len < (IEEE80211_MIN_ACTION_SIZE +
++ sizeof(mgmt->u.action.u.addba_resp)))
++ goto invalid;
++ break;
++ case WLAN_ACTION_DELBA:
++ if (len < (IEEE80211_MIN_ACTION_SIZE +
++ sizeof(mgmt->u.action.u.delba)))
++ goto invalid;
++ break;
++ default:
++ goto invalid;
++ }
++
++ goto queue;
++ case WLAN_CATEGORY_SPECTRUM_MGMT:
++ /* verify action_code is present */
++ if (len < IEEE80211_MIN_ACTION_SIZE + 1)
++ break;
++
++ switch (mgmt->u.action.u.measurement.action_code) {
++ case WLAN_ACTION_SPCT_MSR_REQ:
++ if (status->band != IEEE80211_BAND_5GHZ)
++ break;
++
++ if (len < (IEEE80211_MIN_ACTION_SIZE +
++ sizeof(mgmt->u.action.u.measurement)))
++ break;
++
++ if (sdata->vif.type != NL80211_IFTYPE_STATION)
++ break;
++
++ ieee80211_process_measurement_req(sdata, mgmt, len);
++ goto handled;
++ case WLAN_ACTION_SPCT_CHL_SWITCH: {
++ u8 *bssid;
++ if (len < (IEEE80211_MIN_ACTION_SIZE +
++ sizeof(mgmt->u.action.u.chan_switch)))
++ break;
++
++ if (sdata->vif.type != NL80211_IFTYPE_STATION &&
++ sdata->vif.type != NL80211_IFTYPE_ADHOC &&
++ sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
++ break;
++
++ if (sdata->vif.type == NL80211_IFTYPE_STATION)
++ bssid = sdata->u.mgd.bssid;
++ else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
++ bssid = sdata->u.ibss.bssid;
++ else if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT)
++ bssid = mgmt->sa;
++ else
++ break;
++
++ if (!ether_addr_equal(mgmt->bssid, bssid))
++ break;
++
++ goto queue;
++ }
++ }
++ break;
++ case WLAN_CATEGORY_SA_QUERY:
++ if (len < (IEEE80211_MIN_ACTION_SIZE +
++ sizeof(mgmt->u.action.u.sa_query)))
++ break;
++
++ switch (mgmt->u.action.u.sa_query.action) {
++ case WLAN_ACTION_SA_QUERY_REQUEST:
++ if (sdata->vif.type != NL80211_IFTYPE_STATION)
++ break;
++ ieee80211_process_sa_query_req(sdata, mgmt, len);
++ goto handled;
++ }
++ break;
++ case WLAN_CATEGORY_SELF_PROTECTED:
++ if (len < (IEEE80211_MIN_ACTION_SIZE +
++ sizeof(mgmt->u.action.u.self_prot.action_code)))
++ break;
++
++ switch (mgmt->u.action.u.self_prot.action_code) {
++ case WLAN_SP_MESH_PEERING_OPEN:
++ case WLAN_SP_MESH_PEERING_CLOSE:
++ case WLAN_SP_MESH_PEERING_CONFIRM:
++ if (!ieee80211_vif_is_mesh(&sdata->vif))
++ goto invalid;
++ if (sdata->u.mesh.user_mpm)
++ /* userspace handles this frame */
++ break;
++ goto queue;
++ case WLAN_SP_MGK_INFORM:
++ case WLAN_SP_MGK_ACK:
++ if (!ieee80211_vif_is_mesh(&sdata->vif))
++ goto invalid;
++ break;
++ }
++ break;
++ case WLAN_CATEGORY_MESH_ACTION:
++ if (len < (IEEE80211_MIN_ACTION_SIZE +
++ sizeof(mgmt->u.action.u.mesh_action.action_code)))
++ break;
++
++ if (!ieee80211_vif_is_mesh(&sdata->vif))
++ break;
++ if (mesh_action_is_path_sel(mgmt) &&
++ !mesh_path_sel_is_hwmp(sdata))
++ break;
++ goto queue;
++ }
++
++ return RX_CONTINUE;
++
++ invalid:
++ status->rx_flags |= IEEE80211_RX_MALFORMED_ACTION_FRM;
++ /* will return in the next handlers */
++ return RX_CONTINUE;
++
++ handled:
++ if (rx->sta)
++ rx->sta->rx_packets++;
++ dev_kfree_skb(rx->skb);
++ return RX_QUEUED;
++
++ queue:
++ rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
++ skb_queue_tail(&sdata->skb_queue, rx->skb);
++ ieee80211_queue_work(&local->hw, &sdata->work);
++ if (rx->sta)
++ rx->sta->rx_packets++;
++ return RX_QUEUED;
++}
++
++static ieee80211_rx_result debug_noinline
++ieee80211_rx_h_userspace_mgmt(struct ieee80211_rx_data *rx)
++{
++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
++ int sig = 0;
++
++ /* skip known-bad action frames and return them in the next handler */
++ if (status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM)
++ return RX_CONTINUE;
++
++ /*
++ * Getting here means the kernel doesn't know how to handle
++ * it, but maybe userspace does ... include returned frames
++ * so userspace can register for those to know whether frames
++ * it transmitted were processed or returned.
++ */
++
++ if (rx->local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
++ sig = status->signal;
++
++ if (cfg80211_rx_mgmt(&rx->sdata->wdev, status->freq, sig,
++ rx->skb->data, rx->skb->len, 0)) {
++ if (rx->sta)
++ rx->sta->rx_packets++;
++ dev_kfree_skb(rx->skb);
++ return RX_QUEUED;
++ }
++
++ return RX_CONTINUE;
++}
++
++static ieee80211_rx_result debug_noinline
++ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)
++{
++ struct ieee80211_local *local = rx->local;
++ struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *) rx->skb->data;
++ struct sk_buff *nskb;
++ struct ieee80211_sub_if_data *sdata = rx->sdata;
++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb);
++
++ if (!ieee80211_is_action(mgmt->frame_control))
++ return RX_CONTINUE;
++
++ /*
++ * For AP mode, hostapd is responsible for handling any action
++ * frames that we didn't handle, including returning unknown
++ * ones. For all other modes we will return them to the sender,
++ * setting the 0x80 bit in the action category, as required by
++ * 802.11-2012 9.24.4.
++ * Newer versions of hostapd shall also use the management frame
++ * registration mechanisms, but older ones still use cooked
++ * monitor interfaces so push all frames there.
++ */
++ if (!(status->rx_flags & IEEE80211_RX_MALFORMED_ACTION_FRM) &&
++ (sdata->vif.type == NL80211_IFTYPE_AP ||
++ sdata->vif.type == NL80211_IFTYPE_AP_VLAN))
++ return RX_DROP_MONITOR;
++
++ if (is_multicast_ether_addr(mgmt->da))
++ return RX_DROP_MONITOR;
++
++ /* do not return rejected action frames */
++ if (mgmt->u.action.category & 0x80)
++ return RX_DROP_UNUSABLE;
++
++ nskb = skb_copy_expand(rx->skb, local->hw.extra_tx_headroom, 0,
++ GFP_ATOMIC);
++ if (nskb) {
++ struct ieee80211_mgmt *nmgmt = (void *)nskb->data;
++
++ nmgmt->u.action.category |= 0x80;
++ memcpy(nmgmt->da, nmgmt->sa, ETH_ALEN);
++ memcpy(nmgmt->sa, rx->sdata->vif.addr, ETH_ALEN);
++
++ memset(nskb->cb, 0, sizeof(nskb->cb));
++
++ if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) {
++ struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb);
++
++ info->flags = IEEE80211_TX_CTL_TX_OFFCHAN |
++ IEEE80211_TX_INTFL_OFFCHAN_TX_OK |
++ IEEE80211_TX_CTL_NO_CCK_RATE;
++ if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL)
++ info->hw_queue =
++ local->hw.offchannel_tx_hw_queue;
++ }
++
++ __ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7,
++ status->band);
++ }
++ dev_kfree_skb(rx->skb);
++ return RX_QUEUED;
++}
++
++static ieee80211_rx_result debug_noinline
++ieee80211_rx_h_mgmt(struct ieee80211_rx_data *rx)
++{
++ struct ieee80211_sub_if_data *sdata = rx->sdata;
++ struct ieee80211_mgmt *mgmt = (void *)rx->skb->data;
++ __le16 stype;
++
++ stype = mgmt->frame_control & cpu_to_le16(IEEE80211_FCTL_STYPE);
++
++ if (!ieee80211_vif_is_mesh(&sdata->vif) &&
++ sdata->vif.type != NL80211_IFTYPE_ADHOC &&
++ sdata->vif.type != NL80211_IFTYPE_STATION)
++ return RX_DROP_MONITOR;
++
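++ /*
++ * stype was kept in little-endian form so it can be compared
++ * directly against the cpu_to_le16() case labels below, avoiding
++ * a per-frame byte swap.
++ */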
++ switch (stype) {
++ case cpu_to_le16(IEEE80211_STYPE_AUTH):
++ case cpu_to_le16(IEEE80211_STYPE_BEACON):
++ case cpu_to_le16(IEEE80211_STYPE_PROBE_RESP):
++ /* process for all: mesh, mlme, ibss */
++ break;
++ case cpu_to_le16(IEEE80211_STYPE_ASSOC_RESP):
++ case cpu_to_le16(IEEE80211_STYPE_REASSOC_RESP):
++ case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
++ case cpu_to_le16(IEEE80211_STYPE_DISASSOC):
++ if (is_multicast_ether_addr(mgmt->da) &&
++ !is_broadcast_ether_addr(mgmt->da))
++ return RX_DROP_MONITOR;
++
++ /* process only for station */
++ if (sdata->vif.type != NL80211_IFTYPE_STATION)
++ return RX_DROP_MONITOR;
++ break;
++ case cpu_to_le16(IEEE80211_STYPE_PROBE_REQ):
++ /* process only for ibss and mesh */
++ if (sdata->vif.type != NL80211_IFTYPE_ADHOC &&
++ sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
++ return RX_DROP_MONITOR;
++ break;
++ default:
++ return RX_DROP_MONITOR;
++ }
++
++ /* queue up frame and kick off work to process it */
++ rx->skb->pkt_type = IEEE80211_SDATA_QUEUE_TYPE_FRAME;
++ skb_queue_tail(&sdata->skb_queue, rx->skb);
++ ieee80211_queue_work(&rx->local->hw, &sdata->work);
++ if (rx->sta)
++ rx->sta->rx_packets++;
++
++ return RX_QUEUED;
++}
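
ieee80211_rx_h_mgmt() deliberately does no protocol work in the RX path: it tags the skb, tail-queues it on the interface and schedules sdata->work, so the MLME processing happens later in process context. A toy single-threaded model of that enqueue-then-drain split (all names illustrative):

#include <stdio.h>
#include <stdlib.h>

/* Toy model of "tag, queue, kick work": the RX path only enqueues and
 * a separately scheduled worker drains the queue later. */
struct frame { int id; struct frame *next; };

static struct frame *queue_head;
static struct frame **queue_tail = &queue_head;
static int work_scheduled;

static void rx_h_mgmt_model(struct frame *f)	/* hot path: enqueue only */
{
	f->next = NULL;
	*queue_tail = f;
	queue_tail = &f->next;
	work_scheduled = 1;	/* stand-in for ieee80211_queue_work() */
}

static void sdata_work_model(void)		/* deferred, process context */
{
	while (queue_head) {
		struct frame *f = queue_head;

		queue_head = f->next;
		printf("processing mgmt frame %d\n", f->id);
		free(f);
	}
	queue_tail = &queue_head;
	work_scheduled = 0;
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct frame *f = malloc(sizeof(*f));

		if (!f)
			return 1;
		f->id = i;
		rx_h_mgmt_model(f);
	}
	if (work_scheduled)
		sdata_work_model();
	return 0;
}
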
++
++/* TODO: use IEEE80211_RX_FRAGMENTED */
++static void ieee80211_rx_cooked_monitor(struct ieee80211_rx_data *rx,
++ struct ieee80211_rate *rate)
++{
++ struct ieee80211_sub_if_data *sdata;
++ struct ieee80211_local *local = rx->local;
++ struct sk_buff *skb = rx->skb, *skb2;
++ struct net_device *prev_dev = NULL;
++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
++ int needed_headroom;
++
++ /*
++ * If cooked monitor has been processed already, then
++ * don't do it again. If not, set the flag.
++ */
++ if (rx->flags & IEEE80211_RX_CMNTR)
++ goto out_free_skb;
++ rx->flags |= IEEE80211_RX_CMNTR;
++
++ /* If there are no cooked monitor interfaces, just free the SKB */
++ if (!local->cooked_mntrs)
++ goto out_free_skb;
++
++ /* room for the radiotap header based on driver features */
++ needed_headroom = ieee80211_rx_radiotap_space(local, status);
++
++ if (skb_headroom(skb) < needed_headroom &&
++ pskb_expand_head(skb, needed_headroom, 0, GFP_ATOMIC))
++ goto out_free_skb;
++
++ /* prepend radiotap information */
++ ieee80211_add_rx_radiotap_header(local, skb, rate, needed_headroom,
++ false);
++
++ skb_set_mac_header(skb, 0);
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ skb->pkt_type = PACKET_OTHERHOST;
++ skb->protocol = htons(ETH_P_802_2);
++
++ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
++ if (!ieee80211_sdata_running(sdata))
++ continue;
++
++ if (sdata->vif.type != NL80211_IFTYPE_MONITOR ||
++ !(sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES))
++ continue;
++
++ if (prev_dev) {
++ skb2 = skb_clone(skb, GFP_ATOMIC);
++ if (skb2) {
++ skb2->dev = prev_dev;
++ netif_receive_skb(skb2);
++ }
++ }
++
++ prev_dev = sdata->dev;
++ sdata->dev->stats.rx_packets++;
++ sdata->dev->stats.rx_bytes += skb->len;
++ }
++
++ if (prev_dev) {
++ skb->dev = prev_dev;
++ netif_receive_skb(skb);
++ return;
++ }
++
++ out_free_skb:
++ dev_kfree_skb(skb);
++}
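
ieee80211_rx_cooked_monitor() uses a recurring mac80211 delivery idiom: while walking the interface list it hands a clone to every matching device except the most recent match, which is remembered in prev_dev and finally receives the original skb, so N receivers cost N-1 copies. A generic sketch of the idiom:

#include <stdio.h>

/* Deliver one buffer to every matching receiver, cloning for all but
 * the last match so the original is handed over exactly once. */
static void deliver(const char *buf, const int *match, int n_ifaces)
{
	int prev = -1;

	for (int i = 0; i < n_ifaces; i++) {
		if (!match[i])
			continue;
		if (prev >= 0)
			printf("iface %d gets a clone of \"%s\"\n", prev, buf);
		prev = i;	/* defer: this one may get the original */
	}
	if (prev >= 0)
		printf("iface %d gets the original \"%s\"\n", prev, buf);
	else
		printf("no receivers: free \"%s\"\n", buf);
}

int main(void)
{
	const int match[] = { 1, 0, 1 };

	deliver("frame", match, 3);
	return 0;
}

The same defer-the-last-receiver trick reappears below in __ieee80211_rx_handle_packet(), where every candidate station or interface but the final one is handled with consume=false (forcing a copy) and the last gets the original buffer.
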
++
++static void ieee80211_rx_handlers_result(struct ieee80211_rx_data *rx,
++ ieee80211_rx_result res)
++{
++ switch (res) {
++ case RX_DROP_MONITOR:
++ I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
++ if (rx->sta)
++ rx->sta->rx_dropped++;
++ /* fall through */
++ case RX_CONTINUE: {
++ struct ieee80211_rate *rate = NULL;
++ struct ieee80211_supported_band *sband;
++ struct ieee80211_rx_status *status;
++
++ status = IEEE80211_SKB_RXCB((rx->skb));
++
++ sband = rx->local->hw.wiphy->bands[status->band];
++ if (!(status->flag & RX_FLAG_HT) &&
++ !(status->flag & RX_FLAG_VHT))
++ rate = &sband->bitrates[status->rate_idx];
++
++ ieee80211_rx_cooked_monitor(rx, rate);
++ break;
++ }
++ case RX_DROP_UNUSABLE:
++ I802_DEBUG_INC(rx->sdata->local->rx_handlers_drop);
++ if (rx->sta)
++ rx->sta->rx_dropped++;
++ dev_kfree_skb(rx->skb);
++ break;
++ case RX_QUEUED:
++ I802_DEBUG_INC(rx->sdata->local->rx_handlers_queued);
++ break;
++ }
++}
++
++static void ieee80211_rx_handlers(struct ieee80211_rx_data *rx,
++ struct sk_buff_head *frames)
++{
++ ieee80211_rx_result res = RX_DROP_MONITOR;
++ struct sk_buff *skb;
++
++#define CALL_RXH(rxh) \
++ do { \
++ res = rxh(rx); \
++ if (res != RX_CONTINUE) \
++ goto rxh_next; \
++ } while (0);
++
++ spin_lock_bh(&rx->local->rx_path_lock);
++
++ while ((skb = __skb_dequeue(frames))) {
++ /*
++ * all the other fields are valid across frames
++		 * that belong to an A-MPDU since they are on the
++ * same TID from the same station
++ */
++ rx->skb = skb;
++
++ CALL_RXH(ieee80211_rx_h_check_more_data)
++ CALL_RXH(ieee80211_rx_h_uapsd_and_pspoll)
++ CALL_RXH(ieee80211_rx_h_sta_process)
++ CALL_RXH(ieee80211_rx_h_decrypt)
++ CALL_RXH(ieee80211_rx_h_defragment)
++ CALL_RXH(ieee80211_rx_h_michael_mic_verify)
++ /* must be after MMIC verify so header is counted in MPDU mic */
++#ifdef CONFIG_MAC80211_MESH
++ if (ieee80211_vif_is_mesh(&rx->sdata->vif))
++ CALL_RXH(ieee80211_rx_h_mesh_fwding);
++#endif
++ CALL_RXH(ieee80211_rx_h_amsdu)
++ CALL_RXH(ieee80211_rx_h_data)
++
++ /* special treatment -- needs the queue */
++ res = ieee80211_rx_h_ctrl(rx, frames);
++ if (res != RX_CONTINUE)
++ goto rxh_next;
++
++ CALL_RXH(ieee80211_rx_h_mgmt_check)
++ CALL_RXH(ieee80211_rx_h_action)
++ CALL_RXH(ieee80211_rx_h_userspace_mgmt)
++ CALL_RXH(ieee80211_rx_h_action_return)
++ CALL_RXH(ieee80211_rx_h_mgmt)
++
++ rxh_next:
++ ieee80211_rx_handlers_result(rx, res);
++
++#undef CALL_RXH
++ }
++
++ spin_unlock_bh(&rx->local->rx_path_lock);
++}
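
The CALL_RXH macro turns the handler list above into a short-circuit pipeline: a handler returns RX_CONTINUE to pass the frame along, and any other result jumps to rxh_next for disposal by ieee80211_rx_handlers_result(). The same control flow expressed with a handler table instead of a macro (a sketch, not the kernel's structure):

#include <stdio.h>

enum rx_result { RX_CONTINUE, RX_QUEUED, RX_DROP };

struct rx_data { int frame_id; };

static enum rx_result h_check(struct rx_data *rx)   { (void)rx; return RX_CONTINUE; }
static enum rx_result h_decrypt(struct rx_data *rx) { (void)rx; return RX_CONTINUE; }
static enum rx_result h_data(struct rx_data *rx)
{
	return rx->frame_id % 2 ? RX_QUEUED : RX_DROP;
}

static enum rx_result (*const handlers[])(struct rx_data *) = {
	h_check, h_decrypt, h_data,
};

static enum rx_result run_pipeline(struct rx_data *rx)
{
	for (unsigned i = 0; i < sizeof(handlers) / sizeof(handlers[0]); i++) {
		enum rx_result res = handlers[i](rx);

		if (res != RX_CONTINUE)
			return res;	/* short-circuit, like goto rxh_next */
	}
	return RX_CONTINUE;
}

int main(void)
{
	struct rx_data rx = { .frame_id = 1 };

	printf("pipeline result: %d\n", run_pipeline(&rx));
	return 0;
}

One thing the table form cannot express as directly is a handler with extra arguments; that is why ieee80211_rx_h_ctrl is called by hand above, since it needs the frame queue.
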
++
++static void ieee80211_invoke_rx_handlers(struct ieee80211_rx_data *rx)
++{
++ struct sk_buff_head reorder_release;
++ ieee80211_rx_result res = RX_DROP_MONITOR;
++
++ __skb_queue_head_init(&reorder_release);
++
++#define CALL_RXH(rxh) \
++ do { \
++ res = rxh(rx); \
++ if (res != RX_CONTINUE) \
++ goto rxh_next; \
++ } while (0);
++
++ CALL_RXH(ieee80211_rx_h_check)
++
++ ieee80211_rx_reorder_ampdu(rx, &reorder_release);
++
++ ieee80211_rx_handlers(rx, &reorder_release);
++ return;
++
++ rxh_next:
++ ieee80211_rx_handlers_result(rx, res);
++
++#undef CALL_RXH
++}
++
++/*
++ * This function makes calls into the RX path, therefore
++ * it has to be invoked under RCU read lock.
++ */
++void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
++{
++ struct sk_buff_head frames;
++ struct ieee80211_rx_data rx = {
++ .sta = sta,
++ .sdata = sta->sdata,
++ .local = sta->local,
++ /* This is OK -- must be QoS data frame */
++ .security_idx = tid,
++ .seqno_idx = tid,
++ .flags = 0,
++ };
++ struct tid_ampdu_rx *tid_agg_rx;
++
++ tid_agg_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[tid]);
++ if (!tid_agg_rx)
++ return;
++
++ __skb_queue_head_init(&frames);
++
++ spin_lock(&tid_agg_rx->reorder_lock);
++ ieee80211_sta_reorder_release(sta->sdata, tid_agg_rx, &frames);
++ spin_unlock(&tid_agg_rx->reorder_lock);
++
++ ieee80211_rx_handlers(&rx, &frames);
++}
++
++/* main receive path */
++
++static bool prepare_for_handlers(struct ieee80211_rx_data *rx,
++ struct ieee80211_hdr *hdr)
++{
++ struct ieee80211_sub_if_data *sdata = rx->sdata;
++ struct sk_buff *skb = rx->skb;
++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
++ u8 *bssid = ieee80211_get_bssid(hdr, skb->len, sdata->vif.type);
++ int multicast = is_multicast_ether_addr(hdr->addr1);
++
++ switch (sdata->vif.type) {
++ case NL80211_IFTYPE_STATION:
++ if (!bssid && !sdata->u.mgd.use_4addr)
++ return false;
++ if (!multicast &&
++ !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
++ if (!(sdata->dev->flags & IFF_PROMISC) ||
++ sdata->u.mgd.use_4addr)
++ return false;
++ status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
++ }
++ break;
++ case NL80211_IFTYPE_ADHOC:
++ if (!bssid)
++ return false;
++ if (ether_addr_equal(sdata->vif.addr, hdr->addr2) ||
++ ether_addr_equal(sdata->u.ibss.bssid, hdr->addr2))
++ return false;
++ if (ieee80211_is_beacon(hdr->frame_control)) {
++ return true;
++ } else if (!ieee80211_bssid_match(bssid, sdata->u.ibss.bssid)) {
++ return false;
++ } else if (!multicast &&
++ !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
++ if (!(sdata->dev->flags & IFF_PROMISC))
++ return false;
++ status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
++ } else if (!rx->sta) {
++ int rate_idx;
++ if (status->flag & (RX_FLAG_HT | RX_FLAG_VHT))
++ rate_idx = 0; /* TODO: HT/VHT rates */
++ else
++ rate_idx = status->rate_idx;
++ ieee80211_ibss_rx_no_sta(sdata, bssid, hdr->addr2,
++ BIT(rate_idx));
++ }
++ break;
++ case NL80211_IFTYPE_MESH_POINT:
++ if (!multicast &&
++ !ether_addr_equal(sdata->vif.addr, hdr->addr1)) {
++ if (!(sdata->dev->flags & IFF_PROMISC))
++ return false;
++
++ status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
++ }
++ break;
++ case NL80211_IFTYPE_AP_VLAN:
++ case NL80211_IFTYPE_AP:
++ if (!bssid) {
++ if (!ether_addr_equal(sdata->vif.addr, hdr->addr1))
++ return false;
++ } else if (!ieee80211_bssid_match(bssid, sdata->vif.addr)) {
++ /*
++ * Accept public action frames even when the
++			 * BSSID doesn't match; this is used for P2P
++ * and location updates. Note that mac80211
++ * itself never looks at these frames.
++ */
++ if (!multicast &&
++ !ether_addr_equal(sdata->vif.addr, hdr->addr1))
++ return false;
++ if (ieee80211_is_public_action(hdr, skb->len))
++ return true;
++ if (!ieee80211_is_beacon(hdr->frame_control))
++ return false;
++ status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
++ } else if (!ieee80211_has_tods(hdr->frame_control)) {
++ /* ignore data frames to TDLS-peers */
++ if (ieee80211_is_data(hdr->frame_control))
++ return false;
++ /* ignore action frames to TDLS-peers */
++ if (ieee80211_is_action(hdr->frame_control) &&
++ !ether_addr_equal(bssid, hdr->addr1))
++ return false;
++ }
++ break;
++ case NL80211_IFTYPE_WDS:
++ if (bssid || !ieee80211_is_data(hdr->frame_control))
++ return false;
++ if (!ether_addr_equal(sdata->u.wds.remote_addr, hdr->addr2))
++ return false;
++ break;
++ case NL80211_IFTYPE_P2P_DEVICE:
++ if (!ieee80211_is_public_action(hdr, skb->len) &&
++ !ieee80211_is_probe_req(hdr->frame_control) &&
++ !ieee80211_is_probe_resp(hdr->frame_control) &&
++ !ieee80211_is_beacon(hdr->frame_control))
++ return false;
++ if (!ether_addr_equal(sdata->vif.addr, hdr->addr1) &&
++ !multicast)
++ status->rx_flags &= ~IEEE80211_RX_RA_MATCH;
++ break;
++ default:
++ /* should never get here */
++ WARN_ON_ONCE(1);
++ break;
++ }
++
++ return true;
++}
++
++/*
++ * This function returns whether or not the SKB
++ * was destined for RX processing, which, if consume
++ * is true, is equivalent to whether or not the
++ * skb was consumed.
++ */
++static bool ieee80211_prepare_and_rx_handle(struct ieee80211_rx_data *rx,
++ struct sk_buff *skb, bool consume)
++{
++ struct ieee80211_local *local = rx->local;
++ struct ieee80211_sub_if_data *sdata = rx->sdata;
++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
++ struct ieee80211_hdr *hdr = (void *)skb->data;
++
++ rx->skb = skb;
++ status->rx_flags |= IEEE80211_RX_RA_MATCH;
++
++ if (!prepare_for_handlers(rx, hdr))
++ return false;
++
++ if (!consume) {
++ skb = skb_copy(skb, GFP_ATOMIC);
++ if (!skb) {
++ if (net_ratelimit())
++ wiphy_debug(local->hw.wiphy,
++ "failed to copy skb for %s\n",
++ sdata->name);
++ return true;
++ }
++
++ rx->skb = skb;
++ }
++
++ ieee80211_invoke_rx_handlers(rx);
++ return true;
++}
++
++/*
++ * This is the actual Rx frames handler. As it belongs to the Rx path it
++ * must be called with rcu_read_lock protection.
++ */
++static void __ieee80211_rx_handle_packet(struct ieee80211_hw *hw,
++ struct sk_buff *skb)
++{
++ struct ieee80211_local *local = hw_to_local(hw);
++ struct ieee80211_sub_if_data *sdata;
++ struct ieee80211_hdr *hdr;
++ __le16 fc;
++ struct ieee80211_rx_data rx;
++ struct ieee80211_sub_if_data *prev;
++ struct sta_info *sta, *tmp, *prev_sta;
++ int err = 0;
++
++ fc = ((struct ieee80211_hdr *)skb->data)->frame_control;
++ memset(&rx, 0, sizeof(rx));
++ rx.skb = skb;
++ rx.local = local;
++
++ if (ieee80211_is_data(fc) || ieee80211_is_mgmt(fc))
++ local->dot11ReceivedFragmentCount++;
++
++ if (ieee80211_is_mgmt(fc)) {
++ /* drop frame if too short for header */
++ if (skb->len < ieee80211_hdrlen(fc))
++ err = -ENOBUFS;
++ else
++ err = skb_linearize(skb);
++ } else {
++ err = !pskb_may_pull(skb, ieee80211_hdrlen(fc));
++ }
++
++ if (err) {
++ dev_kfree_skb(skb);
++ return;
++ }
++
++ hdr = (struct ieee80211_hdr *)skb->data;
++ ieee80211_parse_qos(&rx);
++ ieee80211_verify_alignment(&rx);
++
++ if (unlikely(ieee80211_is_probe_resp(hdr->frame_control) ||
++ ieee80211_is_beacon(hdr->frame_control)))
++ ieee80211_scan_rx(local, skb);
++
++ if (ieee80211_is_data(fc)) {
++ prev_sta = NULL;
++
++ for_each_sta_info(local, hdr->addr2, sta, tmp) {
++ if (!prev_sta) {
++ prev_sta = sta;
++ continue;
++ }
++
++ rx.sta = prev_sta;
++ rx.sdata = prev_sta->sdata;
++ ieee80211_prepare_and_rx_handle(&rx, skb, false);
++
++ prev_sta = sta;
++ }
++
++ if (prev_sta) {
++ rx.sta = prev_sta;
++ rx.sdata = prev_sta->sdata;
++
++ if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
++ return;
++ goto out;
++ }
++ }
++
++ prev = NULL;
++
++ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
++ if (!ieee80211_sdata_running(sdata))
++ continue;
++
++ if (sdata->vif.type == NL80211_IFTYPE_MONITOR ||
++ sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
++ continue;
++
++ /*
++ * frame is destined for this interface, but if it's
++ * not also for the previous one we handle that after
++		 * the loop to avoid copying the SKB once too often
++ */
++
++ if (!prev) {
++ prev = sdata;
++ continue;
++ }
++
++ rx.sta = sta_info_get_bss(prev, hdr->addr2);
++ rx.sdata = prev;
++ ieee80211_prepare_and_rx_handle(&rx, skb, false);
++
++ prev = sdata;
++ }
++
++ if (prev) {
++ rx.sta = sta_info_get_bss(prev, hdr->addr2);
++ rx.sdata = prev;
++
++ if (ieee80211_prepare_and_rx_handle(&rx, skb, true))
++ return;
++ }
++
++ out:
++ dev_kfree_skb(skb);
++}
++
++/*
++ * This is the receive path handler. It is called by a low level driver when an
++ * 802.11 MPDU is received from the hardware.
++ */
++void ieee80211_rx(struct ieee80211_hw *hw, struct sk_buff *skb)
++{
++ struct ieee80211_local *local = hw_to_local(hw);
++ struct ieee80211_rate *rate = NULL;
++ struct ieee80211_supported_band *sband;
++ struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
++
++ WARN_ON_ONCE(softirq_count() == 0);
++
++ if (WARN_ON(status->band >= IEEE80211_NUM_BANDS))
++ goto drop;
++
++ sband = local->hw.wiphy->bands[status->band];
++ if (WARN_ON(!sband))
++ goto drop;
++
++ /*
++ * If we're suspending, it is possible although not too likely
++ * that we'd be receiving frames after having already partially
++ * quiesced the stack. We can't process such frames then since
++ * that might, for example, cause stations to be added or other
++	 * driver callbacks to be invoked.
++ */
++ if (unlikely(local->quiescing || local->suspended))
++ goto drop;
++
++	/* We might be in the middle of a HW reconfig; prevent Rx for the same reason */
++ if (unlikely(local->in_reconfig))
++ goto drop;
++
++ /*
++ * The same happens when we're not even started,
++ * but that's worth a warning.
++ */
++ if (WARN_ON(!local->started))
++ goto drop;
++
++ if (likely(!(status->flag & RX_FLAG_FAILED_PLCP_CRC))) {
++ /*
++ * Validate the rate, unless a PLCP error means that
++ * we probably can't have a valid rate here anyway.
++ */
++
++ if (status->flag & RX_FLAG_HT) {
++ /*
++ * rate_idx is MCS index, which can be [0-76]
++ * as documented on:
++ *
++ * http://wireless.kernel.org/en/developers/Documentation/ieee80211/802.11n
++ *
++ * Anything else would be some sort of driver or
++ * hardware error. The driver should catch hardware
++ * errors.
++ */
++ if (WARN(status->rate_idx > 76,
++ "Rate marked as an HT rate but passed "
++ "status->rate_idx is not "
++ "an MCS index [0-76]: %d (0x%02x)\n",
++ status->rate_idx,
++ status->rate_idx))
++ goto drop;
++ } else if (status->flag & RX_FLAG_VHT) {
++ if (WARN_ONCE(status->rate_idx > 9 ||
++ !status->vht_nss ||
++ status->vht_nss > 8,
++ "Rate marked as a VHT rate but data is invalid: MCS: %d, NSS: %d\n",
++ status->rate_idx, status->vht_nss))
++ goto drop;
++ } else {
++ if (WARN_ON(status->rate_idx >= sband->n_bitrates))
++ goto drop;
++ rate = &sband->bitrates[status->rate_idx];
++ }
++ }
++
++ status->rx_flags = 0;
++
++ /*
++ * key references and virtual interfaces are protected using RCU
++ * and this requires that we are in a read-side RCU section during
++ * receive processing
++ */
++ rcu_read_lock();
++
++ /*
++	 * Frames with failed FCS/PLCP checksum are not returned;
++	 * all other frames are returned without radiotap header
++ * if it was previously present.
++ * Also, frames with less than 16 bytes are dropped.
++ */
++ skb = ieee80211_rx_monitor(local, skb, rate);
++ if (!skb) {
++ rcu_read_unlock();
++ return;
++ }
++
++ ieee80211_tpt_led_trig_rx(local,
++ ((struct ieee80211_hdr *)skb->data)->frame_control,
++ skb->len);
++ __ieee80211_rx_handle_packet(hw, skb);
++
++ rcu_read_unlock();
++
++ return;
++ drop:
++ kfree_skb(skb);
++}
++EXPORT_SYMBOL(ieee80211_rx);
++
++/* This is a version of the rx handler that can be called from hard irq
++ * context. Post the skb on the queue and schedule the tasklet */
++void ieee80211_rx_irqsafe(struct ieee80211_hw *hw, struct sk_buff *skb)
++{
++ struct ieee80211_local *local = hw_to_local(hw);
++
++ BUILD_BUG_ON(sizeof(struct ieee80211_rx_status) > sizeof(skb->cb));
++
++ skb->pkt_type = IEEE80211_RX_MSG;
++ skb_queue_tail(&local->skb_queue, skb);
++ tasklet_schedule(&local->tasklet);
++}
++EXPORT_SYMBOL(ieee80211_rx_irqsafe);
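
For orientation, this is roughly how a low-level driver feeds the entry points exported above: it fills the ieee80211_rx_status block in skb->cb and hands the skb to mac80211, using the irqsafe variant from hard-IRQ context. A hedged sketch; the mydrv_* name and the band choice are hypothetical, while the mac80211 identifiers are the ones defined in this file and in net/mac80211.h:

#include <linux/string.h>
#include <net/mac80211.h>

/*
 * Sketch of a low-level driver's hard-IRQ RX hand-off to mac80211.
 * The mydrv_* name and the 2 GHz band choice are hypothetical; the
 * mac80211 calls are the entry points defined above.
 */
static void mydrv_rx_frame(struct ieee80211_hw *hw, struct sk_buff *skb,
			   int freq_mhz, int signal_dbm, int rate_idx)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	memset(status, 0, sizeof(*status));
	status->band = IEEE80211_BAND_2GHZ;	/* as reported by the HW */
	status->freq = freq_mhz;
	status->signal = signal_dbm;	/* meaningful with IEEE80211_HW_SIGNAL_DBM */
	status->rate_idx = rate_idx;	/* index into the band's bitrates */

	/*
	 * From hard-IRQ context the frame must go through the deferring
	 * variant; from a tasklet/softirq, ieee80211_rx(hw, skb) may be
	 * called directly instead.
	 */
	ieee80211_rx_irqsafe(hw, skb);
}
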
+diff -Nur linux-3.18.12.orig/net/netfilter/core.c linux-3.18.12/net/netfilter/core.c
+--- linux-3.18.12.orig/net/netfilter/core.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/net/netfilter/core.c 2015-04-26 13:32:22.471684003 -0500
@@ -21,11 +21,17 @@
#include <linux/proc_fs.h>
#include <linux/mutex.h>
@@ -24135,9 +36114,9 @@ diff -Nur linux-3.18.10.orig/net/netfilter/core.c linux-3.18.10/net/netfilter/co
static DEFINE_MUTEX(afinfo_mutex);
const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly;
-diff -Nur linux-3.18.10.orig/net/packet/af_packet.c linux-3.18.10/net/packet/af_packet.c
---- linux-3.18.10.orig/net/packet/af_packet.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/net/packet/af_packet.c 2015-03-26 12:42:18.695588359 +0100
+diff -Nur linux-3.18.12.orig/net/packet/af_packet.c linux-3.18.12/net/packet/af_packet.c
+--- linux-3.18.12.orig/net/packet/af_packet.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/net/packet/af_packet.c 2015-04-26 13:32:22.471684003 -0500
@@ -63,6 +63,7 @@
#include <linux/if_packet.h>
#include <linux/wireless.h>
@@ -24164,9 +36143,9 @@ diff -Nur linux-3.18.10.orig/net/packet/af_packet.c linux-3.18.10/net/packet/af_
}
}
prb_close_block(pkc, pbd, po, status);
-diff -Nur linux-3.18.10.orig/net/rds/ib_rdma.c linux-3.18.10/net/rds/ib_rdma.c
---- linux-3.18.10.orig/net/rds/ib_rdma.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/net/rds/ib_rdma.c 2015-03-26 12:42:18.695588359 +0100
+diff -Nur linux-3.18.12.orig/net/rds/ib_rdma.c linux-3.18.12/net/rds/ib_rdma.c
+--- linux-3.18.12.orig/net/rds/ib_rdma.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/net/rds/ib_rdma.c 2015-04-26 13:32:22.471684003 -0500
@@ -34,6 +34,7 @@
#include <linux/slab.h>
#include <linux/rculist.h>
@@ -24184,9 +36163,9 @@ diff -Nur linux-3.18.10.orig/net/rds/ib_rdma.c linux-3.18.10/net/rds/ib_rdma.c
}
}
-diff -Nur linux-3.18.10.orig/net/sched/sch_generic.c linux-3.18.10/net/sched/sch_generic.c
---- linux-3.18.10.orig/net/sched/sch_generic.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/net/sched/sch_generic.c 2015-03-26 12:42:18.695588359 +0100
+diff -Nur linux-3.18.12.orig/net/sched/sch_generic.c linux-3.18.12/net/sched/sch_generic.c
+--- linux-3.18.12.orig/net/sched/sch_generic.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/net/sched/sch_generic.c 2015-04-26 13:32:22.471684003 -0500
@@ -894,7 +894,7 @@
/* Wait for outstanding qdisc_run calls. */
list_for_each_entry(dev, head, close_list)
@@ -24196,9 +36175,9 @@ diff -Nur linux-3.18.10.orig/net/sched/sch_generic.c linux-3.18.10/net/sched/sch
}
void dev_deactivate(struct net_device *dev)
-diff -Nur linux-3.18.10.orig/net/sunrpc/svc_xprt.c linux-3.18.10/net/sunrpc/svc_xprt.c
---- linux-3.18.10.orig/net/sunrpc/svc_xprt.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/net/sunrpc/svc_xprt.c 2015-03-26 12:42:18.695588359 +0100
+diff -Nur linux-3.18.12.orig/net/sunrpc/svc_xprt.c linux-3.18.12/net/sunrpc/svc_xprt.c
+--- linux-3.18.12.orig/net/sunrpc/svc_xprt.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/net/sunrpc/svc_xprt.c 2015-04-26 13:32:22.475684003 -0500
@@ -357,7 +357,7 @@
return;
}
@@ -24217,9 +36196,9 @@ diff -Nur linux-3.18.10.orig/net/sunrpc/svc_xprt.c linux-3.18.10/net/sunrpc/svc_
}
/*
-diff -Nur linux-3.18.10.orig/scripts/mkcompile_h linux-3.18.10/scripts/mkcompile_h
---- linux-3.18.10.orig/scripts/mkcompile_h 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/scripts/mkcompile_h 2015-03-26 12:42:18.695588359 +0100
+diff -Nur linux-3.18.12.orig/scripts/mkcompile_h linux-3.18.12/scripts/mkcompile_h
+--- linux-3.18.12.orig/scripts/mkcompile_h 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/scripts/mkcompile_h 2015-04-26 13:32:22.475684003 -0500
@@ -4,7 +4,8 @@
ARCH=$2
SMP=$3
@@ -24238,9 +36217,9 @@ diff -Nur linux-3.18.10.orig/scripts/mkcompile_h linux-3.18.10/scripts/mkcompile
UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP"
# Truncate to maximum length
-diff -Nur linux-3.18.10.orig/sound/core/pcm_native.c linux-3.18.10/sound/core/pcm_native.c
---- linux-3.18.10.orig/sound/core/pcm_native.c 2015-03-24 02:05:12.000000000 +0100
-+++ linux-3.18.10/sound/core/pcm_native.c 2015-03-26 12:42:18.695588359 +0100
+diff -Nur linux-3.18.12.orig/sound/core/pcm_native.c linux-3.18.12/sound/core/pcm_native.c
+--- linux-3.18.12.orig/sound/core/pcm_native.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/sound/core/pcm_native.c 2015-04-26 13:32:22.475684003 -0500
@@ -104,7 +104,7 @@
void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream)
{
@@ -24277,3 +36256,75 @@ diff -Nur linux-3.18.10.orig/sound/core/pcm_native.c linux-3.18.10/sound/core/pc
}
EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore);
+diff -Nur linux-3.18.12.orig/virt/kvm/async_pf.c linux-3.18.12/virt/kvm/async_pf.c
+--- linux-3.18.12.orig/virt/kvm/async_pf.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/virt/kvm/async_pf.c 2015-04-26 13:32:22.475684003 -0500
+@@ -94,8 +94,8 @@
+
+ trace_kvm_async_pf_completed(addr, gva);
+
+- if (waitqueue_active(&vcpu->wq))
+- wake_up_interruptible(&vcpu->wq);
++ if (swaitqueue_active(&vcpu->wq))
++ swait_wake_interruptible(&vcpu->wq);
+
+ mmput(mm);
+ kvm_put_kvm(vcpu->kvm);
+diff -Nur linux-3.18.12.orig/virt/kvm/kvm_main.c linux-3.18.12/virt/kvm/kvm_main.c
+--- linux-3.18.12.orig/virt/kvm/kvm_main.c 2015-04-20 14:48:02.000000000 -0500
++++ linux-3.18.12/virt/kvm/kvm_main.c 2015-04-26 13:32:22.475684003 -0500
+@@ -221,7 +221,7 @@
+ vcpu->kvm = kvm;
+ vcpu->vcpu_id = id;
+ vcpu->pid = NULL;
+- init_waitqueue_head(&vcpu->wq);
++ init_swait_head(&vcpu->wq);
+ kvm_async_pf_vcpu_init(vcpu);
+
+ page = alloc_page(GFP_KERNEL | __GFP_ZERO);
+@@ -1740,10 +1740,10 @@
+ */
+ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
+ {
+- DEFINE_WAIT(wait);
++ DEFINE_SWAITER(wait);
+
+ for (;;) {
+- prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
++ swait_prepare(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
+
+ if (kvm_arch_vcpu_runnable(vcpu)) {
+ kvm_make_request(KVM_REQ_UNHALT, vcpu);
+@@ -1757,7 +1757,7 @@
+ schedule();
+ }
+
+- finish_wait(&vcpu->wq, &wait);
++ swait_finish(&vcpu->wq, &wait);
+ }
+ EXPORT_SYMBOL_GPL(kvm_vcpu_block);
+
+@@ -1769,11 +1769,11 @@
+ {
+ int me;
+ int cpu = vcpu->cpu;
+- wait_queue_head_t *wqp;
++ struct swait_head *wqp;
+
+ wqp = kvm_arch_vcpu_wq(vcpu);
+- if (waitqueue_active(wqp)) {
+- wake_up_interruptible(wqp);
++ if (swaitqueue_active(wqp)) {
++ swait_wake_interruptible(wqp);
+ ++vcpu->stat.halt_wakeup;
+ }
+
+@@ -1878,7 +1878,7 @@
+ continue;
+ if (vcpu == me)
+ continue;
+- if (waitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
++ if (swaitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu))
+ continue;
+ if (!kvm_vcpu_eligible_for_directed_yield(vcpu))
+ continue;
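
The KVM hunks above are a mechanical conversion of vcpu->wq from full waitqueues to the -rt simple waitqueue API. Condensed into one place, the wait/wake pairing they install looks like the sketch below; it uses only names visible in the patch, while condition_met() and the header path are assumptions:

#include <linux/sched.h>
#include <linux/wait-simple.h>	/* -rt simple waitqueues; path assumed */

static struct swait_head wq;	/* set up once via init_swait_head(&wq) */

static int condition_met(void);	/* hypothetical wake-up predicate */

static void waiter_side(void)
{
	DEFINE_SWAITER(wait);

	for (;;) {
		swait_prepare(&wq, &wait, TASK_INTERRUPTIBLE);
		if (condition_met())
			break;
		schedule();		/* sleep until the waker fires */
	}
	swait_finish(&wq, &wait);
}

static void waker_side(void)
{
	if (swaitqueue_active(&wq))	/* cheap check before waking */
		swait_wake_interruptible(&wq);
}
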
diff --git a/target/linux/patches/3.18.10/regmap-bool.patch b/target/linux/patches/3.18.12/regmap-bool.patch
index 5c0ff5e2c..5c0ff5e2c 100644
--- a/target/linux/patches/3.18.10/regmap-bool.patch
+++ b/target/linux/patches/3.18.12/regmap-bool.patch
diff --git a/target/linux/patches/3.18.10/relocs.patch b/target/linux/patches/3.18.12/relocs.patch
index 69a7c88a9..69a7c88a9 100644
--- a/target/linux/patches/3.18.10/relocs.patch
+++ b/target/linux/patches/3.18.12/relocs.patch
diff --git a/target/linux/patches/3.18.10/sgidefs.patch b/target/linux/patches/3.18.12/sgidefs.patch
index f00a284d9..f00a284d9 100644
--- a/target/linux/patches/3.18.10/sgidefs.patch
+++ b/target/linux/patches/3.18.12/sgidefs.patch
diff --git a/target/linux/patches/3.18.10/sortext.patch b/target/linux/patches/3.18.12/sortext.patch
index 8fd4e1d6b..8fd4e1d6b 100644
--- a/target/linux/patches/3.18.10/sortext.patch
+++ b/target/linux/patches/3.18.12/sortext.patch
diff --git a/target/linux/patches/3.18.10/startup.patch b/target/linux/patches/3.18.12/startup.patch
index d396b75e4..d396b75e4 100644
--- a/target/linux/patches/3.18.10/startup.patch
+++ b/target/linux/patches/3.18.12/startup.patch
diff --git a/target/linux/patches/3.18.10/wlan-cf.patch b/target/linux/patches/3.18.12/wlan-cf.patch
index fc20759e2..fc20759e2 100644
--- a/target/linux/patches/3.18.10/wlan-cf.patch
+++ b/target/linux/patches/3.18.12/wlan-cf.patch
diff --git a/target/linux/patches/3.18.10/xargs.patch b/target/linux/patches/3.18.12/xargs.patch
index 2c7b3df59..2c7b3df59 100644
--- a/target/linux/patches/3.18.10/xargs.patch
+++ b/target/linux/patches/3.18.12/xargs.patch
diff --git a/target/linux/patches/3.18.10/yaffs2.patch b/target/linux/patches/3.18.12/yaffs2.patch
index bb244c7ca..bb244c7ca 100644
--- a/target/linux/patches/3.18.10/yaffs2.patch
+++ b/target/linux/patches/3.18.12/yaffs2.patch
diff --git a/target/linux/patches/3.19.1/bsd-compatibility.patch b/target/linux/patches/3.19.5/bsd-compatibility.patch
index b954b658f..b954b658f 100644
--- a/target/linux/patches/3.19.1/bsd-compatibility.patch
+++ b/target/linux/patches/3.19.5/bsd-compatibility.patch
diff --git a/target/linux/patches/3.19.1/cleankernel.patch b/target/linux/patches/3.19.5/cleankernel.patch
index d8c055dc3..d8c055dc3 100644
--- a/target/linux/patches/3.19.1/cleankernel.patch
+++ b/target/linux/patches/3.19.5/cleankernel.patch
diff --git a/target/linux/patches/3.19.1/cris-header.patch b/target/linux/patches/3.19.5/cris-header.patch
index 2b5a88461..2b5a88461 100644
--- a/target/linux/patches/3.19.1/cris-header.patch
+++ b/target/linux/patches/3.19.5/cris-header.patch
diff --git a/target/linux/patches/3.19.1/export-symbol-for-exmap.patch b/target/linux/patches/3.19.5/export-symbol-for-exmap.patch
index 4f0fc8449..4f0fc8449 100644
--- a/target/linux/patches/3.19.1/export-symbol-for-exmap.patch
+++ b/target/linux/patches/3.19.5/export-symbol-for-exmap.patch
diff --git a/target/linux/patches/3.19.1/fblogo.patch b/target/linux/patches/3.19.5/fblogo.patch
index a090f393f..a090f393f 100644
--- a/target/linux/patches/3.19.1/fblogo.patch
+++ b/target/linux/patches/3.19.5/fblogo.patch
diff --git a/target/linux/patches/3.19.1/gemalto.patch b/target/linux/patches/3.19.5/gemalto.patch
index 65f7af1d7..65f7af1d7 100644
--- a/target/linux/patches/3.19.1/gemalto.patch
+++ b/target/linux/patches/3.19.5/gemalto.patch
diff --git a/target/linux/patches/3.19.1/initramfs-nosizelimit.patch b/target/linux/patches/3.19.5/initramfs-nosizelimit.patch
index 40d2f6bd8..40d2f6bd8 100644
--- a/target/linux/patches/3.19.1/initramfs-nosizelimit.patch
+++ b/target/linux/patches/3.19.5/initramfs-nosizelimit.patch
diff --git a/target/linux/patches/3.19.1/lemote-rfkill.patch b/target/linux/patches/3.19.5/lemote-rfkill.patch
index a61488434..a61488434 100644
--- a/target/linux/patches/3.19.1/lemote-rfkill.patch
+++ b/target/linux/patches/3.19.5/lemote-rfkill.patch
diff --git a/target/linux/patches/3.19.1/microblaze-axi.patch b/target/linux/patches/3.19.5/microblaze-axi.patch
index 1a4b17d8c..1a4b17d8c 100644
--- a/target/linux/patches/3.19.1/microblaze-axi.patch
+++ b/target/linux/patches/3.19.5/microblaze-axi.patch
diff --git a/target/linux/patches/3.19.1/microblaze-ethernet.patch b/target/linux/patches/3.19.5/microblaze-ethernet.patch
index 742ab477e..742ab477e 100644
--- a/target/linux/patches/3.19.1/microblaze-ethernet.patch
+++ b/target/linux/patches/3.19.5/microblaze-ethernet.patch
diff --git a/target/linux/patches/3.19.1/mkpiggy.patch b/target/linux/patches/3.19.5/mkpiggy.patch
index 751678b74..751678b74 100644
--- a/target/linux/patches/3.19.1/mkpiggy.patch
+++ b/target/linux/patches/3.19.5/mkpiggy.patch
diff --git a/target/linux/patches/3.19.1/mtd-rootfs.patch b/target/linux/patches/3.19.5/mtd-rootfs.patch
index 775d5fc80..775d5fc80 100644
--- a/target/linux/patches/3.19.1/mtd-rootfs.patch
+++ b/target/linux/patches/3.19.5/mtd-rootfs.patch
diff --git a/target/linux/patches/3.19.1/nfsv3-tcp.patch b/target/linux/patches/3.19.5/nfsv3-tcp.patch
index d5e07e1c2..d5e07e1c2 100644
--- a/target/linux/patches/3.19.1/nfsv3-tcp.patch
+++ b/target/linux/patches/3.19.5/nfsv3-tcp.patch
diff --git a/target/linux/patches/3.19.1/non-static.patch b/target/linux/patches/3.19.5/non-static.patch
index a967703d0..a967703d0 100644
--- a/target/linux/patches/3.19.1/non-static.patch
+++ b/target/linux/patches/3.19.5/non-static.patch
diff --git a/target/linux/patches/3.19.1/ppc64-missing-zlib.patch b/target/linux/patches/3.19.5/ppc64-missing-zlib.patch
index c6e0616be..c6e0616be 100644
--- a/target/linux/patches/3.19.1/ppc64-missing-zlib.patch
+++ b/target/linux/patches/3.19.5/ppc64-missing-zlib.patch
diff --git a/target/linux/patches/3.19.1/regmap-bool.patch b/target/linux/patches/3.19.5/regmap-bool.patch
index 5c0ff5e2c..5c0ff5e2c 100644
--- a/target/linux/patches/3.19.1/regmap-bool.patch
+++ b/target/linux/patches/3.19.5/regmap-bool.patch
diff --git a/target/linux/patches/3.19.1/relocs.patch b/target/linux/patches/3.19.5/relocs.patch
index 69a7c88a9..69a7c88a9 100644
--- a/target/linux/patches/3.19.1/relocs.patch
+++ b/target/linux/patches/3.19.5/relocs.patch
diff --git a/target/linux/patches/3.19.1/sgidefs.patch b/target/linux/patches/3.19.5/sgidefs.patch
index f00a284d9..f00a284d9 100644
--- a/target/linux/patches/3.19.1/sgidefs.patch
+++ b/target/linux/patches/3.19.5/sgidefs.patch
diff --git a/target/linux/patches/3.19.1/sortext.patch b/target/linux/patches/3.19.5/sortext.patch
index 8fd4e1d6b..8fd4e1d6b 100644
--- a/target/linux/patches/3.19.1/sortext.patch
+++ b/target/linux/patches/3.19.5/sortext.patch
diff --git a/target/linux/patches/3.19.1/sparc-aout.patch b/target/linux/patches/3.19.5/sparc-aout.patch
index 3360d6a6e..3360d6a6e 100644
--- a/target/linux/patches/3.19.1/sparc-aout.patch
+++ b/target/linux/patches/3.19.5/sparc-aout.patch
diff --git a/target/linux/patches/3.19.1/startup.patch b/target/linux/patches/3.19.5/startup.patch
index d396b75e4..d396b75e4 100644
--- a/target/linux/patches/3.19.1/startup.patch
+++ b/target/linux/patches/3.19.5/startup.patch
diff --git a/target/linux/patches/3.19.1/wlan-cf.patch b/target/linux/patches/3.19.5/wlan-cf.patch
index fc20759e2..fc20759e2 100644
--- a/target/linux/patches/3.19.1/wlan-cf.patch
+++ b/target/linux/patches/3.19.5/wlan-cf.patch
diff --git a/target/linux/patches/3.19.1/xargs.patch b/target/linux/patches/3.19.5/xargs.patch
index 2c7b3df59..2c7b3df59 100644
--- a/target/linux/patches/3.19.1/xargs.patch
+++ b/target/linux/patches/3.19.5/xargs.patch
diff --git a/target/linux/patches/3.19.1/yaffs2.patch b/target/linux/patches/3.19.5/yaffs2.patch
index bb244c7ca..bb244c7ca 100644
--- a/target/linux/patches/3.19.1/yaffs2.patch
+++ b/target/linux/patches/3.19.5/yaffs2.patch
diff --git a/target/linux/patches/3.4.106/aufs2.patch b/target/linux/patches/3.4.107/aufs2.patch
index d40c9a3fe..d40c9a3fe 100644
--- a/target/linux/patches/3.4.106/aufs2.patch
+++ b/target/linux/patches/3.4.107/aufs2.patch
diff --git a/target/linux/patches/3.4.106/bsd-compatibility.patch b/target/linux/patches/3.4.107/bsd-compatibility.patch
index 9e91a62de..9e91a62de 100644
--- a/target/linux/patches/3.4.106/bsd-compatibility.patch
+++ b/target/linux/patches/3.4.107/bsd-compatibility.patch
diff --git a/target/linux/patches/3.4.106/defaults.patch b/target/linux/patches/3.4.107/defaults.patch
index 58aae610b..58aae610b 100644
--- a/target/linux/patches/3.4.106/defaults.patch
+++ b/target/linux/patches/3.4.107/defaults.patch
diff --git a/target/linux/patches/3.4.106/gemalto.patch b/target/linux/patches/3.4.107/gemalto.patch
index 65f7af1d7..65f7af1d7 100644
--- a/target/linux/patches/3.4.106/gemalto.patch
+++ b/target/linux/patches/3.4.107/gemalto.patch
diff --git a/target/linux/patches/3.4.106/lemote-rfkill.patch b/target/linux/patches/3.4.107/lemote-rfkill.patch
index a61488434..a61488434 100644
--- a/target/linux/patches/3.4.106/lemote-rfkill.patch
+++ b/target/linux/patches/3.4.107/lemote-rfkill.patch
diff --git a/target/linux/patches/3.4.106/linux-gcc-check.patch b/target/linux/patches/3.4.107/linux-gcc-check.patch
index 7cc381845..7cc381845 100644
--- a/target/linux/patches/3.4.106/linux-gcc-check.patch
+++ b/target/linux/patches/3.4.107/linux-gcc-check.patch
diff --git a/target/linux/patches/3.4.106/mips-error.patch b/target/linux/patches/3.4.107/mips-error.patch
index 800abc80d..800abc80d 100644
--- a/target/linux/patches/3.4.106/mips-error.patch
+++ b/target/linux/patches/3.4.107/mips-error.patch
diff --git a/target/linux/patches/3.4.106/mkpiggy.patch b/target/linux/patches/3.4.107/mkpiggy.patch
index d4e815cd2..d4e815cd2 100644
--- a/target/linux/patches/3.4.106/mkpiggy.patch
+++ b/target/linux/patches/3.4.107/mkpiggy.patch
diff --git a/target/linux/patches/3.4.106/module-alloc-size-check.patch b/target/linux/patches/3.4.107/module-alloc-size-check.patch
index a792ac60a..a792ac60a 100644
--- a/target/linux/patches/3.4.106/module-alloc-size-check.patch
+++ b/target/linux/patches/3.4.107/module-alloc-size-check.patch
diff --git a/target/linux/patches/3.4.106/non-static.patch b/target/linux/patches/3.4.107/non-static.patch
index a967703d0..a967703d0 100644
--- a/target/linux/patches/3.4.106/non-static.patch
+++ b/target/linux/patches/3.4.107/non-static.patch
diff --git a/target/linux/patches/3.4.106/relocs.patch b/target/linux/patches/3.4.107/relocs.patch
index 43c5bb580..43c5bb580 100644
--- a/target/linux/patches/3.4.106/relocs.patch
+++ b/target/linux/patches/3.4.107/relocs.patch
diff --git a/target/linux/patches/3.4.106/sparc-aout.patch b/target/linux/patches/3.4.107/sparc-aout.patch
index 5cd74c2e7..5cd74c2e7 100644
--- a/target/linux/patches/3.4.106/sparc-aout.patch
+++ b/target/linux/patches/3.4.107/sparc-aout.patch
diff --git a/target/linux/patches/3.4.106/sparc-include.patch b/target/linux/patches/3.4.107/sparc-include.patch
index 2f8ffd061..2f8ffd061 100644
--- a/target/linux/patches/3.4.106/sparc-include.patch
+++ b/target/linux/patches/3.4.107/sparc-include.patch
diff --git a/target/linux/patches/3.4.106/startup.patch b/target/linux/patches/3.4.107/startup.patch
index c26430bcb..c26430bcb 100644
--- a/target/linux/patches/3.4.106/startup.patch
+++ b/target/linux/patches/3.4.107/startup.patch
diff --git a/target/linux/patches/3.4.106/usb-defaults-off.patch b/target/linux/patches/3.4.107/usb-defaults-off.patch
index 31367108a..31367108a 100644
--- a/target/linux/patches/3.4.106/usb-defaults-off.patch
+++ b/target/linux/patches/3.4.107/usb-defaults-off.patch
diff --git a/target/linux/patches/3.4.106/vga-cons-default-off.patch b/target/linux/patches/3.4.107/vga-cons-default-off.patch
index 178aeeeb9..178aeeeb9 100644
--- a/target/linux/patches/3.4.106/vga-cons-default-off.patch
+++ b/target/linux/patches/3.4.107/vga-cons-default-off.patch
diff --git a/target/linux/patches/3.4.106/wlan-cf.patch b/target/linux/patches/3.4.107/wlan-cf.patch
index fc20759e2..fc20759e2 100644
--- a/target/linux/patches/3.4.106/wlan-cf.patch
+++ b/target/linux/patches/3.4.107/wlan-cf.patch
diff --git a/target/linux/patches/3.4.106/yaffs2.patch b/target/linux/patches/3.4.107/yaffs2.patch
index 44c95915f..44c95915f 100644
--- a/target/linux/patches/3.4.106/yaffs2.patch
+++ b/target/linux/patches/3.4.107/yaffs2.patch
diff --git a/target/linux/patches/3.4.106/zlib-inflate.patch b/target/linux/patches/3.4.107/zlib-inflate.patch
index 58e1f6d21..58e1f6d21 100644
--- a/target/linux/patches/3.4.106/zlib-inflate.patch
+++ b/target/linux/patches/3.4.107/zlib-inflate.patch