From db6e6a11e601ed79a78110bf0fb9a3ac7b58e4e5 Mon Sep 17 00:00:00 2001
From: Waldemar Brodkorb
Date: Sat, 27 Aug 2016 13:11:40 +0200
Subject: add support for Qemu ARM noMMU

Greg Ungerer made me aware of this possibility. Rather hackish
implementation; we need to deduplicate later. Update the Linux kernel
to 4.4.19 while at it. Only non-Devicetree systems are supported right
now.
---
 .../linux/patches/4.4.17/coldfire-sighandler.patch |   100 -
 target/linux/patches/4.4.17/crisv32.patch          |    33 -
 .../patches/4.4.17/initramfs-nosizelimit.patch     |    57 -
 target/linux/patches/4.4.17/ld-or1k.patch          |    12 -
 target/linux/patches/4.4.17/macsonic.patch         |    11 -
 target/linux/patches/4.4.17/mips-xz.patch          |    12 -
 target/linux/patches/4.4.17/patch-realtime         | 30649 -------------------
 target/linux/patches/4.4.17/revert-sparc.patch     |   319 -
 target/linux/patches/4.4.17/startup.patch          |    34 -
 .../linux/patches/4.4.17/use-libgcc-for-sh.patch   |    29 -
 .../linux/patches/4.4.19/coldfire-sighandler.patch |   100 +
 target/linux/patches/4.4.19/crisv32.patch          |    33 +
 .../patches/4.4.19/initramfs-nosizelimit.patch     |    57 +
 target/linux/patches/4.4.19/ld-or1k.patch          |    12 +
 target/linux/patches/4.4.19/macsonic.patch         |    11 +
 target/linux/patches/4.4.19/mips-xz.patch          |    12 +
 target/linux/patches/4.4.19/patch-realtime         | 30649 +++++++++++++++++++
 target/linux/patches/4.4.19/revert-sparc.patch     |   319 +
 target/linux/patches/4.4.19/startup.patch          |    34 +
 .../linux/patches/4.4.19/use-libgcc-for-sh.patch   |    29 +
 target/linux/patches/4.4.19/versatile-nommu.patch  |    16 +
 21 files changed, 31272 insertions(+), 31256 deletions(-)
 delete mode 100644 target/linux/patches/4.4.17/coldfire-sighandler.patch
 delete mode 100644 target/linux/patches/4.4.17/crisv32.patch
 delete mode 100644 target/linux/patches/4.4.17/initramfs-nosizelimit.patch
 delete mode 100644 target/linux/patches/4.4.17/ld-or1k.patch
 delete mode 100644 target/linux/patches/4.4.17/macsonic.patch
 delete mode 100644 target/linux/patches/4.4.17/mips-xz.patch
 delete mode 100644 target/linux/patches/4.4.17/patch-realtime
 delete mode 100644 target/linux/patches/4.4.17/revert-sparc.patch
 delete mode 100644 target/linux/patches/4.4.17/startup.patch
 delete mode 100644 target/linux/patches/4.4.17/use-libgcc-for-sh.patch
 create mode 100644 target/linux/patches/4.4.19/coldfire-sighandler.patch
 create mode 100644 target/linux/patches/4.4.19/crisv32.patch
 create mode 100644 target/linux/patches/4.4.19/initramfs-nosizelimit.patch
 create mode 100644 target/linux/patches/4.4.19/ld-or1k.patch
 create mode 100644 target/linux/patches/4.4.19/macsonic.patch
 create mode 100644 target/linux/patches/4.4.19/mips-xz.patch
 create mode 100644 target/linux/patches/4.4.19/patch-realtime
 create mode 100644 target/linux/patches/4.4.19/revert-sparc.patch
 create mode 100644 target/linux/patches/4.4.19/startup.patch
 create mode 100644 target/linux/patches/4.4.19/use-libgcc-for-sh.patch
 create mode 100644 target/linux/patches/4.4.19/versatile-nommu.patch

diff --git a/target/linux/patches/4.4.17/coldfire-sighandler.patch b/target/linux/patches/4.4.17/coldfire-sighandler.patch
deleted file mode 100644
index c52a4e228..000000000
--- a/target/linux/patches/4.4.17/coldfire-sighandler.patch
+++ /dev/null
@@ -1,100 +0,0 @@
-From a95517992a37488c0bc8b629c47c570e580e407d Mon Sep 17 00:00:00 2001
-From: Greg Ungerer
-Date: Mon, 15 Feb 2016 16:36:29 +1000
-Subject: m68k: Use conventional function parameters for do_sigreturn
-
-Create conventional stack parameters for the calls to do_sigreturn and
-do_rt_sigreturn.
The current C code for do_sigreturn and do_rt_sigreturn -dig into the stack to create local pointers to the saved switch stack -and the pt_regs structs. - -The motivation for this change is a problem with non-MMU targets that -have broken signal return paths on newer versions of gcc. It appears as -though gcc has determined that the pointers into the saved stack structs, -and the saved structs themselves, are function parameters and updates to -them will be lost on function return, so they are optimized away. This -results in large parts of restore_sigcontext() and mangle_kernel_stack() -functions being removed. Of course this results in non-functional code -causing kernel oops. This problem has been observed with gcc version -5.2 and 5.3, and probably exists in earlier versions as well. - -Using conventional stack parameter pointers passed to these functions has -the advantage of the code here not needing to know the exact details of -how the underlying entry handler layed these structs out on the stack. -So the rather ugly pointer setup casting and arg referencing can be -removed. - -The resulting code after this change is a few bytes larger (due to the -overhead of creating the stack args and their tear down). Not being hot -paths I don't think this is too much of a problem here. - -An alternative solution is to put a barrier() in the do_sigreturn() code, -but this doesn't feel quite as clean as this solution. - -This change has been compile tested on all defconfigs, and run tested on -Atari (through aranym), ColdFire with MMU (M5407EVB) and ColdFire with -no-MMU (QEMU and M5208EVB). - -Signed-off-by: Greg Ungerer -Acked-by: Andreas Schwab -Signed-off-by: Geert Uytterhoeven ---- - arch/m68k/kernel/entry.S | 6 ++++++ - arch/m68k/kernel/signal.c | 8 ++------ - 2 files changed, 8 insertions(+), 6 deletions(-) - -diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S -index b54ac7a..97cd3ea 100644 ---- a/arch/m68k/kernel/entry.S -+++ b/arch/m68k/kernel/entry.S -@@ -71,13 +71,19 @@ ENTRY(__sys_vfork) - - ENTRY(sys_sigreturn) - SAVE_SWITCH_STACK -+ movel %sp,%sp@- | switch_stack pointer -+ pea %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer - jbsr do_sigreturn -+ addql #8,%sp - RESTORE_SWITCH_STACK - rts - - ENTRY(sys_rt_sigreturn) - SAVE_SWITCH_STACK -+ movel %sp,%sp@- | switch_stack pointer -+ pea %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer - jbsr do_rt_sigreturn -+ addql #8,%sp - RESTORE_SWITCH_STACK - rts - -diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c -index af1c4f3..2dcee3a 100644 ---- a/arch/m68k/kernel/signal.c -+++ b/arch/m68k/kernel/signal.c -@@ -737,10 +737,8 @@ badframe: - return 1; - } - --asmlinkage int do_sigreturn(unsigned long __unused) -+asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw) - { -- struct switch_stack *sw = (struct switch_stack *) &__unused; -- struct pt_regs *regs = (struct pt_regs *) (sw + 1); - unsigned long usp = rdusp(); - struct sigframe __user *frame = (struct sigframe __user *)(usp - 4); - sigset_t set; -@@ -764,10 +762,8 @@ badframe: - return 0; - } - --asmlinkage int do_rt_sigreturn(unsigned long __unused) -+asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw) - { -- struct switch_stack *sw = (struct switch_stack *) &__unused; -- struct pt_regs *regs = (struct pt_regs *) (sw + 1); - unsigned long usp = rdusp(); - struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4); - sigset_t set; --- -cgit v0.12 - diff --git a/target/linux/patches/4.4.17/crisv32.patch 
b/target/linux/patches/4.4.17/crisv32.patch deleted file mode 100644 index cb9b0d028..000000000 --- a/target/linux/patches/4.4.17/crisv32.patch +++ /dev/null @@ -1,33 +0,0 @@ -diff -Nur linux-4.4.13.orig/arch/cris/arch-v32/mm/intmem.c linux-4.4.13/arch/cris/arch-v32/mm/intmem.c ---- linux-4.4.13.orig/arch/cris/arch-v32/mm/intmem.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/cris/arch-v32/mm/intmem.c 2016-06-21 20:40:18.919361891 +0200 -@@ -113,14 +113,14 @@ - - allocation->status = STATUS_FREE; - /* Join with prev and/or next if also free */ -- if ((prev != &intmem_allocations) && -+ if ((&prev->entry != &intmem_allocations) && - (prev->status == STATUS_FREE)) { - prev->size += allocation->size; - list_del(&allocation->entry); - kfree(allocation); - allocation = prev; - } -- if ((next != &intmem_allocations) && -+ if ((&next->entry != &intmem_allocations) && - (next->status == STATUS_FREE)) { - allocation->size += next->size; - list_del(&next->entry); -@@ -145,5 +145,11 @@ - (unsigned long)intmem_virtual + MEM_INTMEM_START + - RESERVED_SIZE); - } --device_initcall(crisv32_intmem_init); - -+static int __init crisv32_intmem_setup(void) -+{ -+ crisv32_intmem_init(); -+ -+ return 0; -+} -+device_initcall(crisv32_intmem_setup); diff --git a/target/linux/patches/4.4.17/initramfs-nosizelimit.patch b/target/linux/patches/4.4.17/initramfs-nosizelimit.patch deleted file mode 100644 index 40d2f6bd8..000000000 --- a/target/linux/patches/4.4.17/initramfs-nosizelimit.patch +++ /dev/null @@ -1,57 +0,0 @@ -From 9a18df7a71bfa620b1278777d64783a359d7eb4e Mon Sep 17 00:00:00 2001 -From: Thorsten Glaser -Date: Sun, 4 May 2014 01:37:54 +0200 -Subject: [PATCH] mount tmpfs-as-rootfs (initramfs) with -o - nr_blocks=0,nr_inodes=0 - -I would have preferred to write this patch to be able to pass -rootflags=nr_blocks=0,nr_inodes=0 on the kernel command line, -and then hand these rootflags over to the initramfs (tmpfs) -mount in the same way the kernel hands them over to the block -device rootfs mount. But at least the Debian/m68k initrd also -parses $rootflags from the environment and adds it to the call -to the user-space mount for the eventual root device, which -would make the kernel command line rootflags option be used in -both places (tmpfs and e.g. ext4) which is guaranteed to error -out in at least one of them. - -This change is intended to aid people in a setup where the -initrd is the final root filesystem, i.e. not mounted over. -This is especially useful in automated tests running on qemu -for boards with constrained memory (e.g. 64 MiB on sh4). - -Considering that the initramfs is normally emptied out then -overmounted, this change is probably safe for setups where -initramfs just hosts early userspace, too, since the tmpfs -backing it is not accessible any more later on, AFAICT. 
- -Signed-off-by: Thorsten Glaser ---- - init/do_mounts.c | 4 ++++ - 1 file changed, 4 insertions(+) - -diff --git a/init/do_mounts.c b/init/do_mounts.c -index 82f2288..55a4cfe 100644 ---- a/init/do_mounts.c -+++ b/init/do_mounts.c -@@ -594,6 +594,7 @@ out: - } - - static bool is_tmpfs; -+static char tmpfs_rootflags[] = "nr_blocks=0,nr_inodes=0"; - static struct dentry *rootfs_mount(struct file_system_type *fs_type, - int flags, const char *dev_name, void *data) - { -@@ -606,6 +607,9 @@ static struct dentry *rootfs_mount(struct file_system_type *fs_type, - if (IS_ENABLED(CONFIG_TMPFS) && is_tmpfs) - fill = shmem_fill_super; - -+ if (is_tmpfs) -+ data = tmpfs_rootflags; -+ - return mount_nodev(fs_type, flags, data, fill); - } - --- -2.0.0.rc0 - diff --git a/target/linux/patches/4.4.17/ld-or1k.patch b/target/linux/patches/4.4.17/ld-or1k.patch deleted file mode 100644 index 264f9166f..000000000 --- a/target/linux/patches/4.4.17/ld-or1k.patch +++ /dev/null @@ -1,12 +0,0 @@ -diff -Nur linux-4.4.5.orig/arch/openrisc/kernel/vmlinux.lds.S linux-4.4.5/arch/openrisc/kernel/vmlinux.lds.S ---- linux-4.4.5.orig/arch/openrisc/kernel/vmlinux.lds.S 2016-03-10 00:35:58.000000000 +0100 -+++ linux-4.4.5/arch/openrisc/kernel/vmlinux.lds.S 2016-03-16 09:21:07.933535532 +0100 -@@ -30,7 +30,7 @@ - #include - #include - --OUTPUT_FORMAT("elf32-or32", "elf32-or32", "elf32-or32") -+OUTPUT_FORMAT("elf32-or1k", "elf32-or1k", "elf32-or1k") - jiffies = jiffies_64 + 4; - - SECTIONS diff --git a/target/linux/patches/4.4.17/macsonic.patch b/target/linux/patches/4.4.17/macsonic.patch deleted file mode 100644 index 75a6fcad2..000000000 --- a/target/linux/patches/4.4.17/macsonic.patch +++ /dev/null @@ -1,11 +0,0 @@ -diff -Nur linux-4.4.6.orig/drivers/net/ethernet/natsemi/Kconfig linux-4.4.6/drivers/net/ethernet/natsemi/Kconfig ---- linux-4.4.6.orig/drivers/net/ethernet/natsemi/Kconfig 2016-03-16 16:43:17.000000000 +0100 -+++ linux-4.4.6/drivers/net/ethernet/natsemi/Kconfig 2016-03-18 10:43:34.000000000 +0100 -@@ -18,6 +18,7 @@ - config MACSONIC - tristate "Macintosh SONIC based ethernet (onboard, NuBus, LC, CS)" - depends on MAC -+ select BITREVERSE - ---help--- - Support for NatSemi SONIC based Ethernet devices. 
This includes - the onboard Ethernet in many Quadras as well as some LC-PDS, diff --git a/target/linux/patches/4.4.17/mips-xz.patch b/target/linux/patches/4.4.17/mips-xz.patch deleted file mode 100644 index 5cfac6254..000000000 --- a/target/linux/patches/4.4.17/mips-xz.patch +++ /dev/null @@ -1,12 +0,0 @@ -diff -Nur linux-4.4.5.orig/arch/mips/boot/compressed/Makefile linux-4.4.5/arch/mips/boot/compressed/Makefile ---- linux-4.4.5.orig/arch/mips/boot/compressed/Makefile 2016-03-10 00:35:58.000000000 +0100 -+++ linux-4.4.5/arch/mips/boot/compressed/Makefile 2016-03-15 11:35:31.951795457 +0100 -@@ -41,7 +41,7 @@ - endif - - ifdef CONFIG_KERNEL_XZ --vmlinuzobjs-y += $(obj)/../../lib/ashldi3.o -+vmlinuzobjs-y += $(obj)/../../lib/ashldi3.o $(obj)/../../lib/bswapsi.o - endif - - targets += vmlinux.bin diff --git a/target/linux/patches/4.4.17/patch-realtime b/target/linux/patches/4.4.17/patch-realtime deleted file mode 100644 index c677e4098..000000000 --- a/target/linux/patches/4.4.17/patch-realtime +++ /dev/null @@ -1,30649 +0,0 @@ -diff -Nur linux-4.4.13.orig/arch/arm/include/asm/switch_to.h linux-4.4.13/arch/arm/include/asm/switch_to.h ---- linux-4.4.13.orig/arch/arm/include/asm/switch_to.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/include/asm/switch_to.h 2016-06-22 21:55:07.966523867 +0200 -@@ -3,6 +3,13 @@ - - #include - -+#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM -+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p); -+#else -+static inline void -+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } -+#endif -+ - /* - * For v7 SMP cores running a preemptible kernel we may be pre-empted - * during a TLB maintenance operation, so execute an inner-shareable dsb -@@ -25,6 +32,7 @@ - #define switch_to(prev,next,last) \ - do { \ - __complete_pending_tlbi(); \ -+ switch_kmaps(prev, next); \ - last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ - } while (0) - -diff -Nur linux-4.4.13.orig/arch/arm/include/asm/thread_info.h linux-4.4.13/arch/arm/include/asm/thread_info.h ---- linux-4.4.13.orig/arch/arm/include/asm/thread_info.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/include/asm/thread_info.h 2016-06-22 21:55:07.966523867 +0200 -@@ -49,6 +49,7 @@ - struct thread_info { - unsigned long flags; /* low level flags */ - int preempt_count; /* 0 => preemptable, <0 => bug */ -+ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */ - mm_segment_t addr_limit; /* address limit */ - struct task_struct *task; /* main task structure */ - __u32 cpu; /* cpu */ -@@ -142,7 +143,8 @@ - #define TIF_SYSCALL_TRACE 4 /* syscall trace active */ - #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ - #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ --#define TIF_SECCOMP 7 /* seccomp syscall filtering active */ -+#define TIF_SECCOMP 8 /* seccomp syscall filtering active */ -+#define TIF_NEED_RESCHED_LAZY 7 - - #define TIF_NOHZ 12 /* in adaptive nohz mode */ - #define TIF_USING_IWMMXT 17 -@@ -152,6 +154,7 @@ - #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) - #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) - #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) -+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) - #define _TIF_UPROBE (1 << TIF_UPROBE) - #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) - #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) -@@ -167,7 +170,8 @@ - * Change these and you break ASM code in entry-common.S - */ - #define 
_TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ -- _TIF_NOTIFY_RESUME | _TIF_UPROBE) -+ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ -+ _TIF_NEED_RESCHED_LAZY) - - #endif /* __KERNEL__ */ - #endif /* __ASM_ARM_THREAD_INFO_H */ -diff -Nur linux-4.4.13.orig/arch/arm/Kconfig linux-4.4.13/arch/arm/Kconfig ---- linux-4.4.13.orig/arch/arm/Kconfig 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/Kconfig 2016-06-22 21:55:07.966523867 +0200 -@@ -33,7 +33,7 @@ - select HARDIRQS_SW_RESEND - select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT) - select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6 -- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 -+ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !PREEMPT_RT_BASE - select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 - select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT) - select HAVE_ARCH_TRACEHOOK -@@ -68,6 +68,7 @@ - select HAVE_PERF_EVENTS - select HAVE_PERF_REGS - select HAVE_PERF_USER_STACK_DUMP -+ select HAVE_PREEMPT_LAZY - select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE) - select HAVE_REGS_AND_STACK_ACCESS_API - select HAVE_SYSCALL_TRACEPOINTS -diff -Nur linux-4.4.13.orig/arch/arm/kernel/asm-offsets.c linux-4.4.13/arch/arm/kernel/asm-offsets.c ---- linux-4.4.13.orig/arch/arm/kernel/asm-offsets.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/kernel/asm-offsets.c 2016-06-22 21:55:07.970523867 +0200 -@@ -65,6 +65,7 @@ - BLANK(); - DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); - DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); -+ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count)); - DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); - DEFINE(TI_TASK, offsetof(struct thread_info, task)); - DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); -diff -Nur linux-4.4.13.orig/arch/arm/kernel/entry-armv.S linux-4.4.13/arch/arm/kernel/entry-armv.S ---- linux-4.4.13.orig/arch/arm/kernel/entry-armv.S 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/kernel/entry-armv.S 2016-06-22 21:55:07.970523867 +0200 -@@ -215,11 +215,18 @@ - #ifdef CONFIG_PREEMPT - get_thread_info tsk - ldr r8, [tsk, #TI_PREEMPT] @ get preempt count -- ldr r0, [tsk, #TI_FLAGS] @ get flags - teq r8, #0 @ if preempt count != 0 -+ bne 1f @ return from exeption -+ ldr r0, [tsk, #TI_FLAGS] @ get flags -+ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set -+ blne svc_preempt @ preempt! 
-+ -+ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count -+ teq r8, #0 @ if preempt lazy count != 0 - movne r0, #0 @ force flags to 0 -- tst r0, #_TIF_NEED_RESCHED -+ tst r0, #_TIF_NEED_RESCHED_LAZY - blne svc_preempt -+1: - #endif - - svc_exit r5, irq = 1 @ return from exception -@@ -234,8 +241,14 @@ - 1: bl preempt_schedule_irq @ irq en/disable is done inside - ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS - tst r0, #_TIF_NEED_RESCHED -+ bne 1b -+ tst r0, #_TIF_NEED_RESCHED_LAZY - reteq r8 @ go again -- b 1b -+ ldr r0, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count -+ teq r0, #0 @ if preempt lazy count != 0 -+ beq 1b -+ ret r8 @ go again -+ - #endif - - __und_fault: -diff -Nur linux-4.4.13.orig/arch/arm/kernel/entry-common.S linux-4.4.13/arch/arm/kernel/entry-common.S ---- linux-4.4.13.orig/arch/arm/kernel/entry-common.S 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/kernel/entry-common.S 2016-06-22 21:55:07.970523867 +0200 -@@ -36,7 +36,9 @@ - UNWIND(.cantunwind ) - disable_irq_notrace @ disable interrupts - ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing -- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK -+ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP) -+ bne fast_work_pending -+ tst r1, #_TIF_SECCOMP - bne fast_work_pending - - /* perform architecture specific actions before user return */ -@@ -62,8 +64,11 @@ - str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 - disable_irq_notrace @ disable interrupts - ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing -- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK -+ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP) -+ bne do_slower_path -+ tst r1, #_TIF_SECCOMP - beq no_work_pending -+do_slower_path: - UNWIND(.fnend ) - ENDPROC(ret_fast_syscall) - -diff -Nur linux-4.4.13.orig/arch/arm/kernel/process.c linux-4.4.13/arch/arm/kernel/process.c ---- linux-4.4.13.orig/arch/arm/kernel/process.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/kernel/process.c 2016-06-22 21:55:07.970523867 +0200 -@@ -319,6 +319,30 @@ - } - - #ifdef CONFIG_MMU -+/* -+ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not -+ * initialized by pgtable_page_ctor() then a coredump of the vector page will -+ * fail. 
-+ */ -+static int __init vectors_user_mapping_init_page(void) -+{ -+ struct page *page; -+ unsigned long addr = 0xffff0000; -+ pgd_t *pgd; -+ pud_t *pud; -+ pmd_t *pmd; -+ -+ pgd = pgd_offset_k(addr); -+ pud = pud_offset(pgd, addr); -+ pmd = pmd_offset(pud, addr); -+ page = pmd_page(*(pmd)); -+ -+ pgtable_page_ctor(page); -+ -+ return 0; -+} -+late_initcall(vectors_user_mapping_init_page); -+ - #ifdef CONFIG_KUSER_HELPERS - /* - * The vectors page is always readable from user space for the -diff -Nur linux-4.4.13.orig/arch/arm/kernel/signal.c linux-4.4.13/arch/arm/kernel/signal.c ---- linux-4.4.13.orig/arch/arm/kernel/signal.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/kernel/signal.c 2016-06-22 21:55:07.970523867 +0200 -@@ -572,7 +572,8 @@ - */ - trace_hardirqs_off(); - do { -- if (likely(thread_flags & _TIF_NEED_RESCHED)) { -+ if (likely(thread_flags & (_TIF_NEED_RESCHED | -+ _TIF_NEED_RESCHED_LAZY))) { - schedule(); - } else { - if (unlikely(!user_mode(regs))) -diff -Nur linux-4.4.13.orig/arch/arm/kernel/smp.c linux-4.4.13/arch/arm/kernel/smp.c ---- linux-4.4.13.orig/arch/arm/kernel/smp.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/kernel/smp.c 2016-06-22 21:55:07.974523867 +0200 -@@ -230,8 +230,6 @@ - flush_cache_louis(); - local_flush_tlb_all(); - -- clear_tasks_mm_cpumask(cpu); -- - return 0; - } - -@@ -247,6 +245,9 @@ - pr_err("CPU%u: cpu didn't die\n", cpu); - return; - } -+ -+ clear_tasks_mm_cpumask(cpu); -+ - pr_notice("CPU%u: shutdown\n", cpu); - - /* -diff -Nur linux-4.4.13.orig/arch/arm/kernel/unwind.c linux-4.4.13/arch/arm/kernel/unwind.c ---- linux-4.4.13.orig/arch/arm/kernel/unwind.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/kernel/unwind.c 2016-06-22 21:55:07.974523867 +0200 -@@ -93,7 +93,7 @@ - static const struct unwind_idx *__origin_unwind_idx; - extern const struct unwind_idx __stop_unwind_idx[]; - --static DEFINE_SPINLOCK(unwind_lock); -+static DEFINE_RAW_SPINLOCK(unwind_lock); - static LIST_HEAD(unwind_tables); - - /* Convert a prel31 symbol to an absolute address */ -@@ -201,7 +201,7 @@ - /* module unwind tables */ - struct unwind_table *table; - -- spin_lock_irqsave(&unwind_lock, flags); -+ raw_spin_lock_irqsave(&unwind_lock, flags); - list_for_each_entry(table, &unwind_tables, list) { - if (addr >= table->begin_addr && - addr < table->end_addr) { -@@ -213,7 +213,7 @@ - break; - } - } -- spin_unlock_irqrestore(&unwind_lock, flags); -+ raw_spin_unlock_irqrestore(&unwind_lock, flags); - } - - pr_debug("%s: idx = %p\n", __func__, idx); -@@ -529,9 +529,9 @@ - tab->begin_addr = text_addr; - tab->end_addr = text_addr + text_size; - -- spin_lock_irqsave(&unwind_lock, flags); -+ raw_spin_lock_irqsave(&unwind_lock, flags); - list_add_tail(&tab->list, &unwind_tables); -- spin_unlock_irqrestore(&unwind_lock, flags); -+ raw_spin_unlock_irqrestore(&unwind_lock, flags); - - return tab; - } -@@ -543,9 +543,9 @@ - if (!tab) - return; - -- spin_lock_irqsave(&unwind_lock, flags); -+ raw_spin_lock_irqsave(&unwind_lock, flags); - list_del(&tab->list); -- spin_unlock_irqrestore(&unwind_lock, flags); -+ raw_spin_unlock_irqrestore(&unwind_lock, flags); - - kfree(tab); - } -diff -Nur linux-4.4.13.orig/arch/arm/kvm/arm.c linux-4.4.13/arch/arm/kvm/arm.c ---- linux-4.4.13.orig/arch/arm/kvm/arm.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/kvm/arm.c 2016-06-22 21:55:07.974523867 +0200 -@@ -498,18 +498,18 @@ - struct kvm_vcpu *vcpu; - - kvm_for_each_vcpu(i, vcpu, kvm) { -- wait_queue_head_t *wq = 
kvm_arch_vcpu_wq(vcpu); -+ struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu); - - vcpu->arch.pause = false; -- wake_up_interruptible(wq); -+ swake_up(wq); - } - } - - static void vcpu_sleep(struct kvm_vcpu *vcpu) - { -- wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu); -+ struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu); - -- wait_event_interruptible(*wq, ((!vcpu->arch.power_off) && -+ swait_event_interruptible(*wq, ((!vcpu->arch.power_off) && - (!vcpu->arch.pause))); - } - -@@ -568,7 +568,7 @@ - * involves poking the GIC, which must be done in a - * non-preemptible context. - */ -- preempt_disable(); -+ migrate_disable(); - kvm_timer_flush_hwstate(vcpu); - kvm_vgic_flush_hwstate(vcpu); - -@@ -587,7 +587,7 @@ - local_irq_enable(); - kvm_timer_sync_hwstate(vcpu); - kvm_vgic_sync_hwstate(vcpu); -- preempt_enable(); -+ migrate_enable(); - continue; - } - -@@ -641,7 +641,7 @@ - - kvm_vgic_sync_hwstate(vcpu); - -- preempt_enable(); -+ migrate_enable(); - - ret = handle_exit(vcpu, run, ret); - } -diff -Nur linux-4.4.13.orig/arch/arm/kvm/psci.c linux-4.4.13/arch/arm/kvm/psci.c ---- linux-4.4.13.orig/arch/arm/kvm/psci.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/kvm/psci.c 2016-06-22 21:55:07.974523867 +0200 -@@ -70,7 +70,7 @@ - { - struct kvm *kvm = source_vcpu->kvm; - struct kvm_vcpu *vcpu = NULL; -- wait_queue_head_t *wq; -+ struct swait_queue_head *wq; - unsigned long cpu_id; - unsigned long context_id; - phys_addr_t target_pc; -@@ -119,7 +119,7 @@ - smp_mb(); /* Make sure the above is visible */ - - wq = kvm_arch_vcpu_wq(vcpu); -- wake_up_interruptible(wq); -+ swake_up(wq); - - return PSCI_RET_SUCCESS; - } -diff -Nur linux-4.4.13.orig/arch/arm/mach-at91/at91rm9200.c linux-4.4.13/arch/arm/mach-at91/at91rm9200.c ---- linux-4.4.13.orig/arch/arm/mach-at91/at91rm9200.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/mach-at91/at91rm9200.c 2016-06-22 21:55:07.974523867 +0200 -@@ -12,7 +12,6 @@ - #include - - #include --#include - - #include "generic.h" - #include "soc.h" -@@ -33,7 +32,6 @@ - - of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev); - -- arm_pm_idle = at91rm9200_idle; - at91rm9200_pm_init(); - } - -diff -Nur linux-4.4.13.orig/arch/arm/mach-at91/at91sam9.c linux-4.4.13/arch/arm/mach-at91/at91sam9.c ---- linux-4.4.13.orig/arch/arm/mach-at91/at91sam9.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/mach-at91/at91sam9.c 2016-06-22 21:55:07.974523867 +0200 -@@ -62,8 +62,6 @@ - soc_dev = soc_device_to_device(soc); - - of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev); -- -- arm_pm_idle = at91sam9_idle; - } - - static void __init at91sam9_dt_device_init(void) -diff -Nur linux-4.4.13.orig/arch/arm/mach-at91/generic.h linux-4.4.13/arch/arm/mach-at91/generic.h ---- linux-4.4.13.orig/arch/arm/mach-at91/generic.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/mach-at91/generic.h 2016-06-22 21:55:07.974523867 +0200 -@@ -11,27 +11,18 @@ - #ifndef _AT91_GENERIC_H - #define _AT91_GENERIC_H - --#include --#include -- -- /* Map io */ --extern void __init at91_map_io(void); --extern void __init at91_alt_map_io(void); -- --/* idle */ --extern void at91rm9200_idle(void); --extern void at91sam9_idle(void); -- - #ifdef CONFIG_PM - extern void __init at91rm9200_pm_init(void); - extern void __init at91sam9260_pm_init(void); - extern void __init at91sam9g45_pm_init(void); - extern void __init at91sam9x5_pm_init(void); -+extern void __init sama5_pm_init(void); - #else - static inline void __init 
at91rm9200_pm_init(void) { } - static inline void __init at91sam9260_pm_init(void) { } - static inline void __init at91sam9g45_pm_init(void) { } - static inline void __init at91sam9x5_pm_init(void) { } -+static inline void __init sama5_pm_init(void) { } - #endif - - #endif /* _AT91_GENERIC_H */ -diff -Nur linux-4.4.13.orig/arch/arm/mach-at91/Kconfig linux-4.4.13/arch/arm/mach-at91/Kconfig ---- linux-4.4.13.orig/arch/arm/mach-at91/Kconfig 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/mach-at91/Kconfig 2016-06-22 21:55:07.974523867 +0200 -@@ -99,6 +99,7 @@ - config COMMON_CLK_AT91 - bool - select COMMON_CLK -+ select MFD_SYSCON - - config HAVE_AT91_SMD - bool -diff -Nur linux-4.4.13.orig/arch/arm/mach-at91/pm.c linux-4.4.13/arch/arm/mach-at91/pm.c ---- linux-4.4.13.orig/arch/arm/mach-at91/pm.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/mach-at91/pm.c 2016-06-22 21:55:07.974523867 +0200 -@@ -31,10 +31,13 @@ - #include - #include - #include -+#include - - #include "generic.h" - #include "pm.h" - -+static void __iomem *pmc; -+ - /* - * FIXME: this is needed to communicate between the pinctrl driver and - * the PM implementation in the machine. Possibly part of the PM -@@ -87,7 +90,7 @@ - unsigned long scsr; - int i; - -- scsr = at91_pmc_read(AT91_PMC_SCSR); -+ scsr = readl(pmc + AT91_PMC_SCSR); - - /* USB must not be using PLLB */ - if ((scsr & at91_pm_data.uhp_udp_mask) != 0) { -@@ -101,8 +104,7 @@ - - if ((scsr & (AT91_PMC_PCK0 << i)) == 0) - continue; -- -- css = at91_pmc_read(AT91_PMC_PCKR(i)) & AT91_PMC_CSS; -+ css = readl(pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS; - if (css != AT91_PMC_CSS_SLOW) { - pr_err("AT91: PM - Suspend-to-RAM with PCK%d src %d\n", i, css); - return 0; -@@ -145,8 +147,8 @@ - flush_cache_all(); - outer_disable(); - -- at91_suspend_sram_fn(at91_pmc_base, at91_ramc_base[0], -- at91_ramc_base[1], pm_data); -+ at91_suspend_sram_fn(pmc, at91_ramc_base[0], -+ at91_ramc_base[1], pm_data); - - outer_resume(); - } -@@ -353,6 +355,21 @@ - at91_pm_set_standby(standby); - } - -+void at91rm9200_idle(void) -+{ -+ /* -+ * Disable the processor clock. The processor will be automatically -+ * re-enabled by an interrupt or by a reset. 
-+ */ -+ writel(AT91_PMC_PCK, pmc + AT91_PMC_SCDR); -+} -+ -+void at91sam9_idle(void) -+{ -+ writel(AT91_PMC_PCK, pmc + AT91_PMC_SCDR); -+ cpu_do_idle(); -+} -+ - static void __init at91_pm_sram_init(void) - { - struct gen_pool *sram_pool; -@@ -399,13 +416,36 @@ - &at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz); - } - --static void __init at91_pm_init(void) -+static const struct of_device_id atmel_pmc_ids[] __initconst = { -+ { .compatible = "atmel,at91rm9200-pmc" }, -+ { .compatible = "atmel,at91sam9260-pmc" }, -+ { .compatible = "atmel,at91sam9g45-pmc" }, -+ { .compatible = "atmel,at91sam9n12-pmc" }, -+ { .compatible = "atmel,at91sam9x5-pmc" }, -+ { .compatible = "atmel,sama5d3-pmc" }, -+ { .compatible = "atmel,sama5d2-pmc" }, -+ { /* sentinel */ }, -+}; -+ -+static void __init at91_pm_init(void (*pm_idle)(void)) - { -- at91_pm_sram_init(); -+ struct device_node *pmc_np; - - if (at91_cpuidle_device.dev.platform_data) - platform_device_register(&at91_cpuidle_device); - -+ pmc_np = of_find_matching_node(NULL, atmel_pmc_ids); -+ pmc = of_iomap(pmc_np, 0); -+ if (!pmc) { -+ pr_err("AT91: PM not supported, PMC not found\n"); -+ return; -+ } -+ -+ if (pm_idle) -+ arm_pm_idle = pm_idle; -+ -+ at91_pm_sram_init(); -+ - if (at91_suspend_sram_fn) - suspend_set_ops(&at91_pm_ops); - else -@@ -424,7 +464,7 @@ - at91_pm_data.uhp_udp_mask = AT91RM9200_PMC_UHP | AT91RM9200_PMC_UDP; - at91_pm_data.memctrl = AT91_MEMCTRL_MC; - -- at91_pm_init(); -+ at91_pm_init(at91rm9200_idle); - } - - void __init at91sam9260_pm_init(void) -@@ -432,7 +472,7 @@ - at91_dt_ramc(); - at91_pm_data.memctrl = AT91_MEMCTRL_SDRAMC; - at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP; -- return at91_pm_init(); -+ at91_pm_init(at91sam9_idle); - } - - void __init at91sam9g45_pm_init(void) -@@ -440,7 +480,7 @@ - at91_dt_ramc(); - at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP; - at91_pm_data.memctrl = AT91_MEMCTRL_DDRSDR; -- return at91_pm_init(); -+ at91_pm_init(at91sam9_idle); - } - - void __init at91sam9x5_pm_init(void) -@@ -448,5 +488,13 @@ - at91_dt_ramc(); - at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP; - at91_pm_data.memctrl = AT91_MEMCTRL_DDRSDR; -- return at91_pm_init(); -+ at91_pm_init(at91sam9_idle); -+} -+ -+void __init sama5_pm_init(void) -+{ -+ at91_dt_ramc(); -+ at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP; -+ at91_pm_data.memctrl = AT91_MEMCTRL_DDRSDR; -+ at91_pm_init(NULL); - } -diff -Nur linux-4.4.13.orig/arch/arm/mach-at91/sama5.c linux-4.4.13/arch/arm/mach-at91/sama5.c ---- linux-4.4.13.orig/arch/arm/mach-at91/sama5.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/mach-at91/sama5.c 2016-06-22 21:55:07.974523867 +0200 -@@ -51,7 +51,7 @@ - soc_dev = soc_device_to_device(soc); - - of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev); -- at91sam9x5_pm_init(); -+ sama5_pm_init(); - } - - static const char *const sama5_dt_board_compat[] __initconst = { -diff -Nur linux-4.4.13.orig/arch/arm/mach-exynos/platsmp.c linux-4.4.13/arch/arm/mach-exynos/platsmp.c ---- linux-4.4.13.orig/arch/arm/mach-exynos/platsmp.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/mach-exynos/platsmp.c 2016-06-22 21:55:07.974523867 +0200 -@@ -230,7 +230,7 @@ - return (void __iomem *)(S5P_VA_SCU); - } - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - static void exynos_secondary_init(unsigned int cpu) - { -@@ -243,8 +243,8 @@ - /* - * Synchronise with the boot thread. 
- */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr) -@@ -308,7 +308,7 @@ - * Set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * The secondary processor is waiting to be released from -@@ -335,7 +335,7 @@ - - if (timeout == 0) { - printk(KERN_ERR "cpu1 power enable failed"); -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - return -ETIMEDOUT; - } - } -@@ -381,7 +381,7 @@ - * calibrations, then wait for it to finish - */ - fail: -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return pen_release != -1 ? ret : 0; - } -diff -Nur linux-4.4.13.orig/arch/arm/mach-hisi/platmcpm.c linux-4.4.13/arch/arm/mach-hisi/platmcpm.c ---- linux-4.4.13.orig/arch/arm/mach-hisi/platmcpm.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/mach-hisi/platmcpm.c 2016-06-22 21:55:07.974523867 +0200 -@@ -61,7 +61,7 @@ - - static void __iomem *sysctrl, *fabric; - static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER]; --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - static u32 fabric_phys_addr; - /* - * [0]: bootwrapper physical address -@@ -113,7 +113,7 @@ - if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER) - return -EINVAL; - -- spin_lock_irq(&boot_lock); -+ raw_spin_lock_irq(&boot_lock); - - if (hip04_cpu_table[cluster][cpu]) - goto out; -@@ -147,7 +147,7 @@ - - out: - hip04_cpu_table[cluster][cpu]++; -- spin_unlock_irq(&boot_lock); -+ raw_spin_unlock_irq(&boot_lock); - - return 0; - } -@@ -162,11 +162,11 @@ - cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); - cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); - -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - hip04_cpu_table[cluster][cpu]--; - if (hip04_cpu_table[cluster][cpu] == 1) { - /* A power_up request went ahead of us. */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - return; - } else if (hip04_cpu_table[cluster][cpu] > 1) { - pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu); -@@ -174,7 +174,7 @@ - } - - last_man = hip04_cluster_is_down(cluster); -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - if (last_man) { - /* Since it's Cortex A15, disable L2 prefetching. */ - asm volatile( -@@ -203,7 +203,7 @@ - cpu >= HIP04_MAX_CPUS_PER_CLUSTER); - - count = TIMEOUT_MSEC / POLL_MSEC; -- spin_lock_irq(&boot_lock); -+ raw_spin_lock_irq(&boot_lock); - for (tries = 0; tries < count; tries++) { - if (hip04_cpu_table[cluster][cpu]) - goto err; -@@ -211,10 +211,10 @@ - data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster)); - if (data & CORE_WFI_STATUS(cpu)) - break; -- spin_unlock_irq(&boot_lock); -+ raw_spin_unlock_irq(&boot_lock); - /* Wait for clean L2 when the whole cluster is down. 
*/ - msleep(POLL_MSEC); -- spin_lock_irq(&boot_lock); -+ raw_spin_lock_irq(&boot_lock); - } - if (tries >= count) - goto err; -@@ -231,10 +231,10 @@ - goto err; - if (hip04_cluster_is_down(cluster)) - hip04_set_snoop_filter(cluster, 0); -- spin_unlock_irq(&boot_lock); -+ raw_spin_unlock_irq(&boot_lock); - return 1; - err: -- spin_unlock_irq(&boot_lock); -+ raw_spin_unlock_irq(&boot_lock); - return 0; - } - #endif -diff -Nur linux-4.4.13.orig/arch/arm/mach-imx/Kconfig linux-4.4.13/arch/arm/mach-imx/Kconfig ---- linux-4.4.13.orig/arch/arm/mach-imx/Kconfig 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/mach-imx/Kconfig 2016-06-22 21:55:07.974523867 +0200 -@@ -524,7 +524,7 @@ - bool "i.MX6 Quad/DualLite support" - select ARM_ERRATA_764369 if SMP - select HAVE_ARM_SCU if SMP -- select HAVE_ARM_TWD if SMP -+ select HAVE_ARM_TWD - select PCI_DOMAINS if PCI - select PINCTRL_IMX6Q - select SOC_IMX6 -diff -Nur linux-4.4.13.orig/arch/arm/mach-omap2/omap-smp.c linux-4.4.13/arch/arm/mach-omap2/omap-smp.c ---- linux-4.4.13.orig/arch/arm/mach-omap2/omap-smp.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/mach-omap2/omap-smp.c 2016-06-22 21:55:07.974523867 +0200 -@@ -43,7 +43,7 @@ - /* SCU base address */ - static void __iomem *scu_base; - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - void __iomem *omap4_get_scu_base(void) - { -@@ -74,8 +74,8 @@ - /* - * Synchronise with the boot thread. - */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) -@@ -89,7 +89,7 @@ - * Set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * Update the AuxCoreBoot0 with boot state for secondary core. -@@ -166,7 +166,7 @@ - * Now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return 0; - } -diff -Nur linux-4.4.13.orig/arch/arm/mach-prima2/platsmp.c linux-4.4.13/arch/arm/mach-prima2/platsmp.c ---- linux-4.4.13.orig/arch/arm/mach-prima2/platsmp.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/mach-prima2/platsmp.c 2016-06-22 21:55:07.974523867 +0200 -@@ -22,7 +22,7 @@ - - static void __iomem *clk_base; - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - static void sirfsoc_secondary_init(unsigned int cpu) - { -@@ -36,8 +36,8 @@ - /* - * Synchronise with the boot thread. - */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - static const struct of_device_id clk_ids[] = { -@@ -75,7 +75,7 @@ - /* make sure write buffer is drained */ - mb(); - -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * The secondary processor is waiting to be released from -@@ -107,7 +107,7 @@ - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return pen_release != -1 ? 
-ENOSYS : 0; - } -diff -Nur linux-4.4.13.orig/arch/arm/mach-qcom/platsmp.c linux-4.4.13/arch/arm/mach-qcom/platsmp.c ---- linux-4.4.13.orig/arch/arm/mach-qcom/platsmp.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/mach-qcom/platsmp.c 2016-06-22 21:55:07.974523867 +0200 -@@ -46,7 +46,7 @@ - - extern void secondary_startup_arm(void); - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - #ifdef CONFIG_HOTPLUG_CPU - static void qcom_cpu_die(unsigned int cpu) -@@ -60,8 +60,8 @@ - /* - * Synchronise with the boot thread. - */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - static int scss_release_secondary(unsigned int cpu) -@@ -284,7 +284,7 @@ - * set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * Send the secondary CPU a soft interrupt, thereby causing -@@ -297,7 +297,7 @@ - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return ret; - } -diff -Nur linux-4.4.13.orig/arch/arm/mach-spear/platsmp.c linux-4.4.13/arch/arm/mach-spear/platsmp.c ---- linux-4.4.13.orig/arch/arm/mach-spear/platsmp.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/mach-spear/platsmp.c 2016-06-22 21:55:07.974523867 +0200 -@@ -32,7 +32,7 @@ - sync_cache_w(&pen_release); - } - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - static void __iomem *scu_base = IOMEM(VA_SCU_BASE); - -@@ -47,8 +47,8 @@ - /* - * Synchronise with the boot thread. - */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) -@@ -59,7 +59,7 @@ - * set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * The secondary processor is waiting to be released from -@@ -84,7 +84,7 @@ - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return pen_release != -1 ? -ENOSYS : 0; - } -diff -Nur linux-4.4.13.orig/arch/arm/mach-sti/platsmp.c linux-4.4.13/arch/arm/mach-sti/platsmp.c ---- linux-4.4.13.orig/arch/arm/mach-sti/platsmp.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/mach-sti/platsmp.c 2016-06-22 21:55:07.974523867 +0200 -@@ -35,7 +35,7 @@ - sync_cache_w(&pen_release); - } - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - static void sti_secondary_init(unsigned int cpu) - { -@@ -48,8 +48,8 @@ - /* - * Synchronise with the boot thread. 
- */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) -@@ -60,7 +60,7 @@ - * set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * The secondary processor is waiting to be released from -@@ -91,7 +91,7 @@ - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return pen_release != -1 ? -ENOSYS : 0; - } -diff -Nur linux-4.4.13.orig/arch/arm/mm/fault.c linux-4.4.13/arch/arm/mm/fault.c ---- linux-4.4.13.orig/arch/arm/mm/fault.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/mm/fault.c 2016-06-22 21:55:07.974523867 +0200 -@@ -430,6 +430,9 @@ - if (addr < TASK_SIZE) - return do_page_fault(addr, fsr, regs); - -+ if (interrupts_enabled(regs)) -+ local_irq_enable(); -+ - if (user_mode(regs)) - goto bad_area; - -@@ -497,6 +500,9 @@ - static int - do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) - { -+ if (interrupts_enabled(regs)) -+ local_irq_enable(); -+ - do_bad_area(addr, fsr, regs); - return 0; - } -diff -Nur linux-4.4.13.orig/arch/arm/mm/highmem.c linux-4.4.13/arch/arm/mm/highmem.c ---- linux-4.4.13.orig/arch/arm/mm/highmem.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/mm/highmem.c 2016-06-22 21:55:07.974523867 +0200 -@@ -34,6 +34,11 @@ - return *ptep; - } - -+static unsigned int fixmap_idx(int type) -+{ -+ return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); -+} -+ - void *kmap(struct page *page) - { - might_sleep(); -@@ -54,12 +59,13 @@ - - void *kmap_atomic(struct page *page) - { -+ pte_t pte = mk_pte(page, kmap_prot); - unsigned int idx; - unsigned long vaddr; - void *kmap; - int type; - -- preempt_disable(); -+ preempt_disable_nort(); - pagefault_disable(); - if (!PageHighMem(page)) - return page_address(page); -@@ -79,7 +85,7 @@ - - type = kmap_atomic_idx_push(); - -- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); -+ idx = fixmap_idx(type); - vaddr = __fix_to_virt(idx); - #ifdef CONFIG_DEBUG_HIGHMEM - /* -@@ -93,7 +99,10 @@ - * in place, so the contained TLB flush ensures the TLB is updated - * with the new mapping. 
- */ -- set_fixmap_pte(idx, mk_pte(page, kmap_prot)); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = pte; -+#endif -+ set_fixmap_pte(idx, pte); - - return (void *)vaddr; - } -@@ -106,44 +115,75 @@ - - if (kvaddr >= (void *)FIXADDR_START) { - type = kmap_atomic_idx(); -- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); -+ idx = fixmap_idx(type); - - if (cache_is_vivt()) - __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = __pte(0); -+#endif - #ifdef CONFIG_DEBUG_HIGHMEM - BUG_ON(vaddr != __fix_to_virt(idx)); -- set_fixmap_pte(idx, __pte(0)); - #else - (void) idx; /* to kill a warning */ - #endif -+ set_fixmap_pte(idx, __pte(0)); - kmap_atomic_idx_pop(); - } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { - /* this address was obtained through kmap_high_get() */ - kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)])); - } - pagefault_enable(); -- preempt_enable(); -+ preempt_enable_nort(); - } - EXPORT_SYMBOL(__kunmap_atomic); - - void *kmap_atomic_pfn(unsigned long pfn) - { -+ pte_t pte = pfn_pte(pfn, kmap_prot); - unsigned long vaddr; - int idx, type; - struct page *page = pfn_to_page(pfn); - -- preempt_disable(); -+ preempt_disable_nort(); - pagefault_disable(); - if (!PageHighMem(page)) - return page_address(page); - - type = kmap_atomic_idx_push(); -- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); -+ idx = fixmap_idx(type); - vaddr = __fix_to_virt(idx); - #ifdef CONFIG_DEBUG_HIGHMEM - BUG_ON(!pte_none(get_fixmap_pte(vaddr))); - #endif -- set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot)); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = pte; -+#endif -+ set_fixmap_pte(idx, pte); - - return (void *)vaddr; - } -+#if defined CONFIG_PREEMPT_RT_FULL -+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) -+{ -+ int i; -+ -+ /* -+ * Clear @prev's kmap_atomic mappings -+ */ -+ for (i = 0; i < prev_p->kmap_idx; i++) { -+ int idx = fixmap_idx(i); -+ -+ set_fixmap_pte(idx, __pte(0)); -+ } -+ /* -+ * Restore @next_p's kmap_atomic mappings -+ */ -+ for (i = 0; i < next_p->kmap_idx; i++) { -+ int idx = fixmap_idx(i); -+ -+ if (!pte_none(next_p->kmap_pte[i])) -+ set_fixmap_pte(idx, next_p->kmap_pte[i]); -+ } -+} -+#endif -diff -Nur linux-4.4.13.orig/arch/arm/plat-versatile/platsmp.c linux-4.4.13/arch/arm/plat-versatile/platsmp.c ---- linux-4.4.13.orig/arch/arm/plat-versatile/platsmp.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm/plat-versatile/platsmp.c 2016-06-22 21:55:07.974523867 +0200 -@@ -30,7 +30,7 @@ - sync_cache_w(&pen_release); - } - --static DEFINE_SPINLOCK(boot_lock); -+static DEFINE_RAW_SPINLOCK(boot_lock); - - void versatile_secondary_init(unsigned int cpu) - { -@@ -43,8 +43,8 @@ - /* - * Synchronise with the boot thread. - */ -- spin_lock(&boot_lock); -- spin_unlock(&boot_lock); -+ raw_spin_lock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - } - - int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) -@@ -55,7 +55,7 @@ - * Set synchronisation state between this boot processor - * and the secondary one - */ -- spin_lock(&boot_lock); -+ raw_spin_lock(&boot_lock); - - /* - * This is really belt and braces; we hold unintended secondary -@@ -85,7 +85,7 @@ - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ -- spin_unlock(&boot_lock); -+ raw_spin_unlock(&boot_lock); - - return pen_release != -1 ? 
-ENOSYS : 0; - } -diff -Nur linux-4.4.13.orig/arch/arm64/include/asm/thread_info.h linux-4.4.13/arch/arm64/include/asm/thread_info.h ---- linux-4.4.13.orig/arch/arm64/include/asm/thread_info.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm64/include/asm/thread_info.h 2016-06-22 21:55:07.974523867 +0200 -@@ -49,6 +49,7 @@ - mm_segment_t addr_limit; /* address limit */ - struct task_struct *task; /* main task structure */ - int preempt_count; /* 0 => preemptable, <0 => bug */ -+ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */ - int cpu; /* cpu */ - }; - -@@ -103,6 +104,7 @@ - #define TIF_NEED_RESCHED 1 - #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ - #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */ -+#define TIF_NEED_RESCHED_LAZY 4 - #define TIF_NOHZ 7 - #define TIF_SYSCALL_TRACE 8 - #define TIF_SYSCALL_AUDIT 9 -@@ -118,6 +120,7 @@ - #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) - #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) - #define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE) -+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) - #define _TIF_NOHZ (1 << TIF_NOHZ) - #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) - #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) -@@ -126,7 +129,8 @@ - #define _TIF_32BIT (1 << TIF_32BIT) - - #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ -- _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE) -+ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ -+ _TIF_NEED_RESCHED_LAZY) - - #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ - _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ -diff -Nur linux-4.4.13.orig/arch/arm64/Kconfig linux-4.4.13/arch/arm64/Kconfig ---- linux-4.4.13.orig/arch/arm64/Kconfig 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm64/Kconfig 2016-06-22 21:55:07.974523867 +0200 -@@ -76,6 +76,7 @@ - select HAVE_PERF_REGS - select HAVE_PERF_USER_STACK_DUMP - select HAVE_RCU_TABLE_FREE -+ select HAVE_PREEMPT_LAZY - select HAVE_SYSCALL_TRACEPOINTS - select IOMMU_DMA if IOMMU_SUPPORT - select IRQ_DOMAIN -@@ -562,7 +563,7 @@ - - config XEN - bool "Xen guest support on ARM64" -- depends on ARM64 && OF -+ depends on ARM64 && OF && !PREEMPT_RT_FULL - select SWIOTLB_XEN - help - Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64. 
-diff -Nur linux-4.4.13.orig/arch/arm64/kernel/asm-offsets.c linux-4.4.13/arch/arm64/kernel/asm-offsets.c ---- linux-4.4.13.orig/arch/arm64/kernel/asm-offsets.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm64/kernel/asm-offsets.c 2016-06-22 21:55:07.974523867 +0200 -@@ -35,6 +35,7 @@ - BLANK(); - DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); - DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); -+ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count)); - DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); - DEFINE(TI_TASK, offsetof(struct thread_info, task)); - DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); -diff -Nur linux-4.4.13.orig/arch/arm64/kernel/entry.S linux-4.4.13/arch/arm64/kernel/entry.S ---- linux-4.4.13.orig/arch/arm64/kernel/entry.S 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/arm64/kernel/entry.S 2016-06-22 21:55:07.978523867 +0200 -@@ -363,11 +363,16 @@ - #ifdef CONFIG_PREEMPT - get_thread_info tsk - ldr w24, [tsk, #TI_PREEMPT] // get preempt count -- cbnz w24, 1f // preempt count != 0 -+ cbnz w24, 2f // preempt count != 0 - ldr x0, [tsk, #TI_FLAGS] // get flags -- tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? -- bl el1_preempt -+ tbnz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? -+ -+ ldr w24, [tsk, #TI_PREEMPT_LAZY] // get preempt lazy count -+ cbnz w24, 2f // preempt lazy count != 0 -+ tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling? - 1: -+ bl el1_preempt -+2: - #endif - #ifdef CONFIG_TRACE_IRQFLAGS - bl trace_hardirqs_on -@@ -381,6 +386,7 @@ - 1: bl preempt_schedule_irq // irq en/disable is done inside - ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS - tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling? -+ tbnz x0, #TIF_NEED_RESCHED_LAZY, 1b // needs rescheduling? 
- ret x24 - #endif - -@@ -625,6 +631,7 @@ - */ - work_pending: - tbnz x1, #TIF_NEED_RESCHED, work_resched -+ tbnz x1, #TIF_NEED_RESCHED_LAZY, work_resched - /* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */ - ldr x2, [sp, #S_PSTATE] - mov x0, sp // 'regs' -diff -Nur linux-4.4.13.orig/arch/Kconfig linux-4.4.13/arch/Kconfig ---- linux-4.4.13.orig/arch/Kconfig 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/Kconfig 2016-06-22 21:55:07.978523867 +0200 -@@ -9,6 +9,7 @@ - tristate "OProfile system profiling" - depends on PROFILING - depends on HAVE_OPROFILE -+ depends on !PREEMPT_RT_FULL - select RING_BUFFER - select RING_BUFFER_ALLOW_SWAP - help -@@ -52,6 +53,7 @@ - config JUMP_LABEL - bool "Optimize very unlikely/likely branches" - depends on HAVE_ARCH_JUMP_LABEL -+ depends on (!INTERRUPT_OFF_HIST && !PREEMPT_OFF_HIST && !WAKEUP_LATENCY_HIST && !MISSED_TIMER_OFFSETS_HIST) - help - This option enables a transparent branch optimization that - makes certain almost-always-true or almost-always-false branch -diff -Nur linux-4.4.13.orig/arch/mips/Kconfig linux-4.4.13/arch/mips/Kconfig ---- linux-4.4.13.orig/arch/mips/Kconfig 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/mips/Kconfig 2016-06-22 21:55:07.978523867 +0200 -@@ -2410,7 +2410,7 @@ - # - config HIGHMEM - bool "High Memory Support" -- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA -+ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL - - config CPU_SUPPORTS_HIGHMEM - bool -diff -Nur linux-4.4.13.orig/arch/mips/kvm/mips.c linux-4.4.13/arch/mips/kvm/mips.c ---- linux-4.4.13.orig/arch/mips/kvm/mips.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/mips/kvm/mips.c 2016-06-22 21:55:07.978523867 +0200 -@@ -445,8 +445,8 @@ - - dvcpu->arch.wait = 0; - -- if (waitqueue_active(&dvcpu->wq)) -- wake_up_interruptible(&dvcpu->wq); -+ if (swait_active(&dvcpu->wq)) -+ swake_up(&dvcpu->wq); - - return 0; - } -@@ -1174,8 +1174,8 @@ - kvm_mips_callbacks->queue_timer_int(vcpu); - - vcpu->arch.wait = 0; -- if (waitqueue_active(&vcpu->wq)) -- wake_up_interruptible(&vcpu->wq); -+ if (swait_active(&vcpu->wq)) -+ swake_up(&vcpu->wq); - } - - /* low level hrtimer wake routine */ -diff -Nur linux-4.4.13.orig/arch/powerpc/include/asm/kvm_host.h linux-4.4.13/arch/powerpc/include/asm/kvm_host.h ---- linux-4.4.13.orig/arch/powerpc/include/asm/kvm_host.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/powerpc/include/asm/kvm_host.h 2016-06-22 21:55:07.978523867 +0200 -@@ -286,7 +286,7 @@ - struct list_head runnable_threads; - struct list_head preempt_list; - spinlock_t lock; -- wait_queue_head_t wq; -+ struct swait_queue_head wq; - spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */ - u64 stolen_tb; - u64 preempt_tb; -@@ -626,7 +626,7 @@ - u8 prodded; - u32 last_inst; - -- wait_queue_head_t *wqp; -+ struct swait_queue_head *wqp; - struct kvmppc_vcore *vcore; - int ret; - int trap; -diff -Nur linux-4.4.13.orig/arch/powerpc/include/asm/thread_info.h linux-4.4.13/arch/powerpc/include/asm/thread_info.h ---- linux-4.4.13.orig/arch/powerpc/include/asm/thread_info.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/powerpc/include/asm/thread_info.h 2016-06-22 21:55:07.978523867 +0200 -@@ -42,6 +42,8 @@ - int cpu; /* cpu we're on */ - int preempt_count; /* 0 => preemptable, - <0 => BUG */ -+ int preempt_lazy_count; /* 0 => preemptable, -+ <0 => BUG */ - unsigned long local_flags; /* private flags for 
thread */ - - /* low level flags - has atomic operations done on it */ -@@ -82,8 +84,7 @@ - #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ - #define TIF_SIGPENDING 1 /* signal pending */ - #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ --#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling -- TIF_NEED_RESCHED */ -+#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling necessary */ - #define TIF_32BIT 4 /* 32 bit binary */ - #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ - #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ -@@ -101,6 +102,8 @@ - #if defined(CONFIG_PPC64) - #define TIF_ELF2ABI 18 /* function descriptors must die! */ - #endif -+#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling -+ TIF_NEED_RESCHED */ - - /* as above, but as bit values */ - #define _TIF_SYSCALL_TRACE (1<flags) - set_bits(irqtp->flags, &curtp->flags); - } -+#endif - - irq_hw_number_t virq_to_hw(unsigned int virq) - { -diff -Nur linux-4.4.13.orig/arch/powerpc/kernel/misc_32.S linux-4.4.13/arch/powerpc/kernel/misc_32.S ---- linux-4.4.13.orig/arch/powerpc/kernel/misc_32.S 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/powerpc/kernel/misc_32.S 2016-06-22 21:55:07.978523867 +0200 -@@ -40,6 +40,7 @@ - * We store the saved ksp_limit in the unused part - * of the STACK_FRAME_OVERHEAD - */ -+#ifndef CONFIG_PREEMPT_RT_FULL - _GLOBAL(call_do_softirq) - mflr r0 - stw r0,4(r1) -@@ -56,6 +57,7 @@ - stw r10,THREAD+KSP_LIMIT(r2) - mtlr r0 - blr -+#endif - - /* - * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp); -diff -Nur linux-4.4.13.orig/arch/powerpc/kernel/misc_64.S linux-4.4.13/arch/powerpc/kernel/misc_64.S ---- linux-4.4.13.orig/arch/powerpc/kernel/misc_64.S 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/powerpc/kernel/misc_64.S 2016-06-22 21:55:07.978523867 +0200 -@@ -30,6 +30,7 @@ - - .text - -+#ifndef CONFIG_PREEMPT_RT_FULL - _GLOBAL(call_do_softirq) - mflr r0 - std r0,16(r1) -@@ -40,6 +41,7 @@ - ld r0,16(r1) - mtlr r0 - blr -+#endif - - _GLOBAL(call_do_irq) - mflr r0 -diff -Nur linux-4.4.13.orig/arch/powerpc/kvm/book3s_hv.c linux-4.4.13/arch/powerpc/kvm/book3s_hv.c ---- linux-4.4.13.orig/arch/powerpc/kvm/book3s_hv.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/powerpc/kvm/book3s_hv.c 2016-06-22 21:55:07.978523867 +0200 -@@ -114,11 +114,11 @@ - static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) - { - int cpu; -- wait_queue_head_t *wqp; -+ struct swait_queue_head *wqp; - - wqp = kvm_arch_vcpu_wq(vcpu); -- if (waitqueue_active(wqp)) { -- wake_up_interruptible(wqp); -+ if (swait_active(wqp)) { -+ swake_up(wqp); - ++vcpu->stat.halt_wakeup; - } - -@@ -707,8 +707,8 @@ - tvcpu->arch.prodded = 1; - smp_mb(); - if (vcpu->arch.ceded) { -- if (waitqueue_active(&vcpu->wq)) { -- wake_up_interruptible(&vcpu->wq); -+ if (swait_active(&vcpu->wq)) { -+ swake_up(&vcpu->wq); - vcpu->stat.halt_wakeup++; - } - } -@@ -1447,7 +1447,7 @@ - INIT_LIST_HEAD(&vcore->runnable_threads); - spin_lock_init(&vcore->lock); - spin_lock_init(&vcore->stoltb_lock); -- init_waitqueue_head(&vcore->wq); -+ init_swait_queue_head(&vcore->wq); - vcore->preempt_tb = TB_NIL; - vcore->lpcr = kvm->arch.lpcr; - vcore->first_vcpuid = core * threads_per_subcore; -@@ -2519,10 +2519,9 @@ - { - struct kvm_vcpu *vcpu; - int do_sleep = 1; -+ DECLARE_SWAITQUEUE(wait); - -- DEFINE_WAIT(wait); -- -- prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE); -+ prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE); - - /* - * Check one last time for pending 
exceptions and ceded state after -@@ -2536,7 +2535,7 @@ - } - - if (!do_sleep) { -- finish_wait(&vc->wq, &wait); -+ finish_swait(&vc->wq, &wait); - return; - } - -@@ -2544,7 +2543,7 @@ - trace_kvmppc_vcore_blocked(vc, 0); - spin_unlock(&vc->lock); - schedule(); -- finish_wait(&vc->wq, &wait); -+ finish_swait(&vc->wq, &wait); - spin_lock(&vc->lock); - vc->vcore_state = VCORE_INACTIVE; - trace_kvmppc_vcore_blocked(vc, 1); -@@ -2600,7 +2599,7 @@ - kvmppc_start_thread(vcpu, vc); - trace_kvm_guest_enter(vcpu); - } else if (vc->vcore_state == VCORE_SLEEPING) { -- wake_up(&vc->wq); -+ swake_up(&vc->wq); - } - - } -diff -Nur linux-4.4.13.orig/arch/powerpc/kvm/Kconfig linux-4.4.13/arch/powerpc/kvm/Kconfig ---- linux-4.4.13.orig/arch/powerpc/kvm/Kconfig 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/powerpc/kvm/Kconfig 2016-06-22 21:55:07.978523867 +0200 -@@ -172,6 +172,7 @@ - config KVM_MPIC - bool "KVM in-kernel MPIC emulation" - depends on KVM && E500 -+ depends on !PREEMPT_RT_FULL - select HAVE_KVM_IRQCHIP - select HAVE_KVM_IRQFD - select HAVE_KVM_IRQ_ROUTING -diff -Nur linux-4.4.13.orig/arch/powerpc/platforms/ps3/device-init.c linux-4.4.13/arch/powerpc/platforms/ps3/device-init.c ---- linux-4.4.13.orig/arch/powerpc/platforms/ps3/device-init.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/powerpc/platforms/ps3/device-init.c 2016-06-22 21:55:07.982523867 +0200 -@@ -752,7 +752,7 @@ - } - pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op); - -- res = wait_event_interruptible(dev->done.wait, -+ res = swait_event_interruptible(dev->done.wait, - dev->done.done || kthread_should_stop()); - if (kthread_should_stop()) - res = -EINTR; -diff -Nur linux-4.4.13.orig/arch/s390/include/asm/kvm_host.h linux-4.4.13/arch/s390/include/asm/kvm_host.h ---- linux-4.4.13.orig/arch/s390/include/asm/kvm_host.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/s390/include/asm/kvm_host.h 2016-06-22 21:55:07.982523867 +0200 -@@ -427,7 +427,7 @@ - struct kvm_s390_local_interrupt { - spinlock_t lock; - struct kvm_s390_float_interrupt *float_int; -- wait_queue_head_t *wq; -+ struct swait_queue_head *wq; - atomic_t *cpuflags; - DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS); - struct kvm_s390_irq_payload irq; -diff -Nur linux-4.4.13.orig/arch/s390/kvm/interrupt.c linux-4.4.13/arch/s390/kvm/interrupt.c ---- linux-4.4.13.orig/arch/s390/kvm/interrupt.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/s390/kvm/interrupt.c 2016-06-22 21:55:07.982523867 +0200 -@@ -868,13 +868,13 @@ - - void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu) - { -- if (waitqueue_active(&vcpu->wq)) { -+ if (swait_active(&vcpu->wq)) { - /* - * The vcpu gave up the cpu voluntarily, mark it as a good - * yield-candidate. 
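
Aside: the KVM hunks above (MIPS, PowerPC book3s_hv, PS3, s390) all apply one conversion: wait_queue_head_t becomes struct swait_queue_head, because a classic waitqueue wake-up walks an unbounded waiter list under a spinlock_t, which PREEMPT_RT turns into a sleeping lock. A minimal sketch of both sides of the pattern, with demo_* names standing in for the real KVM structures:

#include <linux/swait.h>
#include <linux/sched.h>

static DECLARE_SWAIT_QUEUE_HEAD(demo_wq);
static bool demo_cond;

/* Wake side: callable from hard-IRQ/atomic context; O(1) wake on RT. */
static void demo_waker(void)
{
	demo_cond = true;
	if (swait_active(&demo_wq))
		swake_up(&demo_wq);
}

/* Sleep side: mirrors the DECLARE_SWAITQUEUE/prepare_to_swait use above. */
static void demo_sleeper(void)
{
	DECLARE_SWAITQUEUE(wait);

	prepare_to_swait(&demo_wq, &wait, TASK_INTERRUPTIBLE);
	if (!demo_cond)
		schedule();
	finish_swait(&demo_wq, &wait);
}
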
- */ - vcpu->preempted = true; -- wake_up_interruptible(&vcpu->wq); -+ swake_up(&vcpu->wq); - vcpu->stat.halt_wakeup++; - } - } -diff -Nur linux-4.4.13.orig/arch/sh/kernel/irq.c linux-4.4.13/arch/sh/kernel/irq.c ---- linux-4.4.13.orig/arch/sh/kernel/irq.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/sh/kernel/irq.c 2016-06-22 21:55:07.982523867 +0200 -@@ -147,6 +147,7 @@ - hardirq_ctx[cpu] = NULL; - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - void do_softirq_own_stack(void) - { - struct thread_info *curctx; -@@ -174,6 +175,7 @@ - "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr" - ); - } -+#endif - #else - static inline void handle_one_irq(unsigned int irq) - { -diff -Nur linux-4.4.13.orig/arch/sparc/Kconfig linux-4.4.13/arch/sparc/Kconfig ---- linux-4.4.13.orig/arch/sparc/Kconfig 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/sparc/Kconfig 2016-06-22 21:55:07.982523867 +0200 -@@ -189,12 +189,10 @@ - source kernel/Kconfig.hz - - config RWSEM_GENERIC_SPINLOCK -- bool -- default y if SPARC32 -+ def_bool PREEMPT_RT_FULL - - config RWSEM_XCHGADD_ALGORITHM -- bool -- default y if SPARC64 -+ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL - - config GENERIC_HWEIGHT - bool -diff -Nur linux-4.4.13.orig/arch/sparc/kernel/irq_64.c linux-4.4.13/arch/sparc/kernel/irq_64.c ---- linux-4.4.13.orig/arch/sparc/kernel/irq_64.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/sparc/kernel/irq_64.c 2016-06-22 21:55:07.982523867 +0200 -@@ -854,6 +854,7 @@ - set_irq_regs(old_regs); - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - void do_softirq_own_stack(void) - { - void *orig_sp, *sp = softirq_stack[smp_processor_id()]; -@@ -868,6 +869,7 @@ - __asm__ __volatile__("mov %0, %%sp" - : : "r" (orig_sp)); - } -+#endif - - #ifdef CONFIG_HOTPLUG_CPU - void fixup_irqs(void) -diff -Nur linux-4.4.13.orig/arch/x86/crypto/aesni-intel_glue.c linux-4.4.13/arch/x86/crypto/aesni-intel_glue.c ---- linux-4.4.13.orig/arch/x86/crypto/aesni-intel_glue.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/crypto/aesni-intel_glue.c 2016-06-22 21:55:07.982523867 +0200 -@@ -383,14 +383,14 @@ - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - -- kernel_fpu_begin(); - while ((nbytes = walk.nbytes)) { -+ kernel_fpu_begin(); - aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, -- nbytes & AES_BLOCK_MASK); -+ nbytes & AES_BLOCK_MASK); -+ kernel_fpu_end(); - nbytes &= AES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); - } -- kernel_fpu_end(); - - return err; - } -@@ -407,14 +407,14 @@ - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - -- kernel_fpu_begin(); - while ((nbytes = walk.nbytes)) { -+ kernel_fpu_begin(); - aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, - nbytes & AES_BLOCK_MASK); -+ kernel_fpu_end(); - nbytes &= AES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); - } -- kernel_fpu_end(); - - return err; - } -@@ -431,14 +431,14 @@ - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - -- kernel_fpu_begin(); - while ((nbytes = walk.nbytes)) { -+ kernel_fpu_begin(); - aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, - nbytes & AES_BLOCK_MASK, walk.iv); -+ kernel_fpu_end(); - nbytes &= AES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); - } -- kernel_fpu_end(); - - return err; - } -@@ -455,14 +455,14 @@ - err = blkcipher_walk_virt(desc, &walk); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - -- 
kernel_fpu_begin(); - while ((nbytes = walk.nbytes)) { -+ kernel_fpu_begin(); - aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, - nbytes & AES_BLOCK_MASK, walk.iv); -+ kernel_fpu_end(); - nbytes &= AES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); - } -- kernel_fpu_end(); - - return err; - } -@@ -514,18 +514,20 @@ - err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - -- kernel_fpu_begin(); - while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { -+ kernel_fpu_begin(); - aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr, - nbytes & AES_BLOCK_MASK, walk.iv); -+ kernel_fpu_end(); - nbytes &= AES_BLOCK_SIZE - 1; - err = blkcipher_walk_done(desc, &walk, nbytes); - } - if (walk.nbytes) { -+ kernel_fpu_begin(); - ctr_crypt_final(ctx, &walk); -+ kernel_fpu_end(); - err = blkcipher_walk_done(desc, &walk, 0); - } -- kernel_fpu_end(); - - return err; - } -diff -Nur linux-4.4.13.orig/arch/x86/crypto/cast5_avx_glue.c linux-4.4.13/arch/x86/crypto/cast5_avx_glue.c ---- linux-4.4.13.orig/arch/x86/crypto/cast5_avx_glue.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/crypto/cast5_avx_glue.c 2016-06-22 21:55:07.982523867 +0200 -@@ -59,7 +59,7 @@ - static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, - bool enc) - { -- bool fpu_enabled = false; -+ bool fpu_enabled; - struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); - const unsigned int bsize = CAST5_BLOCK_SIZE; - unsigned int nbytes; -@@ -75,7 +75,7 @@ - u8 *wsrc = walk->src.virt.addr; - u8 *wdst = walk->dst.virt.addr; - -- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); -+ fpu_enabled = cast5_fpu_begin(false, nbytes); - - /* Process multi-block batch */ - if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) { -@@ -103,10 +103,9 @@ - } while (nbytes >= bsize); - - done: -+ cast5_fpu_end(fpu_enabled); - err = blkcipher_walk_done(desc, walk, nbytes); - } -- -- cast5_fpu_end(fpu_enabled); - return err; - } - -@@ -227,7 +226,7 @@ - static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) - { -- bool fpu_enabled = false; -+ bool fpu_enabled; - struct blkcipher_walk walk; - int err; - -@@ -236,12 +235,11 @@ - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - - while ((nbytes = walk.nbytes)) { -- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); -+ fpu_enabled = cast5_fpu_begin(false, nbytes); - nbytes = __cbc_decrypt(desc, &walk); -+ cast5_fpu_end(fpu_enabled); - err = blkcipher_walk_done(desc, &walk, nbytes); - } -- -- cast5_fpu_end(fpu_enabled); - return err; - } - -@@ -311,7 +309,7 @@ - static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, - struct scatterlist *src, unsigned int nbytes) - { -- bool fpu_enabled = false; -+ bool fpu_enabled; - struct blkcipher_walk walk; - int err; - -@@ -320,13 +318,12 @@ - desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; - - while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) { -- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); -+ fpu_enabled = cast5_fpu_begin(false, nbytes); - nbytes = __ctr_crypt(desc, &walk); -+ cast5_fpu_end(fpu_enabled); - err = blkcipher_walk_done(desc, &walk, nbytes); - } - -- cast5_fpu_end(fpu_enabled); -- - if (walk.nbytes) { - ctr_crypt_final(desc, &walk); - err = blkcipher_walk_done(desc, &walk, 0); -diff -Nur linux-4.4.13.orig/arch/x86/crypto/glue_helper.c linux-4.4.13/arch/x86/crypto/glue_helper.c ---- linux-4.4.13.orig/arch/x86/crypto/glue_helper.c 2016-06-08 03:14:51.000000000 
+0200 -+++ linux-4.4.13/arch/x86/crypto/glue_helper.c 2016-06-22 21:55:07.982523867 +0200 -@@ -39,7 +39,7 @@ - void *ctx = crypto_blkcipher_ctx(desc->tfm); - const unsigned int bsize = 128 / 8; - unsigned int nbytes, i, func_bytes; -- bool fpu_enabled = false; -+ bool fpu_enabled; - int err; - - err = blkcipher_walk_virt(desc, walk); -@@ -49,7 +49,7 @@ - u8 *wdst = walk->dst.virt.addr; - - fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, -- desc, fpu_enabled, nbytes); -+ desc, false, nbytes); - - for (i = 0; i < gctx->num_funcs; i++) { - func_bytes = bsize * gctx->funcs[i].num_blocks; -@@ -71,10 +71,10 @@ - } - - done: -+ glue_fpu_end(fpu_enabled); - err = blkcipher_walk_done(desc, walk, nbytes); - } - -- glue_fpu_end(fpu_enabled); - return err; - } - -@@ -194,7 +194,7 @@ - struct scatterlist *src, unsigned int nbytes) - { - const unsigned int bsize = 128 / 8; -- bool fpu_enabled = false; -+ bool fpu_enabled; - struct blkcipher_walk walk; - int err; - -@@ -203,12 +203,12 @@ - - while ((nbytes = walk.nbytes)) { - fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, -- desc, fpu_enabled, nbytes); -+ desc, false, nbytes); - nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk); -+ glue_fpu_end(fpu_enabled); - err = blkcipher_walk_done(desc, &walk, nbytes); - } - -- glue_fpu_end(fpu_enabled); - return err; - } - EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit); -@@ -277,7 +277,7 @@ - struct scatterlist *src, unsigned int nbytes) - { - const unsigned int bsize = 128 / 8; -- bool fpu_enabled = false; -+ bool fpu_enabled; - struct blkcipher_walk walk; - int err; - -@@ -286,13 +286,12 @@ - - while ((nbytes = walk.nbytes) >= bsize) { - fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, -- desc, fpu_enabled, nbytes); -+ desc, false, nbytes); - nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk); -+ glue_fpu_end(fpu_enabled); - err = blkcipher_walk_done(desc, &walk, nbytes); - } - -- glue_fpu_end(fpu_enabled); -- - if (walk.nbytes) { - glue_ctr_crypt_final_128bit( - gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk); -@@ -347,7 +346,7 @@ - void *tweak_ctx, void *crypt_ctx) - { - const unsigned int bsize = 128 / 8; -- bool fpu_enabled = false; -+ bool fpu_enabled; - struct blkcipher_walk walk; - int err; - -@@ -360,21 +359,21 @@ - - /* set minimum length to bsize, for tweak_fn */ - fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, -- desc, fpu_enabled, -+ desc, false, - nbytes < bsize ? 
bsize : nbytes); -- - /* calculate first value of T */ - tweak_fn(tweak_ctx, walk.iv, walk.iv); -+ glue_fpu_end(fpu_enabled); - - while (nbytes) { -+ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, -+ desc, false, nbytes); - nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk); - -+ glue_fpu_end(fpu_enabled); - err = blkcipher_walk_done(desc, &walk, nbytes); - nbytes = walk.nbytes; - } -- -- glue_fpu_end(fpu_enabled); -- - return err; - } - EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit); -diff -Nur linux-4.4.13.orig/arch/x86/entry/common.c linux-4.4.13/arch/x86/entry/common.c ---- linux-4.4.13.orig/arch/x86/entry/common.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/entry/common.c 2016-06-22 21:55:07.982523867 +0200 -@@ -220,7 +220,7 @@ - - #define EXIT_TO_USERMODE_LOOP_FLAGS \ - (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ -- _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY) -+ _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY) - - static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) - { -@@ -236,9 +236,16 @@ - /* We have work to do. */ - local_irq_enable(); - -- if (cached_flags & _TIF_NEED_RESCHED) -+ if (cached_flags & _TIF_NEED_RESCHED_MASK) - schedule(); - -+#ifdef ARCH_RT_DELAYS_SIGNAL_SEND -+ if (unlikely(current->forced_info.si_signo)) { -+ struct task_struct *t = current; -+ force_sig_info(t->forced_info.si_signo, &t->forced_info, t); -+ t->forced_info.si_signo = 0; -+ } -+#endif - if (cached_flags & _TIF_UPROBE) - uprobe_notify_resume(regs); - -diff -Nur linux-4.4.13.orig/arch/x86/entry/entry_32.S linux-4.4.13/arch/x86/entry/entry_32.S ---- linux-4.4.13.orig/arch/x86/entry/entry_32.S 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/entry/entry_32.S 2016-06-22 21:55:07.982523867 +0200 -@@ -278,8 +278,24 @@ - ENTRY(resume_kernel) - DISABLE_INTERRUPTS(CLBR_ANY) - need_resched: -+ # preempt count == 0 + NEED_RS set? - cmpl $0, PER_CPU_VAR(__preempt_count) -+#ifndef CONFIG_PREEMPT_LAZY - jnz restore_all -+#else -+ jz test_int_off -+ -+ # atleast preempt count == 0 ? -+ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count) -+ jne restore_all -+ -+ cmpl $0,TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ? -+ jnz restore_all -+ -+ testl $_TIF_NEED_RESCHED_LAZY, TI_flags(%ebp) -+ jz restore_all -+test_int_off: -+#endif - testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ? - jz restore_all - call preempt_schedule_irq -diff -Nur linux-4.4.13.orig/arch/x86/entry/entry_64.S linux-4.4.13/arch/x86/entry/entry_64.S ---- linux-4.4.13.orig/arch/x86/entry/entry_64.S 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/entry/entry_64.S 2016-06-22 21:55:07.982523867 +0200 -@@ -579,7 +579,23 @@ - bt $9, EFLAGS(%rsp) /* were interrupts off? */ - jnc 1f - 0: cmpl $0, PER_CPU_VAR(__preempt_count) -+#ifndef CONFIG_PREEMPT_LAZY - jnz 1f -+#else -+ jz do_preempt_schedule_irq -+ -+ # atleast preempt count == 0 ? -+ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count) -+ jnz 1f -+ -+ GET_THREAD_INFO(%rcx) -+ cmpl $0, TI_preempt_lazy_count(%rcx) -+ jnz 1f -+ -+ bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx) -+ jnc 1f -+do_preempt_schedule_irq: -+#endif - call preempt_schedule_irq - jmp 0b - 1: -@@ -867,6 +883,7 @@ - jmp 2b - .previous - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* Call softirq on interrupt stack. Interrupts are off. 
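
Aside: the aesni/cast5/glue_helper hunks further up share one idea: kernel_fpu_begin() disables preemption until kernel_fpu_end(), so holding the pair across an entire blkcipher walk creates an unbounded preempt-off region. Moving it inside the loop bounds that region to one chunk. Distilled sketch (demo_encrypt_chunk() is a stand-in for aesni_ecb_enc() and friends; AES_BLOCK_MASK comes from the aesni glue code):

#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <asm/fpu/api.h>

void demo_encrypt_chunk(void *ctx, u8 *dst, const u8 *src, unsigned int len);

static int demo_ecb_encrypt(struct blkcipher_desc *desc,
			    struct blkcipher_walk *walk, void *ctx)
{
	unsigned int nbytes;
	int err = blkcipher_walk_virt(desc, walk);

	while ((nbytes = walk->nbytes)) {
		kernel_fpu_begin();		/* preempt off: one chunk only */
		demo_encrypt_chunk(ctx, walk->dst.virt.addr,
				   walk->src.virt.addr,
				   nbytes & AES_BLOCK_MASK);
		kernel_fpu_end();		/* preemptible again */
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, walk, nbytes);
	}
	return err;
}
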
*/ - ENTRY(do_softirq_own_stack) - pushq %rbp -@@ -879,6 +896,7 @@ - decl PER_CPU_VAR(irq_count) - ret - END(do_softirq_own_stack) -+#endif - - #ifdef CONFIG_XEN - idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0 -diff -Nur linux-4.4.13.orig/arch/x86/include/asm/preempt.h linux-4.4.13/arch/x86/include/asm/preempt.h ---- linux-4.4.13.orig/arch/x86/include/asm/preempt.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/include/asm/preempt.h 2016-06-22 21:55:07.982523867 +0200 -@@ -79,17 +79,33 @@ - * a decrement which hits zero means we have no preempt_count and should - * reschedule. - */ --static __always_inline bool __preempt_count_dec_and_test(void) -+static __always_inline bool ____preempt_count_dec_and_test(void) - { - GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e"); - } - -+static __always_inline bool __preempt_count_dec_and_test(void) -+{ -+ if (____preempt_count_dec_and_test()) -+ return true; -+#ifdef CONFIG_PREEMPT_LAZY -+ return test_thread_flag(TIF_NEED_RESCHED_LAZY); -+#else -+ return false; -+#endif -+} -+ - /* - * Returns true when we need to resched and can (barring IRQ state). - */ - static __always_inline bool should_resched(int preempt_offset) - { -+#ifdef CONFIG_PREEMPT_LAZY -+ return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset || -+ test_thread_flag(TIF_NEED_RESCHED_LAZY)); -+#else - return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset); -+#endif - } - - #ifdef CONFIG_PREEMPT -diff -Nur linux-4.4.13.orig/arch/x86/include/asm/signal.h linux-4.4.13/arch/x86/include/asm/signal.h ---- linux-4.4.13.orig/arch/x86/include/asm/signal.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/include/asm/signal.h 2016-06-22 21:55:07.982523867 +0200 -@@ -23,6 +23,19 @@ - unsigned long sig[_NSIG_WORDS]; - } sigset_t; - -+/* -+ * Because some traps use the IST stack, we must keep preemption -+ * disabled while calling do_trap(), but do_trap() may call -+ * force_sig_info() which will grab the signal spin_locks for the -+ * task, which in PREEMPT_RT_FULL are mutexes. By defining -+ * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set -+ * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the -+ * trap. -+ */ -+#if defined(CONFIG_PREEMPT_RT_FULL) -+#define ARCH_RT_DELAYS_SIGNAL_SEND -+#endif -+ - #ifndef CONFIG_COMPAT - typedef sigset_t compat_sigset_t; - #endif -diff -Nur linux-4.4.13.orig/arch/x86/include/asm/stackprotector.h linux-4.4.13/arch/x86/include/asm/stackprotector.h ---- linux-4.4.13.orig/arch/x86/include/asm/stackprotector.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/include/asm/stackprotector.h 2016-06-22 21:55:07.982523867 +0200 -@@ -59,7 +59,7 @@ - */ - static __always_inline void boot_init_stack_canary(void) - { -- u64 canary; -+ u64 uninitialized_var(canary); - u64 tsc; - - #ifdef CONFIG_X86_64 -@@ -70,8 +70,15 @@ - * of randomness. The TSC only matters for very early init, - * there it already has some randomness on most systems. Later - * on during the bootup the random pool has true entropy too. -+ * -+ * For preempt-rt we need to weaken the randomness a bit, as -+ * we can't call into the random generator from atomic context -+ * due to locking constraints. We just leave canary -+ * uninitialized and use the TSC based randomness on top of it. 
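
Aside: ARCH_RT_DELAYS_SIGNAL_SEND (the signal.h hunk above) and the exit_to_usermode_loop() hunk cooperate. A trap handler on the IST stack cannot take the sighand locks, which RT turns into mutexes, so force_sig_info() instead parks the siginfo in the task (task_struct gains a forced_info field elsewhere in this patch) and the return-to-user path delivers it. The delivery side, as in the hunk:

#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
	if (unlikely(current->forced_info.si_signo)) {
		struct task_struct *t = current;

		/* Now in a context where taking the sighand locks is legal. */
		force_sig_info(t->forced_info.si_signo, &t->forced_info, t);
		t->forced_info.si_signo = 0;	/* mark consumed */
	}
#endif
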
- */ -+#ifndef CONFIG_PREEMPT_RT_FULL - get_random_bytes(&canary, sizeof(canary)); -+#endif - tsc = rdtsc(); - canary += tsc + (tsc << 32UL); - -diff -Nur linux-4.4.13.orig/arch/x86/include/asm/thread_info.h linux-4.4.13/arch/x86/include/asm/thread_info.h ---- linux-4.4.13.orig/arch/x86/include/asm/thread_info.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/include/asm/thread_info.h 2016-06-22 21:55:07.982523867 +0200 -@@ -58,6 +58,8 @@ - __u32 status; /* thread synchronous flags */ - __u32 cpu; /* current CPU */ - mm_segment_t addr_limit; -+ int preempt_lazy_count; /* 0 => lazy preemptable -+ <0 => BUG */ - unsigned int sig_on_uaccess_error:1; - unsigned int uaccess_err:1; /* uaccess failed */ - }; -@@ -95,6 +97,7 @@ - #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ - #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ - #define TIF_SECCOMP 8 /* secure computing */ -+#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */ - #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ - #define TIF_UPROBE 12 /* breakpointed or singlestepping */ - #define TIF_NOTSC 16 /* TSC is not accessible in userland */ -@@ -119,6 +122,7 @@ - #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) - #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) - #define _TIF_SECCOMP (1 << TIF_SECCOMP) -+#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) - #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) - #define _TIF_UPROBE (1 << TIF_UPROBE) - #define _TIF_NOTSC (1 << TIF_NOTSC) -@@ -152,6 +156,8 @@ - #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) - #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) - -+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) -+ - #define STACK_WARN (THREAD_SIZE/8) - - /* -diff -Nur linux-4.4.13.orig/arch/x86/include/asm/uv/uv_bau.h linux-4.4.13/arch/x86/include/asm/uv/uv_bau.h ---- linux-4.4.13.orig/arch/x86/include/asm/uv/uv_bau.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/include/asm/uv/uv_bau.h 2016-06-22 21:55:07.982523867 +0200 -@@ -615,9 +615,9 @@ - cycles_t send_message; - cycles_t period_end; - cycles_t period_time; -- spinlock_t uvhub_lock; -- spinlock_t queue_lock; -- spinlock_t disable_lock; -+ raw_spinlock_t uvhub_lock; -+ raw_spinlock_t queue_lock; -+ raw_spinlock_t disable_lock; - /* tunables */ - int max_concurr; - int max_concurr_const; -@@ -776,15 +776,15 @@ - * to be lowered below the current 'v'. atomic_add_unless can only stop - * on equal. 
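
Aside: the thread_info/preempt.h/entry hunks above implement "lazy" preemption: TIF_NEED_RESCHED_LAZY marks a reschedule that may wait for the next natural preemption point (cheap for SCHED_OTHER tasks), while TIF_NEED_RESCHED keeps its immediate meaning for RT tasks. Two places must honour the new flag; restated in plain C (the real x86 checks are the assembly above):

/* 1. Return to user space: either flag forces a trip through schedule(). */
#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)

	if (cached_flags & _TIF_NEED_RESCHED_MASK)
		schedule();

/* 2. A preempt_enable() dropping the count to zero must check it too. */
static __always_inline bool demo_preempt_count_dec_and_test(void)
{
	if (____preempt_count_dec_and_test())
		return true;
#ifdef CONFIG_PREEMPT_LAZY
	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
#else
	return false;
#endif
}
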
- */ --static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u) -+static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u) - { -- spin_lock(lock); -+ raw_spin_lock(lock); - if (atomic_read(v) >= u) { -- spin_unlock(lock); -+ raw_spin_unlock(lock); - return 0; - } - atomic_inc(v); -- spin_unlock(lock); -+ raw_spin_unlock(lock); - return 1; - } - -diff -Nur linux-4.4.13.orig/arch/x86/include/asm/uv/uv_hub.h linux-4.4.13/arch/x86/include/asm/uv/uv_hub.h ---- linux-4.4.13.orig/arch/x86/include/asm/uv/uv_hub.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/include/asm/uv/uv_hub.h 2016-06-22 21:55:07.982523867 +0200 -@@ -492,7 +492,7 @@ - unsigned short nr_online_cpus; - unsigned short pnode; - short memory_nid; -- spinlock_t nmi_lock; /* obsolete, see uv_hub_nmi */ -+ raw_spinlock_t nmi_lock; /* obsolete, see uv_hub_nmi */ - unsigned long nmi_count; /* obsolete, see uv_hub_nmi */ - }; - extern struct uv_blade_info *uv_blade_info; -diff -Nur linux-4.4.13.orig/arch/x86/Kconfig linux-4.4.13/arch/x86/Kconfig ---- linux-4.4.13.orig/arch/x86/Kconfig 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/Kconfig 2016-06-22 21:55:07.986523867 +0200 -@@ -17,6 +17,7 @@ - ### Arch settings - config X86 - def_bool y -+ select HAVE_PREEMPT_LAZY - select ACPI_LEGACY_TABLES_LOOKUP if ACPI - select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI - select ANON_INODES -@@ -212,8 +213,11 @@ - def_bool y - depends on ISA_DMA_API - -+config RWSEM_GENERIC_SPINLOCK -+ def_bool PREEMPT_RT_FULL -+ - config RWSEM_XCHGADD_ALGORITHM -- def_bool y -+ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL - - config GENERIC_CALIBRATE_DELAY - def_bool y -@@ -848,7 +852,7 @@ - config MAXSMP - bool "Enable Maximum number of SMP Processors and NUMA Nodes" - depends on X86_64 && SMP && DEBUG_KERNEL -- select CPUMASK_OFFSTACK -+ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL - ---help--- - Enable maximum number of CPUS and NUMA Nodes for this architecture. - If unsure, say N. 
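
Aside: the uv_bau/uv_hub hunks above are the standard RT lock split: spinlock_t becomes a sleeping rtmutex under PREEMPT_RT_FULL, so locks taken where sleeping is impossible (NMI, hard-IRQ, inside other raw locks) must stay raw_spinlock_t. The converted atomic_inc_unless_ge() helper, reduced to a standalone sketch:

#include <linux/spinlock.h>
#include <linux/atomic.h>

static DEFINE_RAW_SPINLOCK(demo_lock);

/* Increment v unless it already reached limit; never sleeps, even on RT. */
static int demo_inc_unless_ge(atomic_t *v, int limit)
{
	int ret = 0;

	raw_spin_lock(&demo_lock);
	if (atomic_read(v) < limit) {
		atomic_inc(v);
		ret = 1;
	}
	raw_spin_unlock(&demo_lock);
	return ret;
}
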
-diff -Nur linux-4.4.13.orig/arch/x86/kernel/apic/io_apic.c linux-4.4.13/arch/x86/kernel/apic/io_apic.c ---- linux-4.4.13.orig/arch/x86/kernel/apic/io_apic.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/kernel/apic/io_apic.c 2016-06-22 21:55:07.986523867 +0200 -@@ -1711,7 +1711,8 @@ - static inline bool ioapic_irqd_mask(struct irq_data *data) - { - /* If we are moving the irq we need to mask it */ -- if (unlikely(irqd_is_setaffinity_pending(data))) { -+ if (unlikely(irqd_is_setaffinity_pending(data) && -+ !irqd_irq_inprogress(data))) { - mask_ioapic_irq(data); - return true; - } -diff -Nur linux-4.4.13.orig/arch/x86/kernel/apic/x2apic_uv_x.c linux-4.4.13/arch/x86/kernel/apic/x2apic_uv_x.c ---- linux-4.4.13.orig/arch/x86/kernel/apic/x2apic_uv_x.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/kernel/apic/x2apic_uv_x.c 2016-06-22 21:55:07.986523867 +0200 -@@ -947,7 +947,7 @@ - uv_blade_info[blade].pnode = pnode; - uv_blade_info[blade].nr_possible_cpus = 0; - uv_blade_info[blade].nr_online_cpus = 0; -- spin_lock_init(&uv_blade_info[blade].nmi_lock); -+ raw_spin_lock_init(&uv_blade_info[blade].nmi_lock); - min_pnode = min(pnode, min_pnode); - max_pnode = max(pnode, max_pnode); - blade++; -diff -Nur linux-4.4.13.orig/arch/x86/kernel/asm-offsets.c linux-4.4.13/arch/x86/kernel/asm-offsets.c ---- linux-4.4.13.orig/arch/x86/kernel/asm-offsets.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/kernel/asm-offsets.c 2016-06-22 21:55:07.986523867 +0200 -@@ -32,6 +32,7 @@ - OFFSET(TI_flags, thread_info, flags); - OFFSET(TI_status, thread_info, status); - OFFSET(TI_addr_limit, thread_info, addr_limit); -+ OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count); - - BLANK(); - OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); -@@ -89,4 +90,5 @@ - - BLANK(); - DEFINE(PTREGS_SIZE, sizeof(struct pt_regs)); -+ DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED); - } -diff -Nur linux-4.4.13.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.4.13/arch/x86/kernel/cpu/mcheck/mce.c ---- linux-4.4.13.orig/arch/x86/kernel/cpu/mcheck/mce.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/kernel/cpu/mcheck/mce.c 2016-06-22 21:55:07.986523867 +0200 -@@ -41,6 +41,8 @@ - #include - #include - #include -+#include -+#include - - #include - #include -@@ -1236,7 +1238,7 @@ - static unsigned long check_interval = INITIAL_CHECK_INTERVAL; - - static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ --static DEFINE_PER_CPU(struct timer_list, mce_timer); -+static DEFINE_PER_CPU(struct hrtimer, mce_timer); - - static unsigned long mce_adjust_timer_default(unsigned long interval) - { -@@ -1245,32 +1247,18 @@ - - static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default; - --static void __restart_timer(struct timer_list *t, unsigned long interval) -+static enum hrtimer_restart __restart_timer(struct hrtimer *timer, unsigned long interval) - { -- unsigned long when = jiffies + interval; -- unsigned long flags; -- -- local_irq_save(flags); -- -- if (timer_pending(t)) { -- if (time_before(when, t->expires)) -- mod_timer_pinned(t, when); -- } else { -- t->expires = round_jiffies(when); -- add_timer_on(t, smp_processor_id()); -- } -- -- local_irq_restore(flags); -+ if (!interval) -+ return HRTIMER_NORESTART; -+ hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_nsecs(interval))); -+ return HRTIMER_RESTART; - } - --static void mce_timer_fn(unsigned long data) -+static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer) - { -- 
struct timer_list *t = this_cpu_ptr(&mce_timer); -- int cpu = smp_processor_id(); - unsigned long iv; - -- WARN_ON(cpu != data); -- - iv = __this_cpu_read(mce_next_interval); - - if (mce_available(this_cpu_ptr(&cpu_info))) { -@@ -1293,7 +1281,7 @@ - - done: - __this_cpu_write(mce_next_interval, iv); -- __restart_timer(t, iv); -+ return __restart_timer(timer, iv); - } - - /* -@@ -1301,7 +1289,7 @@ - */ - void mce_timer_kick(unsigned long interval) - { -- struct timer_list *t = this_cpu_ptr(&mce_timer); -+ struct hrtimer *t = this_cpu_ptr(&mce_timer); - unsigned long iv = __this_cpu_read(mce_next_interval); - - __restart_timer(t, interval); -@@ -1316,7 +1304,7 @@ - int cpu; - - for_each_online_cpu(cpu) -- del_timer_sync(&per_cpu(mce_timer, cpu)); -+ hrtimer_cancel(&per_cpu(mce_timer, cpu)); - } - - static void mce_do_trigger(struct work_struct *work) -@@ -1326,6 +1314,56 @@ - - static DECLARE_WORK(mce_trigger_work, mce_do_trigger); - -+static void __mce_notify_work(struct swork_event *event) -+{ -+ /* Not more than two messages every minute */ -+ static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); -+ -+ /* wake processes polling /dev/mcelog */ -+ wake_up_interruptible(&mce_chrdev_wait); -+ -+ /* -+ * There is no risk of missing notifications because -+ * work_pending is always cleared before the function is -+ * executed. -+ */ -+ if (mce_helper[0] && !work_pending(&mce_trigger_work)) -+ schedule_work(&mce_trigger_work); -+ -+ if (__ratelimit(&ratelimit)) -+ pr_info(HW_ERR "Machine check events logged\n"); -+} -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+static bool notify_work_ready __read_mostly; -+static struct swork_event notify_work; -+ -+static int mce_notify_work_init(void) -+{ -+ int err; -+ -+ err = swork_get(); -+ if (err) -+ return err; -+ -+ INIT_SWORK(¬ify_work, __mce_notify_work); -+ notify_work_ready = true; -+ return 0; -+} -+ -+static void mce_notify_work(void) -+{ -+ if (notify_work_ready) -+ swork_queue(¬ify_work); -+} -+#else -+static void mce_notify_work(void) -+{ -+ __mce_notify_work(NULL); -+} -+static inline int mce_notify_work_init(void) { return 0; } -+#endif -+ - /* - * Notify the user(s) about new machine check events. 
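
Aside: the mce hunks above make two coupled changes: the per-CPU polling timer becomes a self-re-arming hrtimer, and the user-visible notification moves out of (possibly NMI-tainted) atomic context into an swork kthread, because wake_up_interruptible() takes sleeping locks on RT. Both halves in one sketch (demo_* names are illustrative; swork is an API this patch introduces, not mainline):

#include <linux/hrtimer.h>
#include <linux/ktime.h>
#include <linux/swork.h>		/* RT-patch API, not mainline */
#include <linux/wait.h>

static struct hrtimer demo_timer;
static struct swork_event demo_event;
static DECLARE_WAIT_QUEUE_HEAD(demo_wait);

/* Periodic hrtimer: re-arm by forwarding the expiry, then RESTART. */
static enum hrtimer_restart demo_timer_fn(struct hrtimer *t)
{
	swork_queue(&demo_event);	/* defer the wakeup, see below */
	hrtimer_forward_now(t, ms_to_ktime(1000));
	return HRTIMER_RESTART;
}

/* Runs in kthread context, where sleeping locks are fine on RT. */
static void demo_notify_cb(struct swork_event *ev)
{
	wake_up_interruptible(&demo_wait);
}

static int __init demo_init(void)
{
	int err = swork_get();		/* bring up the swork kthread */

	if (err)
		return err;
	INIT_SWORK(&demo_event, demo_notify_cb);

	hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	demo_timer.function = demo_timer_fn;
	hrtimer_start(&demo_timer, ms_to_ktime(1000), HRTIMER_MODE_REL);
	return 0;
}
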
- * Can be called from interrupt context, but not from machine check/NMI -@@ -1333,19 +1371,8 @@ - */ - int mce_notify_irq(void) - { -- /* Not more than two messages every minute */ -- static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); -- - if (test_and_clear_bit(0, &mce_need_notify)) { -- /* wake processes polling /dev/mcelog */ -- wake_up_interruptible(&mce_chrdev_wait); -- -- if (mce_helper[0]) -- schedule_work(&mce_trigger_work); -- -- if (__ratelimit(&ratelimit)) -- pr_info(HW_ERR "Machine check events logged\n"); -- -+ mce_notify_work(); - return 1; - } - return 0; -@@ -1639,7 +1666,7 @@ - } - } - --static void mce_start_timer(unsigned int cpu, struct timer_list *t) -+static void mce_start_timer(unsigned int cpu, struct hrtimer *t) - { - unsigned long iv = check_interval * HZ; - -@@ -1648,16 +1675,17 @@ - - per_cpu(mce_next_interval, cpu) = iv; - -- t->expires = round_jiffies(jiffies + iv); -- add_timer_on(t, cpu); -+ hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL), -+ 0, HRTIMER_MODE_REL_PINNED); - } - - static void __mcheck_cpu_init_timer(void) - { -- struct timer_list *t = this_cpu_ptr(&mce_timer); -+ struct hrtimer *t = this_cpu_ptr(&mce_timer); - unsigned int cpu = smp_processor_id(); - -- setup_timer(t, mce_timer_fn, cpu); -+ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL); -+ t->function = mce_timer_fn; - mce_start_timer(cpu, t); - } - -@@ -2376,6 +2404,8 @@ - if (!mce_available(raw_cpu_ptr(&cpu_info))) - return; - -+ hrtimer_cancel(this_cpu_ptr(&mce_timer)); -+ - if (!(action & CPU_TASKS_FROZEN)) - cmci_clear(); - -@@ -2398,6 +2428,7 @@ - if (b->init) - wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl); - } -+ __mcheck_cpu_init_timer(); - } - - /* Get notified when a cpu comes on/off. Be hotplug friendly. */ -@@ -2405,7 +2436,6 @@ - mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) - { - unsigned int cpu = (unsigned long)hcpu; -- struct timer_list *t = &per_cpu(mce_timer, cpu); - - switch (action & ~CPU_TASKS_FROZEN) { - case CPU_ONLINE: -@@ -2425,11 +2455,9 @@ - break; - case CPU_DOWN_PREPARE: - smp_call_function_single(cpu, mce_disable_cpu, &action, 1); -- del_timer_sync(t); - break; - case CPU_DOWN_FAILED: - smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); -- mce_start_timer(cpu, t); - break; - } - -@@ -2468,6 +2496,10 @@ - goto err_out; - } - -+ err = mce_notify_work_init(); -+ if (err) -+ goto err_out; -+ - if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) { - err = -ENOMEM; - goto err_out; -diff -Nur linux-4.4.13.orig/arch/x86/kernel/cpu/perf_event_intel_rapl.c linux-4.4.13/arch/x86/kernel/cpu/perf_event_intel_rapl.c ---- linux-4.4.13.orig/arch/x86/kernel/cpu/perf_event_intel_rapl.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/kernel/cpu/perf_event_intel_rapl.c 2016-06-22 21:55:07.986523867 +0200 -@@ -117,7 +117,7 @@ - }; - - struct rapl_pmu { -- spinlock_t lock; -+ raw_spinlock_t lock; - int n_active; /* number of active events */ - struct list_head active_list; - struct pmu *pmu; /* pointer to rapl_pmu_class */ -@@ -220,13 +220,13 @@ - if (!pmu->n_active) - return HRTIMER_NORESTART; - -- spin_lock_irqsave(&pmu->lock, flags); -+ raw_spin_lock_irqsave(&pmu->lock, flags); - - list_for_each_entry(event, &pmu->active_list, active_entry) { - rapl_event_update(event); - } - -- spin_unlock_irqrestore(&pmu->lock, flags); -+ raw_spin_unlock_irqrestore(&pmu->lock, flags); - - hrtimer_forward_now(hrtimer, pmu->timer_interval); - -@@ -263,9 +263,9 @@ - struct rapl_pmu *pmu = 
__this_cpu_read(rapl_pmu); - unsigned long flags; - -- spin_lock_irqsave(&pmu->lock, flags); -+ raw_spin_lock_irqsave(&pmu->lock, flags); - __rapl_pmu_event_start(pmu, event); -- spin_unlock_irqrestore(&pmu->lock, flags); -+ raw_spin_unlock_irqrestore(&pmu->lock, flags); - } - - static void rapl_pmu_event_stop(struct perf_event *event, int mode) -@@ -274,7 +274,7 @@ - struct hw_perf_event *hwc = &event->hw; - unsigned long flags; - -- spin_lock_irqsave(&pmu->lock, flags); -+ raw_spin_lock_irqsave(&pmu->lock, flags); - - /* mark event as deactivated and stopped */ - if (!(hwc->state & PERF_HES_STOPPED)) { -@@ -299,7 +299,7 @@ - hwc->state |= PERF_HES_UPTODATE; - } - -- spin_unlock_irqrestore(&pmu->lock, flags); -+ raw_spin_unlock_irqrestore(&pmu->lock, flags); - } - - static int rapl_pmu_event_add(struct perf_event *event, int mode) -@@ -308,14 +308,14 @@ - struct hw_perf_event *hwc = &event->hw; - unsigned long flags; - -- spin_lock_irqsave(&pmu->lock, flags); -+ raw_spin_lock_irqsave(&pmu->lock, flags); - - hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; - - if (mode & PERF_EF_START) - __rapl_pmu_event_start(pmu, event); - -- spin_unlock_irqrestore(&pmu->lock, flags); -+ raw_spin_unlock_irqrestore(&pmu->lock, flags); - - return 0; - } -@@ -603,7 +603,7 @@ - pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu)); - if (!pmu) - return -1; -- spin_lock_init(&pmu->lock); -+ raw_spin_lock_init(&pmu->lock); - - INIT_LIST_HEAD(&pmu->active_list); - -diff -Nur linux-4.4.13.orig/arch/x86/kernel/dumpstack_32.c linux-4.4.13/arch/x86/kernel/dumpstack_32.c ---- linux-4.4.13.orig/arch/x86/kernel/dumpstack_32.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/kernel/dumpstack_32.c 2016-06-22 21:55:07.986523867 +0200 -@@ -42,7 +42,7 @@ - unsigned long *stack, unsigned long bp, - const struct stacktrace_ops *ops, void *data) - { -- const unsigned cpu = get_cpu(); -+ const unsigned cpu = get_cpu_light(); - int graph = 0; - u32 *prev_esp; - -@@ -86,7 +86,7 @@ - break; - touch_nmi_watchdog(); - } -- put_cpu(); -+ put_cpu_light(); - } - EXPORT_SYMBOL(dump_trace); - -diff -Nur linux-4.4.13.orig/arch/x86/kernel/dumpstack_64.c linux-4.4.13/arch/x86/kernel/dumpstack_64.c ---- linux-4.4.13.orig/arch/x86/kernel/dumpstack_64.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/kernel/dumpstack_64.c 2016-06-22 21:55:07.986523867 +0200 -@@ -152,7 +152,7 @@ - unsigned long *stack, unsigned long bp, - const struct stacktrace_ops *ops, void *data) - { -- const unsigned cpu = get_cpu(); -+ const unsigned cpu = get_cpu_light(); - struct thread_info *tinfo; - unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu); - unsigned long dummy; -@@ -241,7 +241,7 @@ - * This handles the process stack: - */ - bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph); -- put_cpu(); -+ put_cpu_light(); - } - EXPORT_SYMBOL(dump_trace); - -@@ -255,7 +255,7 @@ - int cpu; - int i; - -- preempt_disable(); -+ migrate_disable(); - cpu = smp_processor_id(); - - irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu)); -@@ -291,7 +291,7 @@ - pr_cont(" %016lx", *stack++); - touch_nmi_watchdog(); - } -- preempt_enable(); -+ migrate_enable(); - - pr_cont("\n"); - show_trace_log_lvl(task, regs, sp, bp, log_lvl); -diff -Nur linux-4.4.13.orig/arch/x86/kernel/irq_32.c linux-4.4.13/arch/x86/kernel/irq_32.c ---- linux-4.4.13.orig/arch/x86/kernel/irq_32.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/kernel/irq_32.c 2016-06-22 21:55:07.986523867 +0200 -@@ -128,6 +128,7 @@ 
- cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu)); - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - void do_softirq_own_stack(void) - { - struct thread_info *curstk; -@@ -146,6 +147,7 @@ - - call_on_stack(__do_softirq, isp); - } -+#endif - - bool handle_irq(struct irq_desc *desc, struct pt_regs *regs) - { -diff -Nur linux-4.4.13.orig/arch/x86/kernel/kvm.c linux-4.4.13/arch/x86/kernel/kvm.c ---- linux-4.4.13.orig/arch/x86/kernel/kvm.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/kernel/kvm.c 2016-06-22 21:55:07.986523867 +0200 -@@ -36,6 +36,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -91,14 +92,14 @@ - - struct kvm_task_sleep_node { - struct hlist_node link; -- wait_queue_head_t wq; -+ struct swait_queue_head wq; - u32 token; - int cpu; - bool halted; - }; - - static struct kvm_task_sleep_head { -- spinlock_t lock; -+ raw_spinlock_t lock; - struct hlist_head list; - } async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE]; - -@@ -122,17 +123,17 @@ - u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS); - struct kvm_task_sleep_head *b = &async_pf_sleepers[key]; - struct kvm_task_sleep_node n, *e; -- DEFINE_WAIT(wait); -+ DECLARE_SWAITQUEUE(wait); - - rcu_irq_enter(); - -- spin_lock(&b->lock); -+ raw_spin_lock(&b->lock); - e = _find_apf_task(b, token); - if (e) { - /* dummy entry exist -> wake up was delivered ahead of PF */ - hlist_del(&e->link); - kfree(e); -- spin_unlock(&b->lock); -+ raw_spin_unlock(&b->lock); - - rcu_irq_exit(); - return; -@@ -141,13 +142,13 @@ - n.token = token; - n.cpu = smp_processor_id(); - n.halted = is_idle_task(current) || preempt_count() > 1; -- init_waitqueue_head(&n.wq); -+ init_swait_queue_head(&n.wq); - hlist_add_head(&n.link, &b->list); -- spin_unlock(&b->lock); -+ raw_spin_unlock(&b->lock); - - for (;;) { - if (!n.halted) -- prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE); -+ prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE); - if (hlist_unhashed(&n.link)) - break; - -@@ -166,7 +167,7 @@ - } - } - if (!n.halted) -- finish_wait(&n.wq, &wait); -+ finish_swait(&n.wq, &wait); - - rcu_irq_exit(); - return; -@@ -178,8 +179,8 @@ - hlist_del_init(&n->link); - if (n->halted) - smp_send_reschedule(n->cpu); -- else if (waitqueue_active(&n->wq)) -- wake_up(&n->wq); -+ else if (swait_active(&n->wq)) -+ swake_up(&n->wq); - } - - static void apf_task_wake_all(void) -@@ -189,14 +190,14 @@ - for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) { - struct hlist_node *p, *next; - struct kvm_task_sleep_head *b = &async_pf_sleepers[i]; -- spin_lock(&b->lock); -+ raw_spin_lock(&b->lock); - hlist_for_each_safe(p, next, &b->list) { - struct kvm_task_sleep_node *n = - hlist_entry(p, typeof(*n), link); - if (n->cpu == smp_processor_id()) - apf_task_wake_one(n); - } -- spin_unlock(&b->lock); -+ raw_spin_unlock(&b->lock); - } - } - -@@ -212,7 +213,7 @@ - } - - again: -- spin_lock(&b->lock); -+ raw_spin_lock(&b->lock); - n = _find_apf_task(b, token); - if (!n) { - /* -@@ -225,17 +226,17 @@ - * Allocation failed! Busy wait while other cpu - * handles async PF. 
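
Aside: the dumpstack hunks above swap get_cpu()/put_cpu() for get_cpu_light()/put_cpu_light() and preempt_disable() for migrate_disable(). These helpers come from this RT patch; get_cpu_light() is, as far as I can tell, roughly migrate_disable() plus smp_processor_id() (an assumption worth checking against the rest of the patch). The effect is a stable CPU number without shutting off preemption:

static void demo_walk_percpu_state(void)
{
	const unsigned int cpu = get_cpu_light();	/* pin task to this CPU */

	/*
	 * Preemption stays enabled: an RT task may run in between, but we
	 * resume on the same CPU, so per_cpu(..., cpu) data stays ours.
	 */
	(void)cpu;			/* e.g. index per-CPU stacks here */

	put_cpu_light();		/* allow migration again */
}
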
- */ -- spin_unlock(&b->lock); -+ raw_spin_unlock(&b->lock); - cpu_relax(); - goto again; - } - n->token = token; - n->cpu = smp_processor_id(); -- init_waitqueue_head(&n->wq); -+ init_swait_queue_head(&n->wq); - hlist_add_head(&n->link, &b->list); - } else - apf_task_wake_one(n); -- spin_unlock(&b->lock); -+ raw_spin_unlock(&b->lock); - return; - } - EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake); -@@ -486,7 +487,7 @@ - paravirt_ops_setup(); - register_reboot_notifier(&kvm_pv_reboot_nb); - for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) -- spin_lock_init(&async_pf_sleepers[i].lock); -+ raw_spin_lock_init(&async_pf_sleepers[i].lock); - if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF)) - x86_init.irqs.trap_init = kvm_apf_trap_init; - -diff -Nur linux-4.4.13.orig/arch/x86/kernel/nmi.c linux-4.4.13/arch/x86/kernel/nmi.c ---- linux-4.4.13.orig/arch/x86/kernel/nmi.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/kernel/nmi.c 2016-06-22 21:55:07.986523867 +0200 -@@ -231,7 +231,7 @@ - #endif - - if (panic_on_unrecovered_nmi) -- panic("NMI: Not continuing"); -+ nmi_panic(regs, "NMI: Not continuing"); - - pr_emerg("Dazed and confused, but trying to continue\n"); - -@@ -255,8 +255,16 @@ - reason, smp_processor_id()); - show_regs(regs); - -- if (panic_on_io_nmi) -- panic("NMI IOCK error: Not continuing"); -+ if (panic_on_io_nmi) { -+ nmi_panic(regs, "NMI IOCK error: Not continuing"); -+ -+ /* -+ * If we end up here, it means we have received an NMI while -+ * processing panic(). Simply return without delaying and -+ * re-enabling NMIs. -+ */ -+ return; -+ } - - /* Re-enable the IOCK line, wait for a few seconds */ - reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK; -@@ -297,7 +305,7 @@ - - pr_emerg("Do you have a strange power saving mode enabled?\n"); - if (unknown_nmi_panic || panic_on_unrecovered_nmi) -- panic("NMI: Not continuing"); -+ nmi_panic(regs, "NMI: Not continuing"); - - pr_emerg("Dazed and confused, but trying to continue\n"); - } -diff -Nur linux-4.4.13.orig/arch/x86/kernel/process_32.c linux-4.4.13/arch/x86/kernel/process_32.c ---- linux-4.4.13.orig/arch/x86/kernel/process_32.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/kernel/process_32.c 2016-06-22 21:55:07.986523867 +0200 -@@ -35,6 +35,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -210,6 +211,35 @@ - } - EXPORT_SYMBOL_GPL(start_thread); - -+#ifdef CONFIG_PREEMPT_RT_FULL -+static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) -+{ -+ int i; -+ -+ /* -+ * Clear @prev's kmap_atomic mappings -+ */ -+ for (i = 0; i < prev_p->kmap_idx; i++) { -+ int idx = i + KM_TYPE_NR * smp_processor_id(); -+ pte_t *ptep = kmap_pte - idx; -+ -+ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx)); -+ } -+ /* -+ * Restore @next_p's kmap_atomic mappings -+ */ -+ for (i = 0; i < next_p->kmap_idx; i++) { -+ int idx = i + KM_TYPE_NR * smp_processor_id(); -+ -+ if (!pte_none(next_p->kmap_pte[i])) -+ set_pte(kmap_pte - idx, next_p->kmap_pte[i]); -+ } -+} -+#else -+static inline void -+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } -+#endif -+ - - /* - * switch_to(x,y) should switch tasks from x to y. -@@ -286,6 +316,8 @@ - task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) - __switch_to_xtra(prev_p, next_p, tss); - -+ switch_kmaps(prev_p, next_p); -+ - /* - * Leave lazy mode, flushing any hypercalls made here. 
- * This must be done before restoring TLS segments so -diff -Nur linux-4.4.13.orig/arch/x86/kernel/reboot.c linux-4.4.13/arch/x86/kernel/reboot.c ---- linux-4.4.13.orig/arch/x86/kernel/reboot.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/kernel/reboot.c 2016-06-22 21:55:07.986523867 +0200 -@@ -726,6 +726,7 @@ - static nmi_shootdown_cb shootdown_callback; - - static atomic_t waiting_for_crash_ipi; -+static int crash_ipi_issued; - - static int crash_nmi_callback(unsigned int val, struct pt_regs *regs) - { -@@ -788,6 +789,9 @@ - - smp_send_nmi_allbutself(); - -+ /* Kick CPUs looping in NMI context. */ -+ WRITE_ONCE(crash_ipi_issued, 1); -+ - msecs = 1000; /* Wait at most a second for the other cpus to stop */ - while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { - mdelay(1); -@@ -796,6 +800,22 @@ - - /* Leave the nmi callback set */ - } -+ -+/* Override the weak function in kernel/panic.c */ -+void nmi_panic_self_stop(struct pt_regs *regs) -+{ -+ while (1) { -+ /* -+ * Wait for the crash dumping IPI to be issued, and then -+ * call its callback directly. -+ */ -+ if (READ_ONCE(crash_ipi_issued)) -+ crash_nmi_callback(0, regs); /* Don't return */ -+ -+ cpu_relax(); -+ } -+} -+ - #else /* !CONFIG_SMP */ - void nmi_shootdown_cpus(nmi_shootdown_cb callback) - { -diff -Nur linux-4.4.13.orig/arch/x86/kvm/lapic.c linux-4.4.13/arch/x86/kvm/lapic.c ---- linux-4.4.13.orig/arch/x86/kvm/lapic.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/kvm/lapic.c 2016-06-22 21:55:07.986523867 +0200 -@@ -1195,7 +1195,7 @@ - static void apic_timer_expired(struct kvm_lapic *apic) - { - struct kvm_vcpu *vcpu = apic->vcpu; -- wait_queue_head_t *q = &vcpu->wq; -+ struct swait_queue_head *q = &vcpu->wq; - struct kvm_timer *ktimer = &apic->lapic_timer; - - if (atomic_read(&apic->lapic_timer.pending)) -@@ -1204,8 +1204,8 @@ - atomic_inc(&apic->lapic_timer.pending); - kvm_set_pending_timer(vcpu); - -- if (waitqueue_active(q)) -- wake_up_interruptible(q); -+ if (swait_active(q)) -+ swake_up(q); - - if (apic_lvtt_tscdeadline(apic)) - ktimer->expired_tscdeadline = ktimer->tscdeadline; -@@ -1801,6 +1801,7 @@ - hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, - HRTIMER_MODE_ABS); - apic->lapic_timer.timer.function = apic_timer_fn; -+ apic->lapic_timer.timer.irqsafe = 1; - - /* - * APIC is created enabled. 
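
Aside: the nmi_panic() calls in the nmi.c hunks and the nmi_panic_self_stop() override in reboot.c belong to the panic_cpu infrastructure (mainline as of 4.5, carried here). From memory the helper looks roughly like the sketch below; treat it as an approximation, not the verbatim source. Exactly one CPU wins the race to panic(); a second CPU that takes an NMI meanwhile parks itself, which on x86 means spinning until the crash-dump IPI (crash_ipi_issued above) lets it run the crash callback:

#define PANIC_CPU_INVALID	-1

static atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID);

void demo_nmi_panic(struct pt_regs *regs, const char *msg)
{
	int this_cpu = raw_smp_processor_id();
	int old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu);

	if (old_cpu == PANIC_CPU_INVALID)
		panic("%s", msg);		/* first CPU in: do the panic */
	else if (old_cpu != this_cpu)
		nmi_panic_self_stop(regs);	/* late CPU: park, never return */
}
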
This will prevent kvm_lapic_set_base from -diff -Nur linux-4.4.13.orig/arch/x86/kvm/x86.c linux-4.4.13/arch/x86/kvm/x86.c ---- linux-4.4.13.orig/arch/x86/kvm/x86.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/kvm/x86.c 2016-06-22 21:55:07.990523867 +0200 -@@ -5788,6 +5788,13 @@ - goto out; - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { -+ printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n"); -+ return -EOPNOTSUPP; -+ } -+#endif -+ - r = kvm_mmu_module_init(); - if (r) - goto out_free_percpu; -diff -Nur linux-4.4.13.orig/arch/x86/mm/highmem_32.c linux-4.4.13/arch/x86/mm/highmem_32.c ---- linux-4.4.13.orig/arch/x86/mm/highmem_32.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/mm/highmem_32.c 2016-06-22 21:55:07.990523867 +0200 -@@ -32,10 +32,11 @@ - */ - void *kmap_atomic_prot(struct page *page, pgprot_t prot) - { -+ pte_t pte = mk_pte(page, prot); - unsigned long vaddr; - int idx, type; - -- preempt_disable(); -+ preempt_disable_nort(); - pagefault_disable(); - - if (!PageHighMem(page)) -@@ -45,7 +46,10 @@ - idx = type + KM_TYPE_NR*smp_processor_id(); - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); - BUG_ON(!pte_none(*(kmap_pte-idx))); -- set_pte(kmap_pte-idx, mk_pte(page, prot)); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = pte; -+#endif -+ set_pte(kmap_pte-idx, pte); - arch_flush_lazy_mmu_mode(); - - return (void *)vaddr; -@@ -88,6 +92,9 @@ - * is a bad idea also, in case the page changes cacheability - * attributes or becomes a protected page in a hypervisor. - */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = __pte(0); -+#endif - kpte_clear_flush(kmap_pte-idx, vaddr); - kmap_atomic_idx_pop(); - arch_flush_lazy_mmu_mode(); -@@ -100,7 +107,7 @@ - #endif - - pagefault_enable(); -- preempt_enable(); -+ preempt_enable_nort(); - } - EXPORT_SYMBOL(__kunmap_atomic); - -diff -Nur linux-4.4.13.orig/arch/x86/mm/iomap_32.c linux-4.4.13/arch/x86/mm/iomap_32.c ---- linux-4.4.13.orig/arch/x86/mm/iomap_32.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/mm/iomap_32.c 2016-06-22 21:55:07.990523867 +0200 -@@ -56,6 +56,7 @@ - - void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) - { -+ pte_t pte = pfn_pte(pfn, prot); - unsigned long vaddr; - int idx, type; - -@@ -65,7 +66,12 @@ - type = kmap_atomic_idx_push(); - idx = type + KM_TYPE_NR * smp_processor_id(); - vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); -- set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); -+ WARN_ON(!pte_none(*(kmap_pte - idx))); -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = pte; -+#endif -+ set_pte(kmap_pte - idx, pte); - arch_flush_lazy_mmu_mode(); - - return (void *)vaddr; -@@ -113,6 +119,9 @@ - * is a bad idea also, in case the page changes cacheability - * attributes or becomes a protected page in a hypervisor. 
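
Aside: the highmem_32.c/iomap_32.c hunks and the switch_kmaps() added in process_32.c above cooperate: on RT a task holding a kmap_atomic slot can be preempted and even migrated, so each task records its live atomic mappings (a kmap_pte[] array added to task_struct elsewhere in this patch) and the context switch replays them on the new CPU. The mapping side, distilled:

void *demo_kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	pte_t pte = mk_pte(page, prot);
	int type = kmap_atomic_idx_push();
	int idx = type + KM_TYPE_NR * smp_processor_id();
	unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

#ifdef CONFIG_PREEMPT_RT_FULL
	current->kmap_pte[type] = pte;	/* switch_kmaps() replays this */
#endif
	set_pte(kmap_pte - idx, pte);
	return (void *)vaddr;
}
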
- */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ current->kmap_pte[type] = __pte(0); -+#endif - kpte_clear_flush(kmap_pte-idx, vaddr); - kmap_atomic_idx_pop(); - } -diff -Nur linux-4.4.13.orig/arch/x86/platform/uv/tlb_uv.c linux-4.4.13/arch/x86/platform/uv/tlb_uv.c ---- linux-4.4.13.orig/arch/x86/platform/uv/tlb_uv.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/platform/uv/tlb_uv.c 2016-06-22 21:55:07.990523867 +0200 -@@ -714,9 +714,9 @@ - - quiesce_local_uvhub(hmaster); - -- spin_lock(&hmaster->queue_lock); -+ raw_spin_lock(&hmaster->queue_lock); - reset_with_ipi(&bau_desc->distribution, bcp); -- spin_unlock(&hmaster->queue_lock); -+ raw_spin_unlock(&hmaster->queue_lock); - - end_uvhub_quiesce(hmaster); - -@@ -736,9 +736,9 @@ - - quiesce_local_uvhub(hmaster); - -- spin_lock(&hmaster->queue_lock); -+ raw_spin_lock(&hmaster->queue_lock); - reset_with_ipi(&bau_desc->distribution, bcp); -- spin_unlock(&hmaster->queue_lock); -+ raw_spin_unlock(&hmaster->queue_lock); - - end_uvhub_quiesce(hmaster); - -@@ -759,7 +759,7 @@ - cycles_t tm1; - - hmaster = bcp->uvhub_master; -- spin_lock(&hmaster->disable_lock); -+ raw_spin_lock(&hmaster->disable_lock); - if (!bcp->baudisabled) { - stat->s_bau_disabled++; - tm1 = get_cycles(); -@@ -772,7 +772,7 @@ - } - } - } -- spin_unlock(&hmaster->disable_lock); -+ raw_spin_unlock(&hmaster->disable_lock); - } - - static void count_max_concurr(int stat, struct bau_control *bcp, -@@ -835,7 +835,7 @@ - */ - static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat) - { -- spinlock_t *lock = &hmaster->uvhub_lock; -+ raw_spinlock_t *lock = &hmaster->uvhub_lock; - atomic_t *v; - - v = &hmaster->active_descriptor_count; -@@ -968,7 +968,7 @@ - struct bau_control *hmaster; - - hmaster = bcp->uvhub_master; -- spin_lock(&hmaster->disable_lock); -+ raw_spin_lock(&hmaster->disable_lock); - if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) { - stat->s_bau_reenabled++; - for_each_present_cpu(tcpu) { -@@ -980,10 +980,10 @@ - tbcp->period_giveups = 0; - } - } -- spin_unlock(&hmaster->disable_lock); -+ raw_spin_unlock(&hmaster->disable_lock); - return 0; - } -- spin_unlock(&hmaster->disable_lock); -+ raw_spin_unlock(&hmaster->disable_lock); - return -1; - } - -@@ -1901,9 +1901,9 @@ - bcp->cong_reps = congested_reps; - bcp->disabled_period = sec_2_cycles(disabled_period); - bcp->giveup_limit = giveup_limit; -- spin_lock_init(&bcp->queue_lock); -- spin_lock_init(&bcp->uvhub_lock); -- spin_lock_init(&bcp->disable_lock); -+ raw_spin_lock_init(&bcp->queue_lock); -+ raw_spin_lock_init(&bcp->uvhub_lock); -+ raw_spin_lock_init(&bcp->disable_lock); - } - } - -diff -Nur linux-4.4.13.orig/arch/x86/platform/uv/uv_time.c linux-4.4.13/arch/x86/platform/uv/uv_time.c ---- linux-4.4.13.orig/arch/x86/platform/uv/uv_time.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/arch/x86/platform/uv/uv_time.c 2016-06-22 21:55:07.990523867 +0200 -@@ -57,7 +57,7 @@ - - /* There is one of these allocated per node */ - struct uv_rtc_timer_head { -- spinlock_t lock; -+ raw_spinlock_t lock; - /* next cpu waiting for timer, local node relative: */ - int next_cpu; - /* number of cpus on this node: */ -@@ -177,7 +177,7 @@ - uv_rtc_deallocate_timers(); - return -ENOMEM; - } -- spin_lock_init(&head->lock); -+ raw_spin_lock_init(&head->lock); - head->ncpus = uv_blade_nr_possible_cpus(bid); - head->next_cpu = -1; - blade_info[bid] = head; -@@ -231,7 +231,7 @@ - unsigned long flags; - int next_cpu; - -- spin_lock_irqsave(&head->lock, flags); -+ 
raw_spin_lock_irqsave(&head->lock, flags); - - next_cpu = head->next_cpu; - *t = expires; -@@ -243,12 +243,12 @@ - if (uv_setup_intr(cpu, expires)) { - *t = ULLONG_MAX; - uv_rtc_find_next_timer(head, pnode); -- spin_unlock_irqrestore(&head->lock, flags); -+ raw_spin_unlock_irqrestore(&head->lock, flags); - return -ETIME; - } - } - -- spin_unlock_irqrestore(&head->lock, flags); -+ raw_spin_unlock_irqrestore(&head->lock, flags); - return 0; - } - -@@ -267,7 +267,7 @@ - unsigned long flags; - int rc = 0; - -- spin_lock_irqsave(&head->lock, flags); -+ raw_spin_lock_irqsave(&head->lock, flags); - - if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force) - rc = 1; -@@ -279,7 +279,7 @@ - uv_rtc_find_next_timer(head, pnode); - } - -- spin_unlock_irqrestore(&head->lock, flags); -+ raw_spin_unlock_irqrestore(&head->lock, flags); - - return rc; - } -@@ -299,13 +299,18 @@ - static cycle_t uv_read_rtc(struct clocksource *cs) - { - unsigned long offset; -+ cycle_t cycles; - -+ preempt_disable(); - if (uv_get_min_hub_revision_id() == 1) - offset = 0; - else - offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE; - -- return (cycle_t)uv_read_local_mmr(UVH_RTC | offset); -+ cycles = (cycle_t)uv_read_local_mmr(UVH_RTC | offset); -+ preempt_enable(); -+ -+ return cycles; - } - - /* -diff -Nur linux-4.4.13.orig/block/blk-core.c linux-4.4.13/block/blk-core.c ---- linux-4.4.13.orig/block/blk-core.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/block/blk-core.c 2016-06-22 21:55:07.990523867 +0200 -@@ -125,6 +125,9 @@ - - INIT_LIST_HEAD(&rq->queuelist); - INIT_LIST_HEAD(&rq->timeout_list); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work); -+#endif - rq->cpu = -1; - rq->q = q; - rq->__sector = (sector_t) -1; -@@ -233,7 +236,7 @@ - **/ - void blk_start_queue(struct request_queue *q) - { -- WARN_ON(!irqs_disabled()); -+ WARN_ON_NONRT(!irqs_disabled()); - - queue_flag_clear(QUEUE_FLAG_STOPPED, q); - __blk_run_queue(q); -@@ -657,7 +660,7 @@ - if (!gfpflags_allow_blocking(gfp)) - return -EBUSY; - -- ret = wait_event_interruptible(q->mq_freeze_wq, -+ ret = swait_event_interruptible(q->mq_freeze_wq, - !atomic_read(&q->mq_freeze_depth) || - blk_queue_dying(q)); - if (blk_queue_dying(q)) -@@ -677,7 +680,7 @@ - struct request_queue *q = - container_of(ref, struct request_queue, q_usage_counter); - -- wake_up_all(&q->mq_freeze_wq); -+ swake_up_all(&q->mq_freeze_wq); - } - - struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) -@@ -739,7 +742,7 @@ - q->bypass_depth = 1; - __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags); - -- init_waitqueue_head(&q->mq_freeze_wq); -+ init_swait_queue_head(&q->mq_freeze_wq); - - /* - * Init percpu_ref in atomic mode so that it's faster to shutdown. -@@ -3198,7 +3201,7 @@ - blk_run_queue_async(q); - else - __blk_run_queue(q); -- spin_unlock(q->queue_lock); -+ spin_unlock_irq(q->queue_lock); - } - - static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) -@@ -3246,7 +3249,6 @@ - void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) - { - struct request_queue *q; -- unsigned long flags; - struct request *rq; - LIST_HEAD(list); - unsigned int depth; -@@ -3266,11 +3268,6 @@ - q = NULL; - depth = 0; - -- /* -- * Save and disable interrupts here, to avoid doing it for every -- * queue lock we have to take. 
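
Aside: uv_read_rtc() above gains an explicit preempt_disable()/preempt_enable() pair: the per-CPU MMR offset and the register read must happen on the same CPU, and on RT the caller can no longer be assumed to hold preemption off. The general shape (demo_processor_id()/demo_read_mmr() are hypothetical stand-ins for the UV accessors):

static u64 demo_read_percpu_reg(void)
{
	unsigned long offset;
	u64 val;

	preempt_disable();		/* keep CPU id and read on one CPU */
	offset = (demo_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
	val = demo_read_mmr(offset);
	preempt_enable();

	return val;
}
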
-- */ -- local_irq_save(flags); - while (!list_empty(&list)) { - rq = list_entry_rq(list.next); - list_del_init(&rq->queuelist); -@@ -3283,7 +3280,7 @@ - queue_unplugged(q, depth, from_schedule); - q = rq->q; - depth = 0; -- spin_lock(q->queue_lock); -+ spin_lock_irq(q->queue_lock); - } - - /* -@@ -3310,8 +3307,6 @@ - */ - if (q) - queue_unplugged(q, depth, from_schedule); -- -- local_irq_restore(flags); - } - - void blk_finish_plug(struct blk_plug *plug) -diff -Nur linux-4.4.13.orig/block/blk-ioc.c linux-4.4.13/block/blk-ioc.c ---- linux-4.4.13.orig/block/blk-ioc.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/block/blk-ioc.c 2016-06-22 21:55:07.990523867 +0200 -@@ -7,6 +7,7 @@ - #include - #include - #include -+#include - - #include "blk.h" - -@@ -109,7 +110,7 @@ - spin_unlock(q->queue_lock); - } else { - spin_unlock_irqrestore(&ioc->lock, flags); -- cpu_relax(); -+ cpu_chill(); - spin_lock_irqsave_nested(&ioc->lock, flags, 1); - } - } -@@ -187,7 +188,7 @@ - spin_unlock(icq->q->queue_lock); - } else { - spin_unlock_irqrestore(&ioc->lock, flags); -- cpu_relax(); -+ cpu_chill(); - goto retry; - } - } -diff -Nur linux-4.4.13.orig/block/blk-iopoll.c linux-4.4.13/block/blk-iopoll.c ---- linux-4.4.13.orig/block/blk-iopoll.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/block/blk-iopoll.c 2016-06-22 21:55:07.990523867 +0200 -@@ -35,6 +35,7 @@ - list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll)); - __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); - local_irq_restore(flags); -+ preempt_check_resched_rt(); - } - EXPORT_SYMBOL(blk_iopoll_sched); - -@@ -132,6 +133,7 @@ - __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); - - local_irq_enable(); -+ preempt_check_resched_rt(); - } - - /** -@@ -201,6 +203,7 @@ - this_cpu_ptr(&blk_cpu_iopoll)); - __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); - local_irq_enable(); -+ preempt_check_resched_rt(); - } - - return NOTIFY_OK; -diff -Nur linux-4.4.13.orig/block/blk-mq.c linux-4.4.13/block/blk-mq.c ---- linux-4.4.13.orig/block/blk-mq.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/block/blk-mq.c 2016-06-22 21:55:07.990523867 +0200 -@@ -92,7 +92,7 @@ - - static void blk_mq_freeze_queue_wait(struct request_queue *q) - { -- wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); -+ swait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); - } - - /* -@@ -130,7 +130,7 @@ - WARN_ON_ONCE(freeze_depth < 0); - if (!freeze_depth) { - percpu_ref_reinit(&q->q_usage_counter); -- wake_up_all(&q->mq_freeze_wq); -+ swake_up_all(&q->mq_freeze_wq); - } - } - EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); -@@ -149,7 +149,7 @@ - * dying, we need to ensure that processes currently waiting on - * the queue are notified as well. 
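
Aside: blk-ioc above trades cpu_relax() for cpu_chill() in its retry loops. cpu_chill() is another helper this patch introduces (roughly a short, one-tick sleep): busy-waiting for a lock whose holder can itself be preempted may livelock an RT system, so the loop backs off by sleeping instead. In context (try_release_icq() is a hypothetical stand-in for the body of the real loop):

retry:
	spin_lock_irqsave(&ioc->lock, flags);
	if (!try_release_icq(ioc)) {
		spin_unlock_irqrestore(&ioc->lock, flags);
		cpu_chill();		/* sleep briefly; let the holder run */
		goto retry;
	}
	spin_unlock_irqrestore(&ioc->lock, flags);
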
- */ -- wake_up_all(&q->mq_freeze_wq); -+ swake_up_all(&q->mq_freeze_wq); - } - - bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) -@@ -196,6 +196,9 @@ - rq->resid_len = 0; - rq->sense = NULL; - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work); -+#endif - INIT_LIST_HEAD(&rq->timeout_list); - rq->timeout = 0; - -@@ -325,6 +328,17 @@ - } - EXPORT_SYMBOL(blk_mq_end_request); - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ -+void __blk_mq_complete_request_remote_work(struct work_struct *work) -+{ -+ struct request *rq = container_of(work, struct request, work); -+ -+ rq->q->softirq_done_fn(rq); -+} -+ -+#else -+ - static void __blk_mq_complete_request_remote(void *data) - { - struct request *rq = data; -@@ -332,6 +346,8 @@ - rq->q->softirq_done_fn(rq); - } - -+#endif -+ - static void blk_mq_ipi_complete_request(struct request *rq) - { - struct blk_mq_ctx *ctx = rq->mq_ctx; -@@ -343,19 +359,23 @@ - return; - } - -- cpu = get_cpu(); -+ cpu = get_cpu_light(); - if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags)) - shared = cpus_share_cache(cpu, ctx->cpu); - - if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ schedule_work_on(ctx->cpu, &rq->work); -+#else - rq->csd.func = __blk_mq_complete_request_remote; - rq->csd.info = rq; - rq->csd.flags = 0; - smp_call_function_single_async(ctx->cpu, &rq->csd); -+#endif - } else { - rq->q->softirq_done_fn(rq); - } -- put_cpu(); -+ put_cpu_light(); - } - - static void __blk_mq_complete_request(struct request *rq) -@@ -862,14 +882,14 @@ - return; - - if (!async) { -- int cpu = get_cpu(); -+ int cpu = get_cpu_light(); - if (cpumask_test_cpu(cpu, hctx->cpumask)) { - __blk_mq_run_hw_queue(hctx); -- put_cpu(); -+ put_cpu_light(); - return; - } - -- put_cpu(); -+ put_cpu_light(); - } - - kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx), -@@ -1617,7 +1637,7 @@ - { - struct blk_mq_hw_ctx *hctx = data; - -- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) -+ if (action == CPU_POST_DEAD) - return blk_mq_hctx_cpu_offline(hctx, cpu); - - /* -diff -Nur linux-4.4.13.orig/block/blk-mq-cpu.c linux-4.4.13/block/blk-mq-cpu.c ---- linux-4.4.13.orig/block/blk-mq-cpu.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/block/blk-mq-cpu.c 2016-06-22 21:55:07.990523867 +0200 -@@ -16,7 +16,7 @@ - #include "blk-mq.h" - - static LIST_HEAD(blk_mq_cpu_notify_list); --static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock); -+static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock); - - static int blk_mq_main_cpu_notify(struct notifier_block *self, - unsigned long action, void *hcpu) -@@ -25,7 +25,10 @@ - struct blk_mq_cpu_notifier *notify; - int ret = NOTIFY_OK; - -- raw_spin_lock(&blk_mq_cpu_notify_lock); -+ if (action != CPU_POST_DEAD) -+ return NOTIFY_OK; -+ -+ spin_lock(&blk_mq_cpu_notify_lock); - - list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) { - ret = notify->notify(notify->data, action, cpu); -@@ -33,7 +36,7 @@ - break; - } - -- raw_spin_unlock(&blk_mq_cpu_notify_lock); -+ spin_unlock(&blk_mq_cpu_notify_lock); - return ret; - } - -@@ -41,16 +44,16 @@ - { - BUG_ON(!notifier->notify); - -- raw_spin_lock(&blk_mq_cpu_notify_lock); -+ spin_lock(&blk_mq_cpu_notify_lock); - list_add_tail(¬ifier->list, &blk_mq_cpu_notify_list); -- raw_spin_unlock(&blk_mq_cpu_notify_lock); -+ spin_unlock(&blk_mq_cpu_notify_lock); - } - - void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier) - { -- raw_spin_lock(&blk_mq_cpu_notify_lock); -+ spin_lock(&blk_mq_cpu_notify_lock); - 
list_del(¬ifier->list); -- raw_spin_unlock(&blk_mq_cpu_notify_lock); -+ spin_unlock(&blk_mq_cpu_notify_lock); - } - - void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, -diff -Nur linux-4.4.13.orig/block/blk-mq.h linux-4.4.13/block/blk-mq.h ---- linux-4.4.13.orig/block/blk-mq.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/block/blk-mq.h 2016-06-22 21:55:07.990523867 +0200 -@@ -74,7 +74,10 @@ - static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, - unsigned int cpu) - { -- return per_cpu_ptr(q->queue_ctx, cpu); -+ struct blk_mq_ctx *ctx; -+ -+ ctx = per_cpu_ptr(q->queue_ctx, cpu); -+ return ctx; - } - - /* -@@ -85,12 +88,12 @@ - */ - static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q) - { -- return __blk_mq_get_ctx(q, get_cpu()); -+ return __blk_mq_get_ctx(q, get_cpu_light()); - } - - static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx) - { -- put_cpu(); -+ put_cpu_light(); - } - - struct blk_mq_alloc_data { -diff -Nur linux-4.4.13.orig/block/blk-softirq.c linux-4.4.13/block/blk-softirq.c ---- linux-4.4.13.orig/block/blk-softirq.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/block/blk-softirq.c 2016-06-22 21:55:07.990523867 +0200 -@@ -51,6 +51,7 @@ - raise_softirq_irqoff(BLOCK_SOFTIRQ); - - local_irq_restore(flags); -+ preempt_check_resched_rt(); - } - - /* -@@ -93,6 +94,7 @@ - this_cpu_ptr(&blk_cpu_done)); - raise_softirq_irqoff(BLOCK_SOFTIRQ); - local_irq_enable(); -+ preempt_check_resched_rt(); - } - - return NOTIFY_OK; -@@ -150,6 +152,7 @@ - goto do_local; - - local_irq_restore(flags); -+ preempt_check_resched_rt(); - } - - /** -diff -Nur linux-4.4.13.orig/block/bounce.c linux-4.4.13/block/bounce.c ---- linux-4.4.13.orig/block/bounce.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/block/bounce.c 2016-06-22 21:55:07.990523867 +0200 -@@ -55,11 +55,11 @@ - unsigned long flags; - unsigned char *vto; - -- local_irq_save(flags); -+ local_irq_save_nort(flags); - vto = kmap_atomic(to->bv_page); - memcpy(vto + to->bv_offset, vfrom, to->bv_len); - kunmap_atomic(vto); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } - - #else /* CONFIG_HIGHMEM */ -diff -Nur linux-4.4.13.orig/crypto/algapi.c linux-4.4.13/crypto/algapi.c ---- linux-4.4.13.orig/crypto/algapi.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/crypto/algapi.c 2016-06-22 21:55:07.990523867 +0200 -@@ -719,13 +719,13 @@ - - int crypto_register_notifier(struct notifier_block *nb) - { -- return blocking_notifier_chain_register(&crypto_chain, nb); -+ return srcu_notifier_chain_register(&crypto_chain, nb); - } - EXPORT_SYMBOL_GPL(crypto_register_notifier); - - int crypto_unregister_notifier(struct notifier_block *nb) - { -- return blocking_notifier_chain_unregister(&crypto_chain, nb); -+ return srcu_notifier_chain_unregister(&crypto_chain, nb); - } - EXPORT_SYMBOL_GPL(crypto_unregister_notifier); - -diff -Nur linux-4.4.13.orig/crypto/api.c linux-4.4.13/crypto/api.c ---- linux-4.4.13.orig/crypto/api.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/crypto/api.c 2016-06-22 21:55:07.990523867 +0200 -@@ -31,7 +31,7 @@ - DECLARE_RWSEM(crypto_alg_sem); - EXPORT_SYMBOL_GPL(crypto_alg_sem); - --BLOCKING_NOTIFIER_HEAD(crypto_chain); -+SRCU_NOTIFIER_HEAD(crypto_chain); - EXPORT_SYMBOL_GPL(crypto_chain); - - static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg); -@@ -236,10 +236,10 @@ - { - int ok; - -- ok = blocking_notifier_call_chain(&crypto_chain, val, v); -+ ok = srcu_notifier_call_chain(&crypto_chain, val, v); - if 
(ok == NOTIFY_DONE) {
-		request_module("cryptomgr");
--		ok = blocking_notifier_call_chain(&crypto_chain, val, v);
-+		ok = srcu_notifier_call_chain(&crypto_chain, val, v);
-	}
-
-	return ok;
-}
-diff -Nur linux-4.4.13.orig/crypto/internal.h linux-4.4.13/crypto/internal.h
---- linux-4.4.13.orig/crypto/internal.h	2016-06-08 03:14:51.000000000 +0200
-+++ linux-4.4.13/crypto/internal.h	2016-06-22 21:55:07.990523867 +0200
-@@ -47,7 +47,7 @@
-
- extern struct list_head crypto_alg_list;
- extern struct rw_semaphore crypto_alg_sem;
--extern struct blocking_notifier_head crypto_chain;
-+extern struct srcu_notifier_head crypto_chain;
-
- #ifdef CONFIG_PROC_FS
- void __init crypto_init_proc(void);
-@@ -143,7 +143,7 @@
-
- static inline void crypto_notify(unsigned long val, void *v)
- {
--	blocking_notifier_call_chain(&crypto_chain, val, v);
-+	srcu_notifier_call_chain(&crypto_chain, val, v);
- }
-
- #endif /* _CRYPTO_INTERNAL_H */
-diff -Nur linux-4.4.13.orig/Documentation/hwlat_detector.txt linux-4.4.13/Documentation/hwlat_detector.txt
---- linux-4.4.13.orig/Documentation/hwlat_detector.txt	1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.4.13/Documentation/hwlat_detector.txt	2016-06-22 21:55:07.994523867 +0200
-@@ -0,0 +1,64 @@
-+Introduction:
-+-------------
-+
-+The module hwlat_detector is a special purpose kernel module that is used to
-+detect large system latencies induced by the behavior of certain underlying
-+hardware or firmware, independent of Linux itself. The code was developed
-+originally to detect SMIs (System Management Interrupts) on x86 systems,
-+however there is nothing x86 specific about this patchset. It was
-+originally written for use by the "RT" patch since the Real Time
-+kernel is highly latency sensitive.
-+
-+SMIs are usually not serviced by the Linux kernel, which typically does not
-+even know that they are occurring. SMIs are instead set up by BIOS code
-+and are serviced by BIOS code, usually for "critical" events such as
-+management of thermal sensors and fans. Sometimes though, SMIs are used for
-+other tasks and those tasks can spend an inordinate amount of time in the
-+handler (sometimes measured in milliseconds). Obviously this is a problem if
-+you are trying to keep event service latencies down in the microsecond range.
-+
-+The hardware latency detector works by hogging all of the cpus for configurable
-+amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter
-+for some period, then looking for gaps in the TSC data. Any gap indicates a
-+time when the polling was interrupted and since the machine is stopped and
-+interrupts turned off the only thing that could do that would be an SMI.
-+
-+Note that the SMI detector should *NEVER* be used in a production environment.
-+It is intended to be run manually to determine if the hardware platform has a
-+problem with long system firmware service routines.
-+
-+Usage:
-+------
-+
-+Loading the module hwlat_detector passing the parameter "enabled=1" (or by
-+setting the "enable" entry in "hwlat_detector" debugfs toggled on) is the only
-+step required to start the hwlat_detector. It is possible to redefine the
-+threshold in microseconds (us) above which latency spikes will be taken
-+into account (parameter "threshold=").
-+
-+Example:
-+
-+	# modprobe hwlat_detector enabled=1 threshold=100
-+
-+After the module is loaded, it creates a directory named "hwlat_detector" under
-+the debugfs mountpoint, "/debug/hwlat_detector" for this text. It is necessary
-+to have debugfs mounted, which might be on /sys/debug on your system.
-+
-+The /debug/hwlat_detector interface contains the following files:
-+
-+count			- number of latency spikes observed since last reset
-+enable			- a global enable/disable toggle (0/1), resets count
-+max			- maximum hardware latency actually observed (usecs)
-+sample			- a pipe from which to read current raw sample data
-+			  in the format <timestamp>\t<latency observed usecs>
-+			  (can be opened O_NONBLOCK for a single sample)
-+threshold		- minimum latency value to be considered (usecs)
-+width			- time period to sample with CPUs held (usecs)
-+			  must be less than the total window size (enforced)
-+window			- total period of sampling, width being inside (usecs)
-+
-+By default we will set width to 500,000 and window to 1,000,000, meaning that
-+we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). If we
-+observe any latencies that exceed the threshold (initially 100 usecs),
-+then we write to a global sample ring buffer of 8K samples, which is
-+consumed by reading from the "sample" (pipe) debugfs file interface.
-diff -Nur linux-4.4.13.orig/Documentation/kernel-parameters.txt linux-4.4.13/Documentation/kernel-parameters.txt
---- linux-4.4.13.orig/Documentation/kernel-parameters.txt	2016-06-08 03:14:51.000000000 +0200
-+++ linux-4.4.13/Documentation/kernel-parameters.txt	2016-06-22 21:55:07.994523867 +0200
-@@ -1629,6 +1629,15 @@
- 	ip=		[IP_PNP]
- 			See Documentation/filesystems/nfs/nfsroot.txt.
-
-+	irqaffinity=	[SMP] Set the default irq affinity mask
-+			Format: <cpu number>,...,<cpu number>
-+			or
-+			<cpu number>-<cpu number>
-+			(must be a positive range in ascending order)
-+			or a mixture
-+			<cpu number>,...,<cpu number>-<cpu number>
-+
- 	irqfixup	[HW]
- 			When an interrupt is not handled search all handlers
- 			for it. Intended to get systems with badly broken
-diff -Nur linux-4.4.13.orig/Documentation/sysrq.txt linux-4.4.13/Documentation/sysrq.txt
---- linux-4.4.13.orig/Documentation/sysrq.txt	2016-06-08 03:14:51.000000000 +0200
-+++ linux-4.4.13/Documentation/sysrq.txt	2016-06-22 21:55:07.994523867 +0200
-@@ -59,10 +59,17 @@
- On other - If you know of the key combos for other architectures, please
-            let me know so I can add them to this section.
-
--On all -  write a character to /proc/sysrq-trigger.  e.g.:
--
-+On all - write a character to /proc/sysrq-trigger, e.g.:
- 		echo t > /proc/sysrq-trigger
-
-+On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.
-+		echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
-+	Send an ICMP echo request with this pattern plus the particular
-+	SysRq command key. Example:
-+		# ping -c1 -s57 -p0102030468
-+	will trigger the SysRq-H (help) command.
-+
-+
- * What are the 'command' keys?
- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- 'b'     - Will immediately reboot the system without syncing or unmounting
-diff -Nur linux-4.4.13.orig/Documentation/trace/histograms.txt linux-4.4.13/Documentation/trace/histograms.txt
---- linux-4.4.13.orig/Documentation/trace/histograms.txt	1970-01-01 01:00:00.000000000 +0100
-+++ linux-4.4.13/Documentation/trace/histograms.txt	2016-06-22 21:55:07.994523867 +0200
-@@ -0,0 +1,186 @@
-+		Using the Linux Kernel Latency Histograms
-+
-+
-+This document gives a short explanation how to enable, configure and use
-+latency histograms. Latency histograms are primarily relevant in the
-+context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT)
-+and are used in the quality management of the Linux real-time
-+capabilities.
-+
-+
-+* Purpose of latency histograms
-+
-+A latency histogram continuously accumulates the frequencies of latency
-+data. There are two types of histograms
-+- potential sources of latencies
-+- effective latencies
-+
-+
-+* Potential sources of latencies
-+
-+Potential sources of latencies are code segments where interrupts,
-+preemption or both are disabled (aka critical sections). To create
-+histograms of potential sources of latency, the kernel stores the time
-+stamp at the start of a critical section, determines the time elapsed
-+when the end of the section is reached, and increments the frequency
-+counter of that latency value - irrespective of whether any concurrently
-+running process is affected by latency or not.
-+- Configuration items (in the Kernel hacking/Tracers submenu)
-+  CONFIG_INTERRUPT_OFF_LATENCY
-+  CONFIG_PREEMPT_OFF_LATENCY
-+
-+
-+* Effective latencies
-+
-+Effective latencies are actually occurring during wakeup of a process. To
-+determine effective latencies, the kernel stores the time stamp when a
-+process is scheduled to be woken up, and determines the duration of the
-+wakeup time shortly before control is passed over to this process. Note
-+that the apparent latency in user space may be somewhat longer, since the
-+process may be interrupted after control is passed over to it but before
-+the execution in user space takes place. Simply measuring the interval
-+between enqueuing and wakeup may also not be appropriate in cases when a
-+process is scheduled as a result of a timer expiration. The timer may have
-+missed its deadline, e.g. due to disabled interrupts, but this latency
-+would not be registered. Therefore, the offsets of missed timers are
-+recorded in a separate histogram. If both wakeup latency and missed timer
-+offsets are configured and enabled, a third histogram may be enabled that
-+records the overall latency as a sum of the timer latency, if any, and the
-+wakeup latency. This histogram is called "timerandwakeup".
-+- Configuration items (in the Kernel hacking/Tracers submenu)
-+  CONFIG_WAKEUP_LATENCY
-+  CONFIG_MISSED_TIMER_OFFSETS
-+
-+
-+* Usage
-+
-+The interface to the administration of the latency histograms is located
-+in the debugfs file system. To mount it, either enter
-+
-+mount -t sysfs nodev /sys
-+mount -t debugfs nodev /sys/kernel/debug
-+
-+from shell command line level, or add
-+
-+nodev	/sys			sysfs	defaults	0 0
-+nodev	/sys/kernel/debug	debugfs	defaults	0 0
-+
-+to the file /etc/fstab. All latency histogram related files are then
-+available in the directory /sys/kernel/debug/tracing/latency_hist. A
-+particular histogram type is enabled by writing non-zero to the related
-+variable in the /sys/kernel/debug/tracing/latency_hist/enable directory.
-+Select "preemptirqsoff" for the histograms of potential sources of
-+latencies and "wakeup" for histograms of effective latencies etc. The
-+histogram data - one per CPU - are available in the files
-+
-+/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx
-+/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx
-+/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx
-+/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx
-+/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx
-+/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx
-+/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx
-+
-+The histograms are reset by writing non-zero to the file "reset" in a
-+particular latency directory. To reset all latency data, use
-+
-+#!/bin/sh
-+
-+TRACINGDIR=/sys/kernel/debug/tracing
-+HISTDIR=$TRACINGDIR/latency_hist
-+
-+if test -d $HISTDIR
-+then
-+  cd $HISTDIR
-+  for i in `find . | grep /reset$`
-+  do
-+    echo 1 >$i
-+  done
-+fi
-+
-+
-+* Data format
-+
-+Latency data are stored with a resolution of one microsecond. The
-+maximum latency is 10,240 microseconds. The data are only valid if the
-+overflow register is empty. Every output line contains the latency in
-+microseconds in the first row and the number of samples in the second
-+row. To display only lines with a positive latency count, use, for
-+example,
-+
-+grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0
-+
-+#Minimum latency: 0 microseconds.
-+#Average latency: 0 microseconds.
-+#Maximum latency: 25 microseconds.
-+#Total samples: 3104770694
-+#There are 0 samples greater or equal than 10240 microseconds
-+#usecs	       samples
-+    0	    2984486876
-+    1	      49843506
-+    2	      58219047
-+    3	       5348126
-+    4	       2187960
-+    5	       3388262
-+    6	        959289
-+    7	        208294
-+    8	         40420
-+    9	          4485
-+   10	         14918
-+   11	         18340
-+   12	         25052
-+   13	         19455
-+   14	          5602
-+   15	           969
-+   16	            47
-+   17	            18
-+   18	            14
-+   19	             1
-+   20	             3
-+   21	             2
-+   22	             5
-+   23	             2
-+   25	             1
-+
-+
-+* Wakeup latency of a selected process
-+
-+To only collect wakeup latency data of a particular process, write the
-+PID of the requested process to
-+
-+/sys/kernel/debug/tracing/latency_hist/wakeup/pid
-+
-+PIDs are not considered if this variable is set to 0.
-+
-+
-+* Details of the process with the highest wakeup latency so far
-+
-+Selected data of the process that suffered from the highest wakeup
-+latency that occurred in a particular CPU are available in the file
-+
-+/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx.
-+
-+In addition, other relevant system data at the time when the
-+latency occurred are given.
-+
-+The format of the data is (all in one line):
-+<PID> <Priority> <Latency> (<Timeroffset>) <Command> \
-+<- <PID> <Priority> <Command> <Timestamp>
-+
-+The value of <Timeroffset> is only relevant in the combined timer
-+and wakeup latency recording. In the wakeup recording, it is
-+always 0, in the missed_timer_offsets recording, it is the same
-+as <Latency>.
-+
-+When retrospectively searching for the origin of a latency and
-+tracing was not enabled, it may be helpful to know the name and
-+some basic data of the task that (finally) was switching to the
-+late real-time task. In addition to the victim's data, also the
-+data of the possible culprit are therefore displayed after the
-+"<-" symbol.
-+
-+Finally, the timestamp of the time when the latency occurred
-+in <seconds>.<microseconds> after the most recent system boot
-+is provided.
-+
-+These data are also reset when the wakeup histogram is reset.
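
The two-column "usecs samples" layout described above is easy to post-process
when the built-in summary lines are not enough. The following is a minimal
user-space sketch (an illustration, not part of the patch itself): it assumes
debugfs is mounted at /sys/kernel/debug and hard-codes one histogram file,
which you would adjust for your CPU and histogram type.

#include <stdio.h>

#define HIST_FILE "/sys/kernel/debug/tracing/latency_hist/wakeup/CPU0"

int main(void)
{
	FILE *f = fopen(HIST_FILE, "r");
	char line[128];
	unsigned long long usecs, samples, total = 0, weighted = 0;

	if (!f) {
		perror(HIST_FILE);
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		if (line[0] == '#')	/* skip the "#..." summary header */
			continue;
		if (sscanf(line, "%llu %llu", &usecs, &samples) != 2)
			continue;
		total += samples;		/* wakeups in this bucket */
		weighted += usecs * samples;	/* their summed latency */
	}
	fclose(f);
	if (total)
		printf("%llu samples, average latency %llu usecs\n",
		       total, weighted / total);
	return 0;
}

Up to integer rounding, the average computed this way should agree with the
"#Average latency" line the kernel already writes into the file header.
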
-diff -Nur linux-4.4.13.orig/drivers/acpi/acpica/acglobal.h linux-4.4.13/drivers/acpi/acpica/acglobal.h ---- linux-4.4.13.orig/drivers/acpi/acpica/acglobal.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/acpi/acpica/acglobal.h 2016-06-22 21:55:07.994523867 +0200 -@@ -116,7 +116,7 @@ - * interrupt level - */ - ACPI_GLOBAL(acpi_spinlock, acpi_gbl_gpe_lock); /* For GPE data structs and registers */ --ACPI_GLOBAL(acpi_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */ -+ACPI_GLOBAL(acpi_raw_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */ - ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock); - - /* Mutex for _OSI support */ -diff -Nur linux-4.4.13.orig/drivers/acpi/acpica/hwregs.c linux-4.4.13/drivers/acpi/acpica/hwregs.c ---- linux-4.4.13.orig/drivers/acpi/acpica/hwregs.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/acpi/acpica/hwregs.c 2016-06-22 21:55:07.994523867 +0200 -@@ -269,14 +269,14 @@ - ACPI_BITMASK_ALL_FIXED_STATUS, - ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address))); - -- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); -+ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); - - /* Clear the fixed events in PM1 A/B */ - - status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, - ACPI_BITMASK_ALL_FIXED_STATUS); - -- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); -+ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); - - if (ACPI_FAILURE(status)) { - goto exit; -diff -Nur linux-4.4.13.orig/drivers/acpi/acpica/hwxface.c linux-4.4.13/drivers/acpi/acpica/hwxface.c ---- linux-4.4.13.orig/drivers/acpi/acpica/hwxface.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/acpi/acpica/hwxface.c 2016-06-22 21:55:07.994523867 +0200 -@@ -374,7 +374,7 @@ - return_ACPI_STATUS(AE_BAD_PARAMETER); - } - -- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); -+ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); - - /* - * At this point, we know that the parent register is one of the -@@ -435,7 +435,7 @@ - - unlock_and_exit: - -- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); -+ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); - return_ACPI_STATUS(status); - } - -diff -Nur linux-4.4.13.orig/drivers/acpi/acpica/utmutex.c linux-4.4.13/drivers/acpi/acpica/utmutex.c ---- linux-4.4.13.orig/drivers/acpi/acpica/utmutex.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/acpi/acpica/utmutex.c 2016-06-22 21:55:07.994523867 +0200 -@@ -88,7 +88,7 @@ - return_ACPI_STATUS (status); - } - -- status = acpi_os_create_lock (&acpi_gbl_hardware_lock); -+ status = acpi_os_create_raw_lock (&acpi_gbl_hardware_lock); - if (ACPI_FAILURE (status)) { - return_ACPI_STATUS (status); - } -@@ -156,7 +156,7 @@ - /* Delete the spinlocks */ - - acpi_os_delete_lock(acpi_gbl_gpe_lock); -- acpi_os_delete_lock(acpi_gbl_hardware_lock); -+ acpi_os_delete_raw_lock(acpi_gbl_hardware_lock); - acpi_os_delete_lock(acpi_gbl_reference_count_lock); - - /* Delete the reader/writer lock */ -diff -Nur linux-4.4.13.orig/drivers/ata/libata-sff.c linux-4.4.13/drivers/ata/libata-sff.c ---- linux-4.4.13.orig/drivers/ata/libata-sff.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/ata/libata-sff.c 2016-06-22 21:55:07.994523867 +0200 -@@ -678,9 +678,9 @@ - unsigned long flags; - unsigned int consumed; - -- local_irq_save(flags); -+ local_irq_save_nort(flags); - consumed = ata_sff_data_xfer32(dev, buf, buflen, rw); -- local_irq_restore(flags); -+ 
local_irq_restore_nort(flags); - - return consumed; - } -@@ -719,7 +719,7 @@ - unsigned long flags; - - /* FIXME: use a bounce buffer */ -- local_irq_save(flags); -+ local_irq_save_nort(flags); - buf = kmap_atomic(page); - - /* do the actual data transfer */ -@@ -727,7 +727,7 @@ - do_write); - - kunmap_atomic(buf); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } else { - buf = page_address(page); - ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, -@@ -864,7 +864,7 @@ - unsigned long flags; - - /* FIXME: use bounce buffer */ -- local_irq_save(flags); -+ local_irq_save_nort(flags); - buf = kmap_atomic(page); - - /* do the actual data transfer */ -@@ -872,7 +872,7 @@ - count, rw); - - kunmap_atomic(buf); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } else { - buf = page_address(page); - consumed = ap->ops->sff_data_xfer(dev, buf + offset, -diff -Nur linux-4.4.13.orig/drivers/block/zram/zram_drv.c linux-4.4.13/drivers/block/zram/zram_drv.c ---- linux-4.4.13.orig/drivers/block/zram/zram_drv.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/block/zram/zram_drv.c 2016-06-22 21:55:07.994523867 +0200 -@@ -520,6 +520,8 @@ - goto out_error; - } - -+ zram_meta_init_table_locks(meta, disksize); -+ - return meta; - - out_error: -@@ -568,12 +570,12 @@ - unsigned long handle; - size_t size; - -- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); -+ zram_lock_table(&meta->table[index]); - handle = meta->table[index].handle; - size = zram_get_obj_size(meta, index); - - if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { -- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); -+ zram_unlock_table(&meta->table[index]); - clear_page(mem); - return 0; - } -@@ -584,7 +586,7 @@ - else - ret = zcomp_decompress(zram->comp, cmem, size, mem); - zs_unmap_object(meta->mem_pool, handle); -- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); -+ zram_unlock_table(&meta->table[index]); - - /* Should NEVER happen. Return bio error if it does. */ - if (unlikely(ret)) { -@@ -604,14 +606,14 @@ - struct zram_meta *meta = zram->meta; - page = bvec->bv_page; - -- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); -+ zram_lock_table(&meta->table[index]); - if (unlikely(!meta->table[index].handle) || - zram_test_flag(meta, index, ZRAM_ZERO)) { -- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); -+ zram_unlock_table(&meta->table[index]); - handle_zero_page(bvec); - return 0; - } -- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); -+ zram_unlock_table(&meta->table[index]); - - if (is_partial_io(bvec)) - /* Use a temporary buffer to decompress the page */ -@@ -689,10 +691,10 @@ - if (user_mem) - kunmap_atomic(user_mem); - /* Free memory associated with this sector now. */ -- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); -+ zram_lock_table(&meta->table[index]); - zram_free_page(zram, index); - zram_set_flag(meta, index, ZRAM_ZERO); -- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); -+ zram_unlock_table(&meta->table[index]); - - atomic64_inc(&zram->stats.zero_pages); - ret = 0; -@@ -752,12 +754,12 @@ - * Free memory associated with this sector - * before overwriting unused sectors. 
- */ -- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); -+ zram_lock_table(&meta->table[index]); - zram_free_page(zram, index); - - meta->table[index].handle = handle; - zram_set_obj_size(meta, index, clen); -- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); -+ zram_unlock_table(&meta->table[index]); - - /* Update stats */ - atomic64_add(clen, &zram->stats.compr_data_size); -@@ -800,9 +802,9 @@ - } - - while (n >= PAGE_SIZE) { -- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); -+ zram_lock_table(&meta->table[index]); - zram_free_page(zram, index); -- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); -+ zram_unlock_table(&meta->table[index]); - atomic64_inc(&zram->stats.notify_free); - index++; - n -= PAGE_SIZE; -@@ -928,9 +930,9 @@ - zram = bdev->bd_disk->private_data; - meta = zram->meta; - -- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); -+ zram_lock_table(&meta->table[index]); - zram_free_page(zram, index); -- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); -+ zram_unlock_table(&meta->table[index]); - atomic64_inc(&zram->stats.notify_free); - } - -diff -Nur linux-4.4.13.orig/drivers/block/zram/zram_drv.h linux-4.4.13/drivers/block/zram/zram_drv.h ---- linux-4.4.13.orig/drivers/block/zram/zram_drv.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/block/zram/zram_drv.h 2016-06-22 21:55:07.994523867 +0200 -@@ -72,6 +72,9 @@ - struct zram_table_entry { - unsigned long handle; - unsigned long value; -+#ifdef CONFIG_PREEMPT_RT_BASE -+ spinlock_t lock; -+#endif - }; - - struct zram_stats { -@@ -119,4 +122,42 @@ - */ - bool claim; /* Protected by bdev->bd_mutex */ - }; -+ -+#ifndef CONFIG_PREEMPT_RT_BASE -+static inline void zram_lock_table(struct zram_table_entry *table) -+{ -+ bit_spin_lock(ZRAM_ACCESS, &table->value); -+} -+ -+static inline void zram_unlock_table(struct zram_table_entry *table) -+{ -+ bit_spin_unlock(ZRAM_ACCESS, &table->value); -+} -+ -+static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize) { } -+#else /* CONFIG_PREEMPT_RT_BASE */ -+static inline void zram_lock_table(struct zram_table_entry *table) -+{ -+ spin_lock(&table->lock); -+ __set_bit(ZRAM_ACCESS, &table->value); -+} -+ -+static inline void zram_unlock_table(struct zram_table_entry *table) -+{ -+ __clear_bit(ZRAM_ACCESS, &table->value); -+ spin_unlock(&table->lock); -+} -+ -+static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize) -+{ -+ size_t num_pages = disksize >> PAGE_SHIFT; -+ size_t index; -+ -+ for (index = 0; index < num_pages; index++) { -+ spinlock_t *lock = &meta->table[index].lock; -+ spin_lock_init(lock); -+ } -+} -+#endif /* CONFIG_PREEMPT_RT_BASE */ -+ - #endif -diff -Nur linux-4.4.13.orig/drivers/char/random.c linux-4.4.13/drivers/char/random.c ---- linux-4.4.13.orig/drivers/char/random.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/char/random.c 2016-06-22 21:55:07.994523867 +0200 -@@ -796,8 +796,6 @@ - } sample; - long delta, delta2, delta3; - -- preempt_disable(); -- - sample.jiffies = jiffies; - sample.cycles = random_get_entropy(); - sample.num = num; -@@ -838,7 +836,6 @@ - */ - credit_entropy_bits(r, min_t(int, fls(delta>>1), 11)); - } -- preempt_enable(); - } - - void add_input_randomness(unsigned int type, unsigned int code, -@@ -891,28 +888,27 @@ - return *(ptr + f->reg_idx++); - } - --void add_interrupt_randomness(int irq, int irq_flags) -+void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) - { - struct entropy_store *r; - struct fast_pool 
*fast_pool = this_cpu_ptr(&irq_randomness); -- struct pt_regs *regs = get_irq_regs(); - unsigned long now = jiffies; - cycles_t cycles = random_get_entropy(); - __u32 c_high, j_high; -- __u64 ip; - unsigned long seed; - int credit = 0; - - if (cycles == 0) -- cycles = get_reg(fast_pool, regs); -+ cycles = get_reg(fast_pool, NULL); - c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0; - j_high = (sizeof(now) > 4) ? now >> 32 : 0; - fast_pool->pool[0] ^= cycles ^ j_high ^ irq; - fast_pool->pool[1] ^= now ^ c_high; -- ip = regs ? instruction_pointer(regs) : _RET_IP_; -+ if (!ip) -+ ip = _RET_IP_; - fast_pool->pool[2] ^= ip; - fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 : -- get_reg(fast_pool, regs); -+ get_reg(fast_pool, NULL); - - fast_mix(fast_pool); - add_interrupt_bench(cycles); -diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-generated.c linux-4.4.13/drivers/clk/at91/clk-generated.c ---- linux-4.4.13.orig/drivers/clk/at91/clk-generated.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/clk/at91/clk-generated.c 2016-06-22 21:55:07.994523867 +0200 -@@ -15,8 +15,8 @@ - #include - #include - #include --#include --#include -+#include -+#include - - #include "pmc.h" - -@@ -28,8 +28,9 @@ - - struct clk_generated { - struct clk_hw hw; -- struct at91_pmc *pmc; -+ struct regmap *regmap; - struct clk_range range; -+ spinlock_t *lock; - u32 id; - u32 gckdiv; - u8 parent_id; -@@ -41,49 +42,52 @@ - static int clk_generated_enable(struct clk_hw *hw) - { - struct clk_generated *gck = to_clk_generated(hw); -- struct at91_pmc *pmc = gck->pmc; -- u32 tmp; -+ unsigned long flags; - - pr_debug("GCLK: %s, gckdiv = %d, parent id = %d\n", - __func__, gck->gckdiv, gck->parent_id); - -- pmc_lock(pmc); -- pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK)); -- tmp = pmc_read(pmc, AT91_PMC_PCR) & -- ~(AT91_PMC_PCR_GCKDIV_MASK | AT91_PMC_PCR_GCKCSS_MASK); -- pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_GCKCSS(gck->parent_id) -- | AT91_PMC_PCR_CMD -- | AT91_PMC_PCR_GCKDIV(gck->gckdiv) -- | AT91_PMC_PCR_GCKEN); -- pmc_unlock(pmc); -+ spin_lock_irqsave(gck->lock, flags); -+ regmap_write(gck->regmap, AT91_PMC_PCR, -+ (gck->id & AT91_PMC_PCR_PID_MASK)); -+ regmap_update_bits(gck->regmap, AT91_PMC_PCR, -+ AT91_PMC_PCR_GCKDIV_MASK | AT91_PMC_PCR_GCKCSS_MASK | -+ AT91_PMC_PCR_CMD | AT91_PMC_PCR_GCKEN, -+ AT91_PMC_PCR_GCKCSS(gck->parent_id) | -+ AT91_PMC_PCR_CMD | -+ AT91_PMC_PCR_GCKDIV(gck->gckdiv) | -+ AT91_PMC_PCR_GCKEN); -+ spin_unlock_irqrestore(gck->lock, flags); - return 0; - } - - static void clk_generated_disable(struct clk_hw *hw) - { - struct clk_generated *gck = to_clk_generated(hw); -- struct at91_pmc *pmc = gck->pmc; -- u32 tmp; -+ unsigned long flags; - -- pmc_lock(pmc); -- pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK)); -- tmp = pmc_read(pmc, AT91_PMC_PCR) & ~AT91_PMC_PCR_GCKEN; -- pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_CMD); -- pmc_unlock(pmc); -+ spin_lock_irqsave(gck->lock, flags); -+ regmap_write(gck->regmap, AT91_PMC_PCR, -+ (gck->id & AT91_PMC_PCR_PID_MASK)); -+ regmap_update_bits(gck->regmap, AT91_PMC_PCR, -+ AT91_PMC_PCR_CMD | AT91_PMC_PCR_GCKEN, -+ AT91_PMC_PCR_CMD); -+ spin_unlock_irqrestore(gck->lock, flags); - } - - static int clk_generated_is_enabled(struct clk_hw *hw) - { - struct clk_generated *gck = to_clk_generated(hw); -- struct at91_pmc *pmc = gck->pmc; -- int ret; -+ unsigned long flags; -+ unsigned int status; - -- pmc_lock(pmc); -- pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK)); -- ret = !!(pmc_read(pmc, 
AT91_PMC_PCR) & AT91_PMC_PCR_GCKEN); -- pmc_unlock(pmc); -+ spin_lock_irqsave(gck->lock, flags); -+ regmap_write(gck->regmap, AT91_PMC_PCR, -+ (gck->id & AT91_PMC_PCR_PID_MASK)); -+ regmap_read(gck->regmap, AT91_PMC_PCR, &status); -+ spin_unlock_irqrestore(gck->lock, flags); - -- return ret; -+ return status & AT91_PMC_PCR_GCKEN ? 1 : 0; - } - - static unsigned long -@@ -214,13 +218,14 @@ - */ - static void clk_generated_startup(struct clk_generated *gck) - { -- struct at91_pmc *pmc = gck->pmc; - u32 tmp; -+ unsigned long flags; - -- pmc_lock(pmc); -- pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK)); -- tmp = pmc_read(pmc, AT91_PMC_PCR); -- pmc_unlock(pmc); -+ spin_lock_irqsave(gck->lock, flags); -+ regmap_write(gck->regmap, AT91_PMC_PCR, -+ (gck->id & AT91_PMC_PCR_PID_MASK)); -+ regmap_read(gck->regmap, AT91_PMC_PCR, &tmp); -+ spin_unlock_irqrestore(gck->lock, flags); - - gck->parent_id = (tmp & AT91_PMC_PCR_GCKCSS_MASK) - >> AT91_PMC_PCR_GCKCSS_OFFSET; -@@ -229,8 +234,8 @@ - } - - static struct clk * __init --at91_clk_register_generated(struct at91_pmc *pmc, const char *name, -- const char **parent_names, u8 num_parents, -+at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock, const char -+ *name, const char **parent_names, u8 num_parents, - u8 id, const struct clk_range *range) - { - struct clk_generated *gck; -@@ -249,7 +254,8 @@ - - gck->id = id; - gck->hw.init = &init; -- gck->pmc = pmc; -+ gck->regmap = regmap; -+ gck->lock = lock; - gck->range = *range; - - clk = clk_register(NULL, &gck->hw); -@@ -261,8 +267,7 @@ - return clk; - } - --void __init of_sama5d2_clk_generated_setup(struct device_node *np, -- struct at91_pmc *pmc) -+void __init of_sama5d2_clk_generated_setup(struct device_node *np) - { - int num; - u32 id; -@@ -272,6 +277,7 @@ - const char *parent_names[GENERATED_SOURCE_MAX]; - struct device_node *gcknp; - struct clk_range range = CLK_RANGE(0, 0); -+ struct regmap *regmap; - - num_parents = of_clk_get_parent_count(np); - if (num_parents <= 0 || num_parents > GENERATED_SOURCE_MAX) -@@ -283,6 +289,10 @@ - if (!num || num > PERIPHERAL_MAX) - return; - -+ regmap = syscon_node_to_regmap(of_get_parent(np)); -+ if (IS_ERR(regmap)) -+ return; -+ - for_each_child_of_node(np, gcknp) { - if (of_property_read_u32(gcknp, "reg", &id)) - continue; -@@ -296,11 +306,14 @@ - of_at91_get_clk_range(gcknp, "atmel,clk-output-range", - &range); - -- clk = at91_clk_register_generated(pmc, name, parent_names, -- num_parents, id, &range); -+ clk = at91_clk_register_generated(regmap, &pmc_pcr_lock, name, -+ parent_names, num_parents, -+ id, &range); - if (IS_ERR(clk)) - continue; - - of_clk_add_provider(gcknp, of_clk_src_simple_get, clk); - } - } -+CLK_OF_DECLARE(of_sama5d2_clk_generated_setup, "atmel,sama5d2-clk-generated", -+ of_sama5d2_clk_generated_setup); -diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-h32mx.c linux-4.4.13/drivers/clk/at91/clk-h32mx.c ---- linux-4.4.13.orig/drivers/clk/at91/clk-h32mx.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/clk/at91/clk-h32mx.c 2016-06-22 21:55:07.994523867 +0200 -@@ -15,15 +15,9 @@ - #include - #include - #include --#include - #include --#include --#include --#include --#include --#include --#include --#include -+#include -+#include - - #include "pmc.h" - -@@ -31,7 +25,7 @@ - - struct clk_sama5d4_h32mx { - struct clk_hw hw; -- struct at91_pmc *pmc; -+ struct regmap *regmap; - }; - - #define to_clk_sama5d4_h32mx(hw) container_of(hw, struct clk_sama5d4_h32mx, hw) -@@ -40,8 +34,10 @@ - unsigned long 
parent_rate) - { - struct clk_sama5d4_h32mx *h32mxclk = to_clk_sama5d4_h32mx(hw); -+ unsigned int mckr; - -- if (pmc_read(h32mxclk->pmc, AT91_PMC_MCKR) & AT91_PMC_H32MXDIV) -+ regmap_read(h32mxclk->regmap, AT91_PMC_MCKR, &mckr); -+ if (mckr & AT91_PMC_H32MXDIV) - return parent_rate / 2; - - if (parent_rate > H32MX_MAX_FREQ) -@@ -70,18 +66,16 @@ - unsigned long parent_rate) - { - struct clk_sama5d4_h32mx *h32mxclk = to_clk_sama5d4_h32mx(hw); -- struct at91_pmc *pmc = h32mxclk->pmc; -- u32 tmp; -+ u32 mckr = 0; - - if (parent_rate != rate && (parent_rate / 2) != rate) - return -EINVAL; - -- pmc_lock(pmc); -- tmp = pmc_read(pmc, AT91_PMC_MCKR) & ~AT91_PMC_H32MXDIV; - if ((parent_rate / 2) == rate) -- tmp |= AT91_PMC_H32MXDIV; -- pmc_write(pmc, AT91_PMC_MCKR, tmp); -- pmc_unlock(pmc); -+ mckr = AT91_PMC_H32MXDIV; -+ -+ regmap_update_bits(h32mxclk->regmap, AT91_PMC_MCKR, -+ AT91_PMC_H32MXDIV, mckr); - - return 0; - } -@@ -92,14 +86,18 @@ - .set_rate = clk_sama5d4_h32mx_set_rate, - }; - --void __init of_sama5d4_clk_h32mx_setup(struct device_node *np, -- struct at91_pmc *pmc) -+static void __init of_sama5d4_clk_h32mx_setup(struct device_node *np) - { - struct clk_sama5d4_h32mx *h32mxclk; - struct clk_init_data init; - const char *parent_name; -+ struct regmap *regmap; - struct clk *clk; - -+ regmap = syscon_node_to_regmap(of_get_parent(np)); -+ if (IS_ERR(regmap)) -+ return; -+ - h32mxclk = kzalloc(sizeof(*h32mxclk), GFP_KERNEL); - if (!h32mxclk) - return; -@@ -113,7 +111,7 @@ - init.flags = CLK_SET_RATE_GATE; - - h32mxclk->hw.init = &init; -- h32mxclk->pmc = pmc; -+ h32mxclk->regmap = regmap; - - clk = clk_register(NULL, &h32mxclk->hw); - if (IS_ERR(clk)) { -@@ -123,3 +121,5 @@ - - of_clk_add_provider(np, of_clk_src_simple_get, clk); - } -+CLK_OF_DECLARE(of_sama5d4_clk_h32mx_setup, "atmel,sama5d4-clk-h32mx", -+ of_sama5d4_clk_h32mx_setup); -diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-main.c linux-4.4.13/drivers/clk/at91/clk-main.c ---- linux-4.4.13.orig/drivers/clk/at91/clk-main.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/clk/at91/clk-main.c 2016-06-22 21:55:07.994523867 +0200 -@@ -13,13 +13,8 @@ - #include - #include - #include --#include --#include --#include --#include --#include --#include --#include -+#include -+#include - - #include "pmc.h" - -@@ -34,18 +29,14 @@ - - struct clk_main_osc { - struct clk_hw hw; -- struct at91_pmc *pmc; -- unsigned int irq; -- wait_queue_head_t wait; -+ struct regmap *regmap; - }; - - #define to_clk_main_osc(hw) container_of(hw, struct clk_main_osc, hw) - - struct clk_main_rc_osc { - struct clk_hw hw; -- struct at91_pmc *pmc; -- unsigned int irq; -- wait_queue_head_t wait; -+ struct regmap *regmap; - unsigned long frequency; - unsigned long accuracy; - }; -@@ -54,51 +45,47 @@ - - struct clk_rm9200_main { - struct clk_hw hw; -- struct at91_pmc *pmc; -+ struct regmap *regmap; - }; - - #define to_clk_rm9200_main(hw) container_of(hw, struct clk_rm9200_main, hw) - - struct clk_sam9x5_main { - struct clk_hw hw; -- struct at91_pmc *pmc; -- unsigned int irq; -- wait_queue_head_t wait; -+ struct regmap *regmap; - u8 parent; - }; - - #define to_clk_sam9x5_main(hw) container_of(hw, struct clk_sam9x5_main, hw) - --static irqreturn_t clk_main_osc_irq_handler(int irq, void *dev_id) -+static inline bool clk_main_osc_ready(struct regmap *regmap) - { -- struct clk_main_osc *osc = dev_id; -+ unsigned int status; - -- wake_up(&osc->wait); -- disable_irq_nosync(osc->irq); -+ regmap_read(regmap, AT91_PMC_SR, &status); - -- return IRQ_HANDLED; -+ return 
status & AT91_PMC_MOSCS; - } - - static int clk_main_osc_prepare(struct clk_hw *hw) - { - struct clk_main_osc *osc = to_clk_main_osc(hw); -- struct at91_pmc *pmc = osc->pmc; -+ struct regmap *regmap = osc->regmap; - u32 tmp; - -- tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK; -+ regmap_read(regmap, AT91_CKGR_MOR, &tmp); -+ tmp &= ~MOR_KEY_MASK; -+ - if (tmp & AT91_PMC_OSCBYPASS) - return 0; - - if (!(tmp & AT91_PMC_MOSCEN)) { - tmp |= AT91_PMC_MOSCEN | AT91_PMC_KEY; -- pmc_write(pmc, AT91_CKGR_MOR, tmp); -+ regmap_write(regmap, AT91_CKGR_MOR, tmp); - } - -- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS)) { -- enable_irq(osc->irq); -- wait_event(osc->wait, -- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS); -- } -+ while (!clk_main_osc_ready(regmap)) -+ cpu_relax(); - - return 0; - } -@@ -106,9 +93,10 @@ - static void clk_main_osc_unprepare(struct clk_hw *hw) - { - struct clk_main_osc *osc = to_clk_main_osc(hw); -- struct at91_pmc *pmc = osc->pmc; -- u32 tmp = pmc_read(pmc, AT91_CKGR_MOR); -+ struct regmap *regmap = osc->regmap; -+ u32 tmp; - -+ regmap_read(regmap, AT91_CKGR_MOR, &tmp); - if (tmp & AT91_PMC_OSCBYPASS) - return; - -@@ -116,20 +104,22 @@ - return; - - tmp &= ~(AT91_PMC_KEY | AT91_PMC_MOSCEN); -- pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_KEY); -+ regmap_write(regmap, AT91_CKGR_MOR, tmp | AT91_PMC_KEY); - } - - static int clk_main_osc_is_prepared(struct clk_hw *hw) - { - struct clk_main_osc *osc = to_clk_main_osc(hw); -- struct at91_pmc *pmc = osc->pmc; -- u32 tmp = pmc_read(pmc, AT91_CKGR_MOR); -+ struct regmap *regmap = osc->regmap; -+ u32 tmp, status; - -+ regmap_read(regmap, AT91_CKGR_MOR, &tmp); - if (tmp & AT91_PMC_OSCBYPASS) - return 1; - -- return !!((pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS) && -- (pmc_read(pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCEN)); -+ regmap_read(regmap, AT91_PMC_SR, &status); -+ -+ return (status & AT91_PMC_MOSCS) && (tmp & AT91_PMC_MOSCEN); - } - - static const struct clk_ops main_osc_ops = { -@@ -139,18 +129,16 @@ - }; - - static struct clk * __init --at91_clk_register_main_osc(struct at91_pmc *pmc, -- unsigned int irq, -+at91_clk_register_main_osc(struct regmap *regmap, - const char *name, - const char *parent_name, - bool bypass) - { -- int ret; - struct clk_main_osc *osc; - struct clk *clk = NULL; - struct clk_init_data init; - -- if (!pmc || !irq || !name || !parent_name) -+ if (!name || !parent_name) - return ERR_PTR(-EINVAL); - - osc = kzalloc(sizeof(*osc), GFP_KERNEL); -@@ -164,85 +152,70 @@ - init.flags = CLK_IGNORE_UNUSED; - - osc->hw.init = &init; -- osc->pmc = pmc; -- osc->irq = irq; -- -- init_waitqueue_head(&osc->wait); -- irq_set_status_flags(osc->irq, IRQ_NOAUTOEN); -- ret = request_irq(osc->irq, clk_main_osc_irq_handler, -- IRQF_TRIGGER_HIGH, name, osc); -- if (ret) { -- kfree(osc); -- return ERR_PTR(ret); -- } -+ osc->regmap = regmap; - - if (bypass) -- pmc_write(pmc, AT91_CKGR_MOR, -- (pmc_read(pmc, AT91_CKGR_MOR) & -- ~(MOR_KEY_MASK | AT91_PMC_MOSCEN)) | -- AT91_PMC_OSCBYPASS | AT91_PMC_KEY); -+ regmap_update_bits(regmap, -+ AT91_CKGR_MOR, MOR_KEY_MASK | -+ AT91_PMC_MOSCEN, -+ AT91_PMC_OSCBYPASS | AT91_PMC_KEY); - - clk = clk_register(NULL, &osc->hw); -- if (IS_ERR(clk)) { -- free_irq(irq, osc); -+ if (IS_ERR(clk)) - kfree(osc); -- } - - return clk; - } - --void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np, -- struct at91_pmc *pmc) -+static void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np) - { - struct clk *clk; -- unsigned int irq; - const char *name = np->name; - const char 
*parent_name; -+ struct regmap *regmap; - bool bypass; - - of_property_read_string(np, "clock-output-names", &name); - bypass = of_property_read_bool(np, "atmel,osc-bypass"); - parent_name = of_clk_get_parent_name(np, 0); - -- irq = irq_of_parse_and_map(np, 0); -- if (!irq) -+ regmap = syscon_node_to_regmap(of_get_parent(np)); -+ if (IS_ERR(regmap)) - return; - -- clk = at91_clk_register_main_osc(pmc, irq, name, parent_name, bypass); -+ clk = at91_clk_register_main_osc(regmap, name, parent_name, bypass); - if (IS_ERR(clk)) - return; - - of_clk_add_provider(np, of_clk_src_simple_get, clk); - } -+CLK_OF_DECLARE(at91rm9200_clk_main_osc, "atmel,at91rm9200-clk-main-osc", -+ of_at91rm9200_clk_main_osc_setup); - --static irqreturn_t clk_main_rc_osc_irq_handler(int irq, void *dev_id) -+static bool clk_main_rc_osc_ready(struct regmap *regmap) - { -- struct clk_main_rc_osc *osc = dev_id; -+ unsigned int status; - -- wake_up(&osc->wait); -- disable_irq_nosync(osc->irq); -+ regmap_read(regmap, AT91_PMC_SR, &status); - -- return IRQ_HANDLED; -+ return status & AT91_PMC_MOSCRCS; - } - - static int clk_main_rc_osc_prepare(struct clk_hw *hw) - { - struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw); -- struct at91_pmc *pmc = osc->pmc; -- u32 tmp; -+ struct regmap *regmap = osc->regmap; -+ unsigned int mor; - -- tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK; -+ regmap_read(regmap, AT91_CKGR_MOR, &mor); - -- if (!(tmp & AT91_PMC_MOSCRCEN)) { -- tmp |= AT91_PMC_MOSCRCEN | AT91_PMC_KEY; -- pmc_write(pmc, AT91_CKGR_MOR, tmp); -- } -+ if (!(mor & AT91_PMC_MOSCRCEN)) -+ regmap_update_bits(regmap, AT91_CKGR_MOR, -+ MOR_KEY_MASK | AT91_PMC_MOSCRCEN, -+ AT91_PMC_MOSCRCEN | AT91_PMC_KEY); - -- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS)) { -- enable_irq(osc->irq); -- wait_event(osc->wait, -- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS); -- } -+ while (!clk_main_rc_osc_ready(regmap)) -+ cpu_relax(); - - return 0; - } -@@ -250,23 +223,28 @@ - static void clk_main_rc_osc_unprepare(struct clk_hw *hw) - { - struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw); -- struct at91_pmc *pmc = osc->pmc; -- u32 tmp = pmc_read(pmc, AT91_CKGR_MOR); -+ struct regmap *regmap = osc->regmap; -+ unsigned int mor; - -- if (!(tmp & AT91_PMC_MOSCRCEN)) -+ regmap_read(regmap, AT91_CKGR_MOR, &mor); -+ -+ if (!(mor & AT91_PMC_MOSCRCEN)) - return; - -- tmp &= ~(MOR_KEY_MASK | AT91_PMC_MOSCRCEN); -- pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_KEY); -+ regmap_update_bits(regmap, AT91_CKGR_MOR, -+ MOR_KEY_MASK | AT91_PMC_MOSCRCEN, AT91_PMC_KEY); - } - - static int clk_main_rc_osc_is_prepared(struct clk_hw *hw) - { - struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw); -- struct at91_pmc *pmc = osc->pmc; -+ struct regmap *regmap = osc->regmap; -+ unsigned int mor, status; -+ -+ regmap_read(regmap, AT91_CKGR_MOR, &mor); -+ regmap_read(regmap, AT91_PMC_SR, &status); - -- return !!((pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS) && -- (pmc_read(pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCRCEN)); -+ return (mor & AT91_PMC_MOSCRCEN) && (status & AT91_PMC_MOSCRCS); - } - - static unsigned long clk_main_rc_osc_recalc_rate(struct clk_hw *hw, -@@ -294,17 +272,15 @@ - }; - - static struct clk * __init --at91_clk_register_main_rc_osc(struct at91_pmc *pmc, -- unsigned int irq, -+at91_clk_register_main_rc_osc(struct regmap *regmap, - const char *name, - u32 frequency, u32 accuracy) - { -- int ret; - struct clk_main_rc_osc *osc; - struct clk *clk = NULL; - struct clk_init_data init; - -- if (!pmc || !irq || !name || !frequency) -+ if (!name || 
!frequency) - return ERR_PTR(-EINVAL); - - osc = kzalloc(sizeof(*osc), GFP_KERNEL); -@@ -318,63 +294,53 @@ - init.flags = CLK_IS_ROOT | CLK_IGNORE_UNUSED; - - osc->hw.init = &init; -- osc->pmc = pmc; -- osc->irq = irq; -+ osc->regmap = regmap; - osc->frequency = frequency; - osc->accuracy = accuracy; - -- init_waitqueue_head(&osc->wait); -- irq_set_status_flags(osc->irq, IRQ_NOAUTOEN); -- ret = request_irq(osc->irq, clk_main_rc_osc_irq_handler, -- IRQF_TRIGGER_HIGH, name, osc); -- if (ret) -- return ERR_PTR(ret); -- - clk = clk_register(NULL, &osc->hw); -- if (IS_ERR(clk)) { -- free_irq(irq, osc); -+ if (IS_ERR(clk)) - kfree(osc); -- } - - return clk; - } - --void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np, -- struct at91_pmc *pmc) -+static void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np) - { - struct clk *clk; -- unsigned int irq; - u32 frequency = 0; - u32 accuracy = 0; - const char *name = np->name; -+ struct regmap *regmap; - - of_property_read_string(np, "clock-output-names", &name); - of_property_read_u32(np, "clock-frequency", &frequency); - of_property_read_u32(np, "clock-accuracy", &accuracy); - -- irq = irq_of_parse_and_map(np, 0); -- if (!irq) -+ regmap = syscon_node_to_regmap(of_get_parent(np)); -+ if (IS_ERR(regmap)) - return; - -- clk = at91_clk_register_main_rc_osc(pmc, irq, name, frequency, -- accuracy); -+ clk = at91_clk_register_main_rc_osc(regmap, name, frequency, accuracy); - if (IS_ERR(clk)) - return; - - of_clk_add_provider(np, of_clk_src_simple_get, clk); - } -+CLK_OF_DECLARE(at91sam9x5_clk_main_rc_osc, "atmel,at91sam9x5-clk-main-rc-osc", -+ of_at91sam9x5_clk_main_rc_osc_setup); - - --static int clk_main_probe_frequency(struct at91_pmc *pmc) -+static int clk_main_probe_frequency(struct regmap *regmap) - { - unsigned long prep_time, timeout; -- u32 tmp; -+ unsigned int mcfr; - - timeout = jiffies + usecs_to_jiffies(MAINFRDY_TIMEOUT); - do { - prep_time = jiffies; -- tmp = pmc_read(pmc, AT91_CKGR_MCFR); -- if (tmp & AT91_PMC_MAINRDY) -+ regmap_read(regmap, AT91_CKGR_MCFR, &mcfr); -+ if (mcfr & AT91_PMC_MAINRDY) - return 0; - usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT); - } while (time_before(prep_time, timeout)); -@@ -382,34 +348,37 @@ - return -ETIMEDOUT; - } - --static unsigned long clk_main_recalc_rate(struct at91_pmc *pmc, -+static unsigned long clk_main_recalc_rate(struct regmap *regmap, - unsigned long parent_rate) - { -- u32 tmp; -+ unsigned int mcfr; - - if (parent_rate) - return parent_rate; - - pr_warn("Main crystal frequency not set, using approximate value\n"); -- tmp = pmc_read(pmc, AT91_CKGR_MCFR); -- if (!(tmp & AT91_PMC_MAINRDY)) -+ regmap_read(regmap, AT91_CKGR_MCFR, &mcfr); -+ if (!(mcfr & AT91_PMC_MAINRDY)) - return 0; - -- return ((tmp & AT91_PMC_MAINF) * SLOW_CLOCK_FREQ) / MAINF_DIV; -+ return ((mcfr & AT91_PMC_MAINF) * SLOW_CLOCK_FREQ) / MAINF_DIV; - } - - static int clk_rm9200_main_prepare(struct clk_hw *hw) - { - struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw); - -- return clk_main_probe_frequency(clkmain->pmc); -+ return clk_main_probe_frequency(clkmain->regmap); - } - - static int clk_rm9200_main_is_prepared(struct clk_hw *hw) - { - struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw); -+ unsigned int status; -+ -+ regmap_read(clkmain->regmap, AT91_CKGR_MCFR, &status); - -- return !!(pmc_read(clkmain->pmc, AT91_CKGR_MCFR) & AT91_PMC_MAINRDY); -+ return status & AT91_PMC_MAINRDY ? 
1 : 0; - } - - static unsigned long clk_rm9200_main_recalc_rate(struct clk_hw *hw, -@@ -417,7 +386,7 @@ - { - struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw); - -- return clk_main_recalc_rate(clkmain->pmc, parent_rate); -+ return clk_main_recalc_rate(clkmain->regmap, parent_rate); - } - - static const struct clk_ops rm9200_main_ops = { -@@ -427,7 +396,7 @@ - }; - - static struct clk * __init --at91_clk_register_rm9200_main(struct at91_pmc *pmc, -+at91_clk_register_rm9200_main(struct regmap *regmap, - const char *name, - const char *parent_name) - { -@@ -435,7 +404,7 @@ - struct clk *clk = NULL; - struct clk_init_data init; - -- if (!pmc || !name) -+ if (!name) - return ERR_PTR(-EINVAL); - - if (!parent_name) -@@ -452,7 +421,7 @@ - init.flags = 0; - - clkmain->hw.init = &init; -- clkmain->pmc = pmc; -+ clkmain->regmap = regmap; - - clk = clk_register(NULL, &clkmain->hw); - if (IS_ERR(clk)) -@@ -461,52 +430,54 @@ - return clk; - } - --void __init of_at91rm9200_clk_main_setup(struct device_node *np, -- struct at91_pmc *pmc) -+static void __init of_at91rm9200_clk_main_setup(struct device_node *np) - { - struct clk *clk; - const char *parent_name; - const char *name = np->name; -+ struct regmap *regmap; - - parent_name = of_clk_get_parent_name(np, 0); - of_property_read_string(np, "clock-output-names", &name); - -- clk = at91_clk_register_rm9200_main(pmc, name, parent_name); -+ regmap = syscon_node_to_regmap(of_get_parent(np)); -+ if (IS_ERR(regmap)) -+ return; -+ -+ clk = at91_clk_register_rm9200_main(regmap, name, parent_name); - if (IS_ERR(clk)) - return; - - of_clk_add_provider(np, of_clk_src_simple_get, clk); - } -+CLK_OF_DECLARE(at91rm9200_clk_main, "atmel,at91rm9200-clk-main", -+ of_at91rm9200_clk_main_setup); - --static irqreturn_t clk_sam9x5_main_irq_handler(int irq, void *dev_id) -+static inline bool clk_sam9x5_main_ready(struct regmap *regmap) - { -- struct clk_sam9x5_main *clkmain = dev_id; -+ unsigned int status; - -- wake_up(&clkmain->wait); -- disable_irq_nosync(clkmain->irq); -+ regmap_read(regmap, AT91_PMC_SR, &status); - -- return IRQ_HANDLED; -+ return status & AT91_PMC_MOSCSELS ? 
1 : 0; - } - - static int clk_sam9x5_main_prepare(struct clk_hw *hw) - { - struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw); -- struct at91_pmc *pmc = clkmain->pmc; -+ struct regmap *regmap = clkmain->regmap; - -- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS)) { -- enable_irq(clkmain->irq); -- wait_event(clkmain->wait, -- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS); -- } -+ while (!clk_sam9x5_main_ready(regmap)) -+ cpu_relax(); - -- return clk_main_probe_frequency(pmc); -+ return clk_main_probe_frequency(regmap); - } - - static int clk_sam9x5_main_is_prepared(struct clk_hw *hw) - { - struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw); - -- return !!(pmc_read(clkmain->pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS); -+ return clk_sam9x5_main_ready(clkmain->regmap); - } - - static unsigned long clk_sam9x5_main_recalc_rate(struct clk_hw *hw, -@@ -514,30 +485,28 @@ - { - struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw); - -- return clk_main_recalc_rate(clkmain->pmc, parent_rate); -+ return clk_main_recalc_rate(clkmain->regmap, parent_rate); - } - - static int clk_sam9x5_main_set_parent(struct clk_hw *hw, u8 index) - { - struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw); -- struct at91_pmc *pmc = clkmain->pmc; -- u32 tmp; -+ struct regmap *regmap = clkmain->regmap; -+ unsigned int tmp; - - if (index > 1) - return -EINVAL; - -- tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK; -+ regmap_read(regmap, AT91_CKGR_MOR, &tmp); -+ tmp &= ~MOR_KEY_MASK; - - if (index && !(tmp & AT91_PMC_MOSCSEL)) -- pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL); -+ regmap_write(regmap, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL); - else if (!index && (tmp & AT91_PMC_MOSCSEL)) -- pmc_write(pmc, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL); -+ regmap_write(regmap, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL); - -- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS)) { -- enable_irq(clkmain->irq); -- wait_event(clkmain->wait, -- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS); -- } -+ while (!clk_sam9x5_main_ready(regmap)) -+ cpu_relax(); - - return 0; - } -@@ -545,8 +514,11 @@ - static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw) - { - struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw); -+ unsigned int status; -+ -+ regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status); - -- return !!(pmc_read(clkmain->pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCEN); -+ return status & AT91_PMC_MOSCEN ? 
1 : 0; - } - - static const struct clk_ops sam9x5_main_ops = { -@@ -558,18 +530,17 @@ - }; - - static struct clk * __init --at91_clk_register_sam9x5_main(struct at91_pmc *pmc, -- unsigned int irq, -+at91_clk_register_sam9x5_main(struct regmap *regmap, - const char *name, - const char **parent_names, - int num_parents) - { -- int ret; - struct clk_sam9x5_main *clkmain; - struct clk *clk = NULL; - struct clk_init_data init; -+ unsigned int status; - -- if (!pmc || !irq || !name) -+ if (!name) - return ERR_PTR(-EINVAL); - - if (!parent_names || !num_parents) -@@ -586,51 +557,42 @@ - init.flags = CLK_SET_PARENT_GATE; - - clkmain->hw.init = &init; -- clkmain->pmc = pmc; -- clkmain->irq = irq; -- clkmain->parent = !!(pmc_read(clkmain->pmc, AT91_CKGR_MOR) & -- AT91_PMC_MOSCEN); -- init_waitqueue_head(&clkmain->wait); -- irq_set_status_flags(clkmain->irq, IRQ_NOAUTOEN); -- ret = request_irq(clkmain->irq, clk_sam9x5_main_irq_handler, -- IRQF_TRIGGER_HIGH, name, clkmain); -- if (ret) -- return ERR_PTR(ret); -+ clkmain->regmap = regmap; -+ regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status); -+ clkmain->parent = status & AT91_PMC_MOSCEN ? 1 : 0; - - clk = clk_register(NULL, &clkmain->hw); -- if (IS_ERR(clk)) { -- free_irq(clkmain->irq, clkmain); -+ if (IS_ERR(clk)) - kfree(clkmain); -- } - - return clk; - } - --void __init of_at91sam9x5_clk_main_setup(struct device_node *np, -- struct at91_pmc *pmc) -+static void __init of_at91sam9x5_clk_main_setup(struct device_node *np) - { - struct clk *clk; - const char *parent_names[2]; - int num_parents; -- unsigned int irq; - const char *name = np->name; -+ struct regmap *regmap; - - num_parents = of_clk_get_parent_count(np); - if (num_parents <= 0 || num_parents > 2) - return; - - of_clk_parent_fill(np, parent_names, num_parents); -+ regmap = syscon_node_to_regmap(of_get_parent(np)); -+ if (IS_ERR(regmap)) -+ return; - - of_property_read_string(np, "clock-output-names", &name); - -- irq = irq_of_parse_and_map(np, 0); -- if (!irq) -- return; -- -- clk = at91_clk_register_sam9x5_main(pmc, irq, name, parent_names, -+ clk = at91_clk_register_sam9x5_main(regmap, name, parent_names, - num_parents); - if (IS_ERR(clk)) - return; - - of_clk_add_provider(np, of_clk_src_simple_get, clk); - } -+CLK_OF_DECLARE(at91sam9x5_clk_main, "atmel,at91sam9x5-clk-main", -+ of_at91sam9x5_clk_main_setup); -diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-master.c linux-4.4.13/drivers/clk/at91/clk-master.c ---- linux-4.4.13.orig/drivers/clk/at91/clk-master.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/clk/at91/clk-master.c 2016-06-22 21:55:07.998523867 +0200 -@@ -12,13 +12,8 @@ - #include - #include - #include --#include --#include --#include --#include --#include --#include --#include -+#include -+#include - - #include "pmc.h" - -@@ -44,32 +39,26 @@ - - struct clk_master { - struct clk_hw hw; -- struct at91_pmc *pmc; -- unsigned int irq; -- wait_queue_head_t wait; -+ struct regmap *regmap; - const struct clk_master_layout *layout; - const struct clk_master_characteristics *characteristics; - }; - --static irqreturn_t clk_master_irq_handler(int irq, void *dev_id) -+static inline bool clk_master_ready(struct regmap *regmap) - { -- struct clk_master *master = (struct clk_master *)dev_id; -+ unsigned int status; - -- wake_up(&master->wait); -- disable_irq_nosync(master->irq); -+ regmap_read(regmap, AT91_PMC_SR, &status); - -- return IRQ_HANDLED; -+ return status & AT91_PMC_MCKRDY ? 
1 : 0; - } -+ - static int clk_master_prepare(struct clk_hw *hw) - { - struct clk_master *master = to_clk_master(hw); -- struct at91_pmc *pmc = master->pmc; - -- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY)) { -- enable_irq(master->irq); -- wait_event(master->wait, -- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY); -- } -+ while (!clk_master_ready(master->regmap)) -+ cpu_relax(); - - return 0; - } -@@ -78,7 +67,7 @@ - { - struct clk_master *master = to_clk_master(hw); - -- return !!(pmc_read(master->pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY); -+ return clk_master_ready(master->regmap); - } - - static unsigned long clk_master_recalc_rate(struct clk_hw *hw, -@@ -88,18 +77,16 @@ - u8 div; - unsigned long rate = parent_rate; - struct clk_master *master = to_clk_master(hw); -- struct at91_pmc *pmc = master->pmc; - const struct clk_master_layout *layout = master->layout; - const struct clk_master_characteristics *characteristics = - master->characteristics; -- u32 tmp; -+ unsigned int mckr; - -- pmc_lock(pmc); -- tmp = pmc_read(pmc, AT91_PMC_MCKR) & layout->mask; -- pmc_unlock(pmc); -+ regmap_read(master->regmap, AT91_PMC_MCKR, &mckr); -+ mckr &= layout->mask; - -- pres = (tmp >> layout->pres_shift) & MASTER_PRES_MASK; -- div = (tmp >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK; -+ pres = (mckr >> layout->pres_shift) & MASTER_PRES_MASK; -+ div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK; - - if (characteristics->have_div3_pres && pres == MASTER_PRES_MAX) - rate /= 3; -@@ -119,9 +106,11 @@ - static u8 clk_master_get_parent(struct clk_hw *hw) - { - struct clk_master *master = to_clk_master(hw); -- struct at91_pmc *pmc = master->pmc; -+ unsigned int mckr; - -- return pmc_read(pmc, AT91_PMC_MCKR) & AT91_PMC_CSS; -+ regmap_read(master->regmap, AT91_PMC_MCKR, &mckr); -+ -+ return mckr & AT91_PMC_CSS; - } - - static const struct clk_ops master_ops = { -@@ -132,18 +121,17 @@ - }; - - static struct clk * __init --at91_clk_register_master(struct at91_pmc *pmc, unsigned int irq, -+at91_clk_register_master(struct regmap *regmap, - const char *name, int num_parents, - const char **parent_names, - const struct clk_master_layout *layout, - const struct clk_master_characteristics *characteristics) - { -- int ret; - struct clk_master *master; - struct clk *clk = NULL; - struct clk_init_data init; - -- if (!pmc || !irq || !name || !num_parents || !parent_names) -+ if (!name || !num_parents || !parent_names) - return ERR_PTR(-EINVAL); - - master = kzalloc(sizeof(*master), GFP_KERNEL); -@@ -159,20 +147,10 @@ - master->hw.init = &init; - master->layout = layout; - master->characteristics = characteristics; -- master->pmc = pmc; -- master->irq = irq; -- init_waitqueue_head(&master->wait); -- irq_set_status_flags(master->irq, IRQ_NOAUTOEN); -- ret = request_irq(master->irq, clk_master_irq_handler, -- IRQF_TRIGGER_HIGH, "clk-master", master); -- if (ret) { -- kfree(master); -- return ERR_PTR(ret); -- } -+ master->regmap = regmap; - - clk = clk_register(NULL, &master->hw); - if (IS_ERR(clk)) { -- free_irq(master->irq, master); - kfree(master); - } - -@@ -217,15 +195,15 @@ - } - - static void __init --of_at91_clk_master_setup(struct device_node *np, struct at91_pmc *pmc, -+of_at91_clk_master_setup(struct device_node *np, - const struct clk_master_layout *layout) - { - struct clk *clk; - int num_parents; -- unsigned int irq; - const char *parent_names[MASTER_SOURCE_MAX]; - const char *name = np->name; - struct clk_master_characteristics *characteristics; -+ struct regmap *regmap; - - num_parents = 
of_clk_get_parent_count(np); - if (num_parents <= 0 || num_parents > MASTER_SOURCE_MAX) -@@ -239,11 +217,11 @@ - if (!characteristics) - return; - -- irq = irq_of_parse_and_map(np, 0); -- if (!irq) -- goto out_free_characteristics; -+ regmap = syscon_node_to_regmap(of_get_parent(np)); -+ if (IS_ERR(regmap)) -+ return; - -- clk = at91_clk_register_master(pmc, irq, name, num_parents, -+ clk = at91_clk_register_master(regmap, name, num_parents, - parent_names, layout, - characteristics); - if (IS_ERR(clk)) -@@ -256,14 +234,16 @@ - kfree(characteristics); - } - --void __init of_at91rm9200_clk_master_setup(struct device_node *np, -- struct at91_pmc *pmc) -+static void __init of_at91rm9200_clk_master_setup(struct device_node *np) - { -- of_at91_clk_master_setup(np, pmc, &at91rm9200_master_layout); -+ of_at91_clk_master_setup(np, &at91rm9200_master_layout); - } -+CLK_OF_DECLARE(at91rm9200_clk_master, "atmel,at91rm9200-clk-master", -+ of_at91rm9200_clk_master_setup); - --void __init of_at91sam9x5_clk_master_setup(struct device_node *np, -- struct at91_pmc *pmc) -+static void __init of_at91sam9x5_clk_master_setup(struct device_node *np) - { -- of_at91_clk_master_setup(np, pmc, &at91sam9x5_master_layout); -+ of_at91_clk_master_setup(np, &at91sam9x5_master_layout); - } -+CLK_OF_DECLARE(at91sam9x5_clk_master, "atmel,at91sam9x5-clk-master", -+ of_at91sam9x5_clk_master_setup); -diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-peripheral.c linux-4.4.13/drivers/clk/at91/clk-peripheral.c ---- linux-4.4.13.orig/drivers/clk/at91/clk-peripheral.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/clk/at91/clk-peripheral.c 2016-06-22 21:55:07.998523867 +0200 -@@ -12,11 +12,13 @@ - #include - #include - #include --#include --#include -+#include -+#include - - #include "pmc.h" - -+DEFINE_SPINLOCK(pmc_pcr_lock); -+ - #define PERIPHERAL_MAX 64 - - #define PERIPHERAL_AT91RM9200 0 -@@ -33,7 +35,7 @@ - - struct clk_peripheral { - struct clk_hw hw; -- struct at91_pmc *pmc; -+ struct regmap *regmap; - u32 id; - }; - -@@ -41,8 +43,9 @@ - - struct clk_sam9x5_peripheral { - struct clk_hw hw; -- struct at91_pmc *pmc; -+ struct regmap *regmap; - struct clk_range range; -+ spinlock_t *lock; - u32 id; - u32 div; - bool auto_div; -@@ -54,7 +57,6 @@ - static int clk_peripheral_enable(struct clk_hw *hw) - { - struct clk_peripheral *periph = to_clk_peripheral(hw); -- struct at91_pmc *pmc = periph->pmc; - int offset = AT91_PMC_PCER; - u32 id = periph->id; - -@@ -62,14 +64,14 @@ - return 0; - if (id > PERIPHERAL_ID_MAX) - offset = AT91_PMC_PCER1; -- pmc_write(pmc, offset, PERIPHERAL_MASK(id)); -+ regmap_write(periph->regmap, offset, PERIPHERAL_MASK(id)); -+ - return 0; - } - - static void clk_peripheral_disable(struct clk_hw *hw) - { - struct clk_peripheral *periph = to_clk_peripheral(hw); -- struct at91_pmc *pmc = periph->pmc; - int offset = AT91_PMC_PCDR; - u32 id = periph->id; - -@@ -77,21 +79,23 @@ - return; - if (id > PERIPHERAL_ID_MAX) - offset = AT91_PMC_PCDR1; -- pmc_write(pmc, offset, PERIPHERAL_MASK(id)); -+ regmap_write(periph->regmap, offset, PERIPHERAL_MASK(id)); - } - - static int clk_peripheral_is_enabled(struct clk_hw *hw) - { - struct clk_peripheral *periph = to_clk_peripheral(hw); -- struct at91_pmc *pmc = periph->pmc; - int offset = AT91_PMC_PCSR; -+ unsigned int status; - u32 id = periph->id; - - if (id < PERIPHERAL_ID_MIN) - return 1; - if (id > PERIPHERAL_ID_MAX) - offset = AT91_PMC_PCSR1; -- return !!(pmc_read(pmc, offset) & PERIPHERAL_MASK(id)); -+ regmap_read(periph->regmap, offset, &status); -+ 
-+ return status & PERIPHERAL_MASK(id) ? 1 : 0; - } - - static const struct clk_ops peripheral_ops = { -@@ -101,14 +105,14 @@ - }; - - static struct clk * __init --at91_clk_register_peripheral(struct at91_pmc *pmc, const char *name, -+at91_clk_register_peripheral(struct regmap *regmap, const char *name, - const char *parent_name, u32 id) - { - struct clk_peripheral *periph; - struct clk *clk = NULL; - struct clk_init_data init; - -- if (!pmc || !name || !parent_name || id > PERIPHERAL_ID_MAX) -+ if (!name || !parent_name || id > PERIPHERAL_ID_MAX) - return ERR_PTR(-EINVAL); - - periph = kzalloc(sizeof(*periph), GFP_KERNEL); -@@ -123,7 +127,7 @@ - - periph->id = id; - periph->hw.init = &init; -- periph->pmc = pmc; -+ periph->regmap = regmap; - - clk = clk_register(NULL, &periph->hw); - if (IS_ERR(clk)) -@@ -160,53 +164,58 @@ - static int clk_sam9x5_peripheral_enable(struct clk_hw *hw) - { - struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw); -- struct at91_pmc *pmc = periph->pmc; -- u32 tmp; -+ unsigned long flags; - - if (periph->id < PERIPHERAL_ID_MIN) - return 0; - -- pmc_lock(pmc); -- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK)); -- tmp = pmc_read(pmc, AT91_PMC_PCR) & ~AT91_PMC_PCR_DIV_MASK; -- pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_DIV(periph->div) -- | AT91_PMC_PCR_CMD -- | AT91_PMC_PCR_EN); -- pmc_unlock(pmc); -+ spin_lock_irqsave(periph->lock, flags); -+ regmap_write(periph->regmap, AT91_PMC_PCR, -+ (periph->id & AT91_PMC_PCR_PID_MASK)); -+ regmap_update_bits(periph->regmap, AT91_PMC_PCR, -+ AT91_PMC_PCR_DIV_MASK | AT91_PMC_PCR_CMD | -+ AT91_PMC_PCR_EN, -+ AT91_PMC_PCR_DIV(periph->div) | -+ AT91_PMC_PCR_CMD | -+ AT91_PMC_PCR_EN); -+ spin_unlock_irqrestore(periph->lock, flags); -+ - return 0; - } - - static void clk_sam9x5_peripheral_disable(struct clk_hw *hw) - { - struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw); -- struct at91_pmc *pmc = periph->pmc; -- u32 tmp; -+ unsigned long flags; - - if (periph->id < PERIPHERAL_ID_MIN) - return; - -- pmc_lock(pmc); -- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK)); -- tmp = pmc_read(pmc, AT91_PMC_PCR) & ~AT91_PMC_PCR_EN; -- pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_CMD); -- pmc_unlock(pmc); -+ spin_lock_irqsave(periph->lock, flags); -+ regmap_write(periph->regmap, AT91_PMC_PCR, -+ (periph->id & AT91_PMC_PCR_PID_MASK)); -+ regmap_update_bits(periph->regmap, AT91_PMC_PCR, -+ AT91_PMC_PCR_EN | AT91_PMC_PCR_CMD, -+ AT91_PMC_PCR_CMD); -+ spin_unlock_irqrestore(periph->lock, flags); - } - - static int clk_sam9x5_peripheral_is_enabled(struct clk_hw *hw) - { - struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw); -- struct at91_pmc *pmc = periph->pmc; -- int ret; -+ unsigned long flags; -+ unsigned int status; - - if (periph->id < PERIPHERAL_ID_MIN) - return 1; - -- pmc_lock(pmc); -- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK)); -- ret = !!(pmc_read(pmc, AT91_PMC_PCR) & AT91_PMC_PCR_EN); -- pmc_unlock(pmc); -+ spin_lock_irqsave(periph->lock, flags); -+ regmap_write(periph->regmap, AT91_PMC_PCR, -+ (periph->id & AT91_PMC_PCR_PID_MASK)); -+ regmap_read(periph->regmap, AT91_PMC_PCR, &status); -+ spin_unlock_irqrestore(periph->lock, flags); - -- return ret; -+ return status & AT91_PMC_PCR_EN ? 
1 : 0; - } - - static unsigned long -@@ -214,19 +223,20 @@ - unsigned long parent_rate) - { - struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw); -- struct at91_pmc *pmc = periph->pmc; -- u32 tmp; -+ unsigned long flags; -+ unsigned int status; - - if (periph->id < PERIPHERAL_ID_MIN) - return parent_rate; - -- pmc_lock(pmc); -- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK)); -- tmp = pmc_read(pmc, AT91_PMC_PCR); -- pmc_unlock(pmc); -+ spin_lock_irqsave(periph->lock, flags); -+ regmap_write(periph->regmap, AT91_PMC_PCR, -+ (periph->id & AT91_PMC_PCR_PID_MASK)); -+ regmap_read(periph->regmap, AT91_PMC_PCR, &status); -+ spin_unlock_irqrestore(periph->lock, flags); - -- if (tmp & AT91_PMC_PCR_EN) { -- periph->div = PERIPHERAL_RSHIFT(tmp); -+ if (status & AT91_PMC_PCR_EN) { -+ periph->div = PERIPHERAL_RSHIFT(status); - periph->auto_div = false; - } else { - clk_sam9x5_peripheral_autodiv(periph); -@@ -318,15 +328,15 @@ - }; - - static struct clk * __init --at91_clk_register_sam9x5_peripheral(struct at91_pmc *pmc, const char *name, -- const char *parent_name, u32 id, -- const struct clk_range *range) -+at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock, -+ const char *name, const char *parent_name, -+ u32 id, const struct clk_range *range) - { - struct clk_sam9x5_peripheral *periph; - struct clk *clk = NULL; - struct clk_init_data init; - -- if (!pmc || !name || !parent_name) -+ if (!name || !parent_name) - return ERR_PTR(-EINVAL); - - periph = kzalloc(sizeof(*periph), GFP_KERNEL); -@@ -342,7 +352,8 @@ - periph->id = id; - periph->hw.init = &init; - periph->div = 0; -- periph->pmc = pmc; -+ periph->regmap = regmap; -+ periph->lock = lock; - periph->auto_div = true; - periph->range = *range; - -@@ -356,7 +367,7 @@ - } - - static void __init --of_at91_clk_periph_setup(struct device_node *np, struct at91_pmc *pmc, u8 type) -+of_at91_clk_periph_setup(struct device_node *np, u8 type) - { - int num; - u32 id; -@@ -364,6 +375,7 @@ - const char *parent_name; - const char *name; - struct device_node *periphclknp; -+ struct regmap *regmap; - - parent_name = of_clk_get_parent_name(np, 0); - if (!parent_name) -@@ -373,6 +385,10 @@ - if (!num || num > PERIPHERAL_MAX) - return; - -+ regmap = syscon_node_to_regmap(of_get_parent(np)); -+ if (IS_ERR(regmap)) -+ return; -+ - for_each_child_of_node(np, periphclknp) { - if (of_property_read_u32(periphclknp, "reg", &id)) - continue; -@@ -384,7 +400,7 @@ - name = periphclknp->name; - - if (type == PERIPHERAL_AT91RM9200) { -- clk = at91_clk_register_peripheral(pmc, name, -+ clk = at91_clk_register_peripheral(regmap, name, - parent_name, id); - } else { - struct clk_range range = CLK_RANGE(0, 0); -@@ -393,7 +409,9 @@ - "atmel,clk-output-range", - &range); - -- clk = at91_clk_register_sam9x5_peripheral(pmc, name, -+ clk = at91_clk_register_sam9x5_peripheral(regmap, -+ &pmc_pcr_lock, -+ name, - parent_name, - id, &range); - } -@@ -405,14 +423,16 @@ - } - } - --void __init of_at91rm9200_clk_periph_setup(struct device_node *np, -- struct at91_pmc *pmc) -+static void __init of_at91rm9200_clk_periph_setup(struct device_node *np) - { -- of_at91_clk_periph_setup(np, pmc, PERIPHERAL_AT91RM9200); -+ of_at91_clk_periph_setup(np, PERIPHERAL_AT91RM9200); - } -+CLK_OF_DECLARE(at91rm9200_clk_periph, "atmel,at91rm9200-clk-peripheral", -+ of_at91rm9200_clk_periph_setup); - --void __init of_at91sam9x5_clk_periph_setup(struct device_node *np, -- struct at91_pmc *pmc) -+static void __init of_at91sam9x5_clk_periph_setup(struct 
device_node *np) - { -- of_at91_clk_periph_setup(np, pmc, PERIPHERAL_AT91SAM9X5); -+ of_at91_clk_periph_setup(np, PERIPHERAL_AT91SAM9X5); - } -+CLK_OF_DECLARE(at91sam9x5_clk_periph, "atmel,at91sam9x5-clk-peripheral", -+ of_at91sam9x5_clk_periph_setup); -diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-pll.c linux-4.4.13/drivers/clk/at91/clk-pll.c ---- linux-4.4.13.orig/drivers/clk/at91/clk-pll.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/clk/at91/clk-pll.c 2016-06-22 21:55:07.998523867 +0200 -@@ -12,14 +12,8 @@ - #include - #include - #include --#include --#include --#include --#include --#include --#include --#include --#include -+#include -+#include - - #include "pmc.h" - -@@ -58,9 +52,7 @@ - - struct clk_pll { - struct clk_hw hw; -- struct at91_pmc *pmc; -- unsigned int irq; -- wait_queue_head_t wait; -+ struct regmap *regmap; - u8 id; - u8 div; - u8 range; -@@ -69,20 +61,19 @@ - const struct clk_pll_characteristics *characteristics; - }; - --static irqreturn_t clk_pll_irq_handler(int irq, void *dev_id) -+static inline bool clk_pll_ready(struct regmap *regmap, int id) - { -- struct clk_pll *pll = (struct clk_pll *)dev_id; -+ unsigned int status; - -- wake_up(&pll->wait); -- disable_irq_nosync(pll->irq); -+ regmap_read(regmap, AT91_PMC_SR, &status); - -- return IRQ_HANDLED; -+ return status & PLL_STATUS_MASK(id) ? 1 : 0; - } - - static int clk_pll_prepare(struct clk_hw *hw) - { - struct clk_pll *pll = to_clk_pll(hw); -- struct at91_pmc *pmc = pll->pmc; -+ struct regmap *regmap = pll->regmap; - const struct clk_pll_layout *layout = pll->layout; - const struct clk_pll_characteristics *characteristics = - pll->characteristics; -@@ -90,39 +81,34 @@ - u32 mask = PLL_STATUS_MASK(id); - int offset = PLL_REG(id); - u8 out = 0; -- u32 pllr, icpr; -+ unsigned int pllr; -+ unsigned int status; - u8 div; - u16 mul; - -- pllr = pmc_read(pmc, offset); -+ regmap_read(regmap, offset, &pllr); - div = PLL_DIV(pllr); - mul = PLL_MUL(pllr, layout); - -- if ((pmc_read(pmc, AT91_PMC_SR) & mask) && -+ regmap_read(regmap, AT91_PMC_SR, &status); -+ if ((status & mask) && - (div == pll->div && mul == pll->mul)) - return 0; - - if (characteristics->out) - out = characteristics->out[pll->range]; -- if (characteristics->icpll) { -- icpr = pmc_read(pmc, AT91_PMC_PLLICPR) & ~PLL_ICPR_MASK(id); -- icpr |= (characteristics->icpll[pll->range] << -- PLL_ICPR_SHIFT(id)); -- pmc_write(pmc, AT91_PMC_PLLICPR, icpr); -- } - -- pllr &= ~layout->pllr_mask; -- pllr |= layout->pllr_mask & -- (pll->div | (PLL_MAX_COUNT << PLL_COUNT_SHIFT) | -- (out << PLL_OUT_SHIFT) | -- ((pll->mul & layout->mul_mask) << layout->mul_shift)); -- pmc_write(pmc, offset, pllr); -- -- while (!(pmc_read(pmc, AT91_PMC_SR) & mask)) { -- enable_irq(pll->irq); -- wait_event(pll->wait, -- pmc_read(pmc, AT91_PMC_SR) & mask); -- } -+ if (characteristics->icpll) -+ regmap_update_bits(regmap, AT91_PMC_PLLICPR, PLL_ICPR_MASK(id), -+ characteristics->icpll[pll->range] << PLL_ICPR_SHIFT(id)); -+ -+ regmap_update_bits(regmap, offset, layout->pllr_mask, -+ pll->div | (PLL_MAX_COUNT << PLL_COUNT_SHIFT) | -+ (out << PLL_OUT_SHIFT) | -+ ((pll->mul & layout->mul_mask) << layout->mul_shift)); -+ -+ while (!clk_pll_ready(regmap, pll->id)) -+ cpu_relax(); - - return 0; - } -@@ -130,32 +116,35 @@ - static int clk_pll_is_prepared(struct clk_hw *hw) - { - struct clk_pll *pll = to_clk_pll(hw); -- struct at91_pmc *pmc = pll->pmc; - -- return !!(pmc_read(pmc, AT91_PMC_SR) & -- PLL_STATUS_MASK(pll->id)); -+ return clk_pll_ready(pll->regmap, pll->id); - } - - 
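Every driver converted in these hunks repeats one idiom: the privately mapped at91_pmc pointer, the PMC interrupt handler and the wait_event() bookkeeping are dropped, the clock holds a syscon regmap instead, and the prepare path polls the relevant AT91_PMC_SR ready bit with cpu_relax(). A minimal sketch of that idiom, distilled from the hunks rather than copied from any single file — the example_* names are placeholders, and AT91_PMC_MCKRDY stands in for whichever ready bit a given clock polls (e.g. AT91_PMC_LOCKU for the UTMI PLL):

#include <linux/clk/at91_pmc.h>
#include <linux/regmap.h>

static inline bool example_clk_ready(struct regmap *regmap)
{
	unsigned int status;

	/* Reads go through the shared syscon regmap; no private
	 * ioremap and no pmc_lock()/pmc_unlock() around the access. */
	regmap_read(regmap, AT91_PMC_SR, &status);

	return status & AT91_PMC_MCKRDY;
}

static int example_clk_prepare(struct regmap *regmap)
{
	/* Busy-wait on the status bit instead of re-enabling the PMC
	 * irq and sleeping on a waitqueue, as the deleted code did. */
	while (!example_clk_ready(regmap))
		cpu_relax();

	return 0;
}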
static void clk_pll_unprepare(struct clk_hw *hw) - { - struct clk_pll *pll = to_clk_pll(hw); -- struct at91_pmc *pmc = pll->pmc; -- const struct clk_pll_layout *layout = pll->layout; -- int offset = PLL_REG(pll->id); -- u32 tmp = pmc_read(pmc, offset) & ~(layout->pllr_mask); -+ unsigned int mask = pll->layout->pllr_mask; - -- pmc_write(pmc, offset, tmp); -+ regmap_update_bits(pll->regmap, PLL_REG(pll->id), mask, ~mask); - } - - static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) - { - struct clk_pll *pll = to_clk_pll(hw); -+ unsigned int pllr; -+ u16 mul; -+ u8 div; -+ -+ regmap_read(pll->regmap, PLL_REG(pll->id), &pllr); -+ -+ div = PLL_DIV(pllr); -+ mul = PLL_MUL(pllr, pll->layout); - -- if (!pll->div || !pll->mul) -+ if (!div || !mul) - return 0; - -- return (parent_rate / pll->div) * (pll->mul + 1); -+ return (parent_rate / div) * (mul + 1); - } - - static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate, -@@ -308,7 +297,7 @@ - }; - - static struct clk * __init --at91_clk_register_pll(struct at91_pmc *pmc, unsigned int irq, const char *name, -+at91_clk_register_pll(struct regmap *regmap, const char *name, - const char *parent_name, u8 id, - const struct clk_pll_layout *layout, - const struct clk_pll_characteristics *characteristics) -@@ -316,9 +305,8 @@ - struct clk_pll *pll; - struct clk *clk = NULL; - struct clk_init_data init; -- int ret; - int offset = PLL_REG(id); -- u32 tmp; -+ unsigned int pllr; - - if (id > PLL_MAX_ID) - return ERR_PTR(-EINVAL); -@@ -337,23 +325,13 @@ - pll->hw.init = &init; - pll->layout = layout; - pll->characteristics = characteristics; -- pll->pmc = pmc; -- pll->irq = irq; -- tmp = pmc_read(pmc, offset) & layout->pllr_mask; -- pll->div = PLL_DIV(tmp); -- pll->mul = PLL_MUL(tmp, layout); -- init_waitqueue_head(&pll->wait); -- irq_set_status_flags(pll->irq, IRQ_NOAUTOEN); -- ret = request_irq(pll->irq, clk_pll_irq_handler, IRQF_TRIGGER_HIGH, -- id ? 
"clk-pllb" : "clk-plla", pll); -- if (ret) { -- kfree(pll); -- return ERR_PTR(ret); -- } -+ pll->regmap = regmap; -+ regmap_read(regmap, offset, &pllr); -+ pll->div = PLL_DIV(pllr); -+ pll->mul = PLL_MUL(pllr, layout); - - clk = clk_register(NULL, &pll->hw); - if (IS_ERR(clk)) { -- free_irq(pll->irq, pll); - kfree(pll); - } - -@@ -483,12 +461,12 @@ - } - - static void __init --of_at91_clk_pll_setup(struct device_node *np, struct at91_pmc *pmc, -+of_at91_clk_pll_setup(struct device_node *np, - const struct clk_pll_layout *layout) - { - u32 id; -- unsigned int irq; - struct clk *clk; -+ struct regmap *regmap; - const char *parent_name; - const char *name = np->name; - struct clk_pll_characteristics *characteristics; -@@ -500,15 +478,15 @@ - - of_property_read_string(np, "clock-output-names", &name); - -- characteristics = of_at91_clk_pll_get_characteristics(np); -- if (!characteristics) -+ regmap = syscon_node_to_regmap(of_get_parent(np)); -+ if (IS_ERR(regmap)) - return; - -- irq = irq_of_parse_and_map(np, 0); -- if (!irq) -+ characteristics = of_at91_clk_pll_get_characteristics(np); -+ if (!characteristics) - return; - -- clk = at91_clk_register_pll(pmc, irq, name, parent_name, id, layout, -+ clk = at91_clk_register_pll(regmap, name, parent_name, id, layout, - characteristics); - if (IS_ERR(clk)) - goto out_free_characteristics; -@@ -520,26 +498,30 @@ - kfree(characteristics); - } - --void __init of_at91rm9200_clk_pll_setup(struct device_node *np, -- struct at91_pmc *pmc) -+static void __init of_at91rm9200_clk_pll_setup(struct device_node *np) - { -- of_at91_clk_pll_setup(np, pmc, &at91rm9200_pll_layout); -+ of_at91_clk_pll_setup(np, &at91rm9200_pll_layout); - } -+CLK_OF_DECLARE(at91rm9200_clk_pll, "atmel,at91rm9200-clk-pll", -+ of_at91rm9200_clk_pll_setup); - --void __init of_at91sam9g45_clk_pll_setup(struct device_node *np, -- struct at91_pmc *pmc) -+static void __init of_at91sam9g45_clk_pll_setup(struct device_node *np) - { -- of_at91_clk_pll_setup(np, pmc, &at91sam9g45_pll_layout); -+ of_at91_clk_pll_setup(np, &at91sam9g45_pll_layout); - } -+CLK_OF_DECLARE(at91sam9g45_clk_pll, "atmel,at91sam9g45-clk-pll", -+ of_at91sam9g45_clk_pll_setup); - --void __init of_at91sam9g20_clk_pllb_setup(struct device_node *np, -- struct at91_pmc *pmc) -+static void __init of_at91sam9g20_clk_pllb_setup(struct device_node *np) - { -- of_at91_clk_pll_setup(np, pmc, &at91sam9g20_pllb_layout); -+ of_at91_clk_pll_setup(np, &at91sam9g20_pllb_layout); - } -+CLK_OF_DECLARE(at91sam9g20_clk_pllb, "atmel,at91sam9g20-clk-pllb", -+ of_at91sam9g20_clk_pllb_setup); - --void __init of_sama5d3_clk_pll_setup(struct device_node *np, -- struct at91_pmc *pmc) -+static void __init of_sama5d3_clk_pll_setup(struct device_node *np) - { -- of_at91_clk_pll_setup(np, pmc, &sama5d3_pll_layout); -+ of_at91_clk_pll_setup(np, &sama5d3_pll_layout); - } -+CLK_OF_DECLARE(sama5d3_clk_pll, "atmel,sama5d3-clk-pll", -+ of_sama5d3_clk_pll_setup); -diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-plldiv.c linux-4.4.13/drivers/clk/at91/clk-plldiv.c ---- linux-4.4.13.orig/drivers/clk/at91/clk-plldiv.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/clk/at91/clk-plldiv.c 2016-06-22 21:55:07.998523867 +0200 -@@ -12,8 +12,8 @@ - #include - #include - #include --#include --#include -+#include -+#include - - #include "pmc.h" - -@@ -21,16 +21,18 @@ - - struct clk_plldiv { - struct clk_hw hw; -- struct at91_pmc *pmc; -+ struct regmap *regmap; - }; - - static unsigned long clk_plldiv_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) 
- { - struct clk_plldiv *plldiv = to_clk_plldiv(hw); -- struct at91_pmc *pmc = plldiv->pmc; -+ unsigned int mckr; - -- if (pmc_read(pmc, AT91_PMC_MCKR) & AT91_PMC_PLLADIV2) -+ regmap_read(plldiv->regmap, AT91_PMC_MCKR, &mckr); -+ -+ if (mckr & AT91_PMC_PLLADIV2) - return parent_rate / 2; - - return parent_rate; -@@ -57,18 +59,12 @@ - unsigned long parent_rate) - { - struct clk_plldiv *plldiv = to_clk_plldiv(hw); -- struct at91_pmc *pmc = plldiv->pmc; -- u32 tmp; - -- if (parent_rate != rate && (parent_rate / 2) != rate) -+ if ((parent_rate != rate) && (parent_rate / 2 != rate)) - return -EINVAL; - -- pmc_lock(pmc); -- tmp = pmc_read(pmc, AT91_PMC_MCKR) & ~AT91_PMC_PLLADIV2; -- if ((parent_rate / 2) == rate) -- tmp |= AT91_PMC_PLLADIV2; -- pmc_write(pmc, AT91_PMC_MCKR, tmp); -- pmc_unlock(pmc); -+ regmap_update_bits(plldiv->regmap, AT91_PMC_MCKR, AT91_PMC_PLLADIV2, -+ parent_rate != rate ? AT91_PMC_PLLADIV2 : 0); - - return 0; - } -@@ -80,7 +76,7 @@ - }; - - static struct clk * __init --at91_clk_register_plldiv(struct at91_pmc *pmc, const char *name, -+at91_clk_register_plldiv(struct regmap *regmap, const char *name, - const char *parent_name) - { - struct clk_plldiv *plldiv; -@@ -98,7 +94,7 @@ - init.flags = CLK_SET_RATE_GATE; - - plldiv->hw.init = &init; -- plldiv->pmc = pmc; -+ plldiv->regmap = regmap; - - clk = clk_register(NULL, &plldiv->hw); - -@@ -109,27 +105,27 @@ - } - - static void __init --of_at91_clk_plldiv_setup(struct device_node *np, struct at91_pmc *pmc) -+of_at91sam9x5_clk_plldiv_setup(struct device_node *np) - { - struct clk *clk; - const char *parent_name; - const char *name = np->name; -+ struct regmap *regmap; - - parent_name = of_clk_get_parent_name(np, 0); - - of_property_read_string(np, "clock-output-names", &name); - -- clk = at91_clk_register_plldiv(pmc, name, parent_name); -+ regmap = syscon_node_to_regmap(of_get_parent(np)); -+ if (IS_ERR(regmap)) -+ return; - -+ clk = at91_clk_register_plldiv(regmap, name, parent_name); - if (IS_ERR(clk)) - return; - - of_clk_add_provider(np, of_clk_src_simple_get, clk); - return; - } -- --void __init of_at91sam9x5_clk_plldiv_setup(struct device_node *np, -- struct at91_pmc *pmc) --{ -- of_at91_clk_plldiv_setup(np, pmc); --} -+CLK_OF_DECLARE(at91sam9x5_clk_plldiv, "atmel,at91sam9x5-clk-plldiv", -+ of_at91sam9x5_clk_plldiv_setup); -diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-programmable.c linux-4.4.13/drivers/clk/at91/clk-programmable.c ---- linux-4.4.13.orig/drivers/clk/at91/clk-programmable.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/clk/at91/clk-programmable.c 2016-06-22 21:55:07.998523867 +0200 -@@ -12,10 +12,8 @@ - #include - #include - #include --#include --#include --#include --#include -+#include -+#include - - #include "pmc.h" - -@@ -24,6 +22,7 @@ - - #define PROG_STATUS_MASK(id) (1 << ((id) + 8)) - #define PROG_PRES_MASK 0x7 -+#define PROG_PRES(layout, pckr) ((pckr >> layout->pres_shift) & PROG_PRES_MASK) - #define PROG_MAX_RM9200_CSS 3 - - struct clk_programmable_layout { -@@ -34,7 +33,7 @@ - - struct clk_programmable { - struct clk_hw hw; -- struct at91_pmc *pmc; -+ struct regmap *regmap; - u8 id; - const struct clk_programmable_layout *layout; - }; -@@ -44,14 +43,12 @@ - static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) - { -- u32 pres; - struct clk_programmable *prog = to_clk_programmable(hw); -- struct at91_pmc *pmc = prog->pmc; -- const struct clk_programmable_layout *layout = prog->layout; -+ unsigned int pckr; -+ -+ 
regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr); - -- pres = (pmc_read(pmc, AT91_PMC_PCKR(prog->id)) >> layout->pres_shift) & -- PROG_PRES_MASK; -- return parent_rate >> pres; -+ return parent_rate >> PROG_PRES(prog->layout, pckr); - } - - static int clk_programmable_determine_rate(struct clk_hw *hw, -@@ -101,36 +98,36 @@ - { - struct clk_programmable *prog = to_clk_programmable(hw); - const struct clk_programmable_layout *layout = prog->layout; -- struct at91_pmc *pmc = prog->pmc; -- u32 tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)) & ~layout->css_mask; -+ unsigned int mask = layout->css_mask; -+ unsigned int pckr = 0; - - if (layout->have_slck_mck) -- tmp &= AT91_PMC_CSSMCK_MCK; -+ mask |= AT91_PMC_CSSMCK_MCK; - - if (index > layout->css_mask) { -- if (index > PROG_MAX_RM9200_CSS && layout->have_slck_mck) { -- tmp |= AT91_PMC_CSSMCK_MCK; -- return 0; -- } else { -+ if (index > PROG_MAX_RM9200_CSS && !layout->have_slck_mck) - return -EINVAL; -- } -+ -+ pckr |= AT91_PMC_CSSMCK_MCK; - } - -- pmc_write(pmc, AT91_PMC_PCKR(prog->id), tmp | index); -+ regmap_update_bits(prog->regmap, AT91_PMC_PCKR(prog->id), mask, pckr); -+ - return 0; - } - - static u8 clk_programmable_get_parent(struct clk_hw *hw) - { -- u32 tmp; -- u8 ret; - struct clk_programmable *prog = to_clk_programmable(hw); -- struct at91_pmc *pmc = prog->pmc; - const struct clk_programmable_layout *layout = prog->layout; -+ unsigned int pckr; -+ u8 ret; -+ -+ regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr); -+ -+ ret = pckr & layout->css_mask; - -- tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)); -- ret = tmp & layout->css_mask; -- if (layout->have_slck_mck && (tmp & AT91_PMC_CSSMCK_MCK) && !ret) -+ if (layout->have_slck_mck && (pckr & AT91_PMC_CSSMCK_MCK) && !ret) - ret = PROG_MAX_RM9200_CSS + 1; - - return ret; -@@ -140,26 +137,27 @@ - unsigned long parent_rate) - { - struct clk_programmable *prog = to_clk_programmable(hw); -- struct at91_pmc *pmc = prog->pmc; - const struct clk_programmable_layout *layout = prog->layout; - unsigned long div = parent_rate / rate; -+ unsigned int pckr; - int shift = 0; -- u32 tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)) & -- ~(PROG_PRES_MASK << layout->pres_shift); -+ -+ regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr); - - if (!div) - return -EINVAL; - - shift = fls(div) - 1; - -- if (div != (1<<shift)) -+ if (div != (1 << shift)) - return -EINVAL; - - if (shift >= PROG_PRES_MASK) - return -EINVAL; - -- pmc_write(pmc, AT91_PMC_PCKR(prog->id), -- tmp | (shift << layout->pres_shift)); -+ regmap_update_bits(prog->regmap, AT91_PMC_PCKR(prog->id), -+ PROG_PRES_MASK << layout->pres_shift, -+ shift << layout->pres_shift); - - return 0; - } -@@ -173,7 +171,7 @@ - }; - - static struct clk * __init --at91_clk_register_programmable(struct at91_pmc *pmc, -+at91_clk_register_programmable(struct regmap *regmap, - const char *name, const char **parent_names, - u8 num_parents, u8 id, - const struct clk_programmable_layout *layout) -@@ -198,7 +196,7 @@ - prog->id = id; - prog->layout = layout; - prog->hw.init = &init; -- prog->pmc = pmc; -+ prog->regmap = regmap; - - clk = clk_register(NULL, &prog->hw); - if (IS_ERR(clk)) -@@ -226,7 +224,7 @@ - }; - - static void __init --of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc, -+of_at91_clk_prog_setup(struct device_node *np, - const struct clk_programmable_layout *layout) - { - int num; -@@ -236,6 +234,7 @@ - const char *parent_names[PROG_SOURCE_MAX]; - const char *name; - struct device_node *progclknp; -+ struct regmap *regmap; - - num_parents = of_clk_get_parent_count(np); - if (num_parents <= 
0 || num_parents > PROG_SOURCE_MAX) -@@ -247,6 +246,10 @@ - if (!num || num > (PROG_ID_MAX + 1)) - return; - -+ regmap = syscon_node_to_regmap(of_get_parent(np)); -+ if (IS_ERR(regmap)) -+ return; -+ - for_each_child_of_node(np, progclknp) { - if (of_property_read_u32(progclknp, "reg", &id)) - continue; -@@ -254,7 +257,7 @@ - if (of_property_read_string(np, "clock-output-names", &name)) - name = progclknp->name; - -- clk = at91_clk_register_programmable(pmc, name, -+ clk = at91_clk_register_programmable(regmap, name, - parent_names, num_parents, - id, layout); - if (IS_ERR(clk)) -@@ -265,20 +268,23 @@ - } - - --void __init of_at91rm9200_clk_prog_setup(struct device_node *np, -- struct at91_pmc *pmc) -+static void __init of_at91rm9200_clk_prog_setup(struct device_node *np) - { -- of_at91_clk_prog_setup(np, pmc, &at91rm9200_programmable_layout); -+ of_at91_clk_prog_setup(np, &at91rm9200_programmable_layout); - } -+CLK_OF_DECLARE(at91rm9200_clk_prog, "atmel,at91rm9200-clk-programmable", -+ of_at91rm9200_clk_prog_setup); - --void __init of_at91sam9g45_clk_prog_setup(struct device_node *np, -- struct at91_pmc *pmc) -+static void __init of_at91sam9g45_clk_prog_setup(struct device_node *np) - { -- of_at91_clk_prog_setup(np, pmc, &at91sam9g45_programmable_layout); -+ of_at91_clk_prog_setup(np, &at91sam9g45_programmable_layout); - } -+CLK_OF_DECLARE(at91sam9g45_clk_prog, "atmel,at91sam9g45-clk-programmable", -+ of_at91sam9g45_clk_prog_setup); - --void __init of_at91sam9x5_clk_prog_setup(struct device_node *np, -- struct at91_pmc *pmc) -+static void __init of_at91sam9x5_clk_prog_setup(struct device_node *np) - { -- of_at91_clk_prog_setup(np, pmc, &at91sam9x5_programmable_layout); -+ of_at91_clk_prog_setup(np, &at91sam9x5_programmable_layout); - } -+CLK_OF_DECLARE(at91sam9x5_clk_prog, "atmel,at91sam9x5-clk-programmable", -+ of_at91sam9x5_clk_prog_setup); -diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-slow.c linux-4.4.13/drivers/clk/at91/clk-slow.c ---- linux-4.4.13.orig/drivers/clk/at91/clk-slow.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/clk/at91/clk-slow.c 2016-06-22 21:55:07.998523867 +0200 -@@ -13,17 +13,11 @@ - #include - #include - #include --#include - #include - #include - #include --#include --#include --#include --#include --#include --#include --#include -+#include -+#include - - #include "pmc.h" - #include "sckc.h" -@@ -59,7 +53,7 @@ - - struct clk_sam9260_slow { - struct clk_hw hw; -- struct at91_pmc *pmc; -+ struct regmap *regmap; - }; - - #define to_clk_sam9260_slow(hw) container_of(hw, struct clk_sam9260_slow, hw) -@@ -393,8 +387,11 @@ - static u8 clk_sam9260_slow_get_parent(struct clk_hw *hw) - { - struct clk_sam9260_slow *slowck = to_clk_sam9260_slow(hw); -+ unsigned int status; - -- return !!(pmc_read(slowck->pmc, AT91_PMC_SR) & AT91_PMC_OSCSEL); -+ regmap_read(slowck->regmap, AT91_PMC_SR, &status); -+ -+ return status & AT91_PMC_OSCSEL ? 
1 : 0; - } - - static const struct clk_ops sam9260_slow_ops = { -@@ -402,7 +399,7 @@ - }; - - static struct clk * __init --at91_clk_register_sam9260_slow(struct at91_pmc *pmc, -+at91_clk_register_sam9260_slow(struct regmap *regmap, - const char *name, - const char **parent_names, - int num_parents) -@@ -411,7 +408,7 @@ - struct clk *clk = NULL; - struct clk_init_data init; - -- if (!pmc || !name) -+ if (!name) - return ERR_PTR(-EINVAL); - - if (!parent_names || !num_parents) -@@ -428,7 +425,7 @@ - init.flags = 0; - - slowck->hw.init = &init; -- slowck->pmc = pmc; -+ slowck->regmap = regmap; - - clk = clk_register(NULL, &slowck->hw); - if (IS_ERR(clk)) -@@ -439,29 +436,34 @@ - return clk; - } - --void __init of_at91sam9260_clk_slow_setup(struct device_node *np, -- struct at91_pmc *pmc) -+static void __init of_at91sam9260_clk_slow_setup(struct device_node *np) - { - struct clk *clk; - const char *parent_names[2]; - int num_parents; - const char *name = np->name; -+ struct regmap *regmap; - - num_parents = of_clk_get_parent_count(np); - if (num_parents != 2) - return; - - of_clk_parent_fill(np, parent_names, num_parents); -+ regmap = syscon_node_to_regmap(of_get_parent(np)); -+ if (IS_ERR(regmap)) -+ return; - - of_property_read_string(np, "clock-output-names", &name); - -- clk = at91_clk_register_sam9260_slow(pmc, name, parent_names, -+ clk = at91_clk_register_sam9260_slow(regmap, name, parent_names, - num_parents); - if (IS_ERR(clk)) - return; - - of_clk_add_provider(np, of_clk_src_simple_get, clk); - } -+CLK_OF_DECLARE(at91sam9260_clk_slow, "atmel,at91sam9260-clk-slow", -+ of_at91sam9260_clk_slow_setup); - - /* - * FIXME: All slow clk users are not properly claiming it (get + prepare + -diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-smd.c linux-4.4.13/drivers/clk/at91/clk-smd.c ---- linux-4.4.13.orig/drivers/clk/at91/clk-smd.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/clk/at91/clk-smd.c 2016-06-22 21:55:07.998523867 +0200 -@@ -12,8 +12,8 @@ - #include - #include - #include --#include --#include -+#include -+#include - - #include "pmc.h" - -@@ -24,7 +24,7 @@ - - struct at91sam9x5_clk_smd { - struct clk_hw hw; -- struct at91_pmc *pmc; -+ struct regmap *regmap; - }; - - #define to_at91sam9x5_clk_smd(hw) \ -@@ -33,13 +33,13 @@ - static unsigned long at91sam9x5_clk_smd_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) - { -- u32 tmp; -- u8 smddiv; - struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw); -- struct at91_pmc *pmc = smd->pmc; -+ unsigned int smdr; -+ u8 smddiv; -+ -+ regmap_read(smd->regmap, AT91_PMC_SMD, &smdr); -+ smddiv = (smdr & AT91_PMC_SMD_DIV) >> SMD_DIV_SHIFT; - -- tmp = pmc_read(pmc, AT91_PMC_SMD); -- smddiv = (tmp & AT91_PMC_SMD_DIV) >> SMD_DIV_SHIFT; - return parent_rate / (smddiv + 1); - } - -@@ -67,40 +67,38 @@ - - static int at91sam9x5_clk_smd_set_parent(struct clk_hw *hw, u8 index) - { -- u32 tmp; - struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw); -- struct at91_pmc *pmc = smd->pmc; - - if (index > 1) - return -EINVAL; -- tmp = pmc_read(pmc, AT91_PMC_SMD) & ~AT91_PMC_SMDS; -- if (index) -- tmp |= AT91_PMC_SMDS; -- pmc_write(pmc, AT91_PMC_SMD, tmp); -+ -+ regmap_update_bits(smd->regmap, AT91_PMC_SMD, AT91_PMC_SMDS, -+ index ? 
AT91_PMC_SMDS : 0); -+ - return 0; - } - - static u8 at91sam9x5_clk_smd_get_parent(struct clk_hw *hw) - { - struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw); -- struct at91_pmc *pmc = smd->pmc; -+ unsigned int smdr; - -- return pmc_read(pmc, AT91_PMC_SMD) & AT91_PMC_SMDS; -+ regmap_read(smd->regmap, AT91_PMC_SMD, &smdr); -+ -+ return smdr & AT91_PMC_SMDS; - } - - static int at91sam9x5_clk_smd_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) - { -- u32 tmp; - struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw); -- struct at91_pmc *pmc = smd->pmc; - unsigned long div = parent_rate / rate; - - if (parent_rate % rate || div < 1 || div > (SMD_MAX_DIV + 1)) - return -EINVAL; -- tmp = pmc_read(pmc, AT91_PMC_SMD) & ~AT91_PMC_SMD_DIV; -- tmp |= (div - 1) << SMD_DIV_SHIFT; -- pmc_write(pmc, AT91_PMC_SMD, tmp); -+ -+ regmap_update_bits(smd->regmap, AT91_PMC_SMD, AT91_PMC_SMD_DIV, -+ (div - 1) << SMD_DIV_SHIFT); - - return 0; - } -@@ -114,7 +112,7 @@ - }; - - static struct clk * __init --at91sam9x5_clk_register_smd(struct at91_pmc *pmc, const char *name, -+at91sam9x5_clk_register_smd(struct regmap *regmap, const char *name, - const char **parent_names, u8 num_parents) - { - struct at91sam9x5_clk_smd *smd; -@@ -132,7 +130,7 @@ - init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE; - - smd->hw.init = &init; -- smd->pmc = pmc; -+ smd->regmap = regmap; - - clk = clk_register(NULL, &smd->hw); - if (IS_ERR(clk)) -@@ -141,13 +139,13 @@ - return clk; - } - --void __init of_at91sam9x5_clk_smd_setup(struct device_node *np, -- struct at91_pmc *pmc) -+static void __init of_at91sam9x5_clk_smd_setup(struct device_node *np) - { - struct clk *clk; - int num_parents; - const char *parent_names[SMD_SOURCE_MAX]; - const char *name = np->name; -+ struct regmap *regmap; - - num_parents = of_clk_get_parent_count(np); - if (num_parents <= 0 || num_parents > SMD_SOURCE_MAX) -@@ -157,10 +155,16 @@ - - of_property_read_string(np, "clock-output-names", &name); - -- clk = at91sam9x5_clk_register_smd(pmc, name, parent_names, -+ regmap = syscon_node_to_regmap(of_get_parent(np)); -+ if (IS_ERR(regmap)) -+ return; -+ -+ clk = at91sam9x5_clk_register_smd(regmap, name, parent_names, - num_parents); - if (IS_ERR(clk)) - return; - - of_clk_add_provider(np, of_clk_src_simple_get, clk); - } -+CLK_OF_DECLARE(at91sam9x5_clk_smd, "atmel,at91sam9x5-clk-smd", -+ of_at91sam9x5_clk_smd_setup); -diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-system.c linux-4.4.13/drivers/clk/at91/clk-system.c ---- linux-4.4.13.orig/drivers/clk/at91/clk-system.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/clk/at91/clk-system.c 2016-06-22 21:55:07.998523867 +0200 -@@ -12,13 +12,8 @@ - #include - #include - #include --#include --#include --#include --#include --#include --#include --#include -+#include -+#include - - #include "pmc.h" - -@@ -29,9 +24,7 @@ - #define to_clk_system(hw) container_of(hw, struct clk_system, hw) - struct clk_system { - struct clk_hw hw; -- struct at91_pmc *pmc; -- unsigned int irq; -- wait_queue_head_t wait; -+ struct regmap *regmap; - u8 id; - }; - -@@ -39,58 +32,54 @@ - { - return (id >= 8) && (id <= 15); - } --static irqreturn_t clk_system_irq_handler(int irq, void *dev_id) -+ -+static inline bool clk_system_ready(struct regmap *regmap, int id) - { -- struct clk_system *sys = (struct clk_system *)dev_id; -+ unsigned int status; - -- wake_up(&sys->wait); -- disable_irq_nosync(sys->irq); -+ regmap_read(regmap, AT91_PMC_SR, &status); - -- return IRQ_HANDLED; -+ return status 
& (1 << id) ? 1 : 0; - } - - static int clk_system_prepare(struct clk_hw *hw) - { - struct clk_system *sys = to_clk_system(hw); -- struct at91_pmc *pmc = sys->pmc; -- u32 mask = 1 << sys->id; - -- pmc_write(pmc, AT91_PMC_SCER, mask); -+ regmap_write(sys->regmap, AT91_PMC_SCER, 1 << sys->id); - - if (!is_pck(sys->id)) - return 0; - -- while (!(pmc_read(pmc, AT91_PMC_SR) & mask)) { -- if (sys->irq) { -- enable_irq(sys->irq); -- wait_event(sys->wait, -- pmc_read(pmc, AT91_PMC_SR) & mask); -- } else -- cpu_relax(); -- } -+ while (!clk_system_ready(sys->regmap, sys->id)) -+ cpu_relax(); -+ - return 0; - } - - static void clk_system_unprepare(struct clk_hw *hw) - { - struct clk_system *sys = to_clk_system(hw); -- struct at91_pmc *pmc = sys->pmc; - -- pmc_write(pmc, AT91_PMC_SCDR, 1 << sys->id); -+ regmap_write(sys->regmap, AT91_PMC_SCDR, 1 << sys->id); - } - - static int clk_system_is_prepared(struct clk_hw *hw) - { - struct clk_system *sys = to_clk_system(hw); -- struct at91_pmc *pmc = sys->pmc; -+ unsigned int status; -+ -+ regmap_read(sys->regmap, AT91_PMC_SCSR, &status); - -- if (!(pmc_read(pmc, AT91_PMC_SCSR) & (1 << sys->id))) -+ if (!(status & (1 << sys->id))) - return 0; - - if (!is_pck(sys->id)) - return 1; - -- return !!(pmc_read(pmc, AT91_PMC_SR) & (1 << sys->id)); -+ regmap_read(sys->regmap, AT91_PMC_SR, &status); -+ -+ return status & (1 << sys->id) ? 1 : 0; - } - - static const struct clk_ops system_ops = { -@@ -100,13 +89,12 @@ - }; - - static struct clk * __init --at91_clk_register_system(struct at91_pmc *pmc, const char *name, -- const char *parent_name, u8 id, int irq) -+at91_clk_register_system(struct regmap *regmap, const char *name, -+ const char *parent_name, u8 id) - { - struct clk_system *sys; - struct clk *clk = NULL; - struct clk_init_data init; -- int ret; - - if (!parent_name || id > SYSTEM_MAX_ID) - return ERR_PTR(-EINVAL); -@@ -123,44 +111,33 @@ - - sys->id = id; - sys->hw.init = &init; -- sys->pmc = pmc; -- sys->irq = irq; -- if (irq) { -- init_waitqueue_head(&sys->wait); -- irq_set_status_flags(sys->irq, IRQ_NOAUTOEN); -- ret = request_irq(sys->irq, clk_system_irq_handler, -- IRQF_TRIGGER_HIGH, name, sys); -- if (ret) { -- kfree(sys); -- return ERR_PTR(ret); -- } -- } -+ sys->regmap = regmap; - - clk = clk_register(NULL, &sys->hw); -- if (IS_ERR(clk)) { -- if (irq) -- free_irq(sys->irq, sys); -+ if (IS_ERR(clk)) - kfree(sys); -- } - - return clk; - } - --static void __init --of_at91_clk_sys_setup(struct device_node *np, struct at91_pmc *pmc) -+static void __init of_at91rm9200_clk_sys_setup(struct device_node *np) - { - int num; -- int irq = 0; - u32 id; - struct clk *clk; - const char *name; - struct device_node *sysclknp; - const char *parent_name; -+ struct regmap *regmap; - - num = of_get_child_count(np); - if (num > (SYSTEM_MAX_ID + 1)) - return; - -+ regmap = syscon_node_to_regmap(of_get_parent(np)); -+ if (IS_ERR(regmap)) -+ return; -+ - for_each_child_of_node(np, sysclknp) { - if (of_property_read_u32(sysclknp, "reg", &id)) - continue; -@@ -168,21 +145,14 @@ - if (of_property_read_string(np, "clock-output-names", &name)) - name = sysclknp->name; - -- if (is_pck(id)) -- irq = irq_of_parse_and_map(sysclknp, 0); -- - parent_name = of_clk_get_parent_name(sysclknp, 0); - -- clk = at91_clk_register_system(pmc, name, parent_name, id, irq); -+ clk = at91_clk_register_system(regmap, name, parent_name, id); - if (IS_ERR(clk)) - continue; - - of_clk_add_provider(sysclknp, of_clk_src_simple_get, clk); - } - } -- --void __init of_at91rm9200_clk_sys_setup(struct 
device_node *np, -- struct at91_pmc *pmc) --{ -- of_at91_clk_sys_setup(np, pmc); --} -+CLK_OF_DECLARE(at91rm9200_clk_sys, "atmel,at91rm9200-clk-system", -+ of_at91rm9200_clk_sys_setup); -diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-usb.c linux-4.4.13/drivers/clk/at91/clk-usb.c ---- linux-4.4.13.orig/drivers/clk/at91/clk-usb.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/clk/at91/clk-usb.c 2016-06-22 21:55:07.998523867 +0200 -@@ -12,8 +12,8 @@ - #include - #include - #include --#include --#include -+#include -+#include - - #include "pmc.h" - -@@ -27,7 +27,7 @@ - - struct at91sam9x5_clk_usb { - struct clk_hw hw; -- struct at91_pmc *pmc; -+ struct regmap *regmap; - }; - - #define to_at91sam9x5_clk_usb(hw) \ -@@ -35,7 +35,7 @@ - - struct at91rm9200_clk_usb { - struct clk_hw hw; -- struct at91_pmc *pmc; -+ struct regmap *regmap; - u32 divisors[4]; - }; - -@@ -45,13 +45,12 @@ - static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw, - unsigned long parent_rate) - { -- u32 tmp; -- u8 usbdiv; - struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); -- struct at91_pmc *pmc = usb->pmc; -+ unsigned int usbr; -+ u8 usbdiv; - -- tmp = pmc_read(pmc, AT91_PMC_USB); -- usbdiv = (tmp & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT; -+ regmap_read(usb->regmap, AT91_PMC_USB, &usbr); -+ usbdiv = (usbr & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT; - - return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1)); - } -@@ -109,33 +108,31 @@ - - static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index) - { -- u32 tmp; - struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); -- struct at91_pmc *pmc = usb->pmc; - - if (index > 1) - return -EINVAL; -- tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_USBS; -- if (index) -- tmp |= AT91_PMC_USBS; -- pmc_write(pmc, AT91_PMC_USB, tmp); -+ -+ regmap_update_bits(usb->regmap, AT91_PMC_USB, AT91_PMC_USBS, -+ index ? 
AT91_PMC_USBS : 0); -+ - return 0; - } - - static u8 at91sam9x5_clk_usb_get_parent(struct clk_hw *hw) - { - struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); -- struct at91_pmc *pmc = usb->pmc; -+ unsigned int usbr; - -- return pmc_read(pmc, AT91_PMC_USB) & AT91_PMC_USBS; -+ regmap_read(usb->regmap, AT91_PMC_USB, &usbr); -+ -+ return usbr & AT91_PMC_USBS; - } - - static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) - { -- u32 tmp; - struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); -- struct at91_pmc *pmc = usb->pmc; - unsigned long div; - - if (!rate) -@@ -145,9 +142,8 @@ - if (div > SAM9X5_USB_MAX_DIV + 1 || !div) - return -EINVAL; - -- tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_OHCIUSBDIV; -- tmp |= (div - 1) << SAM9X5_USB_DIV_SHIFT; -- pmc_write(pmc, AT91_PMC_USB, tmp); -+ regmap_update_bits(usb->regmap, AT91_PMC_USB, AT91_PMC_OHCIUSBDIV, -+ (div - 1) << SAM9X5_USB_DIV_SHIFT); - - return 0; - } -@@ -163,28 +159,28 @@ - static int at91sam9n12_clk_usb_enable(struct clk_hw *hw) - { - struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); -- struct at91_pmc *pmc = usb->pmc; - -- pmc_write(pmc, AT91_PMC_USB, -- pmc_read(pmc, AT91_PMC_USB) | AT91_PMC_USBS); -+ regmap_update_bits(usb->regmap, AT91_PMC_USB, AT91_PMC_USBS, -+ AT91_PMC_USBS); -+ - return 0; - } - - static void at91sam9n12_clk_usb_disable(struct clk_hw *hw) - { - struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); -- struct at91_pmc *pmc = usb->pmc; - -- pmc_write(pmc, AT91_PMC_USB, -- pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_USBS); -+ regmap_update_bits(usb->regmap, AT91_PMC_USB, AT91_PMC_USBS, 0); - } - - static int at91sam9n12_clk_usb_is_enabled(struct clk_hw *hw) - { - struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); -- struct at91_pmc *pmc = usb->pmc; -+ unsigned int usbr; - -- return !!(pmc_read(pmc, AT91_PMC_USB) & AT91_PMC_USBS); -+ regmap_read(usb->regmap, AT91_PMC_USB, &usbr); -+ -+ return usbr & AT91_PMC_USBS; - } - - static const struct clk_ops at91sam9n12_usb_ops = { -@@ -197,7 +193,7 @@ - }; - - static struct clk * __init --at91sam9x5_clk_register_usb(struct at91_pmc *pmc, const char *name, -+at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name, - const char **parent_names, u8 num_parents) - { - struct at91sam9x5_clk_usb *usb; -@@ -216,7 +212,7 @@ - CLK_SET_RATE_PARENT; - - usb->hw.init = &init; -- usb->pmc = pmc; -+ usb->regmap = regmap; - - clk = clk_register(NULL, &usb->hw); - if (IS_ERR(clk)) -@@ -226,7 +222,7 @@ - } - - static struct clk * __init --at91sam9n12_clk_register_usb(struct at91_pmc *pmc, const char *name, -+at91sam9n12_clk_register_usb(struct regmap *regmap, const char *name, - const char *parent_name) - { - struct at91sam9x5_clk_usb *usb; -@@ -244,7 +240,7 @@ - init.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT; - - usb->hw.init = &init; -- usb->pmc = pmc; -+ usb->regmap = regmap; - - clk = clk_register(NULL, &usb->hw); - if (IS_ERR(clk)) -@@ -257,12 +253,12 @@ - unsigned long parent_rate) - { - struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw); -- struct at91_pmc *pmc = usb->pmc; -- u32 tmp; -+ unsigned int pllbr; - u8 usbdiv; - -- tmp = pmc_read(pmc, AT91_CKGR_PLLBR); -- usbdiv = (tmp & AT91_PMC_USBDIV) >> RM9200_USB_DIV_SHIFT; -+ regmap_read(usb->regmap, AT91_CKGR_PLLBR, &pllbr); -+ -+ usbdiv = (pllbr & AT91_PMC_USBDIV) >> RM9200_USB_DIV_SHIFT; - if (usb->divisors[usbdiv]) - return parent_rate / usb->divisors[usbdiv]; - -@@ -310,10 +306,8 @@ - static int 
at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate, - unsigned long parent_rate) - { -- u32 tmp; - int i; - struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw); -- struct at91_pmc *pmc = usb->pmc; - unsigned long div; - - if (!rate) -@@ -323,10 +317,10 @@ - - for (i = 0; i < RM9200_USB_DIV_TAB_SIZE; i++) { - if (usb->divisors[i] == div) { -- tmp = pmc_read(pmc, AT91_CKGR_PLLBR) & -- ~AT91_PMC_USBDIV; -- tmp |= i << RM9200_USB_DIV_SHIFT; -- pmc_write(pmc, AT91_CKGR_PLLBR, tmp); -+ regmap_update_bits(usb->regmap, AT91_CKGR_PLLBR, -+ AT91_PMC_USBDIV, -+ i << RM9200_USB_DIV_SHIFT); -+ - return 0; - } - } -@@ -341,7 +335,7 @@ - }; - - static struct clk * __init --at91rm9200_clk_register_usb(struct at91_pmc *pmc, const char *name, -+at91rm9200_clk_register_usb(struct regmap *regmap, const char *name, - const char *parent_name, const u32 *divisors) - { - struct at91rm9200_clk_usb *usb; -@@ -359,7 +353,7 @@ - init.flags = CLK_SET_RATE_PARENT; - - usb->hw.init = &init; -- usb->pmc = pmc; -+ usb->regmap = regmap; - memcpy(usb->divisors, divisors, sizeof(usb->divisors)); - - clk = clk_register(NULL, &usb->hw); -@@ -369,13 +363,13 @@ - return clk; - } - --void __init of_at91sam9x5_clk_usb_setup(struct device_node *np, -- struct at91_pmc *pmc) -+static void __init of_at91sam9x5_clk_usb_setup(struct device_node *np) - { - struct clk *clk; - int num_parents; - const char *parent_names[USB_SOURCE_MAX]; - const char *name = np->name; -+ struct regmap *regmap; - - num_parents = of_clk_get_parent_count(np); - if (num_parents <= 0 || num_parents > USB_SOURCE_MAX) -@@ -385,19 +379,26 @@ - - of_property_read_string(np, "clock-output-names", &name); - -- clk = at91sam9x5_clk_register_usb(pmc, name, parent_names, num_parents); -+ regmap = syscon_node_to_regmap(of_get_parent(np)); -+ if (IS_ERR(regmap)) -+ return; -+ -+ clk = at91sam9x5_clk_register_usb(regmap, name, parent_names, -+ num_parents); - if (IS_ERR(clk)) - return; - - of_clk_add_provider(np, of_clk_src_simple_get, clk); - } -+CLK_OF_DECLARE(at91sam9x5_clk_usb, "atmel,at91sam9x5-clk-usb", -+ of_at91sam9x5_clk_usb_setup); - --void __init of_at91sam9n12_clk_usb_setup(struct device_node *np, -- struct at91_pmc *pmc) -+static void __init of_at91sam9n12_clk_usb_setup(struct device_node *np) - { - struct clk *clk; - const char *parent_name; - const char *name = np->name; -+ struct regmap *regmap; - - parent_name = of_clk_get_parent_name(np, 0); - if (!parent_name) -@@ -405,20 +406,26 @@ - - of_property_read_string(np, "clock-output-names", &name); - -- clk = at91sam9n12_clk_register_usb(pmc, name, parent_name); -+ regmap = syscon_node_to_regmap(of_get_parent(np)); -+ if (IS_ERR(regmap)) -+ return; -+ -+ clk = at91sam9n12_clk_register_usb(regmap, name, parent_name); - if (IS_ERR(clk)) - return; - - of_clk_add_provider(np, of_clk_src_simple_get, clk); - } -+CLK_OF_DECLARE(at91sam9n12_clk_usb, "atmel,at91sam9n12-clk-usb", -+ of_at91sam9n12_clk_usb_setup); - --void __init of_at91rm9200_clk_usb_setup(struct device_node *np, -- struct at91_pmc *pmc) -+static void __init of_at91rm9200_clk_usb_setup(struct device_node *np) - { - struct clk *clk; - const char *parent_name; - const char *name = np->name; - u32 divisors[4] = {0, 0, 0, 0}; -+ struct regmap *regmap; - - parent_name = of_clk_get_parent_name(np, 0); - if (!parent_name) -@@ -430,9 +437,15 @@ - - of_property_read_string(np, "clock-output-names", &name); - -- clk = at91rm9200_clk_register_usb(pmc, name, parent_name, divisors); -+ regmap = syscon_node_to_regmap(of_get_parent(np)); -+ if 
(IS_ERR(regmap)) -+ return; -+ -+ clk = at91rm9200_clk_register_usb(regmap, name, parent_name, divisors); - if (IS_ERR(clk)) - return; - - of_clk_add_provider(np, of_clk_src_simple_get, clk); - } -+CLK_OF_DECLARE(at91rm9200_clk_usb, "atmel,at91rm9200-clk-usb", -+ of_at91rm9200_clk_usb_setup); -diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-utmi.c linux-4.4.13/drivers/clk/at91/clk-utmi.c ---- linux-4.4.13.orig/drivers/clk/at91/clk-utmi.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/clk/at91/clk-utmi.c 2016-06-22 21:55:07.998523867 +0200 -@@ -11,14 +11,9 @@ - #include - #include - #include --#include --#include - #include --#include --#include --#include --#include --#include -+#include -+#include - - #include "pmc.h" - -@@ -26,37 +21,30 @@ - - struct clk_utmi { - struct clk_hw hw; -- struct at91_pmc *pmc; -- unsigned int irq; -- wait_queue_head_t wait; -+ struct regmap *regmap; - }; - - #define to_clk_utmi(hw) container_of(hw, struct clk_utmi, hw) - --static irqreturn_t clk_utmi_irq_handler(int irq, void *dev_id) -+static inline bool clk_utmi_ready(struct regmap *regmap) - { -- struct clk_utmi *utmi = (struct clk_utmi *)dev_id; -+ unsigned int status; - -- wake_up(&utmi->wait); -- disable_irq_nosync(utmi->irq); -+ regmap_read(regmap, AT91_PMC_SR, &status); - -- return IRQ_HANDLED; -+ return status & AT91_PMC_LOCKU; - } - - static int clk_utmi_prepare(struct clk_hw *hw) - { - struct clk_utmi *utmi = to_clk_utmi(hw); -- struct at91_pmc *pmc = utmi->pmc; -- u32 tmp = pmc_read(pmc, AT91_CKGR_UCKR) | AT91_PMC_UPLLEN | -- AT91_PMC_UPLLCOUNT | AT91_PMC_BIASEN; -- -- pmc_write(pmc, AT91_CKGR_UCKR, tmp); -- -- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU)) { -- enable_irq(utmi->irq); -- wait_event(utmi->wait, -- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU); -- } -+ unsigned int uckr = AT91_PMC_UPLLEN | AT91_PMC_UPLLCOUNT | -+ AT91_PMC_BIASEN; -+ -+ regmap_update_bits(utmi->regmap, AT91_CKGR_UCKR, uckr, uckr); -+ -+ while (!clk_utmi_ready(utmi->regmap)) -+ cpu_relax(); - - return 0; - } -@@ -64,18 +52,15 @@ - static int clk_utmi_is_prepared(struct clk_hw *hw) - { - struct clk_utmi *utmi = to_clk_utmi(hw); -- struct at91_pmc *pmc = utmi->pmc; - -- return !!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU); -+ return clk_utmi_ready(utmi->regmap); - } - - static void clk_utmi_unprepare(struct clk_hw *hw) - { - struct clk_utmi *utmi = to_clk_utmi(hw); -- struct at91_pmc *pmc = utmi->pmc; -- u32 tmp = pmc_read(pmc, AT91_CKGR_UCKR) & ~AT91_PMC_UPLLEN; - -- pmc_write(pmc, AT91_CKGR_UCKR, tmp); -+ regmap_update_bits(utmi->regmap, AT91_CKGR_UCKR, AT91_PMC_UPLLEN, 0); - } - - static unsigned long clk_utmi_recalc_rate(struct clk_hw *hw, -@@ -93,10 +78,9 @@ - }; - - static struct clk * __init --at91_clk_register_utmi(struct at91_pmc *pmc, unsigned int irq, -+at91_clk_register_utmi(struct regmap *regmap, - const char *name, const char *parent_name) - { -- int ret; - struct clk_utmi *utmi; - struct clk *clk = NULL; - struct clk_init_data init; -@@ -112,52 +96,36 @@ - init.flags = CLK_SET_RATE_GATE; - - utmi->hw.init = &init; -- utmi->pmc = pmc; -- utmi->irq = irq; -- init_waitqueue_head(&utmi->wait); -- irq_set_status_flags(utmi->irq, IRQ_NOAUTOEN); -- ret = request_irq(utmi->irq, clk_utmi_irq_handler, -- IRQF_TRIGGER_HIGH, "clk-utmi", utmi); -- if (ret) { -- kfree(utmi); -- return ERR_PTR(ret); -- } -+ utmi->regmap = regmap; - - clk = clk_register(NULL, &utmi->hw); -- if (IS_ERR(clk)) { -- free_irq(utmi->irq, utmi); -+ if (IS_ERR(clk)) - kfree(utmi); -- } - - return clk; - } - --static void 
__init --of_at91_clk_utmi_setup(struct device_node *np, struct at91_pmc *pmc) -+static void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np) - { -- unsigned int irq; - struct clk *clk; - const char *parent_name; - const char *name = np->name; -+ struct regmap *regmap; - - parent_name = of_clk_get_parent_name(np, 0); - - of_property_read_string(np, "clock-output-names", &name); - -- irq = irq_of_parse_and_map(np, 0); -- if (!irq) -+ regmap = syscon_node_to_regmap(of_get_parent(np)); -+ if (IS_ERR(regmap)) - return; - -- clk = at91_clk_register_utmi(pmc, irq, name, parent_name); -+ clk = at91_clk_register_utmi(regmap, name, parent_name); - if (IS_ERR(clk)) - return; - - of_clk_add_provider(np, of_clk_src_simple_get, clk); - return; - } -- --void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np, -- struct at91_pmc *pmc) --{ -- of_at91_clk_utmi_setup(np, pmc); --} -+CLK_OF_DECLARE(at91sam9x5_clk_utmi, "atmel,at91sam9x5-clk-utmi", -+ of_at91sam9x5_clk_utmi_setup); -diff -Nur linux-4.4.13.orig/drivers/clk/at91/pmc.c linux-4.4.13/drivers/clk/at91/pmc.c ---- linux-4.4.13.orig/drivers/clk/at91/pmc.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/clk/at91/pmc.c 2016-06-22 21:55:07.998523867 +0200 -@@ -12,36 +12,13 @@ - #include - #include - #include --#include --#include --#include --#include --#include --#include --#include -+#include -+#include - - #include - - #include "pmc.h" - --void __iomem *at91_pmc_base; --EXPORT_SYMBOL_GPL(at91_pmc_base); -- --void at91rm9200_idle(void) --{ -- /* -- * Disable the processor clock. The processor will be automatically -- * re-enabled by an interrupt or by a reset. -- */ -- at91_pmc_write(AT91_PMC_SCDR, AT91_PMC_PCK); --} -- --void at91sam9_idle(void) --{ -- at91_pmc_write(AT91_PMC_SCDR, AT91_PMC_PCK); -- cpu_do_idle(); --} -- - int of_at91_get_clk_range(struct device_node *np, const char *propname, - struct clk_range *range) - { -@@ -64,402 +41,3 @@ - return 0; - } - EXPORT_SYMBOL_GPL(of_at91_get_clk_range); -- --static void pmc_irq_mask(struct irq_data *d) --{ -- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d); -- -- pmc_write(pmc, AT91_PMC_IDR, 1 << d->hwirq); --} -- --static void pmc_irq_unmask(struct irq_data *d) --{ -- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d); -- -- pmc_write(pmc, AT91_PMC_IER, 1 << d->hwirq); --} -- --static int pmc_irq_set_type(struct irq_data *d, unsigned type) --{ -- if (type != IRQ_TYPE_LEVEL_HIGH) { -- pr_warn("PMC: type not supported (support only IRQ_TYPE_LEVEL_HIGH type)\n"); -- return -EINVAL; -- } -- -- return 0; --} -- --static void pmc_irq_suspend(struct irq_data *d) --{ -- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d); -- -- pmc->imr = pmc_read(pmc, AT91_PMC_IMR); -- pmc_write(pmc, AT91_PMC_IDR, pmc->imr); --} -- --static void pmc_irq_resume(struct irq_data *d) --{ -- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d); -- -- pmc_write(pmc, AT91_PMC_IER, pmc->imr); --} -- --static struct irq_chip pmc_irq = { -- .name = "PMC", -- .irq_disable = pmc_irq_mask, -- .irq_mask = pmc_irq_mask, -- .irq_unmask = pmc_irq_unmask, -- .irq_set_type = pmc_irq_set_type, -- .irq_suspend = pmc_irq_suspend, -- .irq_resume = pmc_irq_resume, --}; -- --static struct lock_class_key pmc_lock_class; -- --static int pmc_irq_map(struct irq_domain *h, unsigned int virq, -- irq_hw_number_t hw) --{ -- struct at91_pmc *pmc = h->host_data; -- -- irq_set_lockdep_class(virq, &pmc_lock_class); -- -- irq_set_chip_and_handler(virq, &pmc_irq, -- handle_level_irq); -- irq_set_chip_data(virq, pmc); -- -- 
return 0; --} -- --static int pmc_irq_domain_xlate(struct irq_domain *d, -- struct device_node *ctrlr, -- const u32 *intspec, unsigned int intsize, -- irq_hw_number_t *out_hwirq, -- unsigned int *out_type) --{ -- struct at91_pmc *pmc = d->host_data; -- const struct at91_pmc_caps *caps = pmc->caps; -- -- if (WARN_ON(intsize < 1)) -- return -EINVAL; -- -- *out_hwirq = intspec[0]; -- -- if (!(caps->available_irqs & (1 << *out_hwirq))) -- return -EINVAL; -- -- *out_type = IRQ_TYPE_LEVEL_HIGH; -- -- return 0; --} -- --static const struct irq_domain_ops pmc_irq_ops = { -- .map = pmc_irq_map, -- .xlate = pmc_irq_domain_xlate, --}; -- --static irqreturn_t pmc_irq_handler(int irq, void *data) --{ -- struct at91_pmc *pmc = (struct at91_pmc *)data; -- unsigned long sr; -- int n; -- -- sr = pmc_read(pmc, AT91_PMC_SR) & pmc_read(pmc, AT91_PMC_IMR); -- if (!sr) -- return IRQ_NONE; -- -- for_each_set_bit(n, &sr, BITS_PER_LONG) -- generic_handle_irq(irq_find_mapping(pmc->irqdomain, n)); -- -- return IRQ_HANDLED; --} -- --static const struct at91_pmc_caps at91rm9200_caps = { -- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB | -- AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY | -- AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY | -- AT91_PMC_PCK3RDY, --}; -- --static const struct at91_pmc_caps at91sam9260_caps = { -- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB | -- AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY | -- AT91_PMC_PCK1RDY, --}; -- --static const struct at91_pmc_caps at91sam9g45_caps = { -- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY | -- AT91_PMC_LOCKU | AT91_PMC_PCK0RDY | -- AT91_PMC_PCK1RDY, --}; -- --static const struct at91_pmc_caps at91sam9n12_caps = { -- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB | -- AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY | -- AT91_PMC_PCK1RDY | AT91_PMC_MOSCSELS | -- AT91_PMC_MOSCRCS | AT91_PMC_CFDEV, --}; -- --static const struct at91_pmc_caps at91sam9x5_caps = { -- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY | -- AT91_PMC_LOCKU | AT91_PMC_PCK0RDY | -- AT91_PMC_PCK1RDY | AT91_PMC_MOSCSELS | -- AT91_PMC_MOSCRCS | AT91_PMC_CFDEV, --}; -- --static const struct at91_pmc_caps sama5d2_caps = { -- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY | -- AT91_PMC_LOCKU | AT91_PMC_PCK0RDY | -- AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY | -- AT91_PMC_MOSCSELS | AT91_PMC_MOSCRCS | -- AT91_PMC_CFDEV | AT91_PMC_GCKRDY, --}; -- --static const struct at91_pmc_caps sama5d3_caps = { -- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY | -- AT91_PMC_LOCKU | AT91_PMC_PCK0RDY | -- AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY | -- AT91_PMC_MOSCSELS | AT91_PMC_MOSCRCS | -- AT91_PMC_CFDEV, --}; -- --static struct at91_pmc *__init at91_pmc_init(struct device_node *np, -- void __iomem *regbase, int virq, -- const struct at91_pmc_caps *caps) --{ -- struct at91_pmc *pmc; -- -- if (!regbase || !virq || !caps) -- return NULL; -- -- at91_pmc_base = regbase; -- -- pmc = kzalloc(sizeof(*pmc), GFP_KERNEL); -- if (!pmc) -- return NULL; -- -- spin_lock_init(&pmc->lock); -- pmc->regbase = regbase; -- pmc->virq = virq; -- pmc->caps = caps; -- -- pmc->irqdomain = irq_domain_add_linear(np, 32, &pmc_irq_ops, pmc); -- -- if (!pmc->irqdomain) -- goto out_free_pmc; -- -- pmc_write(pmc, AT91_PMC_IDR, 0xffffffff); -- if (request_irq(pmc->virq, pmc_irq_handler, -- IRQF_SHARED | IRQF_COND_SUSPEND, "pmc", pmc)) -- goto out_remove_irqdomain; -- -- return pmc; -- --out_remove_irqdomain: -- irq_domain_remove(pmc->irqdomain); 
--out_free_pmc: -- kfree(pmc); -- -- return NULL; --} -- --static const struct of_device_id pmc_clk_ids[] __initconst = { -- /* Slow oscillator */ -- { -- .compatible = "atmel,at91sam9260-clk-slow", -- .data = of_at91sam9260_clk_slow_setup, -- }, -- /* Main clock */ -- { -- .compatible = "atmel,at91rm9200-clk-main-osc", -- .data = of_at91rm9200_clk_main_osc_setup, -- }, -- { -- .compatible = "atmel,at91sam9x5-clk-main-rc-osc", -- .data = of_at91sam9x5_clk_main_rc_osc_setup, -- }, -- { -- .compatible = "atmel,at91rm9200-clk-main", -- .data = of_at91rm9200_clk_main_setup, -- }, -- { -- .compatible = "atmel,at91sam9x5-clk-main", -- .data = of_at91sam9x5_clk_main_setup, -- }, -- /* PLL clocks */ -- { -- .compatible = "atmel,at91rm9200-clk-pll", -- .data = of_at91rm9200_clk_pll_setup, -- }, -- { -- .compatible = "atmel,at91sam9g45-clk-pll", -- .data = of_at91sam9g45_clk_pll_setup, -- }, -- { -- .compatible = "atmel,at91sam9g20-clk-pllb", -- .data = of_at91sam9g20_clk_pllb_setup, -- }, -- { -- .compatible = "atmel,sama5d3-clk-pll", -- .data = of_sama5d3_clk_pll_setup, -- }, -- { -- .compatible = "atmel,at91sam9x5-clk-plldiv", -- .data = of_at91sam9x5_clk_plldiv_setup, -- }, -- /* Master clock */ -- { -- .compatible = "atmel,at91rm9200-clk-master", -- .data = of_at91rm9200_clk_master_setup, -- }, -- { -- .compatible = "atmel,at91sam9x5-clk-master", -- .data = of_at91sam9x5_clk_master_setup, -- }, -- /* System clocks */ -- { -- .compatible = "atmel,at91rm9200-clk-system", -- .data = of_at91rm9200_clk_sys_setup, -- }, -- /* Peripheral clocks */ -- { -- .compatible = "atmel,at91rm9200-clk-peripheral", -- .data = of_at91rm9200_clk_periph_setup, -- }, -- { -- .compatible = "atmel,at91sam9x5-clk-peripheral", -- .data = of_at91sam9x5_clk_periph_setup, -- }, -- /* Programmable clocks */ -- { -- .compatible = "atmel,at91rm9200-clk-programmable", -- .data = of_at91rm9200_clk_prog_setup, -- }, -- { -- .compatible = "atmel,at91sam9g45-clk-programmable", -- .data = of_at91sam9g45_clk_prog_setup, -- }, -- { -- .compatible = "atmel,at91sam9x5-clk-programmable", -- .data = of_at91sam9x5_clk_prog_setup, -- }, -- /* UTMI clock */ --#if defined(CONFIG_HAVE_AT91_UTMI) -- { -- .compatible = "atmel,at91sam9x5-clk-utmi", -- .data = of_at91sam9x5_clk_utmi_setup, -- }, --#endif -- /* USB clock */ --#if defined(CONFIG_HAVE_AT91_USB_CLK) -- { -- .compatible = "atmel,at91rm9200-clk-usb", -- .data = of_at91rm9200_clk_usb_setup, -- }, -- { -- .compatible = "atmel,at91sam9x5-clk-usb", -- .data = of_at91sam9x5_clk_usb_setup, -- }, -- { -- .compatible = "atmel,at91sam9n12-clk-usb", -- .data = of_at91sam9n12_clk_usb_setup, -- }, --#endif -- /* SMD clock */ --#if defined(CONFIG_HAVE_AT91_SMD) -- { -- .compatible = "atmel,at91sam9x5-clk-smd", -- .data = of_at91sam9x5_clk_smd_setup, -- }, --#endif --#if defined(CONFIG_HAVE_AT91_H32MX) -- { -- .compatible = "atmel,sama5d4-clk-h32mx", -- .data = of_sama5d4_clk_h32mx_setup, -- }, --#endif --#if defined(CONFIG_HAVE_AT91_GENERATED_CLK) -- { -- .compatible = "atmel,sama5d2-clk-generated", -- .data = of_sama5d2_clk_generated_setup, -- }, --#endif -- { /*sentinel*/ } --}; -- --static void __init of_at91_pmc_setup(struct device_node *np, -- const struct at91_pmc_caps *caps) --{ -- struct at91_pmc *pmc; -- struct device_node *childnp; -- void (*clk_setup)(struct device_node *, struct at91_pmc *); -- const struct of_device_id *clk_id; -- void __iomem *regbase = of_iomap(np, 0); -- int virq; -- -- if (!regbase) -- return; -- -- virq = irq_of_parse_and_map(np, 0); -- if (!virq) -- return; -- -- 
pmc = at91_pmc_init(np, regbase, virq, caps); -- if (!pmc) -- return; -- for_each_child_of_node(np, childnp) { -- clk_id = of_match_node(pmc_clk_ids, childnp); -- if (!clk_id) -- continue; -- clk_setup = clk_id->data; -- clk_setup(childnp, pmc); -- } --} -- --static void __init of_at91rm9200_pmc_setup(struct device_node *np) --{ -- of_at91_pmc_setup(np, &at91rm9200_caps); --} --CLK_OF_DECLARE(at91rm9200_clk_pmc, "atmel,at91rm9200-pmc", -- of_at91rm9200_pmc_setup); -- --static void __init of_at91sam9260_pmc_setup(struct device_node *np) --{ -- of_at91_pmc_setup(np, &at91sam9260_caps); --} --CLK_OF_DECLARE(at91sam9260_clk_pmc, "atmel,at91sam9260-pmc", -- of_at91sam9260_pmc_setup); -- --static void __init of_at91sam9g45_pmc_setup(struct device_node *np) --{ -- of_at91_pmc_setup(np, &at91sam9g45_caps); --} --CLK_OF_DECLARE(at91sam9g45_clk_pmc, "atmel,at91sam9g45-pmc", -- of_at91sam9g45_pmc_setup); -- --static void __init of_at91sam9n12_pmc_setup(struct device_node *np) --{ -- of_at91_pmc_setup(np, &at91sam9n12_caps); --} --CLK_OF_DECLARE(at91sam9n12_clk_pmc, "atmel,at91sam9n12-pmc", -- of_at91sam9n12_pmc_setup); -- --static void __init of_at91sam9x5_pmc_setup(struct device_node *np) --{ -- of_at91_pmc_setup(np, &at91sam9x5_caps); --} --CLK_OF_DECLARE(at91sam9x5_clk_pmc, "atmel,at91sam9x5-pmc", -- of_at91sam9x5_pmc_setup); -- --static void __init of_sama5d2_pmc_setup(struct device_node *np) --{ -- of_at91_pmc_setup(np, &sama5d2_caps); --} --CLK_OF_DECLARE(sama5d2_clk_pmc, "atmel,sama5d2-pmc", -- of_sama5d2_pmc_setup); -- --static void __init of_sama5d3_pmc_setup(struct device_node *np) --{ -- of_at91_pmc_setup(np, &sama5d3_caps); --} --CLK_OF_DECLARE(sama5d3_clk_pmc, "atmel,sama5d3-pmc", -- of_sama5d3_pmc_setup); -diff -Nur linux-4.4.13.orig/drivers/clk/at91/pmc.h linux-4.4.13/drivers/clk/at91/pmc.h ---- linux-4.4.13.orig/drivers/clk/at91/pmc.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/clk/at91/pmc.h 2016-06-22 21:55:07.998523867 +0200 -@@ -14,8 +14,11 @@ - - #include - #include -+#include - #include - -+extern spinlock_t pmc_pcr_lock; -+ - struct clk_range { - unsigned long min; - unsigned long max; -@@ -23,102 +26,7 @@ - - #define CLK_RANGE(MIN, MAX) {.min = MIN, .max = MAX,} - --struct at91_pmc_caps { -- u32 available_irqs; --}; -- --struct at91_pmc { -- void __iomem *regbase; -- int virq; -- spinlock_t lock; -- const struct at91_pmc_caps *caps; -- struct irq_domain *irqdomain; -- u32 imr; --}; -- --static inline void pmc_lock(struct at91_pmc *pmc) --{ -- spin_lock(&pmc->lock); --} -- --static inline void pmc_unlock(struct at91_pmc *pmc) --{ -- spin_unlock(&pmc->lock); --} -- --static inline u32 pmc_read(struct at91_pmc *pmc, int offset) --{ -- return readl(pmc->regbase + offset); --} -- --static inline void pmc_write(struct at91_pmc *pmc, int offset, u32 value) --{ -- writel(value, pmc->regbase + offset); --} -- - int of_at91_get_clk_range(struct device_node *np, const char *propname, - struct clk_range *range); - --void of_at91sam9260_clk_slow_setup(struct device_node *np, -- struct at91_pmc *pmc); -- --void of_at91rm9200_clk_main_osc_setup(struct device_node *np, -- struct at91_pmc *pmc); --void of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np, -- struct at91_pmc *pmc); --void of_at91rm9200_clk_main_setup(struct device_node *np, -- struct at91_pmc *pmc); --void of_at91sam9x5_clk_main_setup(struct device_node *np, -- struct at91_pmc *pmc); -- --void of_at91rm9200_clk_pll_setup(struct device_node *np, -- struct at91_pmc *pmc); --void 
of_at91sam9g45_clk_pll_setup(struct device_node *np, -- struct at91_pmc *pmc); --void of_at91sam9g20_clk_pllb_setup(struct device_node *np, -- struct at91_pmc *pmc); --void of_sama5d3_clk_pll_setup(struct device_node *np, -- struct at91_pmc *pmc); --void of_at91sam9x5_clk_plldiv_setup(struct device_node *np, -- struct at91_pmc *pmc); -- --void of_at91rm9200_clk_master_setup(struct device_node *np, -- struct at91_pmc *pmc); --void of_at91sam9x5_clk_master_setup(struct device_node *np, -- struct at91_pmc *pmc); -- --void of_at91rm9200_clk_sys_setup(struct device_node *np, -- struct at91_pmc *pmc); -- --void of_at91rm9200_clk_periph_setup(struct device_node *np, -- struct at91_pmc *pmc); --void of_at91sam9x5_clk_periph_setup(struct device_node *np, -- struct at91_pmc *pmc); -- --void of_at91rm9200_clk_prog_setup(struct device_node *np, -- struct at91_pmc *pmc); --void of_at91sam9g45_clk_prog_setup(struct device_node *np, -- struct at91_pmc *pmc); --void of_at91sam9x5_clk_prog_setup(struct device_node *np, -- struct at91_pmc *pmc); -- --void of_at91sam9x5_clk_utmi_setup(struct device_node *np, -- struct at91_pmc *pmc); -- --void of_at91rm9200_clk_usb_setup(struct device_node *np, -- struct at91_pmc *pmc); --void of_at91sam9x5_clk_usb_setup(struct device_node *np, -- struct at91_pmc *pmc); --void of_at91sam9n12_clk_usb_setup(struct device_node *np, -- struct at91_pmc *pmc); -- --void of_at91sam9x5_clk_smd_setup(struct device_node *np, -- struct at91_pmc *pmc); -- --void of_sama5d4_clk_h32mx_setup(struct device_node *np, -- struct at91_pmc *pmc); -- --void of_sama5d2_clk_generated_setup(struct device_node *np, -- struct at91_pmc *pmc); -- - #endif /* __PMC_H_ */ -diff -Nur linux-4.4.13.orig/drivers/clocksource/tcb_clksrc.c linux-4.4.13/drivers/clocksource/tcb_clksrc.c ---- linux-4.4.13.orig/drivers/clocksource/tcb_clksrc.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/clocksource/tcb_clksrc.c 2016-06-22 21:55:07.998523867 +0200 -@@ -23,8 +23,7 @@ - * this 32 bit free-running counter. the second channel is not used. - * - * - The third channel may be used to provide a 16-bit clockevent -- * source, used in either periodic or oneshot mode. This runs -- * at 32 KiHZ, and can handle delays of up to two seconds. -+ * source, used in either periodic or oneshot mode. - * - * A boot clocksource and clockevent source are also currently needed, - * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so -@@ -74,6 +73,8 @@ - struct tc_clkevt_device { - struct clock_event_device clkevt; - struct clk *clk; -+ bool clk_enabled; -+ u32 freq; - void __iomem *regs; - }; - -@@ -82,15 +83,26 @@ - return container_of(clkevt, struct tc_clkevt_device, clkevt); - } - --/* For now, we always use the 32K clock ... this optimizes for NO_HZ, -- * because using one of the divided clocks would usually mean the -- * tick rate can never be less than several dozen Hz (vs 0.5 Hz). -- * -- * A divided clock could be good for high resolution timers, since -- * 30.5 usec resolution can seem "low". 
-- */ - static u32 timer_clock; - -+static void tc_clk_disable(struct clock_event_device *d) -+{ -+ struct tc_clkevt_device *tcd = to_tc_clkevt(d); -+ -+ clk_disable(tcd->clk); -+ tcd->clk_enabled = false; -+} -+ -+static void tc_clk_enable(struct clock_event_device *d) -+{ -+ struct tc_clkevt_device *tcd = to_tc_clkevt(d); -+ -+ if (tcd->clk_enabled) -+ return; -+ clk_enable(tcd->clk); -+ tcd->clk_enabled = true; -+} -+ - static int tc_shutdown(struct clock_event_device *d) - { - struct tc_clkevt_device *tcd = to_tc_clkevt(d); -@@ -98,8 +110,14 @@ - - __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR)); - __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR)); -+ return 0; -+} -+ -+static int tc_shutdown_clk_off(struct clock_event_device *d) -+{ -+ tc_shutdown(d); - if (!clockevent_state_detached(d)) -- clk_disable(tcd->clk); -+ tc_clk_disable(d); - - return 0; - } -@@ -112,9 +130,9 @@ - if (clockevent_state_oneshot(d) || clockevent_state_periodic(d)) - tc_shutdown(d); - -- clk_enable(tcd->clk); -+ tc_clk_enable(d); - -- /* slow clock, count up to RC, then irq and stop */ -+ /* count up to RC, then irq and stop */ - __raw_writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE | - ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR)); - __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); -@@ -134,12 +152,12 @@ - /* By not making the gentime core emulate periodic mode on top - * of oneshot, we get lower overhead and improved accuracy. - */ -- clk_enable(tcd->clk); -+ tc_clk_enable(d); - -- /* slow clock, count up to RC, then irq and restart */ -+ /* count up to RC, then irq and restart */ - __raw_writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, - regs + ATMEL_TC_REG(2, CMR)); -- __raw_writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); -+ __raw_writel((tcd->freq + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); - - /* Enable clock and interrupts on RC compare */ - __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); -@@ -166,9 +184,13 @@ - .features = CLOCK_EVT_FEAT_PERIODIC | - CLOCK_EVT_FEAT_ONESHOT, - /* Should be lower than at91rm9200's system timer */ -+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK - .rating = 125, -+#else -+ .rating = 200, -+#endif - .set_next_event = tc_next_event, -- .set_state_shutdown = tc_shutdown, -+ .set_state_shutdown = tc_shutdown_clk_off, - .set_state_periodic = tc_set_periodic, - .set_state_oneshot = tc_set_oneshot, - }, -@@ -188,8 +210,9 @@ - return IRQ_NONE; - } - --static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) -+static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx) - { -+ unsigned divisor = atmel_tc_divisors[divisor_idx]; - int ret; - struct clk *t2_clk = tc->clk[2]; - int irq = tc->irq[2]; -@@ -210,7 +233,11 @@ - clkevt.regs = tc->regs; - clkevt.clk = t2_clk; - -- timer_clock = clk32k_divisor_idx; -+ timer_clock = divisor_idx; -+ if (!divisor) -+ clkevt.freq = 32768; -+ else -+ clkevt.freq = clk_get_rate(t2_clk) / divisor; - - clkevt.clkevt.cpumask = cpumask_of(0); - -@@ -221,7 +248,7 @@ - return ret; - } - -- clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff); -+ clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff); - - return ret; - } -@@ -358,7 +385,11 @@ - goto err_disable_t1; - - /* channel 2: periodic and oneshot timer support */ -+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK - ret = setup_clkevents(tc, clk32k_divisor_idx); -+#else -+ ret = setup_clkevents(tc, best_divisor_idx); -+#endif - if (ret) - goto err_unregister_clksrc; - 
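
The tcb_clksrc hunks above stop hard-coding the 32.768 kHz slow clock: unless CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK is set, the clockevent now runs from the divided master clock, its rating rises from 125 to 200, and its frequency is derived from the selected divisor. A minimal, self-contained sketch of that selection arithmetic (plain C; the names are illustrative, not the driver's symbols):

#include <stdio.h>

#define HZ 100	/* assumed tick rate for the example */

/* divisor == 0 selects the 32.768 kHz slow clock; otherwise the timer
 * runs at the master-clock rate divided by the chosen divisor. */
static unsigned long clkevt_freq(unsigned long divisor, unsigned long mck_rate)
{
	return divisor ? mck_rate / divisor : 32768;
}

int main(void)
{
	unsigned long freq = clkevt_freq(8, 132000000UL); /* e.g. MCK/8 */

	/* Periodic mode reloads RC with freq/HZ ticks, rounded to nearest,
	 * mirroring the (tcd->freq + HZ / 2) / HZ expression above. */
	printf("freq=%lu Hz, RC reload=%lu\n", freq, (freq + HZ / 2) / HZ);
	return 0;
}
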
-diff -Nur linux-4.4.13.orig/drivers/clocksource/timer-atmel-pit.c linux-4.4.13/drivers/clocksource/timer-atmel-pit.c ---- linux-4.4.13.orig/drivers/clocksource/timer-atmel-pit.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/clocksource/timer-atmel-pit.c 2016-06-22 21:55:07.998523867 +0200 -@@ -46,6 +46,7 @@ - u32 cycle; - u32 cnt; - unsigned int irq; -+ bool irq_requested; - struct clk *mck; - }; - -@@ -96,15 +97,29 @@ - - /* disable irq, leaving the clocksource active */ - pit_write(data->base, AT91_PIT_MR, (data->cycle - 1) | AT91_PIT_PITEN); -+ if (data->irq_requested) { -+ free_irq(data->irq, data); -+ data->irq_requested = false; -+ } - return 0; - } - -+static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id); - /* - * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16) - */ - static int pit_clkevt_set_periodic(struct clock_event_device *dev) - { - struct pit_data *data = clkevt_to_pit_data(dev); -+ int ret; -+ -+ ret = request_irq(data->irq, at91sam926x_pit_interrupt, -+ IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, -+ "at91_tick", data); -+ if (ret) -+ panic(pr_fmt("Unable to setup IRQ\n")); -+ -+ data->irq_requested = true; - - /* update clocksource counter */ - data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR)); -@@ -181,7 +196,6 @@ - { - unsigned long pit_rate; - unsigned bits; -- int ret; - - /* - * Use our actual MCK to figure out how many MCK/16 ticks per -@@ -206,13 +220,6 @@ - data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS; - clocksource_register_hz(&data->clksrc, pit_rate); - -- /* Set up irq handler */ -- ret = request_irq(data->irq, at91sam926x_pit_interrupt, -- IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, -- "at91_tick", data); -- if (ret) -- panic(pr_fmt("Unable to setup IRQ\n")); -- - /* Set up and register clockevents */ - data->clkevt.name = "pit"; - data->clkevt.features = CLOCK_EVT_FEAT_PERIODIC; -diff -Nur linux-4.4.13.orig/drivers/clocksource/timer-atmel-st.c linux-4.4.13/drivers/clocksource/timer-atmel-st.c ---- linux-4.4.13.orig/drivers/clocksource/timer-atmel-st.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/clocksource/timer-atmel-st.c 2016-06-22 21:55:07.998523867 +0200 -@@ -115,18 +115,29 @@ - last_crtr = read_CRTR(); - } - -+static int atmel_st_irq; -+ - static int clkevt32k_shutdown(struct clock_event_device *evt) - { - clkdev32k_disable_and_flush_irq(); - irqmask = 0; - regmap_write(regmap_st, AT91_ST_IER, irqmask); -+ free_irq(atmel_st_irq, regmap_st); - return 0; - } - - static int clkevt32k_set_oneshot(struct clock_event_device *dev) - { -+ int ret; -+ - clkdev32k_disable_and_flush_irq(); - -+ ret = request_irq(atmel_st_irq, at91rm9200_timer_interrupt, -+ IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, -+ "at91_tick", regmap_st); -+ if (ret) -+ panic(pr_fmt("Unable to setup IRQ\n")); -+ - /* - * ALM for oneshot irqs, set by next_event() - * before 32 seconds have passed. 
-@@ -139,8 +150,16 @@ - - static int clkevt32k_set_periodic(struct clock_event_device *dev) - { -+ int ret; -+ - clkdev32k_disable_and_flush_irq(); - -+ ret = request_irq(atmel_st_irq, at91rm9200_timer_interrupt, -+ IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, -+ "at91_tick", regmap_st); -+ if (ret) -+ panic(pr_fmt("Unable to setup IRQ\n")); -+ - /* PIT for periodic irqs; fixed rate of 1/HZ */ - irqmask = AT91_ST_PITS; - regmap_write(regmap_st, AT91_ST_PIMR, timer_latch); -@@ -198,7 +217,7 @@ - { - struct clk *sclk; - unsigned int sclk_rate, val; -- int irq, ret; -+ int ret; - - regmap_st = syscon_node_to_regmap(node); - if (IS_ERR(regmap_st)) -@@ -210,17 +229,10 @@ - regmap_read(regmap_st, AT91_ST_SR, &val); - - /* Get the interrupts property */ -- irq = irq_of_parse_and_map(node, 0); -- if (!irq) -+ atmel_st_irq = irq_of_parse_and_map(node, 0); -+ if (!atmel_st_irq) - panic(pr_fmt("Unable to get IRQ from DT\n")); - -- /* Make IRQs happen for the system timer */ -- ret = request_irq(irq, at91rm9200_timer_interrupt, -- IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, -- "at91_tick", regmap_st); -- if (ret) -- panic(pr_fmt("Unable to setup IRQ\n")); -- - sclk = of_clk_get(node, 0); - if (IS_ERR(sclk)) - panic(pr_fmt("Unable to get slow clock\n")); -diff -Nur linux-4.4.13.orig/drivers/cpufreq/Kconfig.x86 linux-4.4.13/drivers/cpufreq/Kconfig.x86 ---- linux-4.4.13.orig/drivers/cpufreq/Kconfig.x86 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/cpufreq/Kconfig.x86 2016-06-22 21:55:07.998523867 +0200 -@@ -123,7 +123,7 @@ - - config X86_POWERNOW_K8 - tristate "AMD Opteron/Athlon64 PowerNow!" -- depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ -+ depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ && !PREEMPT_RT_BASE - help - This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors. - Support for K10 and newer processors is now in acpi-cpufreq. 
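
The two Atmel timer hunks above (timer-atmel-pit.c and timer-atmel-st.c) follow one pattern: request_irq() moves out of early init and into the clockevent set-state callbacks, and the IRQ is freed again on shutdown, so that on an RT kernel the request happens from a context where sleeping is permitted. A condensed kernel-context sketch of that pattern; every name below is a placeholder, not the drivers' actual symbols:

#include <linux/interrupt.h>
#include <linux/clockchips.h>
#include <linux/kernel.h>

static int tick_irq;			/* looked up once from DT at init */
static bool tick_irq_requested;

static irqreturn_t tick_interrupt(int irq, void *dev_id)
{
	/* ack the hardware and invoke the clockevent handler ... */
	return IRQ_HANDLED;
}

static int tick_shutdown(struct clock_event_device *dev)
{
	/* mask the timer interrupt, then drop the handler */
	if (tick_irq_requested) {
		free_irq(tick_irq, dev);
		tick_irq_requested = false;
	}
	return 0;
}

static int tick_set_periodic(struct clock_event_device *dev)
{
	/* (re)request the IRQ here, where sleeping is allowed */
	if (request_irq(tick_irq, tick_interrupt,
			IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
			"my_tick", dev))
		panic("Unable to setup IRQ\n");
	tick_irq_requested = true;
	/* program the periodic reload value ... */
	return 0;
}
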
-diff -Nur linux-4.4.13.orig/drivers/cpuidle/coupled.c linux-4.4.13/drivers/cpuidle/coupled.c ---- linux-4.4.13.orig/drivers/cpuidle/coupled.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/cpuidle/coupled.c 2016-06-22 21:55:07.998523867 +0200 -@@ -119,7 +119,6 @@ - - #define CPUIDLE_COUPLED_NOT_IDLE (-1) - --static DEFINE_MUTEX(cpuidle_coupled_lock); - static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb); - - /* -diff -Nur linux-4.4.13.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-4.4.13/drivers/gpu/drm/i915/i915_gem_execbuffer.c ---- linux-4.4.13.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2016-06-22 21:55:07.998523867 +0200 -@@ -1264,7 +1264,9 @@ - if (ret) - return ret; - -+#ifndef CONFIG_PREEMPT_RT_BASE - trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags); -+#endif - - i915_gem_execbuffer_move_to_active(vmas, params->request); - i915_gem_execbuffer_retire_commands(params); -diff -Nur linux-4.4.13.orig/drivers/gpu/drm/i915/i915_gem_shrinker.c linux-4.4.13/drivers/gpu/drm/i915/i915_gem_shrinker.c ---- linux-4.4.13.orig/drivers/gpu/drm/i915/i915_gem_shrinker.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/gpu/drm/i915/i915_gem_shrinker.c 2016-06-22 21:55:07.998523867 +0200 -@@ -39,7 +39,7 @@ - if (!mutex_is_locked(mutex)) - return false; - --#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) -+#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE) - return mutex->owner == task; - #else - /* Since UP may be pre-empted, we cannot assume that we own the lock */ -diff -Nur linux-4.4.13.orig/drivers/gpu/drm/i915/i915_irq.c linux-4.4.13/drivers/gpu/drm/i915/i915_irq.c ---- linux-4.4.13.orig/drivers/gpu/drm/i915/i915_irq.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/gpu/drm/i915/i915_irq.c 2016-06-22 21:55:08.002523867 +0200 -@@ -812,6 +812,7 @@ - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - - /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ -+ preempt_disable_rt(); - - /* Get optional system timestamp before query. */ - if (stime) -@@ -863,6 +864,7 @@ - *etime = ktime_get(); - - /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. 
*/ -+ preempt_enable_rt(); - - spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); - -diff -Nur linux-4.4.13.orig/drivers/gpu/drm/i915/intel_display.c linux-4.4.13/drivers/gpu/drm/i915/intel_display.c ---- linux-4.4.13.orig/drivers/gpu/drm/i915/intel_display.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/gpu/drm/i915/intel_display.c 2016-06-22 21:55:08.002523867 +0200 -@@ -11376,7 +11376,7 @@ - struct intel_crtc *intel_crtc = to_intel_crtc(crtc); - struct intel_unpin_work *work; - -- WARN_ON(!in_interrupt()); -+ WARN_ON_NONRT(!in_interrupt()); - - if (crtc == NULL) - return; -diff -Nur linux-4.4.13.orig/drivers/gpu/drm/i915/intel_sprite.c linux-4.4.13/drivers/gpu/drm/i915/intel_sprite.c ---- linux-4.4.13.orig/drivers/gpu/drm/i915/intel_sprite.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/gpu/drm/i915/intel_sprite.c 2016-06-22 21:55:08.002523867 +0200 -@@ -38,6 +38,7 @@ - #include "intel_drv.h" - #include - #include "i915_drv.h" -+#include - - static bool - format_is_yuv(uint32_t format) -@@ -64,6 +65,8 @@ - 1000 * adjusted_mode->crtc_htotal); - } - -+static DEFINE_LOCAL_IRQ_LOCK(pipe_update_lock); -+ - /** - * intel_pipe_update_start() - start update of a set of display registers - * @crtc: the crtc of which the registers are going to be updated -@@ -96,7 +99,7 @@ - min = vblank_start - usecs_to_scanlines(adjusted_mode, 100); - max = vblank_start - 1; - -- local_irq_disable(); -+ local_lock_irq(pipe_update_lock); - - if (min <= 0 || max <= 0) - return; -@@ -126,11 +129,11 @@ - break; - } - -- local_irq_enable(); -+ local_unlock_irq(pipe_update_lock); - - timeout = schedule_timeout(timeout); - -- local_irq_disable(); -+ local_lock_irq(pipe_update_lock); - } - - finish_wait(wq, &wait); -@@ -164,7 +167,7 @@ - - trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end); - -- local_irq_enable(); -+ local_unlock_irq(pipe_update_lock); - - if (crtc->debug.start_vbl_count && - crtc->debug.start_vbl_count != end_vbl_count) { -diff -Nur linux-4.4.13.orig/drivers/gpu/drm/radeon/radeon_display.c linux-4.4.13/drivers/gpu/drm/radeon/radeon_display.c ---- linux-4.4.13.orig/drivers/gpu/drm/radeon/radeon_display.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/gpu/drm/radeon/radeon_display.c 2016-06-22 21:55:08.002523867 +0200 -@@ -1862,6 +1862,7 @@ - struct radeon_device *rdev = dev->dev_private; - - /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ -+ preempt_disable_rt(); - - /* Get optional system timestamp before query. */ - if (stime) -@@ -1954,6 +1955,7 @@ - *etime = ktime_get(); - - /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */ -+ preempt_enable_rt(); - - /* Decode into vertical and horizontal scanout position. 
*/ - *vpos = position & 0x1fff; -diff -Nur linux-4.4.13.orig/drivers/i2c/busses/i2c-omap.c linux-4.4.13/drivers/i2c/busses/i2c-omap.c ---- linux-4.4.13.orig/drivers/i2c/busses/i2c-omap.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/i2c/busses/i2c-omap.c 2016-06-22 21:55:08.006523867 +0200 -@@ -995,15 +995,12 @@ - u16 mask; - u16 stat; - -- spin_lock(&omap->lock); -- mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG); - stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG); -+ mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG); - - if (stat & mask) - ret = IRQ_WAKE_THREAD; - -- spin_unlock(&omap->lock); -- - return ret; - } - -diff -Nur linux-4.4.13.orig/drivers/ide/alim15x3.c linux-4.4.13/drivers/ide/alim15x3.c ---- linux-4.4.13.orig/drivers/ide/alim15x3.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/ide/alim15x3.c 2016-06-22 21:55:08.006523867 +0200 -@@ -234,7 +234,7 @@ - - isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); - -- local_irq_save(flags); -+ local_irq_save_nort(flags); - - if (m5229_revision < 0xC2) { - /* -@@ -325,7 +325,7 @@ - } - pci_dev_put(north); - pci_dev_put(isa_dev); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - return 0; - } - -diff -Nur linux-4.4.13.orig/drivers/ide/hpt366.c linux-4.4.13/drivers/ide/hpt366.c ---- linux-4.4.13.orig/drivers/ide/hpt366.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/ide/hpt366.c 2016-06-22 21:55:08.006523867 +0200 -@@ -1241,7 +1241,7 @@ - - dma_old = inb(base + 2); - -- local_irq_save(flags); -+ local_irq_save_nort(flags); - - dma_new = dma_old; - pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma); -@@ -1252,7 +1252,7 @@ - if (dma_new != dma_old) - outb(dma_new, base + 2); - -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n", - hwif->name, base, base + 7); -diff -Nur linux-4.4.13.orig/drivers/ide/ide-io.c linux-4.4.13/drivers/ide/ide-io.c ---- linux-4.4.13.orig/drivers/ide/ide-io.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/ide/ide-io.c 2016-06-22 21:55:08.006523867 +0200 -@@ -659,7 +659,7 @@ - /* disable_irq_nosync ?? */ - disable_irq(hwif->irq); - /* local CPU only, as if we were handling an interrupt */ -- local_irq_disable(); -+ local_irq_disable_nort(); - if (hwif->polling) { - startstop = handler(drive); - } else if (drive_is_ready(drive)) { -diff -Nur linux-4.4.13.orig/drivers/ide/ide-iops.c linux-4.4.13/drivers/ide/ide-iops.c ---- linux-4.4.13.orig/drivers/ide/ide-iops.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/ide/ide-iops.c 2016-06-22 21:55:08.006523867 +0200 -@@ -129,12 +129,12 @@ - if ((stat & ATA_BUSY) == 0) - break; - -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - *rstat = stat; - return -EBUSY; - } - } -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } - /* - * Allow status to settle, then read it again. 
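
The local_irq_*_nort() conversions above, and those in the IDE, IPoIB and gameport hunks that follow, are this patch's standard idiom: keep hard-IRQ disabling on a mainline kernel, but reduce it to (at most) a flags save on PREEMPT_RT_FULL, where these sections become preemptible. The real definitions live in another hunk of this patch; conceptually they have roughly this shape (stated as an assumption, not quoted):

#ifdef CONFIG_PREEMPT_RT_FULL
# define local_irq_disable_nort()	barrier()
# define local_irq_enable_nort()	barrier()
# define local_irq_save_nort(flags)	local_save_flags(flags)
# define local_irq_restore_nort(flags)	(void)(flags)
#else
# define local_irq_disable_nort()	local_irq_disable()
# define local_irq_enable_nort()	local_irq_enable()
# define local_irq_save_nort(flags)	local_irq_save(flags)
# define local_irq_restore_nort(flags)	local_irq_restore(flags)
#endif
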
-diff -Nur linux-4.4.13.orig/drivers/ide/ide-io-std.c linux-4.4.13/drivers/ide/ide-io-std.c ---- linux-4.4.13.orig/drivers/ide/ide-io-std.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/ide/ide-io-std.c 2016-06-22 21:55:08.006523867 +0200 -@@ -175,7 +175,7 @@ - unsigned long uninitialized_var(flags); - - if ((io_32bit & 2) && !mmio) { -- local_irq_save(flags); -+ local_irq_save_nort(flags); - ata_vlb_sync(io_ports->nsect_addr); - } - -@@ -186,7 +186,7 @@ - insl(data_addr, buf, words); - - if ((io_32bit & 2) && !mmio) -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - if (((len + 1) & 3) < 2) - return; -@@ -219,7 +219,7 @@ - unsigned long uninitialized_var(flags); - - if ((io_32bit & 2) && !mmio) { -- local_irq_save(flags); -+ local_irq_save_nort(flags); - ata_vlb_sync(io_ports->nsect_addr); - } - -@@ -230,7 +230,7 @@ - outsl(data_addr, buf, words); - - if ((io_32bit & 2) && !mmio) -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - if (((len + 1) & 3) < 2) - return; -diff -Nur linux-4.4.13.orig/drivers/ide/ide-probe.c linux-4.4.13/drivers/ide/ide-probe.c ---- linux-4.4.13.orig/drivers/ide/ide-probe.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/ide/ide-probe.c 2016-06-22 21:55:08.006523867 +0200 -@@ -196,10 +196,10 @@ - int bswap = 1; - - /* local CPU only; some systems need this */ -- local_irq_save(flags); -+ local_irq_save_nort(flags); - /* read 512 bytes of id info */ - hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - drive->dev_flags |= IDE_DFLAG_ID_READ; - #ifdef DEBUG -diff -Nur linux-4.4.13.orig/drivers/ide/ide-taskfile.c linux-4.4.13/drivers/ide/ide-taskfile.c ---- linux-4.4.13.orig/drivers/ide/ide-taskfile.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/ide/ide-taskfile.c 2016-06-22 21:55:08.006523867 +0200 -@@ -250,7 +250,7 @@ - - page_is_high = PageHighMem(page); - if (page_is_high) -- local_irq_save(flags); -+ local_irq_save_nort(flags); - - buf = kmap_atomic(page) + offset; - -@@ -271,7 +271,7 @@ - kunmap_atomic(buf); - - if (page_is_high) -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - len -= nr_bytes; - } -@@ -414,7 +414,7 @@ - } - - if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0) -- local_irq_disable(); -+ local_irq_disable_nort(); - - ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE); - -diff -Nur linux-4.4.13.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c linux-4.4.13/drivers/infiniband/ulp/ipoib/ipoib_multicast.c ---- linux-4.4.13.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2016-06-22 21:55:08.006523867 +0200 -@@ -857,7 +857,7 @@ - - ipoib_dbg_mcast(priv, "restarting multicast task\n"); - -- local_irq_save(flags); -+ local_irq_save_nort(flags); - netif_addr_lock(dev); - spin_lock(&priv->lock); - -@@ -939,7 +939,7 @@ - - spin_unlock(&priv->lock); - netif_addr_unlock(dev); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - /* - * make sure the in-flight joins have finished before we attempt -diff -Nur linux-4.4.13.orig/drivers/input/gameport/gameport.c linux-4.4.13/drivers/input/gameport/gameport.c ---- linux-4.4.13.orig/drivers/input/gameport/gameport.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/input/gameport/gameport.c 2016-06-22 21:55:08.006523867 +0200 -@@ -91,13 +91,13 @@ - tx = ~0; - - for (i = 0; i < 50; i++) { -- local_irq_save(flags); -+ 
local_irq_save_nort(flags); - t1 = ktime_get_ns(); - for (t = 0; t < 50; t++) - gameport_read(gameport); - t2 = ktime_get_ns(); - t3 = ktime_get_ns(); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - udelay(i * 10); - t = (t2 - t1) - (t3 - t2); - if (t < tx) -@@ -124,12 +124,12 @@ - tx = 1 << 30; - - for(i = 0; i < 50; i++) { -- local_irq_save(flags); -+ local_irq_save_nort(flags); - GET_TIME(t1); - for (t = 0; t < 50; t++) gameport_read(gameport); - GET_TIME(t2); - GET_TIME(t3); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - udelay(i * 10); - if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t; - } -@@ -148,11 +148,11 @@ - tx = 1 << 30; - - for(i = 0; i < 50; i++) { -- local_irq_save(flags); -+ local_irq_save_nort(flags); - t1 = rdtsc(); - for (t = 0; t < 50; t++) gameport_read(gameport); - t2 = rdtsc(); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - udelay(i * 10); - if (t2 - t1 < tx) tx = t2 - t1; - } -diff -Nur linux-4.4.13.orig/drivers/iommu/amd_iommu.c linux-4.4.13/drivers/iommu/amd_iommu.c ---- linux-4.4.13.orig/drivers/iommu/amd_iommu.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/iommu/amd_iommu.c 2016-06-22 21:55:08.006523867 +0200 -@@ -2017,10 +2017,10 @@ - int ret; - - /* -- * Must be called with IRQs disabled. Warn here to detect early -- * when its not. -+ * Must be called with IRQs disabled on a non RT kernel. Warn here to -+ * detect early when its not. - */ -- WARN_ON(!irqs_disabled()); -+ WARN_ON_NONRT(!irqs_disabled()); - - /* lock domain */ - spin_lock(&domain->lock); -@@ -2183,10 +2183,10 @@ - struct protection_domain *domain; - - /* -- * Must be called with IRQs disabled. Warn here to detect early -- * when its not. -+ * Must be called with IRQs disabled on a non RT kernel. Warn here to -+ * detect early when its not. - */ -- WARN_ON(!irqs_disabled()); -+ WARN_ON_NONRT(!irqs_disabled()); - - if (WARN_ON(!dev_data->domain)) - return; -diff -Nur linux-4.4.13.orig/drivers/leds/trigger/Kconfig linux-4.4.13/drivers/leds/trigger/Kconfig ---- linux-4.4.13.orig/drivers/leds/trigger/Kconfig 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/leds/trigger/Kconfig 2016-06-22 21:55:08.006523867 +0200 -@@ -61,7 +61,7 @@ - - config LEDS_TRIGGER_CPU - bool "LED CPU Trigger" -- depends on LEDS_TRIGGERS -+ depends on LEDS_TRIGGERS && !PREEMPT_RT_BASE - help - This allows LEDs to be controlled by active CPUs. This shows - the active CPUs across an array of LEDs so you can see which -diff -Nur linux-4.4.13.orig/drivers/md/bcache/Kconfig linux-4.4.13/drivers/md/bcache/Kconfig ---- linux-4.4.13.orig/drivers/md/bcache/Kconfig 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/md/bcache/Kconfig 2016-06-22 21:55:08.006523867 +0200 -@@ -1,6 +1,7 @@ - - config BCACHE - tristate "Block device as cache" -+ depends on !PREEMPT_RT_FULL - ---help--- - Allows a block device to be used as cache for other devices; uses - a btree for indexing and the layout is optimized for SSDs. 
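
WARN_ON_NONRT() in the amd_iommu hunk above, like BUG_ON_NONRT() in the dm hunk below, is the assertion-side companion of the same idiom: the check still fires on a mainline kernel but is compiled out under PREEMPT_RT_BASE, where spinlocks become sleeping locks and "IRQs disabled" is no longer a valid invariant. Presumed shape, defined elsewhere in this patch (an assumption for illustration):

#ifdef CONFIG_PREEMPT_RT_BASE
# define WARN_ON_NONRT(condition)	do { } while (0)
# define BUG_ON_NONRT(condition)	do { } while (0)
#else
# define WARN_ON_NONRT(condition)	WARN_ON(condition)
# define BUG_ON_NONRT(condition)	BUG_ON(condition)
#endif
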
-diff -Nur linux-4.4.13.orig/drivers/md/dm.c linux-4.4.13/drivers/md/dm.c ---- linux-4.4.13.orig/drivers/md/dm.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/md/dm.c 2016-06-22 21:55:08.006523867 +0200 -@@ -2127,7 +2127,7 @@ - /* Establish tio->ti before queuing work (map_tio_request) */ - tio->ti = ti; - queue_kthread_work(&md->kworker, &tio->work); -- BUG_ON(!irqs_disabled()); -+ BUG_ON_NONRT(!irqs_disabled()); - } - - goto out; -diff -Nur linux-4.4.13.orig/drivers/md/raid5.c linux-4.4.13/drivers/md/raid5.c ---- linux-4.4.13.orig/drivers/md/raid5.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/md/raid5.c 2016-06-22 21:55:08.010523867 +0200 -@@ -1920,8 +1920,9 @@ - struct raid5_percpu *percpu; - unsigned long cpu; - -- cpu = get_cpu(); -+ cpu = get_cpu_light(); - percpu = per_cpu_ptr(conf->percpu, cpu); -+ spin_lock(&percpu->lock); - if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { - ops_run_biofill(sh); - overlap_clear++; -@@ -1977,7 +1978,8 @@ - if (test_and_clear_bit(R5_Overlap, &dev->flags)) - wake_up(&sh->raid_conf->wait_for_overlap); - } -- put_cpu(); -+ spin_unlock(&percpu->lock); -+ put_cpu_light(); - } - - static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp) -@@ -6414,6 +6416,7 @@ - __func__, cpu); - break; - } -+ spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock); - } - put_online_cpus(); - -diff -Nur linux-4.4.13.orig/drivers/md/raid5.h linux-4.4.13/drivers/md/raid5.h ---- linux-4.4.13.orig/drivers/md/raid5.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/md/raid5.h 2016-06-22 21:55:08.010523867 +0200 -@@ -504,6 +504,7 @@ - int recovery_disabled; - /* per cpu variables */ - struct raid5_percpu { -+ spinlock_t lock; /* Protection for -RT */ - struct page *spare_page; /* Used when checking P/Q in raid6 */ - struct flex_array *scribble; /* space for constructing buffer - * lists and performing address -diff -Nur linux-4.4.13.orig/drivers/media/platform/vsp1/vsp1_video.c linux-4.4.13/drivers/media/platform/vsp1/vsp1_video.c ---- linux-4.4.13.orig/drivers/media/platform/vsp1/vsp1_video.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/media/platform/vsp1/vsp1_video.c 2016-06-22 21:55:08.010523867 +0200 -@@ -520,7 +520,7 @@ - bool stopped; - - spin_lock_irqsave(&pipe->irqlock, flags); -- stopped = pipe->state == VSP1_PIPELINE_STOPPED, -+ stopped = pipe->state == VSP1_PIPELINE_STOPPED; - spin_unlock_irqrestore(&pipe->irqlock, flags); - - return stopped; -diff -Nur linux-4.4.13.orig/drivers/misc/hwlat_detector.c linux-4.4.13/drivers/misc/hwlat_detector.c ---- linux-4.4.13.orig/drivers/misc/hwlat_detector.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-4.4.13/drivers/misc/hwlat_detector.c 2016-06-22 21:55:08.010523867 +0200 -@@ -0,0 +1,1240 @@ -+/* -+ * hwlat_detector.c - A simple Hardware Latency detector. -+ * -+ * Use this module to detect large system latencies induced by the behavior of -+ * certain underlying system hardware or firmware, independent of Linux itself. -+ * The code was developed originally to detect the presence of SMIs on Intel -+ * and AMD systems, although there is no dependency upon x86 herein. -+ * -+ * The classical example usage of this module is in detecting the presence of -+ * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a -+ * somewhat special form of hardware interrupt spawned from earlier CPU debug -+ * modes in which the (BIOS/EFI/etc.) 
firmware arranges for the South Bridge -+ * LPC (or other device) to generate a special interrupt under certain -+ * circumstances, for example, upon expiration of a special SMI timer device, -+ * due to certain external thermal readings, on certain I/O address accesses, -+ * and other situations. An SMI hits a special CPU pin, triggers a special -+ * SMI mode (complete with special memory map), and the OS is unaware. -+ * -+ * Although certain hardware-inducing latencies are necessary (for example, -+ * a modern system often requires an SMI handler for correct thermal control -+ * and remote management) they can wreak havoc upon any OS-level performance -+ * guarantees toward low-latency, especially when the OS is not even made -+ * aware of the presence of these interrupts. For this reason, we need a -+ * somewhat brute force mechanism to detect these interrupts. In this case, -+ * we do it by hogging all of the CPU(s) for configurable timer intervals, -+ * sampling the built-in CPU timer, looking for discontiguous readings. -+ * -+ * WARNING: This implementation necessarily introduces latencies. Therefore, -+ * you should NEVER use this module in a production environment -+ * requiring any kind of low-latency performance guarantee(s). -+ * -+ * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. -+ * -+ * Includes useful feedback from Clark Williams -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */ -+#define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */ -+#define U64STR_SIZE 22 /* 20 digits max */ -+ -+#define VERSION "1.0.0" -+#define BANNER "hwlat_detector: " -+#define DRVNAME "hwlat_detector" -+#define DEFAULT_SAMPLE_WINDOW 1000000 /* 1s */ -+#define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */ -+#define DEFAULT_LAT_THRESHOLD 10 /* 10us */ -+ -+/* Module metadata */ -+ -+MODULE_LICENSE("GPL"); -+MODULE_AUTHOR("Jon Masters "); -+MODULE_DESCRIPTION("A simple hardware latency detector"); -+MODULE_VERSION(VERSION); -+ -+/* Module parameters */ -+ -+static int debug; -+static int enabled; -+static int threshold; -+ -+module_param(debug, int, 0); /* enable debug */ -+module_param(enabled, int, 0); /* enable detector */ -+module_param(threshold, int, 0); /* latency threshold */ -+ -+/* Buffering and sampling */ -+ -+static struct ring_buffer *ring_buffer; /* sample buffer */ -+static DEFINE_MUTEX(ring_buffer_mutex); /* lock changes */ -+static unsigned long buf_size = BUF_SIZE_DEFAULT; -+static struct task_struct *kthread; /* sampling thread */ -+ -+/* DebugFS filesystem entries */ -+ -+static struct dentry *debug_dir; /* debugfs directory */ -+static struct dentry *debug_max; /* maximum TSC delta */ -+static struct dentry *debug_count; /* total detect count */ -+static struct dentry *debug_sample_width; /* sample width us */ -+static struct dentry *debug_sample_window; /* sample window us */ -+static struct dentry *debug_sample; /* raw samples us */ -+static struct dentry *debug_threshold; /* threshold us */ -+static struct dentry *debug_enable; /* enable/disable */ -+ -+/* Individual samples and global state */ -+ -+struct sample; /* latency sample */ -+struct data; /* Global state */ -+ -+/* Sampling functions */ -+static int 
__buffer_add_sample(struct sample *sample);
-+static struct sample *buffer_get_sample(struct sample *sample);
-+
-+/* Threading and state */
-+static int kthread_fn(void *unused);
-+static int start_kthread(void);
-+static int stop_kthread(void);
-+static void __reset_stats(void);
-+static int init_stats(void);
-+
-+/* Debugfs interface */
-+static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
-+				size_t cnt, loff_t *ppos, const u64 *entry);
-+static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
-+				size_t cnt, loff_t *ppos, u64 *entry);
-+static int debug_sample_fopen(struct inode *inode, struct file *filp);
-+static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
-+				size_t cnt, loff_t *ppos);
-+static int debug_sample_release(struct inode *inode, struct file *filp);
-+static int debug_enable_fopen(struct inode *inode, struct file *filp);
-+static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
-+				size_t cnt, loff_t *ppos);
-+static ssize_t debug_enable_fwrite(struct file *file,
-+			const char __user *user_buffer,
-+			size_t user_size, loff_t *offset);
-+
-+/* Initialization functions */
-+static int init_debugfs(void);
-+static void free_debugfs(void);
-+static int detector_init(void);
-+static void detector_exit(void);
-+
-+/* Individual latency samples are stored here when detected and packed into
-+ * the ring_buffer circular buffer, where they are overwritten when
-+ * more than buf_size/sizeof(sample) samples are received. */
-+struct sample {
-+	u64 seqnum;		/* unique sequence */
-+	u64 duration;		/* ktime delta */
-+	u64 outer_duration;	/* ktime delta (outer loop) */
-+	struct timespec timestamp;	/* wall time */
-+	unsigned long lost;
-+};
-+
-+/* keep the global state somewhere. */
-+static struct data {
-+
-+	struct mutex lock;		/* protect changes */
-+
-+	u64 count;			/* total since reset */
-+	u64 max_sample;			/* max hardware latency */
-+	u64 threshold;			/* sample threshold level */
-+
-+	u64 sample_window;		/* total sampling window (on+off) */
-+	u64 sample_width;		/* active sampling portion of window */
-+
-+	atomic_t sample_open;		/* whether the sample file is open */
-+
-+	wait_queue_head_t wq;		/* waitqueue for new sample values */
-+
-+} data;
-+
-+/**
-+ * __buffer_add_sample - add a new latency sample recording to the ring buffer
-+ * @sample: The new latency sample value
-+ *
-+ * This receives a new latency sample and records it in a global ring buffer.
-+ * No additional locking is used in this case. 
-+ */ -+static int __buffer_add_sample(struct sample *sample) -+{ -+ return ring_buffer_write(ring_buffer, -+ sizeof(struct sample), sample); -+} -+ -+/** -+ * buffer_get_sample - remove a hardware latency sample from the ring buffer -+ * @sample: Pre-allocated storage for the sample -+ * -+ * This retrieves a hardware latency sample from the global circular buffer -+ */ -+static struct sample *buffer_get_sample(struct sample *sample) -+{ -+ struct ring_buffer_event *e = NULL; -+ struct sample *s = NULL; -+ unsigned int cpu = 0; -+ -+ if (!sample) -+ return NULL; -+ -+ mutex_lock(&ring_buffer_mutex); -+ for_each_online_cpu(cpu) { -+ e = ring_buffer_consume(ring_buffer, cpu, NULL, &sample->lost); -+ if (e) -+ break; -+ } -+ -+ if (e) { -+ s = ring_buffer_event_data(e); -+ memcpy(sample, s, sizeof(struct sample)); -+ } else -+ sample = NULL; -+ mutex_unlock(&ring_buffer_mutex); -+ -+ return sample; -+} -+ -+#ifndef CONFIG_TRACING -+#define time_type ktime_t -+#define time_get() ktime_get() -+#define time_to_us(x) ktime_to_us(x) -+#define time_sub(a, b) ktime_sub(a, b) -+#define init_time(a, b) (a).tv64 = b -+#define time_u64(a) ((a).tv64) -+#else -+#define time_type u64 -+#define time_get() trace_clock_local() -+#define time_to_us(x) div_u64(x, 1000) -+#define time_sub(a, b) ((a) - (b)) -+#define init_time(a, b) (a = b) -+#define time_u64(a) a -+#endif -+/** -+ * get_sample - sample the CPU TSC and look for likely hardware latencies -+ * -+ * Used to repeatedly capture the CPU TSC (or similar), looking for potential -+ * hardware-induced latency. Called with interrupts disabled and with -+ * data.lock held. -+ */ -+static int get_sample(void) -+{ -+ time_type start, t1, t2, last_t2; -+ s64 diff, total = 0; -+ u64 sample = 0; -+ u64 outer_sample = 0; -+ int ret = -1; -+ -+ init_time(last_t2, 0); -+ start = time_get(); /* start timestamp */ -+ -+ do { -+ -+ t1 = time_get(); /* we'll look for a discontinuity */ -+ t2 = time_get(); -+ -+ if (time_u64(last_t2)) { -+ /* Check the delta from outer loop (t2 to next t1) */ -+ diff = time_to_us(time_sub(t1, last_t2)); -+ /* This shouldn't happen */ -+ if (diff < 0) { -+ pr_err(BANNER "time running backwards\n"); -+ goto out; -+ } -+ if (diff > outer_sample) -+ outer_sample = diff; -+ } -+ last_t2 = t2; -+ -+ total = time_to_us(time_sub(t2, start)); /* sample width */ -+ -+ /* This checks the inner loop (t1 to t2) */ -+ diff = time_to_us(time_sub(t2, t1)); /* current diff */ -+ -+ /* This shouldn't happen */ -+ if (diff < 0) { -+ pr_err(BANNER "time running backwards\n"); -+ goto out; -+ } -+ -+ if (diff > sample) -+ sample = diff; /* only want highest value */ -+ -+ } while (total <= data.sample_width); -+ -+ ret = 0; -+ -+ /* If we exceed the threshold value, we have found a hardware latency */ -+ if (sample > data.threshold || outer_sample > data.threshold) { -+ struct sample s; -+ -+ ret = 1; -+ -+ data.count++; -+ s.seqnum = data.count; -+ s.duration = sample; -+ s.outer_duration = outer_sample; -+ s.timestamp = CURRENT_TIME; -+ __buffer_add_sample(&s); -+ -+ /* Keep a running maximum ever recorded hardware latency */ -+ if (sample > data.max_sample) -+ data.max_sample = sample; -+ } -+ -+out: -+ return ret; -+} -+ -+/* -+ * kthread_fn - The CPU time sampling/hardware latency detection kernel thread -+ * @unused: A required part of the kthread API. -+ * -+ * Used to periodically sample the CPU TSC via a call to get_sample. 
We
-+ * disable interrupts, which does (intentionally) introduce latency since we
-+ * need to ensure nothing else might be running (and thus pre-empting).
-+ * Obviously this should never be used in production environments.
-+ *
-+ * Currently this runs on whichever CPU it was scheduled on, but most
-+ * real-world hardware latency situations occur across several CPUs,
-+ * and we might later generalize this if we find there are any actual
-+ * systems with alternate SMI delivery or other hardware latencies.
-+ */
-+static int kthread_fn(void *unused)
-+{
-+	int ret;
-+	u64 interval;
-+
-+	while (!kthread_should_stop()) {
-+
-+		mutex_lock(&data.lock);
-+
-+		local_irq_disable();
-+		ret = get_sample();
-+		local_irq_enable();
-+
-+		if (ret > 0)
-+			wake_up(&data.wq); /* wake up reader(s) */
-+
-+		interval = data.sample_window - data.sample_width;
-+		do_div(interval, USEC_PER_MSEC); /* modifies interval value */
-+
-+		mutex_unlock(&data.lock);
-+
-+		if (msleep_interruptible(interval))
-+			break;
-+	}
-+
-+	return 0;
-+}
-+
-+/**
-+ * start_kthread - Kick off the hardware latency sampling/detector kthread
-+ *
-+ * This starts a kernel thread that will sit and sample the CPU timestamp
-+ * counter (TSC or similar) and look for potential hardware latencies.
-+ */
-+static int start_kthread(void)
-+{
-+	kthread = kthread_run(kthread_fn, NULL,
-+			      DRVNAME);
-+	if (IS_ERR(kthread)) {
-+		pr_err(BANNER "could not start sampling thread\n");
-+		enabled = 0;
-+		return -ENOMEM;
-+	}
-+
-+	return 0;
-+}
-+
-+/**
-+ * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
-+ *
-+ * This kicks the running hardware latency sampling/detector kernel thread and
-+ * tells it to stop sampling now. Use this on unload and at system shutdown.
-+ */
-+static int stop_kthread(void)
-+{
-+	int ret;
-+
-+	ret = kthread_stop(kthread);
-+
-+	return ret;
-+}
-+
-+/**
-+ * __reset_stats - Reset statistics for the hardware latency detector
-+ *
-+ * We use data to store various statistics and global state. We call this
-+ * function in order to reset those when "enable" is toggled on or off, and
-+ * also at initialization. Should be called with data.lock held.
-+ */
-+static void __reset_stats(void)
-+{
-+	data.count = 0;
-+	data.max_sample = 0;
-+	ring_buffer_reset(ring_buffer); /* flush out old sample entries */
-+}
-+
-+/**
-+ * init_stats - Setup global state statistics for the hardware latency detector
-+ *
-+ * We use data to store various statistics and global state. We also use
-+ * a global ring buffer (ring_buffer) to keep raw samples of detected hardware
-+ * induced system latencies. This function initializes these structures and
-+ * allocates the global ring buffer also. 
-+ */ -+static int init_stats(void) -+{ -+ int ret = -ENOMEM; -+ -+ mutex_init(&data.lock); -+ init_waitqueue_head(&data.wq); -+ atomic_set(&data.sample_open, 0); -+ -+ ring_buffer = ring_buffer_alloc(buf_size, BUF_FLAGS); -+ -+ if (WARN(!ring_buffer, KERN_ERR BANNER -+ "failed to allocate ring buffer!\n")) -+ goto out; -+ -+ __reset_stats(); -+ data.threshold = threshold ?: DEFAULT_LAT_THRESHOLD; /* threshold us */ -+ data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */ -+ data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */ -+ -+ ret = 0; -+ -+out: -+ return ret; -+ -+} -+ -+/* -+ * simple_data_read - Wrapper read function for global state debugfs entries -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The userspace provided buffer to read value into -+ * @cnt: The maximum number of bytes to read -+ * @ppos: The current "file" position -+ * @entry: The entry to read from -+ * -+ * This function provides a generic read implementation for the global state -+ * "data" structure debugfs filesystem entries. It would be nice to use -+ * simple_attr_read directly, but we need to make sure that the data.lock -+ * is held during the actual read. -+ */ -+static ssize_t simple_data_read(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos, const u64 *entry) -+{ -+ char buf[U64STR_SIZE]; -+ u64 val = 0; -+ int len = 0; -+ -+ memset(buf, 0, sizeof(buf)); -+ -+ if (!entry) -+ return -EFAULT; -+ -+ mutex_lock(&data.lock); -+ val = *entry; -+ mutex_unlock(&data.lock); -+ -+ len = snprintf(buf, sizeof(buf), "%llu\n", (unsigned long long)val); -+ -+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); -+ -+} -+ -+/* -+ * simple_data_write - Wrapper write function for global state debugfs entries -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The userspace provided buffer to write value from -+ * @cnt: The maximum number of bytes to write -+ * @ppos: The current "file" position -+ * @entry: The entry to write to -+ * -+ * This function provides a generic write implementation for the global state -+ * "data" structure debugfs filesystem entries. It would be nice to use -+ * simple_attr_write directly, but we need to make sure that the data.lock -+ * is held during the actual write. -+ */ -+static ssize_t simple_data_write(struct file *filp, const char __user *ubuf, -+ size_t cnt, loff_t *ppos, u64 *entry) -+{ -+ char buf[U64STR_SIZE]; -+ int csize = min(cnt, sizeof(buf)); -+ u64 val = 0; -+ int err = 0; -+ -+ memset(buf, '\0', sizeof(buf)); -+ if (copy_from_user(buf, ubuf, csize)) -+ return -EFAULT; -+ -+ buf[U64STR_SIZE-1] = '\0'; /* just in case */ -+ err = kstrtoull(buf, 10, &val); -+ if (err) -+ return -EINVAL; -+ -+ mutex_lock(&data.lock); -+ *entry = val; -+ mutex_unlock(&data.lock); -+ -+ return csize; -+} -+ -+/** -+ * debug_count_fopen - Open function for "count" debugfs entry -+ * @inode: The in-kernel inode representation of the debugfs "file" -+ * @filp: The active open file structure for the debugfs "file" -+ * -+ * This function provides an open implementation for the "count" debugfs -+ * interface to the hardware latency detector. 
-+ */ -+static int debug_count_fopen(struct inode *inode, struct file *filp) -+{ -+ return 0; -+} -+ -+/** -+ * debug_count_fread - Read function for "count" debugfs entry -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The userspace provided buffer to read value into -+ * @cnt: The maximum number of bytes to read -+ * @ppos: The current "file" position -+ * -+ * This function provides a read implementation for the "count" debugfs -+ * interface to the hardware latency detector. Can be used to read the -+ * number of latency readings exceeding the configured threshold since -+ * the detector was last reset (e.g. by writing a zero into "count"). -+ */ -+static ssize_t debug_count_fread(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos) -+{ -+ return simple_data_read(filp, ubuf, cnt, ppos, &data.count); -+} -+ -+/** -+ * debug_count_fwrite - Write function for "count" debugfs entry -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The user buffer that contains the value to write -+ * @cnt: The maximum number of bytes to write to "file" -+ * @ppos: The current position in the debugfs "file" -+ * -+ * This function provides a write implementation for the "count" debugfs -+ * interface to the hardware latency detector. Can be used to write a -+ * desired value, especially to zero the total count. -+ */ -+static ssize_t debug_count_fwrite(struct file *filp, -+ const char __user *ubuf, -+ size_t cnt, -+ loff_t *ppos) -+{ -+ return simple_data_write(filp, ubuf, cnt, ppos, &data.count); -+} -+ -+/** -+ * debug_enable_fopen - Dummy open function for "enable" debugfs interface -+ * @inode: The in-kernel inode representation of the debugfs "file" -+ * @filp: The active open file structure for the debugfs "file" -+ * -+ * This function provides an open implementation for the "enable" debugfs -+ * interface to the hardware latency detector. -+ */ -+static int debug_enable_fopen(struct inode *inode, struct file *filp) -+{ -+ return 0; -+} -+ -+/** -+ * debug_enable_fread - Read function for "enable" debugfs interface -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The userspace provided buffer to read value into -+ * @cnt: The maximum number of bytes to read -+ * @ppos: The current "file" position -+ * -+ * This function provides a read implementation for the "enable" debugfs -+ * interface to the hardware latency detector. Can be used to determine -+ * whether the detector is currently enabled ("0\n" or "1\n" returned). -+ */ -+static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf, -+ size_t cnt, loff_t *ppos) -+{ -+ char buf[4]; -+ -+ if ((cnt < sizeof(buf)) || (*ppos)) -+ return 0; -+ -+ buf[0] = enabled ? '1' : '0'; -+ buf[1] = '\n'; -+ buf[2] = '\0'; -+ if (copy_to_user(ubuf, buf, strlen(buf))) -+ return -EFAULT; -+ return *ppos = strlen(buf); -+} -+ -+/** -+ * debug_enable_fwrite - Write function for "enable" debugfs interface -+ * @filp: The active open file structure for the debugfs "file" -+ * @ubuf: The user buffer that contains the value to write -+ * @cnt: The maximum number of bytes to write to "file" -+ * @ppos: The current position in the debugfs "file" -+ * -+ * This function provides a write implementation for the "enable" debugfs -+ * interface to the hardware latency detector. 
Can be used to enable or
-+ * disable the detector, which will have the side-effect of possibly
-+ * also resetting the global stats and kicking off the measuring
-+ * kthread (on an enable) or the converse (upon a disable).
-+ */
-+static ssize_t debug_enable_fwrite(struct file *filp,
-+ const char __user *ubuf,
-+ size_t cnt,
-+ loff_t *ppos)
-+{
-+ char buf[4];
-+ int csize = min(cnt, sizeof(buf));
-+ long val = 0;
-+ int err = 0;
-+
-+ memset(buf, '\0', sizeof(buf));
-+ if (copy_from_user(buf, ubuf, csize))
-+ return -EFAULT;
-+
-+ buf[sizeof(buf)-1] = '\0'; /* just in case */
-+ err = kstrtoul(buf, 10, &val);
-+ if (err)
-+ return -EINVAL;
-+
-+ if (val) {
-+ if (enabled)
-+ goto unlock;
-+ enabled = 1;
-+ __reset_stats();
-+ if (start_kthread())
-+ return -EFAULT;
-+ } else {
-+ if (!enabled)
-+ goto unlock;
-+ enabled = 0;
-+ err = stop_kthread();
-+ if (err) {
-+ pr_err(BANNER "cannot stop kthread\n");
-+ return -EFAULT;
-+ }
-+ wake_up(&data.wq); /* reader(s) should return */
-+ }
-+unlock:
-+ return csize;
-+}
-+
-+/**
-+ * debug_max_fopen - Open function for "max" debugfs entry
-+ * @inode: The in-kernel inode representation of the debugfs "file"
-+ * @filp: The active open file structure for the debugfs "file"
-+ *
-+ * This function provides an open implementation for the "max" debugfs
-+ * interface to the hardware latency detector.
-+ */
-+static int debug_max_fopen(struct inode *inode, struct file *filp)
-+{
-+ return 0;
-+}
-+
-+/**
-+ * debug_max_fread - Read function for "max" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The userspace provided buffer to read value into
-+ * @cnt: The maximum number of bytes to read
-+ * @ppos: The current "file" position
-+ *
-+ * This function provides a read implementation for the "max" debugfs
-+ * interface to the hardware latency detector. Can be used to determine
-+ * the maximum latency value observed since it was last reset.
-+ */
-+static ssize_t debug_max_fread(struct file *filp, char __user *ubuf,
-+ size_t cnt, loff_t *ppos)
-+{
-+ return simple_data_read(filp, ubuf, cnt, ppos, &data.max_sample);
-+}
-+
-+/**
-+ * debug_max_fwrite - Write function for "max" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The user buffer that contains the value to write
-+ * @cnt: The maximum number of bytes to write to "file"
-+ * @ppos: The current position in the debugfs "file"
-+ *
-+ * This function provides a write implementation for the "max" debugfs
-+ * interface to the hardware latency detector. Can be used to reset the
-+ * maximum or set it to some other desired value; if subsequent
-+ * measurements exceed this value, the maximum will be updated.
-+ */
-+static ssize_t debug_max_fwrite(struct file *filp,
-+ const char __user *ubuf,
-+ size_t cnt,
-+ loff_t *ppos)
-+{
-+ return simple_data_write(filp, ubuf, cnt, ppos, &data.max_sample);
-+}
-+
-+
-+/**
-+ * debug_sample_fopen - An open function for "sample" debugfs interface
-+ * @inode: The in-kernel inode representation of this debugfs "file"
-+ * @filp: The active open file structure for the debugfs "file"
-+ *
-+ * This function handles opening the "sample" file within the hardware
-+ * latency detector debugfs directory interface. This file is used to read
-+ * raw samples from the global ring_buffer and allows the user to see a
-+ * running latency history. Can be opened blocking or non-blocking,
-+ * which determines whether reads wait for new samples or return at once.
-+ * Implements simple locking to prevent multiple simultaneous users.
-+ */
-+static int debug_sample_fopen(struct inode *inode, struct file *filp)
-+{
-+ if (!atomic_add_unless(&data.sample_open, 1, 1))
-+ return -EBUSY;
-+ else
-+ return 0;
-+}
-+
-+/**
-+ * debug_sample_fread - A read function for "sample" debugfs interface
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The user buffer that will contain the samples read
-+ * @cnt: The maximum bytes to read from the debugfs "file"
-+ * @ppos: The current position in the debugfs "file"
-+ *
-+ * This function handles reading from the "sample" file within the hardware
-+ * latency detector debugfs directory interface. This file is used to read
-+ * raw samples from the global ring_buffer and allows the user to see a
-+ * running latency history. By default this will block pending a new
-+ * value written into the sample buffer, unless there are already a
-+ * number of value(s) waiting in the buffer, or the sample file was
-+ * previously opened in a non-blocking mode of operation.
-+ */
-+static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
-+ size_t cnt, loff_t *ppos)
-+{
-+ int len = 0;
-+ char buf[64];
-+ struct sample *sample = NULL;
-+
-+ if (!enabled)
-+ return 0;
-+
-+ sample = kzalloc(sizeof(struct sample), GFP_KERNEL);
-+ if (!sample)
-+ return -ENOMEM;
-+
-+ while (!buffer_get_sample(sample)) {
-+
-+ DEFINE_WAIT(wait);
-+
-+ if (filp->f_flags & O_NONBLOCK) {
-+ len = -EAGAIN;
-+ goto out;
-+ }
-+
-+ prepare_to_wait(&data.wq, &wait, TASK_INTERRUPTIBLE);
-+ schedule();
-+ finish_wait(&data.wq, &wait);
-+
-+ if (signal_pending(current)) {
-+ len = -EINTR;
-+ goto out;
-+ }
-+
-+ if (!enabled) { /* enable was toggled */
-+ len = 0;
-+ goto out;
-+ }
-+ }
-+
-+ len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\t%llu\n",
-+ sample->timestamp.tv_sec,
-+ sample->timestamp.tv_nsec,
-+ sample->duration,
-+ sample->outer_duration);
-+
-+
-+ /* handling partial reads is more trouble than it's worth */
-+ if (len > cnt)
-+ goto out;
-+
-+ if (copy_to_user(ubuf, buf, len))
-+ len = -EFAULT;
-+
-+out:
-+ kfree(sample);
-+ return len;
-+}
-+
-+/**
-+ * debug_sample_release - Release function for "sample" debugfs interface
-+ * @inode: The in-kernel inode representation of the debugfs "file"
-+ * @filp: The active open file structure for the debugfs "file"
-+ *
-+ * This function completes the close of the debugfs interface "sample" file.
-+ * Frees the sample_open "lock" so that other users may open the interface.
-+ */
-+static int debug_sample_release(struct inode *inode, struct file *filp)
-+{
-+ atomic_dec(&data.sample_open);
-+
-+ return 0;
-+}
-+
-+/**
-+ * debug_threshold_fopen - Open function for "threshold" debugfs entry
-+ * @inode: The in-kernel inode representation of the debugfs "file"
-+ * @filp: The active open file structure for the debugfs "file"
-+ *
-+ * This function provides an open implementation for the "threshold" debugfs
-+ * interface to the hardware latency detector.
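-+ *
-+ * Illustrative usage (a sketch; the mount point /sys/kernel/debug is
-+ * an assumption):
-+ *
-+ *   echo 20 > /sys/kernel/debug/hwlat_detector/threshold   # in us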
-+ */
-+static int debug_threshold_fopen(struct inode *inode, struct file *filp)
-+{
-+ return 0;
-+}
-+
-+/**
-+ * debug_threshold_fread - Read function for "threshold" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The userspace provided buffer to read value into
-+ * @cnt: The maximum number of bytes to read
-+ * @ppos: The current "file" position
-+ *
-+ * This function provides a read implementation for the "threshold" debugfs
-+ * interface to the hardware latency detector. It can be used to determine
-+ * the current threshold level at which a latency will be recorded in the
-+ * global ring buffer, typically on the order of 10us.
-+ */
-+static ssize_t debug_threshold_fread(struct file *filp, char __user *ubuf,
-+ size_t cnt, loff_t *ppos)
-+{
-+ return simple_data_read(filp, ubuf, cnt, ppos, &data.threshold);
-+}
-+
-+/**
-+ * debug_threshold_fwrite - Write function for "threshold" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The user buffer that contains the value to write
-+ * @cnt: The maximum number of bytes to write to "file"
-+ * @ppos: The current position in the debugfs "file"
-+ *
-+ * This function provides a write implementation for the "threshold" debugfs
-+ * interface to the hardware latency detector. It can be used to configure
-+ * the threshold level at which any subsequently detected latencies will
-+ * be recorded into the global ring buffer.
-+ */
-+static ssize_t debug_threshold_fwrite(struct file *filp,
-+ const char __user *ubuf,
-+ size_t cnt,
-+ loff_t *ppos)
-+{
-+ int ret;
-+
-+ ret = simple_data_write(filp, ubuf, cnt, ppos, &data.threshold);
-+
-+ if (enabled)
-+ wake_up_process(kthread);
-+
-+ return ret;
-+}
-+
-+/**
-+ * debug_width_fopen - Open function for "width" debugfs entry
-+ * @inode: The in-kernel inode representation of the debugfs "file"
-+ * @filp: The active open file structure for the debugfs "file"
-+ *
-+ * This function provides an open implementation for the "width" debugfs
-+ * interface to the hardware latency detector.
-+ */
-+static int debug_width_fopen(struct inode *inode, struct file *filp)
-+{
-+ return 0;
-+}
-+
-+/**
-+ * debug_width_fread - Read function for "width" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The userspace provided buffer to read value into
-+ * @cnt: The maximum number of bytes to read
-+ * @ppos: The current "file" position
-+ *
-+ * This function provides a read implementation for the "width" debugfs
-+ * interface to the hardware latency detector. It can be used to determine
-+ * for how many us of the total window we will actively sample for any
-+ * hardware-induced latency periods. Obviously, it is not possible to
-+ * sample constantly without the system failing to respond to a sample
-+ * reader or, worse, appearing to have gone out to lunch.
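-+ *
-+ * Illustrative usage (a sketch; the mount point /sys/kernel/debug is
-+ * an assumption):
-+ *
-+ *   cat /sys/kernel/debug/hwlat_detector/width   # sample width in us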
-+ */
-+static ssize_t debug_width_fread(struct file *filp, char __user *ubuf,
-+ size_t cnt, loff_t *ppos)
-+{
-+ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_width);
-+}
-+
-+/**
-+ * debug_width_fwrite - Write function for "width" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The user buffer that contains the value to write
-+ * @cnt: The maximum number of bytes to write to "file"
-+ * @ppos: The current position in the debugfs "file"
-+ *
-+ * This function provides a write implementation for the "width" debugfs
-+ * interface to the hardware latency detector. It can be used to configure
-+ * for how many us of the total window we will actively sample for any
-+ * hardware-induced latency periods. Obviously, it is not possible to
-+ * sample constantly without the system failing to respond to a sample
-+ * reader or, worse, appearing to have gone out to lunch. It is
-+ * enforced that the width is less than the total window size.
-+ */
-+static ssize_t debug_width_fwrite(struct file *filp,
-+ const char __user *ubuf,
-+ size_t cnt,
-+ loff_t *ppos)
-+{
-+ char buf[U64STR_SIZE];
-+ int csize = min(cnt, sizeof(buf));
-+ u64 val = 0;
-+ int err = 0;
-+
-+ memset(buf, '\0', sizeof(buf));
-+ if (copy_from_user(buf, ubuf, csize))
-+ return -EFAULT;
-+
-+ buf[U64STR_SIZE-1] = '\0'; /* just in case */
-+ err = kstrtoull(buf, 10, &val);
-+ if (err)
-+ return -EINVAL;
-+
-+ mutex_lock(&data.lock);
-+ if (val < data.sample_window)
-+ data.sample_width = val;
-+ else {
-+ mutex_unlock(&data.lock);
-+ return -EINVAL;
-+ }
-+ mutex_unlock(&data.lock);
-+
-+ if (enabled)
-+ wake_up_process(kthread);
-+
-+ return csize;
-+}
-+
-+/**
-+ * debug_window_fopen - Open function for "window" debugfs entry
-+ * @inode: The in-kernel inode representation of the debugfs "file"
-+ * @filp: The active open file structure for the debugfs "file"
-+ *
-+ * This function provides an open implementation for the "window" debugfs
-+ * interface to the hardware latency detector. The window is the total time
-+ * in us that will be considered one sample period. Conceptually, windows
-+ * occur back-to-back and contain a sample width period during which
-+ * actual sampling occurs.
-+ */
-+static int debug_window_fopen(struct inode *inode, struct file *filp)
-+{
-+ return 0;
-+}
-+
-+/**
-+ * debug_window_fread - Read function for "window" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The userspace provided buffer to read value into
-+ * @cnt: The maximum number of bytes to read
-+ * @ppos: The current "file" position
-+ *
-+ * This function provides a read implementation for the "window" debugfs
-+ * interface to the hardware latency detector. The window is the total time
-+ * in us that will be considered one sample period. Conceptually, windows
-+ * occur back-to-back and contain a sample width period during which
-+ * actual sampling occurs. Can be used to read the total window size.
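-+ *
-+ * Illustrative usage (a sketch; the mount point /sys/kernel/debug is
-+ * an assumption):
-+ *
-+ *   cat /sys/kernel/debug/hwlat_detector/window   # window size in us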
-+ */
-+static ssize_t debug_window_fread(struct file *filp, char __user *ubuf,
-+ size_t cnt, loff_t *ppos)
-+{
-+ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_window);
-+}
-+
-+/**
-+ * debug_window_fwrite - Write function for "window" debugfs entry
-+ * @filp: The active open file structure for the debugfs "file"
-+ * @ubuf: The user buffer that contains the value to write
-+ * @cnt: The maximum number of bytes to write to "file"
-+ * @ppos: The current position in the debugfs "file"
-+ *
-+ * This function provides a write implementation for the "window" debugfs
-+ * interface to the hardware latency detector. The window is the total time
-+ * in us that will be considered one sample period. Conceptually, windows
-+ * occur back-to-back and contain a sample width period during which
-+ * actual sampling occurs. Can be used to write a new total window size. It
-+ * is enforced that any value written must be greater than the sample width
-+ * size, or an error results.
-+ */
-+static ssize_t debug_window_fwrite(struct file *filp,
-+ const char __user *ubuf,
-+ size_t cnt,
-+ loff_t *ppos)
-+{
-+ char buf[U64STR_SIZE];
-+ int csize = min(cnt, sizeof(buf));
-+ u64 val = 0;
-+ int err = 0;
-+
-+ memset(buf, '\0', sizeof(buf));
-+ if (copy_from_user(buf, ubuf, csize))
-+ return -EFAULT;
-+
-+ buf[U64STR_SIZE-1] = '\0'; /* just in case */
-+ err = kstrtoull(buf, 10, &val);
-+ if (err)
-+ return -EINVAL;
-+
-+ mutex_lock(&data.lock);
-+ if (data.sample_width < val)
-+ data.sample_window = val;
-+ else {
-+ mutex_unlock(&data.lock);
-+ return -EINVAL;
-+ }
-+ mutex_unlock(&data.lock);
-+
-+ return csize;
-+}
-+
-+/*
-+ * Function pointers for the "count" debugfs file operations
-+ */
-+static const struct file_operations count_fops = {
-+ .open = debug_count_fopen,
-+ .read = debug_count_fread,
-+ .write = debug_count_fwrite,
-+ .owner = THIS_MODULE,
-+};
-+
-+/*
-+ * Function pointers for the "enable" debugfs file operations
-+ */
-+static const struct file_operations enable_fops = {
-+ .open = debug_enable_fopen,
-+ .read = debug_enable_fread,
-+ .write = debug_enable_fwrite,
-+ .owner = THIS_MODULE,
-+};
-+
-+/*
-+ * Function pointers for the "max" debugfs file operations
-+ */
-+static const struct file_operations max_fops = {
-+ .open = debug_max_fopen,
-+ .read = debug_max_fread,
-+ .write = debug_max_fwrite,
-+ .owner = THIS_MODULE,
-+};
-+
-+/*
-+ * Function pointers for the "sample" debugfs file operations
-+ */
-+static const struct file_operations sample_fops = {
-+ .open = debug_sample_fopen,
-+ .read = debug_sample_fread,
-+ .release = debug_sample_release,
-+ .owner = THIS_MODULE,
-+};
-+
-+/*
-+ * Function pointers for the "threshold" debugfs file operations
-+ */
-+static const struct file_operations threshold_fops = {
-+ .open = debug_threshold_fopen,
-+ .read = debug_threshold_fread,
-+ .write = debug_threshold_fwrite,
-+ .owner = THIS_MODULE,
-+};
-+
-+/*
-+ * Function pointers for the "width" debugfs file operations
-+ */
-+static const struct file_operations width_fops = {
-+ .open = debug_width_fopen,
-+ .read = debug_width_fread,
-+ .write = debug_width_fwrite,
-+ .owner = THIS_MODULE,
-+};
-+
-+/*
-+ * Function pointers for the "window" debugfs file operations
-+ */
-+static const struct file_operations window_fops = {
-+ .open = debug_window_fopen,
-+ .read = debug_window_fread,
-+ .write = debug_window_fwrite,
-+ .owner = THIS_MODULE,
-+};
-+
-+/**
-+ * init_debugfs - A function to initialize the debugfs interface files
-+ *
-+ * This function creates 
entries in debugfs for "hwlat_detector", including
-+ * files to read values from the detector, current samples, and the
-+ * maximum sample that has been captured since the hardware latency
-+ * detector was started.
-+ */
-+static int init_debugfs(void)
-+{
-+ int ret = -ENOMEM;
-+
-+ debug_dir = debugfs_create_dir(DRVNAME, NULL);
-+ if (!debug_dir)
-+ goto err_debug_dir;
-+
-+ debug_sample = debugfs_create_file("sample", 0444,
-+ debug_dir, NULL,
-+ &sample_fops);
-+ if (!debug_sample)
-+ goto err_sample;
-+
-+ debug_count = debugfs_create_file("count", 0444,
-+ debug_dir, NULL,
-+ &count_fops);
-+ if (!debug_count)
-+ goto err_count;
-+
-+ debug_max = debugfs_create_file("max", 0444,
-+ debug_dir, NULL,
-+ &max_fops);
-+ if (!debug_max)
-+ goto err_max;
-+
-+ debug_sample_window = debugfs_create_file("window", 0644,
-+ debug_dir, NULL,
-+ &window_fops);
-+ if (!debug_sample_window)
-+ goto err_window;
-+
-+ debug_sample_width = debugfs_create_file("width", 0644,
-+ debug_dir, NULL,
-+ &width_fops);
-+ if (!debug_sample_width)
-+ goto err_width;
-+
-+ debug_threshold = debugfs_create_file("threshold", 0644,
-+ debug_dir, NULL,
-+ &threshold_fops);
-+ if (!debug_threshold)
-+ goto err_threshold;
-+
-+ debug_enable = debugfs_create_file("enable", 0644,
-+ debug_dir, &enabled,
-+ &enable_fops);
-+ if (!debug_enable)
-+ goto err_enable;
-+
-+ else {
-+ ret = 0;
-+ goto out;
-+ }
-+
-+err_enable:
-+ debugfs_remove(debug_threshold);
-+err_threshold:
-+ debugfs_remove(debug_sample_width);
-+err_width:
-+ debugfs_remove(debug_sample_window);
-+err_window:
-+ debugfs_remove(debug_max);
-+err_max:
-+ debugfs_remove(debug_count);
-+err_count:
-+ debugfs_remove(debug_sample);
-+err_sample:
-+ debugfs_remove(debug_dir);
-+err_debug_dir:
-+out:
-+ return ret;
-+}
-+
-+/**
-+ * free_debugfs - A function to clean up the debugfs file interface
-+ */
-+static void free_debugfs(void)
-+{
-+ /* could also use a debugfs_remove_recursive */
-+ debugfs_remove(debug_enable);
-+ debugfs_remove(debug_threshold);
-+ debugfs_remove(debug_sample_width);
-+ debugfs_remove(debug_sample_window);
-+ debugfs_remove(debug_max);
-+ debugfs_remove(debug_count);
-+ debugfs_remove(debug_sample);
-+ debugfs_remove(debug_dir);
-+}
-+
-+/**
-+ * detector_init - Standard module initialization code
-+ */
-+static int detector_init(void)
-+{
-+ int ret = -ENOMEM;
-+
-+ pr_info(BANNER "version %s\n", VERSION);
-+
-+ ret = init_stats();
-+ if (ret)
-+ goto out;
-+
-+ ret = init_debugfs();
-+ if (ret)
-+ goto err_stats;
-+
-+ if (enabled)
-+ ret = start_kthread();
-+
-+ goto out;
-+
-+err_stats:
-+ ring_buffer_free(ring_buffer);
-+out:
-+ return ret;
-+
-+}
-+
-+/**
-+ * detector_exit - Standard module cleanup code
-+ */
-+static void detector_exit(void)
-+{
-+ int err;
-+
-+ if (enabled) {
-+ enabled = 0;
-+ err = stop_kthread();
-+ if (err)
-+ pr_err(BANNER "cannot stop kthread\n");
-+ }
-+
-+ free_debugfs();
-+ ring_buffer_free(ring_buffer); /* free up the ring buffer */
-+
-+}
-+
-+module_init(detector_init);
-+module_exit(detector_exit);
-diff -Nur linux-4.4.13.orig/drivers/misc/Kconfig linux-4.4.13/drivers/misc/Kconfig
---- linux-4.4.13.orig/drivers/misc/Kconfig 2016-06-08 03:14:51.000000000 +0200
-+++ linux-4.4.13/drivers/misc/Kconfig 2016-06-22 21:55:08.010523867 +0200
-@@ -54,6 +54,7 @@
- config ATMEL_TCLIB
- bool "Atmel AT32/AT91 Timer/Counter Library"
- depends on (AVR32 || ARCH_AT91)
-+ default y if PREEMPT_RT_FULL
- help
- Select this if you want a library to allocate the Timer/Counter
- blocks found on many Atmel 
processors. This facilitates using
-@@ -69,8 +70,7 @@
- are combined to make a single 32-bit timer.
-
- When GENERIC_CLOCKEVENTS is defined, the third timer channel
-- may be used as a clock event device supporting oneshot mode
-- (delays of up to two seconds) based on the 32 KiHz clock.
-+ may be used as a clock event device supporting oneshot mode.
-
- config ATMEL_TCB_CLKSRC_BLOCK
- int
-@@ -84,6 +84,15 @@
- TC can be used for other purposes, such as PWM generation and
- interval timing.
-
-+config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
-+ bool "TC Block use 32 KiHz clock"
-+ depends on ATMEL_TCB_CLKSRC
-+ default y if !PREEMPT_RT_FULL
-+ help
-+ Select this to use 32 KiHz base clock rate as TC block clock
-+ source for clock events.
-+
-+
- config DUMMY_IRQ
- tristate "Dummy IRQ handler"
- default n
-@@ -113,6 +122,35 @@
- for information on the specific driver level and support statement
- for your IBM server.
-
-+config HWLAT_DETECTOR
-+ tristate "Testing module to detect hardware-induced latencies"
-+ depends on DEBUG_FS
-+ depends on RING_BUFFER
-+ default m
-+ ---help---
-+ A simple hardware latency detector. Use this module to detect
-+ large latencies introduced by the behavior of the underlying
-+ system firmware external to Linux. We do this through periodic
-+ use of stop_machine to grab all available CPUs and measure
-+ for unexplainable gaps in the CPU timestamp counter(s). By
-+ default, the module is not enabled until the "enable" file
-+ within the "hwlat_detector" debugfs directory is toggled.
-+
-+ This module is often used to detect SMI (System Management
-+ Interrupts) on x86 systems, though it is not x86 specific. To
-+ this end, we default to using a sample window of 1 second,
-+ during which we will sample for 0.5 seconds. If an SMI or
-+ similar event occurs during that time, it is recorded
-+ into an 8K samples global ring buffer until retrieved.
-+
-+ WARNING: This software should never be enabled (it can be built
-+ but should not be turned on after it is loaded) in a production
-+ environment where high latencies are a concern since the
-+ sampling mechanism actually introduces latencies for
-+ regular tasks while the CPU(s) are being held.
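-+
-+ A sketch of typical usage once the module is loaded (the
-+ debugfs mount point /sys/kernel/debug is an assumption):
-+
-+ modprobe hwlat_detector
-+ echo 1 > /sys/kernel/debug/hwlat_detector/enable
-+ cat /sys/kernel/debug/hwlat_detector/sample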
-+ -+ If unsure, say N -+ - config PHANTOM - tristate "Sensable PHANToM (PCI)" - depends on PCI -diff -Nur linux-4.4.13.orig/drivers/misc/Makefile linux-4.4.13/drivers/misc/Makefile ---- linux-4.4.13.orig/drivers/misc/Makefile 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/misc/Makefile 2016-06-22 21:55:08.010523867 +0200 -@@ -39,6 +39,7 @@ - obj-$(CONFIG_HMC6352) += hmc6352.o - obj-y += eeprom/ - obj-y += cb710/ -+obj-$(CONFIG_HWLAT_DETECTOR) += hwlat_detector.o - obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o - obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o - obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o -diff -Nur linux-4.4.13.orig/drivers/mmc/host/mmci.c linux-4.4.13/drivers/mmc/host/mmci.c ---- linux-4.4.13.orig/drivers/mmc/host/mmci.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/mmc/host/mmci.c 2016-06-22 21:55:08.010523867 +0200 -@@ -1155,15 +1155,12 @@ - struct sg_mapping_iter *sg_miter = &host->sg_miter; - struct variant_data *variant = host->variant; - void __iomem *base = host->base; -- unsigned long flags; - u32 status; - - status = readl(base + MMCISTATUS); - - dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); - -- local_irq_save(flags); -- - do { - unsigned int remain, len; - char *buffer; -@@ -1203,8 +1200,6 @@ - - sg_miter_stop(sg_miter); - -- local_irq_restore(flags); -- - /* - * If we have less than the fifo 'half-full' threshold to transfer, - * trigger a PIO interrupt as soon as any data is available. -diff -Nur linux-4.4.13.orig/drivers/net/ethernet/3com/3c59x.c linux-4.4.13/drivers/net/ethernet/3com/3c59x.c ---- linux-4.4.13.orig/drivers/net/ethernet/3com/3c59x.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/net/ethernet/3com/3c59x.c 2016-06-22 21:55:08.010523867 +0200 -@@ -842,9 +842,9 @@ - { - struct vortex_private *vp = netdev_priv(dev); - unsigned long flags; -- local_irq_save(flags); -+ local_irq_save_nort(flags); - (vp->full_bus_master_rx ? 
boomerang_interrupt:vortex_interrupt)(dev->irq,dev); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } - #endif - -@@ -1916,12 +1916,12 @@ - * Block interrupts because vortex_interrupt does a bare spin_lock() - */ - unsigned long flags; -- local_irq_save(flags); -+ local_irq_save_nort(flags); - if (vp->full_bus_master_tx) - boomerang_interrupt(dev->irq, dev); - else - vortex_interrupt(dev->irq, dev); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } - } - -diff -Nur linux-4.4.13.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c linux-4.4.13/drivers/net/ethernet/atheros/atl1c/atl1c_main.c ---- linux-4.4.13.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2016-06-22 21:55:08.010523867 +0200 -@@ -2221,11 +2221,7 @@ - } - - tpd_req = atl1c_cal_tpd_req(skb); -- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) { -- if (netif_msg_pktdata(adapter)) -- dev_info(&adapter->pdev->dev, "tx locked\n"); -- return NETDEV_TX_LOCKED; -- } -+ spin_lock_irqsave(&adapter->tx_lock, flags); - - if (atl1c_tpd_avail(adapter, type) < tpd_req) { - /* no enough descriptor, just stop queue */ -diff -Nur linux-4.4.13.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c linux-4.4.13/drivers/net/ethernet/atheros/atl1e/atl1e_main.c ---- linux-4.4.13.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2016-06-22 21:55:08.010523867 +0200 -@@ -1880,8 +1880,7 @@ - return NETDEV_TX_OK; - } - tpd_req = atl1e_cal_tdp_req(skb); -- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) -- return NETDEV_TX_LOCKED; -+ spin_lock_irqsave(&adapter->tx_lock, flags); - - if (atl1e_tpd_avail(adapter) < tpd_req) { - /* no enough descriptor, just stop queue */ -diff -Nur linux-4.4.13.orig/drivers/net/ethernet/chelsio/cxgb/sge.c linux-4.4.13/drivers/net/ethernet/chelsio/cxgb/sge.c ---- linux-4.4.13.orig/drivers/net/ethernet/chelsio/cxgb/sge.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/net/ethernet/chelsio/cxgb/sge.c 2016-06-22 21:55:08.014523867 +0200 -@@ -1664,8 +1664,7 @@ - struct cmdQ *q = &sge->cmdQ[qid]; - unsigned int credits, pidx, genbit, count, use_sched_skb = 0; - -- if (!spin_trylock(&q->lock)) -- return NETDEV_TX_LOCKED; -+ spin_lock(&q->lock); - - reclaim_completed_tx(sge, q); - -diff -Nur linux-4.4.13.orig/drivers/net/ethernet/neterion/s2io.c linux-4.4.13/drivers/net/ethernet/neterion/s2io.c ---- linux-4.4.13.orig/drivers/net/ethernet/neterion/s2io.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/net/ethernet/neterion/s2io.c 2016-06-22 21:55:08.014523867 +0200 -@@ -4084,12 +4084,7 @@ - [skb->priority & (MAX_TX_FIFOS - 1)]; - fifo = &mac_control->fifos[queue]; - -- if (do_spin_lock) -- spin_lock_irqsave(&fifo->tx_lock, flags); -- else { -- if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags))) -- return NETDEV_TX_LOCKED; -- } -+ spin_lock_irqsave(&fifo->tx_lock, flags); - - if (sp->config.multiq) { - if (__netif_subqueue_stopped(dev, fifo->fifo_no)) { -diff -Nur linux-4.4.13.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c linux-4.4.13/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c ---- linux-4.4.13.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2016-06-22 21:55:08.014523867 +0200 -@@ -2137,10 +2137,8 @@ - struct 
pch_gbe_tx_ring *tx_ring = adapter->tx_ring; - unsigned long flags; - -- if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) { -- /* Collision - tell upper layer to requeue */ -- return NETDEV_TX_LOCKED; -- } -+ spin_lock_irqsave(&tx_ring->tx_lock, flags); -+ - if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) { - netif_stop_queue(netdev); - spin_unlock_irqrestore(&tx_ring->tx_lock, flags); -diff -Nur linux-4.4.13.orig/drivers/net/ethernet/realtek/8139too.c linux-4.4.13/drivers/net/ethernet/realtek/8139too.c ---- linux-4.4.13.orig/drivers/net/ethernet/realtek/8139too.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/net/ethernet/realtek/8139too.c 2016-06-22 21:55:08.014523867 +0200 -@@ -2229,7 +2229,7 @@ - struct rtl8139_private *tp = netdev_priv(dev); - const int irq = tp->pci_dev->irq; - -- disable_irq(irq); -+ disable_irq_nosync(irq); - rtl8139_interrupt(irq, dev); - enable_irq(irq); - } -diff -Nur linux-4.4.13.orig/drivers/net/ethernet/tehuti/tehuti.c linux-4.4.13/drivers/net/ethernet/tehuti/tehuti.c ---- linux-4.4.13.orig/drivers/net/ethernet/tehuti/tehuti.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/net/ethernet/tehuti/tehuti.c 2016-06-22 21:55:08.014523867 +0200 -@@ -1629,13 +1629,8 @@ - unsigned long flags; - - ENTER; -- local_irq_save(flags); -- if (!spin_trylock(&priv->tx_lock)) { -- local_irq_restore(flags); -- DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n", -- BDX_DRV_NAME, ndev->name); -- return NETDEV_TX_LOCKED; -- } -+ -+ spin_lock_irqsave(&priv->tx_lock, flags); - - /* build tx descriptor */ - BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */ -diff -Nur linux-4.4.13.orig/drivers/net/rionet.c linux-4.4.13/drivers/net/rionet.c ---- linux-4.4.13.orig/drivers/net/rionet.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/net/rionet.c 2016-06-22 21:55:08.014523867 +0200 -@@ -174,11 +174,7 @@ - unsigned long flags; - int add_num = 1; - -- local_irq_save(flags); -- if (!spin_trylock(&rnet->tx_lock)) { -- local_irq_restore(flags); -- return NETDEV_TX_LOCKED; -- } -+ spin_lock_irqsave(&rnet->tx_lock, flags); - - if (is_multicast_ether_addr(eth->h_dest)) - add_num = nets[rnet->mport->id].nact; -diff -Nur linux-4.4.13.orig/drivers/net/wireless/orinoco/orinoco_usb.c linux-4.4.13/drivers/net/wireless/orinoco/orinoco_usb.c ---- linux-4.4.13.orig/drivers/net/wireless/orinoco/orinoco_usb.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/net/wireless/orinoco/orinoco_usb.c 2016-06-22 21:55:08.014523867 +0200 -@@ -697,7 +697,7 @@ - while (!ctx->done.done && msecs--) - udelay(1000); - } else { -- wait_event_interruptible(ctx->done.wait, -+ swait_event_interruptible(ctx->done.wait, - ctx->done.done); - } - break; -diff -Nur linux-4.4.13.orig/drivers/pci/access.c linux-4.4.13/drivers/pci/access.c ---- linux-4.4.13.orig/drivers/pci/access.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/pci/access.c 2016-06-22 21:55:08.014523867 +0200 -@@ -561,7 +561,7 @@ - WARN_ON(!dev->block_cfg_access); - - dev->block_cfg_access = 0; -- wake_up_all(&pci_cfg_wait); -+ wake_up_all_locked(&pci_cfg_wait); - raw_spin_unlock_irqrestore(&pci_lock, flags); - } - EXPORT_SYMBOL_GPL(pci_cfg_access_unlock); -diff -Nur linux-4.4.13.orig/drivers/scsi/fcoe/fcoe.c linux-4.4.13/drivers/scsi/fcoe/fcoe.c ---- linux-4.4.13.orig/drivers/scsi/fcoe/fcoe.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/scsi/fcoe/fcoe.c 2016-06-22 21:55:08.018523867 +0200 -@@ -1286,7 +1286,7 @@ - struct sk_buff *skb; - #ifdef CONFIG_SMP - 
struct fcoe_percpu_s *p0; -- unsigned targ_cpu = get_cpu(); -+ unsigned targ_cpu = get_cpu_light(); - #endif /* CONFIG_SMP */ - - FCOE_DBG("Destroying receive thread for CPU %d\n", cpu); -@@ -1342,7 +1342,7 @@ - kfree_skb(skb); - spin_unlock_bh(&p->fcoe_rx_list.lock); - } -- put_cpu(); -+ put_cpu_light(); - #else - /* - * This a non-SMP scenario where the singular Rx thread is -@@ -1566,11 +1566,11 @@ - static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen) - { - struct fcoe_percpu_s *fps; -- int rc; -+ int rc, cpu = get_cpu_light(); - -- fps = &get_cpu_var(fcoe_percpu); -+ fps = &per_cpu(fcoe_percpu, cpu); - rc = fcoe_get_paged_crc_eof(skb, tlen, fps); -- put_cpu_var(fcoe_percpu); -+ put_cpu_light(); - - return rc; - } -@@ -1766,11 +1766,11 @@ - return 0; - } - -- stats = per_cpu_ptr(lport->stats, get_cpu()); -+ stats = per_cpu_ptr(lport->stats, get_cpu_light()); - stats->InvalidCRCCount++; - if (stats->InvalidCRCCount < 5) - printk(KERN_WARNING "fcoe: dropping frame with CRC error\n"); -- put_cpu(); -+ put_cpu_light(); - return -EINVAL; - } - -@@ -1846,13 +1846,13 @@ - goto drop; - - if (!fcoe_filter_frames(lport, fp)) { -- put_cpu(); -+ put_cpu_light(); - fc_exch_recv(lport, fp); - return; - } - drop: - stats->ErrorFrames++; -- put_cpu(); -+ put_cpu_light(); - kfree_skb(skb); - } - -diff -Nur linux-4.4.13.orig/drivers/scsi/fcoe/fcoe_ctlr.c linux-4.4.13/drivers/scsi/fcoe/fcoe_ctlr.c ---- linux-4.4.13.orig/drivers/scsi/fcoe/fcoe_ctlr.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/scsi/fcoe/fcoe_ctlr.c 2016-06-22 21:55:08.018523867 +0200 -@@ -831,7 +831,7 @@ - - INIT_LIST_HEAD(&del_list); - -- stats = per_cpu_ptr(fip->lp->stats, get_cpu()); -+ stats = per_cpu_ptr(fip->lp->stats, get_cpu_light()); - - list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { - deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2; -@@ -867,7 +867,7 @@ - sel_time = fcf->time; - } - } -- put_cpu(); -+ put_cpu_light(); - - list_for_each_entry_safe(fcf, next, &del_list, list) { - /* Removes fcf from current list */ -diff -Nur linux-4.4.13.orig/drivers/scsi/libfc/fc_exch.c linux-4.4.13/drivers/scsi/libfc/fc_exch.c ---- linux-4.4.13.orig/drivers/scsi/libfc/fc_exch.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/scsi/libfc/fc_exch.c 2016-06-22 21:55:08.018523867 +0200 -@@ -814,10 +814,10 @@ - } - memset(ep, 0, sizeof(*ep)); - -- cpu = get_cpu(); -+ cpu = get_cpu_light(); - pool = per_cpu_ptr(mp->pool, cpu); - spin_lock_bh(&pool->lock); -- put_cpu(); -+ put_cpu_light(); - - /* peek cache of free slot */ - if (pool->left != FC_XID_UNKNOWN) { -diff -Nur linux-4.4.13.orig/drivers/scsi/libsas/sas_ata.c linux-4.4.13/drivers/scsi/libsas/sas_ata.c ---- linux-4.4.13.orig/drivers/scsi/libsas/sas_ata.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/scsi/libsas/sas_ata.c 2016-06-22 21:55:08.018523867 +0200 -@@ -190,7 +190,7 @@ - /* TODO: audit callers to ensure they are ready for qc_issue to - * unconditionally re-enable interrupts - */ -- local_irq_save(flags); -+ local_irq_save_nort(flags); - spin_unlock(ap->lock); - - /* If the device fell off, no sense in issuing commands */ -@@ -255,7 +255,7 @@ - - out: - spin_lock(ap->lock); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - return ret; - } - -diff -Nur linux-4.4.13.orig/drivers/scsi/qla2xxx/qla_inline.h linux-4.4.13/drivers/scsi/qla2xxx/qla_inline.h ---- linux-4.4.13.orig/drivers/scsi/qla2xxx/qla_inline.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/scsi/qla2xxx/qla_inline.h 
2016-06-22 21:55:08.018523867 +0200
-@@ -59,12 +59,12 @@
- {
- unsigned long flags;
- struct qla_hw_data *ha = rsp->hw;
-- local_irq_save(flags);
-+ local_irq_save_nort(flags);
- if (IS_P3P_TYPE(ha))
- qla82xx_poll(0, rsp);
- else
- ha->isp_ops->intr_handler(0, rsp);
-- local_irq_restore(flags);
-+ local_irq_restore_nort(flags);
- }
-
- static inline uint8_t *
-diff -Nur linux-4.4.13.orig/drivers/thermal/x86_pkg_temp_thermal.c linux-4.4.13/drivers/thermal/x86_pkg_temp_thermal.c
---- linux-4.4.13.orig/drivers/thermal/x86_pkg_temp_thermal.c 2016-06-08 03:14:51.000000000 +0200
-+++ linux-4.4.13/drivers/thermal/x86_pkg_temp_thermal.c 2016-06-22 21:55:08.018523867 +0200
-@@ -29,6 +29,7 @@
- #include
- #include
- #include
-+#include
- #include
- #include
-
-@@ -352,7 +353,7 @@
- }
- }
-
--static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
-+static void platform_thermal_notify_work(struct swork_event *event)
- {
- unsigned long flags;
- int cpu = smp_processor_id();
-@@ -369,7 +370,7 @@
- pkg_work_scheduled[phy_id]) {
- disable_pkg_thres_interrupt();
- spin_unlock_irqrestore(&pkg_work_lock, flags);
-- return -EINVAL;
-+ return;
- }
- pkg_work_scheduled[phy_id] = 1;
- spin_unlock_irqrestore(&pkg_work_lock, flags);
-@@ -378,9 +379,48 @@
- schedule_delayed_work_on(cpu,
- &per_cpu(pkg_temp_thermal_threshold_work, cpu),
- msecs_to_jiffies(notify_delay_ms));
-+}
-+
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static struct swork_event notify_work;
-+
-+static int thermal_notify_work_init(void)
-+{
-+ int err;
-+
-+ err = swork_get();
-+ if (err)
-+ return err;
-+
-+ INIT_SWORK(&notify_work, platform_thermal_notify_work);
- return 0;
- }
-
-+static void thermal_notify_work_cleanup(void)
-+{
-+ swork_put();
-+}
-+
-+static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
-+{
-+ swork_queue(&notify_work);
-+ return 0;
-+}
-+
-+#else /* !CONFIG_PREEMPT_RT_FULL */
-+
-+static int thermal_notify_work_init(void) { return 0; }
-+
-+static void thermal_notify_work_cleanup(void) { }
-+
-+static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
-+{
-+ platform_thermal_notify_work(NULL);
-+
-+ return 0;
-+}
-+#endif /* CONFIG_PREEMPT_RT_FULL */
-+
- static int find_siblings_cpu(int cpu)
- {
- int i;
-@@ -584,6 +624,9 @@
- if (!x86_match_cpu(pkg_temp_thermal_ids))
- return -ENODEV;
-
-+ if (thermal_notify_work_init())
-+ return -ENODEV;
-+
- spin_lock_init(&pkg_work_lock);
- platform_thermal_package_notify =
- pkg_temp_thermal_platform_thermal_notify;
-@@ -608,7 +651,7 @@
- kfree(pkg_work_scheduled);
- platform_thermal_package_notify = NULL;
- platform_thermal_package_rate_control = NULL;
--
-+ thermal_notify_work_cleanup();
- return -ENODEV;
- }
-
-@@ -633,6 +676,7 @@
- mutex_unlock(&phy_dev_list_mutex);
- platform_thermal_package_notify = NULL;
- platform_thermal_package_rate_control = NULL;
-+ thermal_notify_work_cleanup();
- for_each_online_cpu(i)
- cancel_delayed_work_sync(
- &per_cpu(pkg_temp_thermal_threshold_work, i));
-diff -Nur linux-4.4.13.orig/drivers/tty/serial/8250/8250_core.c linux-4.4.13/drivers/tty/serial/8250/8250_core.c
---- linux-4.4.13.orig/drivers/tty/serial/8250/8250_core.c 2016-06-08 03:14:51.000000000 +0200
-+++ linux-4.4.13/drivers/tty/serial/8250/8250_core.c 2016-06-22 21:55:08.018523867 +0200
-@@ -58,7 +58,16 @@
-
- static unsigned int skip_txen_test; /* force skip of txen test at init time */
-
--#define PASS_LIMIT 512
-+/*
-+ * On -rt we can have more delays, and legitimately
-+ * so - so don't drop work spuriously and spam the
-+ * syslog:
-+ */
-+#ifdef 
CONFIG_PREEMPT_RT_FULL -+# define PASS_LIMIT 1000000 -+#else -+# define PASS_LIMIT 512 -+#endif - - #include - /* -diff -Nur linux-4.4.13.orig/drivers/tty/serial/8250/8250_port.c linux-4.4.13/drivers/tty/serial/8250/8250_port.c ---- linux-4.4.13.orig/drivers/tty/serial/8250/8250_port.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/tty/serial/8250/8250_port.c 2016-06-22 21:55:08.018523867 +0200 -@@ -35,6 +35,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -2843,9 +2844,9 @@ - - serial8250_rpm_get(up); - -- if (port->sysrq) -+ if (port->sysrq || oops_in_progress) - locked = 0; -- else if (oops_in_progress) -+ else if (in_kdb_printk()) - locked = spin_trylock_irqsave(&port->lock, flags); - else - spin_lock_irqsave(&port->lock, flags); -diff -Nur linux-4.4.13.orig/drivers/tty/serial/amba-pl011.c linux-4.4.13/drivers/tty/serial/amba-pl011.c ---- linux-4.4.13.orig/drivers/tty/serial/amba-pl011.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/tty/serial/amba-pl011.c 2016-06-22 21:55:08.018523867 +0200 -@@ -2067,13 +2067,19 @@ - - clk_enable(uap->clk); - -- local_irq_save(flags); -+ /* -+ * local_irq_save(flags); -+ * -+ * This local_irq_save() is nonsense. If we come in via sysrq -+ * handling then interrupts are already disabled. Aside of -+ * that the port.sysrq check is racy on SMP regardless. -+ */ - if (uap->port.sysrq) - locked = 0; - else if (oops_in_progress) -- locked = spin_trylock(&uap->port.lock); -+ locked = spin_trylock_irqsave(&uap->port.lock, flags); - else -- spin_lock(&uap->port.lock); -+ spin_lock_irqsave(&uap->port.lock, flags); - - /* - * First save the CR then disable the interrupts -@@ -2098,8 +2104,7 @@ - writew(old_cr, uap->port.membase + UART011_CR); - - if (locked) -- spin_unlock(&uap->port.lock); -- local_irq_restore(flags); -+ spin_unlock_irqrestore(&uap->port.lock, flags); - - clk_disable(uap->clk); - } -diff -Nur linux-4.4.13.orig/drivers/tty/serial/omap-serial.c linux-4.4.13/drivers/tty/serial/omap-serial.c ---- linux-4.4.13.orig/drivers/tty/serial/omap-serial.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/tty/serial/omap-serial.c 2016-06-22 21:55:08.018523867 +0200 -@@ -1257,13 +1257,10 @@ - - pm_runtime_get_sync(up->dev); - -- local_irq_save(flags); -- if (up->port.sysrq) -- locked = 0; -- else if (oops_in_progress) -- locked = spin_trylock(&up->port.lock); -+ if (up->port.sysrq || oops_in_progress) -+ locked = spin_trylock_irqsave(&up->port.lock, flags); - else -- spin_lock(&up->port.lock); -+ spin_lock_irqsave(&up->port.lock, flags); - - /* - * First save the IER then disable the interrupts -@@ -1292,8 +1289,7 @@ - pm_runtime_mark_last_busy(up->dev); - pm_runtime_put_autosuspend(up->dev); - if (locked) -- spin_unlock(&up->port.lock); -- local_irq_restore(flags); -+ spin_unlock_irqrestore(&up->port.lock, flags); - } - - static int __init -diff -Nur linux-4.4.13.orig/drivers/tty/serial/sc16is7xx.c linux-4.4.13/drivers/tty/serial/sc16is7xx.c ---- linux-4.4.13.orig/drivers/tty/serial/sc16is7xx.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/tty/serial/sc16is7xx.c 2016-06-22 21:55:08.018523867 +0200 -@@ -1230,7 +1230,7 @@ - - /* Setup interrupt */ - ret = devm_request_irq(dev, irq, sc16is7xx_irq, -- IRQF_ONESHOT | flags, dev_name(dev), s); -+ flags, dev_name(dev), s); - if (!ret) - return 0; - -diff -Nur linux-4.4.13.orig/drivers/usb/core/hcd.c linux-4.4.13/drivers/usb/core/hcd.c ---- linux-4.4.13.orig/drivers/usb/core/hcd.c 2016-06-08 03:14:51.000000000 +0200 -+++ 
linux-4.4.13/drivers/usb/core/hcd.c 2016-06-22 21:55:08.022523867 +0200 -@@ -1735,9 +1735,9 @@ - * and no one may trigger the above deadlock situation when - * running complete() in tasklet. - */ -- local_irq_save(flags); -+ local_irq_save_nort(flags); - urb->complete(urb); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - - usb_anchor_resume_wakeups(anchor); - atomic_dec(&urb->use_count); -diff -Nur linux-4.4.13.orig/drivers/usb/gadget/function/f_fs.c linux-4.4.13/drivers/usb/gadget/function/f_fs.c ---- linux-4.4.13.orig/drivers/usb/gadget/function/f_fs.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/usb/gadget/function/f_fs.c 2016-06-22 21:55:08.022523867 +0200 -@@ -1404,7 +1404,7 @@ - pr_info("%s(): freeing\n", __func__); - ffs_data_clear(ffs); - BUG_ON(waitqueue_active(&ffs->ev.waitq) || -- waitqueue_active(&ffs->ep0req_completion.wait)); -+ swait_active(&ffs->ep0req_completion.wait)); - kfree(ffs->dev_name); - kfree(ffs); - } -diff -Nur linux-4.4.13.orig/drivers/usb/gadget/legacy/inode.c linux-4.4.13/drivers/usb/gadget/legacy/inode.c ---- linux-4.4.13.orig/drivers/usb/gadget/legacy/inode.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/usb/gadget/legacy/inode.c 2016-06-22 21:55:08.022523867 +0200 -@@ -345,7 +345,7 @@ - spin_unlock_irq (&epdata->dev->lock); - - if (likely (value == 0)) { -- value = wait_event_interruptible (done.wait, done.done); -+ value = swait_event_interruptible (done.wait, done.done); - if (value != 0) { - spin_lock_irq (&epdata->dev->lock); - if (likely (epdata->ep != NULL)) { -@@ -354,7 +354,7 @@ - usb_ep_dequeue (epdata->ep, epdata->req); - spin_unlock_irq (&epdata->dev->lock); - -- wait_event (done.wait, done.done); -+ swait_event (done.wait, done.done); - if (epdata->status == -ECONNRESET) - epdata->status = -EINTR; - } else { -diff -Nur linux-4.4.13.orig/drivers/usb/gadget/udc/atmel_usba_udc.c linux-4.4.13/drivers/usb/gadget/udc/atmel_usba_udc.c ---- linux-4.4.13.orig/drivers/usb/gadget/udc/atmel_usba_udc.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/usb/gadget/udc/atmel_usba_udc.c 2016-06-22 21:55:08.022523867 +0200 -@@ -17,7 +17,9 @@ - #include - #include - #include -+#include - #include -+#include - #include - #include - #include -@@ -1888,20 +1890,15 @@ - #ifdef CONFIG_OF - static void at91sam9rl_toggle_bias(struct usba_udc *udc, int is_on) - { -- unsigned int uckr = at91_pmc_read(AT91_CKGR_UCKR); -- -- if (is_on) -- at91_pmc_write(AT91_CKGR_UCKR, uckr | AT91_PMC_BIASEN); -- else -- at91_pmc_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN)); -+ regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN, -+ is_on ? 
AT91_PMC_BIASEN : 0); - } - - static void at91sam9g45_pulse_bias(struct usba_udc *udc) - { -- unsigned int uckr = at91_pmc_read(AT91_CKGR_UCKR); -- -- at91_pmc_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN)); -- at91_pmc_write(AT91_CKGR_UCKR, uckr | AT91_PMC_BIASEN); -+ regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN, 0); -+ regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN, -+ AT91_PMC_BIASEN); - } - - static const struct usba_udc_errata at91sam9rl_errata = { -@@ -1938,6 +1935,9 @@ - return ERR_PTR(-EINVAL); - - udc->errata = match->data; -+ udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9g45-pmc"); -+ if (udc->errata && IS_ERR(udc->pmc)) -+ return ERR_CAST(udc->pmc); - - udc->num_ep = 0; - -diff -Nur linux-4.4.13.orig/drivers/usb/gadget/udc/atmel_usba_udc.h linux-4.4.13/drivers/usb/gadget/udc/atmel_usba_udc.h ---- linux-4.4.13.orig/drivers/usb/gadget/udc/atmel_usba_udc.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/drivers/usb/gadget/udc/atmel_usba_udc.h 2016-06-22 21:55:08.022523867 +0200 -@@ -354,6 +354,8 @@ - struct dentry *debugfs_root; - struct dentry *debugfs_regs; - #endif -+ -+ struct regmap *pmc; - }; - - static inline struct usba_ep *to_usba_ep(struct usb_ep *ep) -diff -Nur linux-4.4.13.orig/fs/aio.c linux-4.4.13/fs/aio.c ---- linux-4.4.13.orig/fs/aio.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/fs/aio.c 2016-06-22 21:55:08.022523867 +0200 -@@ -40,6 +40,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -115,7 +116,7 @@ - struct page **ring_pages; - long nr_pages; - -- struct work_struct free_work; -+ struct swork_event free_work; - - /* - * signals when all in-flight requests are done -@@ -253,6 +254,7 @@ - .mount = aio_mount, - .kill_sb = kill_anon_super, - }; -+ BUG_ON(swork_get()); - aio_mnt = kern_mount(&aio_fs); - if (IS_ERR(aio_mnt)) - panic("Failed to create aio fs mount."); -@@ -568,9 +570,9 @@ - return cancel(&kiocb->common); - } - --static void free_ioctx(struct work_struct *work) -+static void free_ioctx(struct swork_event *sev) - { -- struct kioctx *ctx = container_of(work, struct kioctx, free_work); -+ struct kioctx *ctx = container_of(sev, struct kioctx, free_work); - - pr_debug("freeing %p\n", ctx); - -@@ -589,8 +591,8 @@ - if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count)) - complete(&ctx->rq_wait->comp); - -- INIT_WORK(&ctx->free_work, free_ioctx); -- schedule_work(&ctx->free_work); -+ INIT_SWORK(&ctx->free_work, free_ioctx); -+ swork_queue(&ctx->free_work); - } - - /* -@@ -598,9 +600,9 @@ - * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - - * now it's safe to cancel any that need to be. 
- */ --static void free_ioctx_users(struct percpu_ref *ref) -+static void free_ioctx_users_work(struct swork_event *sev) - { -- struct kioctx *ctx = container_of(ref, struct kioctx, users); -+ struct kioctx *ctx = container_of(sev, struct kioctx, free_work); - struct aio_kiocb *req; - - spin_lock_irq(&ctx->ctx_lock); -@@ -619,6 +621,14 @@ - percpu_ref_put(&ctx->reqs); - } - -+static void free_ioctx_users(struct percpu_ref *ref) -+{ -+ struct kioctx *ctx = container_of(ref, struct kioctx, users); -+ -+ INIT_SWORK(&ctx->free_work, free_ioctx_users_work); -+ swork_queue(&ctx->free_work); -+} -+ - static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) - { - unsigned i, new_nr; -diff -Nur linux-4.4.13.orig/fs/autofs4/autofs_i.h linux-4.4.13/fs/autofs4/autofs_i.h ---- linux-4.4.13.orig/fs/autofs4/autofs_i.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/fs/autofs4/autofs_i.h 2016-06-22 21:55:08.022523867 +0200 -@@ -34,6 +34,7 @@ - #include - #include - #include -+#include - #include - #include - -diff -Nur linux-4.4.13.orig/fs/autofs4/expire.c linux-4.4.13/fs/autofs4/expire.c ---- linux-4.4.13.orig/fs/autofs4/expire.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/fs/autofs4/expire.c 2016-06-22 21:55:08.022523867 +0200 -@@ -150,7 +150,7 @@ - parent = p->d_parent; - if (!spin_trylock(&parent->d_lock)) { - spin_unlock(&p->d_lock); -- cpu_relax(); -+ cpu_chill(); - goto relock; - } - spin_unlock(&p->d_lock); -diff -Nur linux-4.4.13.orig/fs/buffer.c linux-4.4.13/fs/buffer.c ---- linux-4.4.13.orig/fs/buffer.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/fs/buffer.c 2016-06-22 21:55:08.022523867 +0200 -@@ -305,8 +305,7 @@ - * decide that the page is now completely done. - */ - first = page_buffers(page); -- local_irq_save(flags); -- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); -+ flags = bh_uptodate_lock_irqsave(first); - clear_buffer_async_read(bh); - unlock_buffer(bh); - tmp = bh; -@@ -319,8 +318,7 @@ - } - tmp = tmp->b_this_page; - } while (tmp != bh); -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -+ bh_uptodate_unlock_irqrestore(first, flags); - - /* - * If none of the buffers had errors and they are all -@@ -332,9 +330,7 @@ - return; - - still_busy: -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -- return; -+ bh_uptodate_unlock_irqrestore(first, flags); - } - - /* -@@ -362,8 +358,7 @@ - } - - first = page_buffers(page); -- local_irq_save(flags); -- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); -+ flags = bh_uptodate_lock_irqsave(first); - - clear_buffer_async_write(bh); - unlock_buffer(bh); -@@ -375,15 +370,12 @@ - } - tmp = tmp->b_this_page; - } -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -+ bh_uptodate_unlock_irqrestore(first, flags); - end_page_writeback(page); - return; - - still_busy: -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -- return; -+ bh_uptodate_unlock_irqrestore(first, flags); - } - EXPORT_SYMBOL(end_buffer_async_write); - -@@ -3325,6 +3317,7 @@ - struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); - if (ret) { - INIT_LIST_HEAD(&ret->b_assoc_buffers); -+ buffer_head_init_locks(ret); - preempt_disable(); - __this_cpu_inc(bh_accounting.nr); - recalc_bh_state(); -diff -Nur linux-4.4.13.orig/fs/dcache.c linux-4.4.13/fs/dcache.c ---- linux-4.4.13.orig/fs/dcache.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/fs/dcache.c 2016-06-22 21:55:08.022523867 +0200 -@@ -19,6 
+19,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -578,7 +579,7 @@ - - failed: - spin_unlock(&dentry->d_lock); -- cpu_relax(); -+ cpu_chill(); - return dentry; /* try again with same dentry */ - } - -@@ -2391,7 +2392,7 @@ - if (dentry->d_lockref.count == 1) { - if (!spin_trylock(&inode->i_lock)) { - spin_unlock(&dentry->d_lock); -- cpu_relax(); -+ cpu_chill(); - goto again; - } - dentry->d_flags &= ~DCACHE_CANT_MOUNT; -diff -Nur linux-4.4.13.orig/fs/eventpoll.c linux-4.4.13/fs/eventpoll.c ---- linux-4.4.13.orig/fs/eventpoll.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/fs/eventpoll.c 2016-06-22 21:55:08.026523867 +0200 -@@ -505,12 +505,12 @@ - */ - static void ep_poll_safewake(wait_queue_head_t *wq) - { -- int this_cpu = get_cpu(); -+ int this_cpu = get_cpu_light(); - - ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS, - ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu); - -- put_cpu(); -+ put_cpu_light(); - } - - static void ep_remove_wait_queue(struct eppoll_entry *pwq) -diff -Nur linux-4.4.13.orig/fs/exec.c linux-4.4.13/fs/exec.c ---- linux-4.4.13.orig/fs/exec.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/fs/exec.c 2016-06-22 21:55:08.026523867 +0200 -@@ -865,12 +865,14 @@ - } - } - task_lock(tsk); -+ preempt_disable_rt(); - active_mm = tsk->active_mm; - tsk->mm = mm; - tsk->active_mm = mm; - activate_mm(active_mm, mm); - tsk->mm->vmacache_seqnum = 0; - vmacache_flush(tsk); -+ preempt_enable_rt(); - task_unlock(tsk); - if (old_mm) { - up_read(&old_mm->mmap_sem); -diff -Nur linux-4.4.13.orig/fs/f2fs/f2fs.h linux-4.4.13/fs/f2fs/f2fs.h ---- linux-4.4.13.orig/fs/f2fs/f2fs.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/fs/f2fs/f2fs.h 2016-06-22 21:55:08.026523867 +0200 -@@ -24,7 +24,6 @@ - - #ifdef CONFIG_F2FS_CHECK_FS - #define f2fs_bug_on(sbi, condition) BUG_ON(condition) --#define f2fs_down_write(x, y) down_write_nest_lock(x, y) - #else - #define f2fs_bug_on(sbi, condition) \ - do { \ -@@ -33,7 +32,6 @@ - set_sbi_flag(sbi, SBI_NEED_FSCK); \ - } \ - } while (0) --#define f2fs_down_write(x, y) down_write(x) - #endif - - /* -@@ -959,7 +957,7 @@ - - static inline void f2fs_lock_all(struct f2fs_sb_info *sbi) - { -- f2fs_down_write(&sbi->cp_rwsem, &sbi->cp_mutex); -+ down_write(&sbi->cp_rwsem); - } - - static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi) -diff -Nur linux-4.4.13.orig/fs/jbd2/checkpoint.c linux-4.4.13/fs/jbd2/checkpoint.c ---- linux-4.4.13.orig/fs/jbd2/checkpoint.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/fs/jbd2/checkpoint.c 2016-06-22 21:55:08.026523867 +0200 -@@ -116,6 +116,8 @@ - nblocks = jbd2_space_needed(journal); - while (jbd2_log_space_left(journal) < nblocks) { - write_unlock(&journal->j_state_lock); -+ if (current->plug) -+ io_schedule(); - mutex_lock(&journal->j_checkpoint_mutex); - - /* -diff -Nur linux-4.4.13.orig/fs/namespace.c linux-4.4.13/fs/namespace.c ---- linux-4.4.13.orig/fs/namespace.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/fs/namespace.c 2016-06-22 21:55:08.026523867 +0200 -@@ -14,6 +14,7 @@ - #include - #include - #include -+#include - #include - #include - #include /* init_rootfs */ -@@ -353,8 +354,11 @@ - * incremented count after it has set MNT_WRITE_HOLD. 
- */ - smp_mb(); -- while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) -- cpu_relax(); -+ while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) { -+ preempt_enable(); -+ cpu_chill(); -+ preempt_disable(); -+ } - /* - * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will - * be set to match its requirements. So we must not load that until -diff -Nur linux-4.4.13.orig/fs/ntfs/aops.c linux-4.4.13/fs/ntfs/aops.c ---- linux-4.4.13.orig/fs/ntfs/aops.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/fs/ntfs/aops.c 2016-06-22 21:55:08.026523867 +0200 -@@ -107,8 +107,7 @@ - "0x%llx.", (unsigned long long)bh->b_blocknr); - } - first = page_buffers(page); -- local_irq_save(flags); -- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); -+ flags = bh_uptodate_lock_irqsave(first); - clear_buffer_async_read(bh); - unlock_buffer(bh); - tmp = bh; -@@ -123,8 +122,7 @@ - } - tmp = tmp->b_this_page; - } while (tmp != bh); -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -+ bh_uptodate_unlock_irqrestore(first, flags); - /* - * If none of the buffers had errors then we can set the page uptodate, - * but we first have to perform the post read mst fixups, if the -@@ -145,13 +143,13 @@ - recs = PAGE_CACHE_SIZE / rec_size; - /* Should have been verified before we got here... */ - BUG_ON(!recs); -- local_irq_save(flags); -+ local_irq_save_nort(flags); - kaddr = kmap_atomic(page); - for (i = 0; i < recs; i++) - post_read_mst_fixup((NTFS_RECORD*)(kaddr + - i * rec_size), rec_size); - kunmap_atomic(kaddr); -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - flush_dcache_page(page); - if (likely(page_uptodate && !PageError(page))) - SetPageUptodate(page); -@@ -159,9 +157,7 @@ - unlock_page(page); - return; - still_busy: -- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); -- local_irq_restore(flags); -- return; -+ bh_uptodate_unlock_irqrestore(first, flags); - } - - /** -diff -Nur linux-4.4.13.orig/fs/timerfd.c linux-4.4.13/fs/timerfd.c ---- linux-4.4.13.orig/fs/timerfd.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/fs/timerfd.c 2016-06-22 21:55:08.026523867 +0200 -@@ -450,7 +450,10 @@ - break; - } - spin_unlock_irq(&ctx->wqh.lock); -- cpu_relax(); -+ if (isalarm(ctx)) -+ hrtimer_wait_for_timer(&ctx->t.alarm.timer); -+ else -+ hrtimer_wait_for_timer(&ctx->t.tmr); - } - - /* -diff -Nur linux-4.4.13.orig/include/acpi/platform/aclinux.h linux-4.4.13/include/acpi/platform/aclinux.h ---- linux-4.4.13.orig/include/acpi/platform/aclinux.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/acpi/platform/aclinux.h 2016-06-22 21:55:08.026523867 +0200 -@@ -127,6 +127,7 @@ - - #define acpi_cache_t struct kmem_cache - #define acpi_spinlock spinlock_t * -+#define acpi_raw_spinlock raw_spinlock_t * - #define acpi_cpu_flags unsigned long - - /* Use native linux version of acpi_os_allocate_zeroed */ -@@ -145,6 +146,20 @@ - #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id - #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock - -+#define acpi_os_create_raw_lock(__handle) \ -+({ \ -+ raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \ -+ \ -+ if (lock) { \ -+ *(__handle) = lock; \ -+ raw_spin_lock_init(*(__handle)); \ -+ } \ -+ lock ? 
AE_OK : AE_NO_MEMORY; \ -+ }) -+ -+#define acpi_os_delete_raw_lock(__handle) kfree(__handle) -+ -+ - /* - * OSL interfaces used by debugger/disassembler - */ -diff -Nur linux-4.4.13.orig/include/asm-generic/bug.h linux-4.4.13/include/asm-generic/bug.h ---- linux-4.4.13.orig/include/asm-generic/bug.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/asm-generic/bug.h 2016-06-22 21:55:08.026523867 +0200 -@@ -206,6 +206,20 @@ - # define WARN_ON_SMP(x) ({0;}) - #endif - -+#ifdef CONFIG_PREEMPT_RT_BASE -+# define BUG_ON_RT(c) BUG_ON(c) -+# define BUG_ON_NONRT(c) do { } while (0) -+# define WARN_ON_RT(condition) WARN_ON(condition) -+# define WARN_ON_NONRT(condition) do { } while (0) -+# define WARN_ON_ONCE_NONRT(condition) do { } while (0) -+#else -+# define BUG_ON_RT(c) do { } while (0) -+# define BUG_ON_NONRT(c) BUG_ON(c) -+# define WARN_ON_RT(condition) do { } while (0) -+# define WARN_ON_NONRT(condition) WARN_ON(condition) -+# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition) -+#endif -+ - #endif /* __ASSEMBLY__ */ - - #endif -diff -Nur linux-4.4.13.orig/include/asm-generic/preempt.h linux-4.4.13/include/asm-generic/preempt.h ---- linux-4.4.13.orig/include/asm-generic/preempt.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/asm-generic/preempt.h 2016-06-22 21:55:08.026523867 +0200 -@@ -7,10 +7,10 @@ - - static __always_inline int preempt_count(void) - { -- return current_thread_info()->preempt_count; -+ return READ_ONCE(current_thread_info()->preempt_count); - } - --static __always_inline int *preempt_count_ptr(void) -+static __always_inline volatile int *preempt_count_ptr(void) - { - return ¤t_thread_info()->preempt_count; - } -diff -Nur linux-4.4.13.orig/include/linux/blkdev.h linux-4.4.13/include/linux/blkdev.h ---- linux-4.4.13.orig/include/linux/blkdev.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/blkdev.h 2016-06-22 21:55:08.026523867 +0200 -@@ -89,6 +89,7 @@ - struct list_head queuelist; - union { - struct call_single_data csd; -+ struct work_struct work; - unsigned long fifo_time; - }; - -@@ -455,7 +456,7 @@ - struct throtl_data *td; - #endif - struct rcu_head rcu_head; -- wait_queue_head_t mq_freeze_wq; -+ struct swait_queue_head mq_freeze_wq; - struct percpu_ref q_usage_counter; - struct list_head all_q_node; - -diff -Nur linux-4.4.13.orig/include/linux/blk-mq.h linux-4.4.13/include/linux/blk-mq.h ---- linux-4.4.13.orig/include/linux/blk-mq.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/blk-mq.h 2016-06-22 21:55:08.026523867 +0200 -@@ -212,6 +212,7 @@ - - struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); - struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); -+void __blk_mq_complete_request_remote_work(struct work_struct *work); - - int blk_mq_request_started(struct request *rq); - void blk_mq_start_request(struct request *rq); -diff -Nur linux-4.4.13.orig/include/linux/bottom_half.h linux-4.4.13/include/linux/bottom_half.h ---- linux-4.4.13.orig/include/linux/bottom_half.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/bottom_half.h 2016-06-22 21:55:08.026523867 +0200 -@@ -3,6 +3,39 @@ - - #include - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ -+extern void __local_bh_disable(void); -+extern void _local_bh_enable(void); -+extern void __local_bh_enable(void); -+ -+static inline void local_bh_disable(void) -+{ -+ __local_bh_disable(); -+} -+ -+static inline void __local_bh_disable_ip(unsigned long ip, unsigned 
int cnt) -+{ -+ __local_bh_disable(); -+} -+ -+static inline void local_bh_enable(void) -+{ -+ __local_bh_enable(); -+} -+ -+static inline void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) -+{ -+ __local_bh_enable(); -+} -+ -+static inline void local_bh_enable_ip(unsigned long ip) -+{ -+ __local_bh_enable(); -+} -+ -+#else -+ - #ifdef CONFIG_TRACE_IRQFLAGS - extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); - #else -@@ -30,5 +63,6 @@ - { - __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); - } -+#endif - - #endif /* _LINUX_BH_H */ -diff -Nur linux-4.4.13.orig/include/linux/buffer_head.h linux-4.4.13/include/linux/buffer_head.h ---- linux-4.4.13.orig/include/linux/buffer_head.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/buffer_head.h 2016-06-22 21:55:08.026523867 +0200 -@@ -75,8 +75,50 @@ - struct address_space *b_assoc_map; /* mapping this buffer is - associated with */ - atomic_t b_count; /* users using this buffer_head */ -+#ifdef CONFIG_PREEMPT_RT_BASE -+ spinlock_t b_uptodate_lock; -+#if IS_ENABLED(CONFIG_JBD2) -+ spinlock_t b_state_lock; -+ spinlock_t b_journal_head_lock; -+#endif -+#endif - }; - -+static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh) -+{ -+ unsigned long flags; -+ -+#ifndef CONFIG_PREEMPT_RT_BASE -+ local_irq_save(flags); -+ bit_spin_lock(BH_Uptodate_Lock, &bh->b_state); -+#else -+ spin_lock_irqsave(&bh->b_uptodate_lock, flags); -+#endif -+ return flags; -+} -+ -+static inline void -+bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags) -+{ -+#ifndef CONFIG_PREEMPT_RT_BASE -+ bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state); -+ local_irq_restore(flags); -+#else -+ spin_unlock_irqrestore(&bh->b_uptodate_lock, flags); -+#endif -+} -+ -+static inline void buffer_head_init_locks(struct buffer_head *bh) -+{ -+#ifdef CONFIG_PREEMPT_RT_BASE -+ spin_lock_init(&bh->b_uptodate_lock); -+#if IS_ENABLED(CONFIG_JBD2) -+ spin_lock_init(&bh->b_state_lock); -+ spin_lock_init(&bh->b_journal_head_lock); -+#endif -+#endif -+} -+ - /* - * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() - * and buffer_foo() functions. 
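
The buffer_head helpers above complete the pattern that the fs/ntfs/aops.c hunk earlier depends on: callers see a single bh_uptodate_lock_irqsave()/bh_uptodate_unlock_irqrestore() pair, and the #ifdef selects an IRQ-off bit spinlock on mainline or a sleeping per-buffer spinlock on RT. A minimal user-space sketch of the same "one call site, two lock strategies" idea -- all names are invented for the illustration, and a pthread mutex stands in for both lock types:

    /* bh_lock_sketch.c -- build: cc [-DSIMULATE_RT] bh_lock_sketch.c -lpthread */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t uptodate_lock = PTHREAD_MUTEX_INITIALIZER;

    static unsigned long bh_lock_irqsave(void)
    {
    #ifdef SIMULATE_RT
            /* RT flavour: a sleeping lock; there is no IRQ state to save,
             * but the function keeps returning a flags word so the call
             * sites do not change between the two builds. */
            pthread_mutex_lock(&uptodate_lock);
            return 0;
    #else
            /* mainline flavour: stand-in for local_irq_save() followed by
             * bit_spin_lock(BH_Uptodate_Lock, ...). */
            pthread_mutex_lock(&uptodate_lock);
            return 1;   /* pretend this is the saved IRQ state */
    #endif
    }

    static void bh_unlock_irqrestore(unsigned long flags)
    {
            (void)flags;
            pthread_mutex_unlock(&uptodate_lock);
    }

    int main(void)
    {
            unsigned long flags = bh_lock_irqsave();
            puts("end_buffer_async_read()-style critical section");
            bh_unlock_irqrestore(flags);
            return 0;
    }
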
-diff -Nur linux-4.4.13.orig/include/linux/cgroup-defs.h linux-4.4.13/include/linux/cgroup-defs.h ---- linux-4.4.13.orig/include/linux/cgroup-defs.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/cgroup-defs.h 2016-06-22 21:55:08.026523867 +0200 -@@ -16,6 +16,7 @@ - #include - #include - #include -+#include - - #ifdef CONFIG_CGROUPS - -@@ -142,6 +143,7 @@ - /* percpu_ref killing and RCU release */ - struct rcu_head rcu_head; - struct work_struct destroy_work; -+ struct swork_event destroy_swork; - }; - - /* -diff -Nur linux-4.4.13.orig/include/linux/clk/at91_pmc.h linux-4.4.13/include/linux/clk/at91_pmc.h ---- linux-4.4.13.orig/include/linux/clk/at91_pmc.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/clk/at91_pmc.h 2016-06-22 21:55:08.026523867 +0200 -@@ -16,18 +16,6 @@ - #ifndef AT91_PMC_H - #define AT91_PMC_H - --#ifndef __ASSEMBLY__ --extern void __iomem *at91_pmc_base; -- --#define at91_pmc_read(field) \ -- readl_relaxed(at91_pmc_base + field) -- --#define at91_pmc_write(field, value) \ -- writel_relaxed(value, at91_pmc_base + field) --#else --.extern at91_pmc_base --#endif -- - #define AT91_PMC_SCER 0x00 /* System Clock Enable Register */ - #define AT91_PMC_SCDR 0x04 /* System Clock Disable Register */ - -diff -Nur linux-4.4.13.orig/include/linux/completion.h linux-4.4.13/include/linux/completion.h ---- linux-4.4.13.orig/include/linux/completion.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/completion.h 2016-06-22 21:55:08.030523867 +0200 -@@ -7,8 +7,7 @@ - * Atomic wait-for-completion handler data structures. - * See kernel/sched/completion.c for details. - */ -- --#include -+#include - - /* - * struct completion - structure used to maintain state for a "completion" -@@ -24,11 +23,11 @@ - */ - struct completion { - unsigned int done; -- wait_queue_head_t wait; -+ struct swait_queue_head wait; - }; - - #define COMPLETION_INITIALIZER(work) \ -- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } -+ { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) } - - #define COMPLETION_INITIALIZER_ONSTACK(work) \ - ({ init_completion(&work); work; }) -@@ -73,7 +72,7 @@ - static inline void init_completion(struct completion *x) - { - x->done = 0; -- init_waitqueue_head(&x->wait); -+ init_swait_queue_head(&x->wait); - } - - /** -diff -Nur linux-4.4.13.orig/include/linux/cpu.h linux-4.4.13/include/linux/cpu.h ---- linux-4.4.13.orig/include/linux/cpu.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/cpu.h 2016-06-22 21:55:08.030523867 +0200 -@@ -231,6 +231,8 @@ - extern void put_online_cpus(void); - extern void cpu_hotplug_disable(void); - extern void cpu_hotplug_enable(void); -+extern void pin_current_cpu(void); -+extern void unpin_current_cpu(void); - #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) - #define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri) - #define register_hotcpu_notifier(nb) register_cpu_notifier(nb) -@@ -248,6 +250,8 @@ - #define put_online_cpus() do { } while (0) - #define cpu_hotplug_disable() do { } while (0) - #define cpu_hotplug_enable() do { } while (0) -+static inline void pin_current_cpu(void) { } -+static inline void unpin_current_cpu(void) { } - #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) - #define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) - /* These aren't inline functions due to a GCC bug. 
*/ -diff -Nur linux-4.4.13.orig/include/linux/delay.h linux-4.4.13/include/linux/delay.h ---- linux-4.4.13.orig/include/linux/delay.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/delay.h 2016-06-22 21:55:08.030523867 +0200 -@@ -52,4 +52,10 @@ - msleep(seconds * 1000); - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+extern void cpu_chill(void); -+#else -+# define cpu_chill() cpu_relax() -+#endif -+ - #endif /* defined(_LINUX_DELAY_H) */ -diff -Nur linux-4.4.13.orig/include/linux/ftrace.h linux-4.4.13/include/linux/ftrace.h ---- linux-4.4.13.orig/include/linux/ftrace.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/ftrace.h 2016-06-22 21:55:08.030523867 +0200 -@@ -694,6 +694,18 @@ - #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5)) - #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6)) - -+static inline unsigned long get_lock_parent_ip(void) -+{ -+ unsigned long addr = CALLER_ADDR0; -+ -+ if (!in_lock_functions(addr)) -+ return addr; -+ addr = CALLER_ADDR1; -+ if (!in_lock_functions(addr)) -+ return addr; -+ return CALLER_ADDR2; -+} -+ - #ifdef CONFIG_IRQSOFF_TRACER - extern void time_hardirqs_on(unsigned long a0, unsigned long a1); - extern void time_hardirqs_off(unsigned long a0, unsigned long a1); -diff -Nur linux-4.4.13.orig/include/linux/highmem.h linux-4.4.13/include/linux/highmem.h ---- linux-4.4.13.orig/include/linux/highmem.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/highmem.h 2016-06-22 21:55:08.030523867 +0200 -@@ -7,6 +7,7 @@ - #include - #include - #include -+#include - - #include - -@@ -65,7 +66,7 @@ - - static inline void *kmap_atomic(struct page *page) - { -- preempt_disable(); -+ preempt_disable_nort(); - pagefault_disable(); - return page_address(page); - } -@@ -74,7 +75,7 @@ - static inline void __kunmap_atomic(void *addr) - { - pagefault_enable(); -- preempt_enable(); -+ preempt_enable_nort(); - } - - #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) -@@ -86,32 +87,51 @@ - - #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) - -+#ifndef CONFIG_PREEMPT_RT_FULL - DECLARE_PER_CPU(int, __kmap_atomic_idx); -+#endif - - static inline int kmap_atomic_idx_push(void) - { -+#ifndef CONFIG_PREEMPT_RT_FULL - int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1; - --#ifdef CONFIG_DEBUG_HIGHMEM -+# ifdef CONFIG_DEBUG_HIGHMEM - WARN_ON_ONCE(in_irq() && !irqs_disabled()); - BUG_ON(idx >= KM_TYPE_NR); --#endif -+# endif - return idx; -+#else -+ current->kmap_idx++; -+ BUG_ON(current->kmap_idx > KM_TYPE_NR); -+ return current->kmap_idx - 1; -+#endif - } - - static inline int kmap_atomic_idx(void) - { -+#ifndef CONFIG_PREEMPT_RT_FULL - return __this_cpu_read(__kmap_atomic_idx) - 1; -+#else -+ return current->kmap_idx - 1; -+#endif - } - - static inline void kmap_atomic_idx_pop(void) - { --#ifdef CONFIG_DEBUG_HIGHMEM -+#ifndef CONFIG_PREEMPT_RT_FULL -+# ifdef CONFIG_DEBUG_HIGHMEM - int idx = __this_cpu_dec_return(__kmap_atomic_idx); - - BUG_ON(idx < 0); --#else -+# else - __this_cpu_dec(__kmap_atomic_idx); -+# endif -+#else -+ current->kmap_idx--; -+# ifdef CONFIG_DEBUG_HIGHMEM -+ BUG_ON(current->kmap_idx < 0); -+# endif - #endif - } - -diff -Nur linux-4.4.13.orig/include/linux/hrtimer.h linux-4.4.13/include/linux/hrtimer.h ---- linux-4.4.13.orig/include/linux/hrtimer.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/hrtimer.h 2016-06-22 21:55:08.030523867 +0200 -@@ -87,6 +87,9 @@ - * @function: timer expiry callback function - * @base: pointer to the timer base (per cpu and 
per clock) - * @state: state information (See bit values above) -+ * @cb_entry: list entry to defer timers from hardirq context -+ * @irqsafe: timer can run in hardirq context -+ * @praecox: timer expiry time if expired at the time of programming - * @is_rel: Set if the timer was armed relative - * @start_pid: timer statistics field to store the pid of the task which - * started the timer -@@ -103,6 +106,11 @@ - enum hrtimer_restart (*function)(struct hrtimer *); - struct hrtimer_clock_base *base; - u8 state; -+ struct list_head cb_entry; -+ int irqsafe; -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ ktime_t praecox; -+#endif - u8 is_rel; - #ifdef CONFIG_TIMER_STATS - int start_pid; -@@ -123,11 +131,7 @@ - struct task_struct *task; - }; - --#ifdef CONFIG_64BIT - # define HRTIMER_CLOCK_BASE_ALIGN 64 --#else --# define HRTIMER_CLOCK_BASE_ALIGN 32 --#endif - - /** - * struct hrtimer_clock_base - the timer base for a specific clock -@@ -136,6 +140,7 @@ - * timer to a base on another cpu. - * @clockid: clock id for per_cpu support - * @active: red black tree root node for the active timers -+ * @expired: list head for deferred timers. - * @get_time: function to retrieve the current time of the clock - * @offset: offset of this clock to the monotonic base - */ -@@ -144,6 +149,7 @@ - int index; - clockid_t clockid; - struct timerqueue_head active; -+ struct list_head expired; - ktime_t (*get_time)(void); - ktime_t offset; - } __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN))); -@@ -187,6 +193,7 @@ - raw_spinlock_t lock; - seqcount_t seq; - struct hrtimer *running; -+ struct hrtimer *running_soft; - unsigned int cpu; - unsigned int active_bases; - unsigned int clock_was_set_seq; -@@ -203,6 +210,9 @@ - unsigned int nr_hangs; - unsigned int max_hang_time; - #endif -+#ifdef CONFIG_PREEMPT_RT_BASE -+ wait_queue_head_t wait; -+#endif - struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; - } ____cacheline_aligned; - -@@ -412,6 +422,13 @@ - hrtimer_start_expires(timer, HRTIMER_MODE_ABS); - } - -+/* Softirq preemption could deadlock timer removal */ -+#ifdef CONFIG_PREEMPT_RT_BASE -+ extern void hrtimer_wait_for_timer(const struct hrtimer *timer); -+#else -+# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0) -+#endif -+ - /* Query timers: */ - extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust); - -@@ -436,7 +453,7 @@ - * Helper function to check, whether the timer is running the callback - * function - */ --static inline int hrtimer_callback_running(struct hrtimer *timer) -+static inline int hrtimer_callback_running(const struct hrtimer *timer) - { - return timer->base->cpu_base->running == timer; - } -diff -Nur linux-4.4.13.orig/include/linux/idr.h linux-4.4.13/include/linux/idr.h ---- linux-4.4.13.orig/include/linux/idr.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/idr.h 2016-06-22 21:55:08.030523867 +0200 -@@ -95,10 +95,14 @@ - * Each idr_preload() should be matched with an invocation of this - * function. See idr_preload() for details. 
- */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+void idr_preload_end(void); -+#else - static inline void idr_preload_end(void) - { - preempt_enable(); - } -+#endif - - /** - * idr_find - return pointer for given id -diff -Nur linux-4.4.13.orig/include/linux/init_task.h linux-4.4.13/include/linux/init_task.h ---- linux-4.4.13.orig/include/linux/init_task.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/init_task.h 2016-06-22 21:55:08.030523867 +0200 -@@ -148,9 +148,15 @@ - # define INIT_PERF_EVENTS(tsk) - #endif - -+#ifdef CONFIG_PREEMPT_RT_BASE -+# define INIT_TIMER_LIST .posix_timer_list = NULL, -+#else -+# define INIT_TIMER_LIST -+#endif -+ - #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN - # define INIT_VTIME(tsk) \ -- .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \ -+ .vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount), \ - .vtime_snap = 0, \ - .vtime_snap_whence = VTIME_SYS, - #else -@@ -239,6 +245,7 @@ - .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ - .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ - .timer_slack_ns = 50000, /* 50 usec default slack */ \ -+ INIT_TIMER_LIST \ - .pids = { \ - [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ - [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ -diff -Nur linux-4.4.13.orig/include/linux/interrupt.h linux-4.4.13/include/linux/interrupt.h ---- linux-4.4.13.orig/include/linux/interrupt.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/interrupt.h 2016-06-22 21:55:08.030523867 +0200 -@@ -61,6 +61,7 @@ - * interrupt handler after suspending interrupts. For system - * wakeup devices users need to implement wakeup detection in - * their interrupt handlers. -+ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT) - */ - #define IRQF_SHARED 0x00000080 - #define IRQF_PROBE_SHARED 0x00000100 -@@ -74,6 +75,7 @@ - #define IRQF_NO_THREAD 0x00010000 - #define IRQF_EARLY_RESUME 0x00020000 - #define IRQF_COND_SUSPEND 0x00040000 -+#define IRQF_NO_SOFTIRQ_CALL 0x00080000 - - #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) - -@@ -186,7 +188,7 @@ - #ifdef CONFIG_LOCKDEP - # define local_irq_enable_in_hardirq() do { } while (0) - #else --# define local_irq_enable_in_hardirq() local_irq_enable() -+# define local_irq_enable_in_hardirq() local_irq_enable_nort() - #endif - - extern void disable_irq_nosync(unsigned int irq); -@@ -206,6 +208,7 @@ - * @irq: Interrupt to which notification applies - * @kref: Reference count, for internal use - * @work: Work item, for internal use -+ * @list: List item for deferred callbacks - * @notify: Function to be called on change. This will be - * called in process context. - * @release: Function to be called on release. 
This will be -@@ -217,6 +220,7 @@ - unsigned int irq; - struct kref kref; - struct work_struct work; -+ struct list_head list; - void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); - void (*release)(struct kref *ref); - }; -@@ -379,9 +383,13 @@ - bool state); - - #ifdef CONFIG_IRQ_FORCED_THREADING -+# ifndef CONFIG_PREEMPT_RT_BASE - extern bool force_irqthreads; -+# else -+# define force_irqthreads (true) -+# endif - #else --#define force_irqthreads (0) -+#define force_irqthreads (false) - #endif - - #ifndef __ARCH_SET_SOFTIRQ_PENDING -@@ -438,9 +446,10 @@ - void (*action)(struct softirq_action *); - }; - -+#ifndef CONFIG_PREEMPT_RT_FULL - asmlinkage void do_softirq(void); - asmlinkage void __do_softirq(void); -- -+static inline void thread_do_softirq(void) { do_softirq(); } - #ifdef __ARCH_HAS_DO_SOFTIRQ - void do_softirq_own_stack(void); - #else -@@ -449,13 +458,25 @@ - __do_softirq(); - } - #endif -+#else -+extern void thread_do_softirq(void); -+#endif - - extern void open_softirq(int nr, void (*action)(struct softirq_action *)); - extern void softirq_init(void); - extern void __raise_softirq_irqoff(unsigned int nr); -+#ifdef CONFIG_PREEMPT_RT_FULL -+extern void __raise_softirq_irqoff_ksoft(unsigned int nr); -+#else -+static inline void __raise_softirq_irqoff_ksoft(unsigned int nr) -+{ -+ __raise_softirq_irqoff(nr); -+} -+#endif - - extern void raise_softirq_irqoff(unsigned int nr); - extern void raise_softirq(unsigned int nr); -+extern void softirq_check_pending_idle(void); - - DECLARE_PER_CPU(struct task_struct *, ksoftirqd); - -@@ -477,8 +498,9 @@ - to be executed on some cpu at least once after this. - * If the tasklet is already scheduled, but its execution is still not - started, it will be executed only once. -- * If this tasklet is already running on another CPU (or schedule is called -- from tasklet itself), it is rescheduled for later. -+ * If this tasklet is already running on another CPU, it is rescheduled -+ for later. -+ * Schedule must not be called from the tasklet itself (a lockup occurs) - * Tasklet is strictly serialized wrt itself, but not - wrt another tasklets. If client needs some intertask synchronization, - he makes it with spinlocks. 
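
The force_irqthreads change above is a small but telling trick: under PREEMPT_RT_BASE the variable becomes the compile-time constant true, so every hard-irq fast path guarded by it is discarded by the compiler instead of tested at runtime. A sketch of that constant-versus-runtime-switch pattern (user-space stand-ins; on mainline the real bool is flipped by the "threadirqs" boot parameter):

    /* build: cc [-DSIMULATE_RT] irqthreads_sketch.c */
    #include <stdbool.h>
    #include <stdio.h>

    #ifdef SIMULATE_RT
    /* RT: a compile-time constant -- the !threaded branch below is dead
     * code and vanishes entirely. */
    # define force_irqthreads (true)
    #else
    /* mainline: a runtime switch, defaulting to off. */
    static bool force_irqthreads;
    #endif

    static void handle_irq(void)
    {
            if (force_irqthreads)
                    puts("wake the handler thread");
            else
                    puts("run the handler in hard-irq context");
    }

    int main(void)
    {
            handle_irq();
            return 0;
    }
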
-@@ -503,27 +525,36 @@ - enum - { - TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ -- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ -+ TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */ -+ TASKLET_STATE_PENDING /* Tasklet is pending */ - }; - --#ifdef CONFIG_SMP -+#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED) -+#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN) -+#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING) -+ -+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) - static inline int tasklet_trylock(struct tasklet_struct *t) - { - return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); - } - -+static inline int tasklet_tryunlock(struct tasklet_struct *t) -+{ -+ return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN; -+} -+ - static inline void tasklet_unlock(struct tasklet_struct *t) - { - smp_mb__before_atomic(); - clear_bit(TASKLET_STATE_RUN, &(t)->state); - } - --static inline void tasklet_unlock_wait(struct tasklet_struct *t) --{ -- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } --} -+extern void tasklet_unlock_wait(struct tasklet_struct *t); -+ - #else - #define tasklet_trylock(t) 1 -+#define tasklet_tryunlock(t) 1 - #define tasklet_unlock_wait(t) do { } while (0) - #define tasklet_unlock(t) do { } while (0) - #endif -@@ -572,12 +603,7 @@ - smp_mb(); - } - --static inline void tasklet_enable(struct tasklet_struct *t) --{ -- smp_mb__before_atomic(); -- atomic_dec(&t->count); --} -- -+extern void tasklet_enable(struct tasklet_struct *t); - extern void tasklet_kill(struct tasklet_struct *t); - extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); - extern void tasklet_init(struct tasklet_struct *t, -@@ -608,6 +634,12 @@ - tasklet_kill(&ttimer->tasklet); - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+extern void softirq_early_init(void); -+#else -+static inline void softirq_early_init(void) { } -+#endif -+ - /* - * Autoprobing for irqs: - * -diff -Nur linux-4.4.13.orig/include/linux/irqdesc.h linux-4.4.13/include/linux/irqdesc.h ---- linux-4.4.13.orig/include/linux/irqdesc.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/irqdesc.h 2016-06-22 21:55:08.030523867 +0200 -@@ -61,6 +61,7 @@ - unsigned int irqs_unhandled; - atomic_t threads_handled; - int threads_handled_last; -+ u64 random_ip; - raw_spinlock_t lock; - struct cpumask *percpu_enabled; - #ifdef CONFIG_SMP -diff -Nur linux-4.4.13.orig/include/linux/irqflags.h linux-4.4.13/include/linux/irqflags.h ---- linux-4.4.13.orig/include/linux/irqflags.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/irqflags.h 2016-06-22 21:55:08.030523867 +0200 -@@ -25,8 +25,6 @@ - # define trace_softirqs_enabled(p) ((p)->softirqs_enabled) - # define trace_hardirq_enter() do { current->hardirq_context++; } while (0) - # define trace_hardirq_exit() do { current->hardirq_context--; } while (0) --# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) --# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) - # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, - #else - # define trace_hardirqs_on() do { } while (0) -@@ -39,9 +37,15 @@ - # define trace_softirqs_enabled(p) 0 - # define trace_hardirq_enter() do { } while (0) - # define trace_hardirq_exit() do { } while (0) -+# define INIT_TRACE_IRQFLAGS -+#endif -+ -+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL) -+# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) 
-+# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) -+#else - # define lockdep_softirq_enter() do { } while (0) - # define lockdep_softirq_exit() do { } while (0) --# define INIT_TRACE_IRQFLAGS - #endif - - #if defined(CONFIG_IRQSOFF_TRACER) || \ -@@ -148,4 +152,23 @@ - - #define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags) - -+/* -+ * local_irq* variants depending on RT/!RT -+ */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define local_irq_disable_nort() do { } while (0) -+# define local_irq_enable_nort() do { } while (0) -+# define local_irq_save_nort(flags) local_save_flags(flags) -+# define local_irq_restore_nort(flags) (void)(flags) -+# define local_irq_disable_rt() local_irq_disable() -+# define local_irq_enable_rt() local_irq_enable() -+#else -+# define local_irq_disable_nort() local_irq_disable() -+# define local_irq_enable_nort() local_irq_enable() -+# define local_irq_save_nort(flags) local_irq_save(flags) -+# define local_irq_restore_nort(flags) local_irq_restore(flags) -+# define local_irq_disable_rt() do { } while (0) -+# define local_irq_enable_rt() do { } while (0) -+#endif -+ - #endif -diff -Nur linux-4.4.13.orig/include/linux/irq.h linux-4.4.13/include/linux/irq.h ---- linux-4.4.13.orig/include/linux/irq.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/irq.h 2016-06-22 21:55:08.030523867 +0200 -@@ -72,6 +72,7 @@ - * IRQ_IS_POLLED - Always polled by another interrupt. Exclude - * it from the spurious interrupt detection - * mechanism and from core side polling. -+ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT) - * IRQ_DISABLE_UNLAZY - Disable lazy irq disable - */ - enum { -@@ -99,13 +100,14 @@ - IRQ_PER_CPU_DEVID = (1 << 17), - IRQ_IS_POLLED = (1 << 18), - IRQ_DISABLE_UNLAZY = (1 << 19), -+ IRQ_NO_SOFTIRQ_CALL = (1 << 20), - }; - - #define IRQF_MODIFY_MASK \ - (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ - IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ - IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ -- IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY) -+ IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_NO_SOFTIRQ_CALL) - - #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) - -diff -Nur linux-4.4.13.orig/include/linux/irq_work.h linux-4.4.13/include/linux/irq_work.h ---- linux-4.4.13.orig/include/linux/irq_work.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/irq_work.h 2016-06-22 21:55:08.030523867 +0200 -@@ -16,6 +16,7 @@ - #define IRQ_WORK_BUSY 2UL - #define IRQ_WORK_FLAGS 3UL - #define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */ -+#define IRQ_WORK_HARD_IRQ 8UL /* Run hard IRQ context, even on RT */ - - struct irq_work { - unsigned long flags; -@@ -51,4 +52,10 @@ - static inline void irq_work_run(void) { } - #endif - -+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL) -+void irq_work_tick_soft(void); -+#else -+static inline void irq_work_tick_soft(void) { } -+#endif -+ - #endif /* _LINUX_IRQ_WORK_H */ -diff -Nur linux-4.4.13.orig/include/linux/jbd2.h linux-4.4.13/include/linux/jbd2.h ---- linux-4.4.13.orig/include/linux/jbd2.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/jbd2.h 2016-06-22 21:55:08.030523867 +0200 -@@ -352,32 +352,56 @@ - - static inline void jbd_lock_bh_state(struct buffer_head *bh) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - bit_spin_lock(BH_State, &bh->b_state); -+#else -+ spin_lock(&bh->b_state_lock); -+#endif - } - - static inline int jbd_trylock_bh_state(struct 
buffer_head *bh) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - return bit_spin_trylock(BH_State, &bh->b_state); -+#else -+ return spin_trylock(&bh->b_state_lock); -+#endif - } - - static inline int jbd_is_locked_bh_state(struct buffer_head *bh) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - return bit_spin_is_locked(BH_State, &bh->b_state); -+#else -+ return spin_is_locked(&bh->b_state_lock); -+#endif - } - - static inline void jbd_unlock_bh_state(struct buffer_head *bh) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - bit_spin_unlock(BH_State, &bh->b_state); -+#else -+ spin_unlock(&bh->b_state_lock); -+#endif - } - - static inline void jbd_lock_bh_journal_head(struct buffer_head *bh) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - bit_spin_lock(BH_JournalHead, &bh->b_state); -+#else -+ spin_lock(&bh->b_journal_head_lock); -+#endif - } - - static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - bit_spin_unlock(BH_JournalHead, &bh->b_state); -+#else -+ spin_unlock(&bh->b_journal_head_lock); -+#endif - } - - #define J_ASSERT(assert) BUG_ON(!(assert)) -diff -Nur linux-4.4.13.orig/include/linux/kdb.h linux-4.4.13/include/linux/kdb.h ---- linux-4.4.13.orig/include/linux/kdb.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/kdb.h 2016-06-22 21:55:08.030523867 +0200 -@@ -167,6 +167,7 @@ - extern __printf(1, 2) int kdb_printf(const char *, ...); - typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...); - -+#define in_kdb_printk() (kdb_trap_printk) - extern void kdb_init(int level); - - /* Access to kdb specific polling devices */ -@@ -201,6 +202,7 @@ - extern int kdb_unregister(char *); - #else /* ! CONFIG_KGDB_KDB */ - static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; } -+#define in_kdb_printk() (0) - static inline void kdb_init(int level) {} - static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, - char *help, short minlen) { return 0; } -diff -Nur linux-4.4.13.orig/include/linux/kernel.h linux-4.4.13/include/linux/kernel.h ---- linux-4.4.13.orig/include/linux/kernel.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/kernel.h 2016-06-22 21:55:08.030523867 +0200 -@@ -188,6 +188,9 @@ - */ - # define might_sleep() \ - do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) -+ -+# define might_sleep_no_state_check() \ -+ do { ___might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) - # define sched_annotate_sleep() (current->task_state_change = 0) - #else - static inline void ___might_sleep(const char *file, int line, -@@ -195,6 +198,7 @@ - static inline void __might_sleep(const char *file, int line, - int preempt_offset) { } - # define might_sleep() do { might_resched(); } while (0) -+# define might_sleep_no_state_check() do { might_resched(); } while (0) - # define sched_annotate_sleep() do { } while (0) - #endif - -@@ -255,6 +259,7 @@ - __printf(1, 2) - void panic(const char *fmt, ...) - __noreturn __cold; -+void nmi_panic(struct pt_regs *regs, const char *msg); - extern void oops_enter(void); - extern void oops_exit(void); - void print_oops_end_marker(void); -@@ -446,6 +451,14 @@ - extern bool crash_kexec_post_notifiers; - - /* -+ * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It -+ * holds a CPU number which is executing panic() currently. A value of -+ * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec(). 
-+ */ -+extern atomic_t panic_cpu; -+#define PANIC_CPU_INVALID -1 -+ -+/* - * Only to be used by arch init code. If the user over-wrote the default - * CONFIG_PANIC_TIMEOUT, honor it. - */ -@@ -473,6 +486,7 @@ - SYSTEM_HALT, - SYSTEM_POWER_OFF, - SYSTEM_RESTART, -+ SYSTEM_SUSPEND, - } system_state; - - #define TAINT_PROPRIETARY_MODULE 0 -diff -Nur linux-4.4.13.orig/include/linux/kvm_host.h linux-4.4.13/include/linux/kvm_host.h ---- linux-4.4.13.orig/include/linux/kvm_host.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/kvm_host.h 2016-06-22 21:55:08.030523867 +0200 -@@ -25,6 +25,7 @@ - #include - #include - #include -+#include - #include - - #include -@@ -243,7 +244,7 @@ - int fpu_active; - int guest_fpu_loaded, guest_xcr0_loaded; - unsigned char fpu_counter; -- wait_queue_head_t wq; -+ struct swait_queue_head wq; - struct pid *pid; - int sigset_active; - sigset_t sigset; -@@ -794,7 +795,7 @@ - } - #endif - --static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) -+static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) - { - #ifdef __KVM_HAVE_ARCH_WQP - return vcpu->arch.wqp; -diff -Nur linux-4.4.13.orig/include/linux/lglock.h linux-4.4.13/include/linux/lglock.h ---- linux-4.4.13.orig/include/linux/lglock.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/lglock.h 2016-06-22 21:55:08.030523867 +0200 -@@ -34,13 +34,30 @@ - #endif - - struct lglock { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ struct rt_mutex __percpu *lock; -+#else - arch_spinlock_t __percpu *lock; -+#endif - #ifdef CONFIG_DEBUG_LOCK_ALLOC - struct lock_class_key lock_key; - struct lockdep_map lock_dep_map; - #endif - }; - -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define DEFINE_LGLOCK(name) \ -+ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \ -+ = __RT_MUTEX_INITIALIZER( name ## _lock); \ -+ struct lglock name = { .lock = &name ## _lock } -+ -+# define DEFINE_STATIC_LGLOCK(name) \ -+ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \ -+ = __RT_MUTEX_INITIALIZER( name ## _lock); \ -+ static struct lglock name = { .lock = &name ## _lock } -+ -+#else -+ - #define DEFINE_LGLOCK(name) \ - static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ - = __ARCH_SPIN_LOCK_UNLOCKED; \ -@@ -50,6 +67,7 @@ - static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ - = __ARCH_SPIN_LOCK_UNLOCKED; \ - static struct lglock name = { .lock = &name ## _lock } -+#endif - - void lg_lock_init(struct lglock *lg, char *name); - -@@ -64,6 +82,12 @@ - void lg_global_lock(struct lglock *lg); - void lg_global_unlock(struct lglock *lg); - -+#ifndef CONFIG_PREEMPT_RT_FULL -+#define lg_global_trylock_relax(name) lg_global_lock(name) -+#else -+void lg_global_trylock_relax(struct lglock *lg); -+#endif -+ - #else - /* When !CONFIG_SMP, map lglock to spinlock */ - #define lglock spinlock -diff -Nur linux-4.4.13.orig/include/linux/list_bl.h linux-4.4.13/include/linux/list_bl.h ---- linux-4.4.13.orig/include/linux/list_bl.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/list_bl.h 2016-06-22 21:55:08.030523867 +0200 -@@ -2,6 +2,7 @@ - #define _LINUX_LIST_BL_H - - #include -+#include - #include - - /* -@@ -32,13 +33,24 @@ - - struct hlist_bl_head { - struct hlist_bl_node *first; -+#ifdef CONFIG_PREEMPT_RT_BASE -+ raw_spinlock_t lock; -+#endif - }; - - struct hlist_bl_node { - struct hlist_bl_node *next, **pprev; - }; --#define INIT_HLIST_BL_HEAD(ptr) \ -- ((ptr)->first = NULL) -+ -+#ifdef CONFIG_PREEMPT_RT_BASE -+#define INIT_HLIST_BL_HEAD(h) \ -+do { \ -+ (h)->first 
= NULL; \ -+ raw_spin_lock_init(&(h)->lock); \ -+} while (0) -+#else -+#define INIT_HLIST_BL_HEAD(h) (h)->first = NULL -+#endif - - static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) - { -@@ -118,12 +130,26 @@ - - static inline void hlist_bl_lock(struct hlist_bl_head *b) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - bit_spin_lock(0, (unsigned long *)b); -+#else -+ raw_spin_lock(&b->lock); -+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) -+ __set_bit(0, (unsigned long *)b); -+#endif -+#endif - } - - static inline void hlist_bl_unlock(struct hlist_bl_head *b) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - __bit_spin_unlock(0, (unsigned long *)b); -+#else -+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) -+ __clear_bit(0, (unsigned long *)b); -+#endif -+ raw_spin_unlock(&b->lock); -+#endif - } - - static inline bool hlist_bl_is_locked(struct hlist_bl_head *b) -diff -Nur linux-4.4.13.orig/include/linux/locallock.h linux-4.4.13/include/linux/locallock.h ---- linux-4.4.13.orig/include/linux/locallock.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-4.4.13/include/linux/locallock.h 2016-06-22 21:55:08.030523867 +0200 -@@ -0,0 +1,270 @@ -+#ifndef _LINUX_LOCALLOCK_H -+#define _LINUX_LOCALLOCK_H -+ -+#include -+#include -+ -+#ifdef CONFIG_PREEMPT_RT_BASE -+ -+#ifdef CONFIG_DEBUG_SPINLOCK -+# define LL_WARN(cond) WARN_ON(cond) -+#else -+# define LL_WARN(cond) do { } while (0) -+#endif -+ -+/* -+ * per cpu lock based substitute for local_irq_*() -+ */ -+struct local_irq_lock { -+ spinlock_t lock; -+ struct task_struct *owner; -+ int nestcnt; -+ unsigned long flags; -+}; -+ -+#define DEFINE_LOCAL_IRQ_LOCK(lvar) \ -+ DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \ -+ .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) } -+ -+#define DECLARE_LOCAL_IRQ_LOCK(lvar) \ -+ DECLARE_PER_CPU(struct local_irq_lock, lvar) -+ -+#define local_irq_lock_init(lvar) \ -+ do { \ -+ int __cpu; \ -+ for_each_possible_cpu(__cpu) \ -+ spin_lock_init(&per_cpu(lvar, __cpu).lock); \ -+ } while (0) -+ -+/* -+ * spin_lock|trylock|unlock_local flavour that does not migrate disable -+ * used for __local_lock|trylock|unlock where get_local_var/put_local_var -+ * already takes care of the migrate_disable/enable -+ * for CONFIG_PREEMPT_BASE map to the normal spin_* calls. 
-+ */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define spin_lock_local(lock) rt_spin_lock__no_mg(lock) -+# define spin_trylock_local(lock) rt_spin_trylock__no_mg(lock) -+# define spin_unlock_local(lock) rt_spin_unlock__no_mg(lock) -+#else -+# define spin_lock_local(lock) spin_lock(lock) -+# define spin_trylock_local(lock) spin_trylock(lock) -+# define spin_unlock_local(lock) spin_unlock(lock) -+#endif -+ -+static inline void __local_lock(struct local_irq_lock *lv) -+{ -+ if (lv->owner != current) { -+ spin_lock_local(&lv->lock); -+ LL_WARN(lv->owner); -+ LL_WARN(lv->nestcnt); -+ lv->owner = current; -+ } -+ lv->nestcnt++; -+} -+ -+#define local_lock(lvar) \ -+ do { __local_lock(&get_local_var(lvar)); } while (0) -+ -+static inline int __local_trylock(struct local_irq_lock *lv) -+{ -+ if (lv->owner != current && spin_trylock_local(&lv->lock)) { -+ LL_WARN(lv->owner); -+ LL_WARN(lv->nestcnt); -+ lv->owner = current; -+ lv->nestcnt = 1; -+ return 1; -+ } -+ return 0; -+} -+ -+#define local_trylock(lvar) \ -+ ({ \ -+ int __locked; \ -+ __locked = __local_trylock(&get_local_var(lvar)); \ -+ if (!__locked) \ -+ put_local_var(lvar); \ -+ __locked; \ -+ }) -+ -+static inline void __local_unlock(struct local_irq_lock *lv) -+{ -+ LL_WARN(lv->nestcnt == 0); -+ LL_WARN(lv->owner != current); -+ if (--lv->nestcnt) -+ return; -+ -+ lv->owner = NULL; -+ spin_unlock_local(&lv->lock); -+} -+ -+#define local_unlock(lvar) \ -+ do { \ -+ __local_unlock(this_cpu_ptr(&lvar)); \ -+ put_local_var(lvar); \ -+ } while (0) -+ -+static inline void __local_lock_irq(struct local_irq_lock *lv) -+{ -+ spin_lock_irqsave(&lv->lock, lv->flags); -+ LL_WARN(lv->owner); -+ LL_WARN(lv->nestcnt); -+ lv->owner = current; -+ lv->nestcnt = 1; -+} -+ -+#define local_lock_irq(lvar) \ -+ do { __local_lock_irq(&get_local_var(lvar)); } while (0) -+ -+#define local_lock_irq_on(lvar, cpu) \ -+ do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0) -+ -+static inline void __local_unlock_irq(struct local_irq_lock *lv) -+{ -+ LL_WARN(!lv->nestcnt); -+ LL_WARN(lv->owner != current); -+ lv->owner = NULL; -+ lv->nestcnt = 0; -+ spin_unlock_irq(&lv->lock); -+} -+ -+#define local_unlock_irq(lvar) \ -+ do { \ -+ __local_unlock_irq(this_cpu_ptr(&lvar)); \ -+ put_local_var(lvar); \ -+ } while (0) -+ -+#define local_unlock_irq_on(lvar, cpu) \ -+ do { \ -+ __local_unlock_irq(&per_cpu(lvar, cpu)); \ -+ } while (0) -+ -+static inline int __local_lock_irqsave(struct local_irq_lock *lv) -+{ -+ if (lv->owner != current) { -+ __local_lock_irq(lv); -+ return 0; -+ } else { -+ lv->nestcnt++; -+ return 1; -+ } -+} -+ -+#define local_lock_irqsave(lvar, _flags) \ -+ do { \ -+ if (__local_lock_irqsave(&get_local_var(lvar))) \ -+ put_local_var(lvar); \ -+ _flags = __this_cpu_read(lvar.flags); \ -+ } while (0) -+ -+#define local_lock_irqsave_on(lvar, _flags, cpu) \ -+ do { \ -+ __local_lock_irqsave(&per_cpu(lvar, cpu)); \ -+ _flags = per_cpu(lvar, cpu).flags; \ -+ } while (0) -+ -+static inline int __local_unlock_irqrestore(struct local_irq_lock *lv, -+ unsigned long flags) -+{ -+ LL_WARN(!lv->nestcnt); -+ LL_WARN(lv->owner != current); -+ if (--lv->nestcnt) -+ return 0; -+ -+ lv->owner = NULL; -+ spin_unlock_irqrestore(&lv->lock, lv->flags); -+ return 1; -+} -+ -+#define local_unlock_irqrestore(lvar, flags) \ -+ do { \ -+ if (__local_unlock_irqrestore(this_cpu_ptr(&lvar), flags)) \ -+ put_local_var(lvar); \ -+ } while (0) -+ -+#define local_unlock_irqrestore_on(lvar, flags, cpu) \ -+ do { \ -+ __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \ -+ } while (0) -+ 
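
locallock.h above is the workhorse of the whole series: code that used to rely on local_irq_save() to protect per-CPU data takes a per-CPU spinlock on RT instead, and the owner/nestcnt pair lets the same task re-enter without deadlocking. A user-space sketch of just that recursion logic -- one lock instead of one per CPU, a pthread mutex instead of spinlock_t, illustration only:

    #include <pthread.h>
    #include <stdio.h>

    struct local_irq_lock {
            pthread_mutex_t lock;
            pthread_t       owner;
            int             nestcnt;
    };

    static struct local_irq_lock lv = { .lock = PTHREAD_MUTEX_INITIALIZER };

    /* Mirrors __local_lock(): the first acquisition takes the mutex and
     * records the owner; nested calls by the owning thread only bump the
     * nest count. */
    static void local_lock_sketch(struct local_irq_lock *l)
    {
            if (!l->nestcnt || !pthread_equal(l->owner, pthread_self())) {
                    pthread_mutex_lock(&l->lock);
                    l->owner = pthread_self();
            }
            l->nestcnt++;
    }

    static void local_unlock_sketch(struct local_irq_lock *l)
    {
            if (--l->nestcnt)
                    return;
            /* owner goes stale here on purpose; nestcnt == 0 forces the
             * next locker through the mutex anyway. */
            pthread_mutex_unlock(&l->lock);
    }

    int main(void)
    {
            local_lock_sketch(&lv);
            local_lock_sketch(&lv);         /* nested: no deadlock */
            printf("nestcnt = %d\n", lv.nestcnt);
            local_unlock_sketch(&lv);
            local_unlock_sketch(&lv);       /* last one drops the mutex */
            return 0;
    }
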
-+#define local_spin_trylock_irq(lvar, lock) \ -+ ({ \ -+ int __locked; \ -+ local_lock_irq(lvar); \ -+ __locked = spin_trylock(lock); \ -+ if (!__locked) \ -+ local_unlock_irq(lvar); \ -+ __locked; \ -+ }) -+ -+#define local_spin_lock_irq(lvar, lock) \ -+ do { \ -+ local_lock_irq(lvar); \ -+ spin_lock(lock); \ -+ } while (0) -+ -+#define local_spin_unlock_irq(lvar, lock) \ -+ do { \ -+ spin_unlock(lock); \ -+ local_unlock_irq(lvar); \ -+ } while (0) -+ -+#define local_spin_lock_irqsave(lvar, lock, flags) \ -+ do { \ -+ local_lock_irqsave(lvar, flags); \ -+ spin_lock(lock); \ -+ } while (0) -+ -+#define local_spin_unlock_irqrestore(lvar, lock, flags) \ -+ do { \ -+ spin_unlock(lock); \ -+ local_unlock_irqrestore(lvar, flags); \ -+ } while (0) -+ -+#define get_locked_var(lvar, var) \ -+ (*({ \ -+ local_lock(lvar); \ -+ this_cpu_ptr(&var); \ -+ })) -+ -+#define put_locked_var(lvar, var) local_unlock(lvar); -+ -+#define local_lock_cpu(lvar) \ -+ ({ \ -+ local_lock(lvar); \ -+ smp_processor_id(); \ -+ }) -+ -+#define local_unlock_cpu(lvar) local_unlock(lvar) -+ -+#else /* PREEMPT_RT_BASE */ -+ -+#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar -+#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar -+ -+static inline void local_irq_lock_init(int lvar) { } -+ -+#define local_lock(lvar) preempt_disable() -+#define local_unlock(lvar) preempt_enable() -+#define local_lock_irq(lvar) local_irq_disable() -+#define local_unlock_irq(lvar) local_irq_enable() -+#define local_lock_irqsave(lvar, flags) local_irq_save(flags) -+#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags) -+ -+#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock) -+#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock) -+#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock) -+#define local_spin_lock_irqsave(lvar, lock, flags) \ -+ spin_lock_irqsave(lock, flags) -+#define local_spin_unlock_irqrestore(lvar, lock, flags) \ -+ spin_unlock_irqrestore(lock, flags) -+ -+#define get_locked_var(lvar, var) get_cpu_var(var) -+#define put_locked_var(lvar, var) put_cpu_var(var) -+ -+#define local_lock_cpu(lvar) get_cpu() -+#define local_unlock_cpu(lvar) put_cpu() -+ -+#endif -+ -+#endif -diff -Nur linux-4.4.13.orig/include/linux/mm_types.h linux-4.4.13/include/linux/mm_types.h ---- linux-4.4.13.orig/include/linux/mm_types.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/mm_types.h 2016-06-22 21:55:08.030523867 +0200 -@@ -11,6 +11,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -504,6 +505,9 @@ - bool tlb_flush_pending; - #endif - struct uprobes_state uprobes_state; -+#ifdef CONFIG_PREEMPT_RT_BASE -+ struct rcu_head delayed_drop; -+#endif - #ifdef CONFIG_X86_INTEL_MPX - /* address of the bounds directory */ - void __user *bd_addr; -diff -Nur linux-4.4.13.orig/include/linux/mutex.h linux-4.4.13/include/linux/mutex.h ---- linux-4.4.13.orig/include/linux/mutex.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/mutex.h 2016-06-22 21:55:08.034523867 +0200 -@@ -19,6 +19,17 @@ - #include - #include - -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ -+ , .dep_map = { .name = #lockname } -+#else -+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) -+#endif -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+# include -+#else -+ - /* - * Simple, straightforward mutexes with strict semantics: - * -@@ -99,13 +110,6 @@ - static inline void mutex_destroy(struct mutex *lock) {} - #endif - 
--#ifdef CONFIG_DEBUG_LOCK_ALLOC --# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ -- , .dep_map = { .name = #lockname } --#else --# define __DEP_MAP_MUTEX_INITIALIZER(lockname) --#endif -- - #define __MUTEX_INITIALIZER(lockname) \ - { .count = ATOMIC_INIT(1) \ - , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ -@@ -173,6 +177,8 @@ - extern int mutex_trylock(struct mutex *lock); - extern void mutex_unlock(struct mutex *lock); - -+#endif /* !PREEMPT_RT_FULL */ -+ - extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); - - #endif /* __LINUX_MUTEX_H */ -diff -Nur linux-4.4.13.orig/include/linux/mutex_rt.h linux-4.4.13/include/linux/mutex_rt.h ---- linux-4.4.13.orig/include/linux/mutex_rt.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-4.4.13/include/linux/mutex_rt.h 2016-06-22 21:55:08.034523867 +0200 -@@ -0,0 +1,84 @@ -+#ifndef __LINUX_MUTEX_RT_H -+#define __LINUX_MUTEX_RT_H -+ -+#ifndef __LINUX_MUTEX_H -+#error "Please include mutex.h" -+#endif -+ -+#include -+ -+/* FIXME: Just for __lockfunc */ -+#include -+ -+struct mutex { -+ struct rt_mutex lock; -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ struct lockdep_map dep_map; -+#endif -+}; -+ -+#define __MUTEX_INITIALIZER(mutexname) \ -+ { \ -+ .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \ -+ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \ -+ } -+ -+#define DEFINE_MUTEX(mutexname) \ -+ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) -+ -+extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key); -+extern void __lockfunc _mutex_lock(struct mutex *lock); -+extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock); -+extern int __lockfunc _mutex_lock_killable(struct mutex *lock); -+extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass); -+extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); -+extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass); -+extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass); -+extern int __lockfunc _mutex_trylock(struct mutex *lock); -+extern void __lockfunc _mutex_unlock(struct mutex *lock); -+ -+#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock) -+#define mutex_lock(l) _mutex_lock(l) -+#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l) -+#define mutex_lock_killable(l) _mutex_lock_killable(l) -+#define mutex_trylock(l) _mutex_trylock(l) -+#define mutex_unlock(l) _mutex_unlock(l) -+#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock) -+ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s) -+# define mutex_lock_interruptible_nested(l, s) \ -+ _mutex_lock_interruptible_nested(l, s) -+# define mutex_lock_killable_nested(l, s) \ -+ _mutex_lock_killable_nested(l, s) -+ -+# define mutex_lock_nest_lock(lock, nest_lock) \ -+do { \ -+ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ -+ _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \ -+} while (0) -+ -+#else -+# define mutex_lock_nested(l, s) _mutex_lock(l) -+# define mutex_lock_interruptible_nested(l, s) \ -+ _mutex_lock_interruptible(l) -+# define mutex_lock_killable_nested(l, s) \ -+ _mutex_lock_killable(l) -+# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) -+#endif -+ -+# define mutex_init(mutex) \ -+do { \ -+ static struct lock_class_key __key; \ -+ \ -+ rt_mutex_init(&(mutex)->lock); \ -+ __mutex_do_init((mutex), #mutex, &__key); \ -+} while (0) -+ -+# define __mutex_init(mutex, name, 
key) \ -+do { \ -+ rt_mutex_init(&(mutex)->lock); \ -+ __mutex_do_init((mutex), name, key); \ -+} while (0) -+ -+#endif -diff -Nur linux-4.4.13.orig/include/linux/netdevice.h linux-4.4.13/include/linux/netdevice.h ---- linux-4.4.13.orig/include/linux/netdevice.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/netdevice.h 2016-06-22 21:55:08.034523867 +0200 -@@ -2249,11 +2249,20 @@ - void synchronize_net(void); - int init_dummy_netdev(struct net_device *dev); - -+#ifdef CONFIG_PREEMPT_RT_FULL -+static inline int dev_recursion_level(void) -+{ -+ return current->xmit_recursion; -+} -+ -+#else -+ - DECLARE_PER_CPU(int, xmit_recursion); - static inline int dev_recursion_level(void) - { - return this_cpu_read(xmit_recursion); - } -+#endif - - struct net_device *dev_get_by_index(struct net *net, int ifindex); - struct net_device *__dev_get_by_index(struct net *net, int ifindex); -@@ -2564,6 +2573,7 @@ - unsigned int dropped; - struct sk_buff_head input_pkt_queue; - struct napi_struct backlog; -+ struct sk_buff_head tofree_queue; - - }; - -diff -Nur linux-4.4.13.orig/include/linux/netfilter/x_tables.h linux-4.4.13/include/linux/netfilter/x_tables.h ---- linux-4.4.13.orig/include/linux/netfilter/x_tables.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/netfilter/x_tables.h 2016-06-22 21:55:08.034523867 +0200 -@@ -4,6 +4,7 @@ - - #include - #include -+#include - #include - - /** -@@ -282,6 +283,8 @@ - */ - DECLARE_PER_CPU(seqcount_t, xt_recseq); - -+DECLARE_LOCAL_IRQ_LOCK(xt_write_lock); -+ - /* xt_tee_enabled - true if x_tables needs to handle reentrancy - * - * Enabled if current ip(6)tables ruleset has at least one -j TEE rule. -@@ -302,6 +305,9 @@ - { - unsigned int addend; - -+ /* RT protection */ -+ local_lock(xt_write_lock); -+ - /* - * Low order bit of sequence is set if we already - * called xt_write_recseq_begin(). -@@ -332,6 +338,7 @@ - /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */ - smp_wmb(); - __this_cpu_add(xt_recseq.sequence, addend); -+ local_unlock(xt_write_lock); - } - - /* -diff -Nur linux-4.4.13.orig/include/linux/notifier.h linux-4.4.13/include/linux/notifier.h ---- linux-4.4.13.orig/include/linux/notifier.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/notifier.h 2016-06-22 21:55:08.034523867 +0200 -@@ -6,7 +6,7 @@ - * - * Alan Cox - */ -- -+ - #ifndef _LINUX_NOTIFIER_H - #define _LINUX_NOTIFIER_H - #include -@@ -42,9 +42,7 @@ - * in srcu_notifier_call_chain(): no cache bounces and no memory barriers. - * As compensation, srcu_notifier_chain_unregister() is rather expensive. - * SRCU notifier chains should be used when the chain will be called very -- * often but notifier_blocks will seldom be removed. Also, SRCU notifier -- * chains are slightly more difficult to use because they require special -- * runtime initialization. -+ * often but notifier_blocks will seldom be removed. 
- */ - - typedef int (*notifier_fn_t)(struct notifier_block *nb, -@@ -88,7 +86,7 @@ - (name)->head = NULL; \ - } while (0) - --/* srcu_notifier_heads must be initialized and cleaned up dynamically */ -+/* srcu_notifier_heads must be cleaned up dynamically */ - extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); - #define srcu_cleanup_notifier_head(name) \ - cleanup_srcu_struct(&(name)->srcu); -@@ -101,7 +99,13 @@ - .head = NULL } - #define RAW_NOTIFIER_INIT(name) { \ - .head = NULL } --/* srcu_notifier_heads cannot be initialized statically */ -+ -+#define SRCU_NOTIFIER_INIT(name, pcpu) \ -+ { \ -+ .mutex = __MUTEX_INITIALIZER(name.mutex), \ -+ .head = NULL, \ -+ .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \ -+ } - - #define ATOMIC_NOTIFIER_HEAD(name) \ - struct atomic_notifier_head name = \ -@@ -113,6 +117,18 @@ - struct raw_notifier_head name = \ - RAW_NOTIFIER_INIT(name) - -+#define _SRCU_NOTIFIER_HEAD(name, mod) \ -+ static DEFINE_PER_CPU(struct srcu_struct_array, \ -+ name##_head_srcu_array); \ -+ mod struct srcu_notifier_head name = \ -+ SRCU_NOTIFIER_INIT(name, name##_head_srcu_array) -+ -+#define SRCU_NOTIFIER_HEAD(name) \ -+ _SRCU_NOTIFIER_HEAD(name, ) -+ -+#define SRCU_NOTIFIER_HEAD_STATIC(name) \ -+ _SRCU_NOTIFIER_HEAD(name, static) -+ - #ifdef __KERNEL__ - - extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh, -@@ -182,12 +198,12 @@ - - /* - * Declared notifiers so far. I can imagine quite a few more chains -- * over time (eg laptop power reset chains, reboot chain (to clean -+ * over time (eg laptop power reset chains, reboot chain (to clean - * device units up), device [un]mount chain, module load/unload chain, -- * low memory chain, screenblank chain (for plug in modular screenblankers) -+ * low memory chain, screenblank chain (for plug in modular screenblankers) - * VC switch chains (for loadable kernel svgalib VC switch helpers) etc... - */ -- -+ - /* CPU notfiers are defined in include/linux/cpu.h. 
*/ - - /* netdevice notifiers are defined in include/linux/netdevice.h */ -diff -Nur linux-4.4.13.orig/include/linux/percpu.h linux-4.4.13/include/linux/percpu.h ---- linux-4.4.13.orig/include/linux/percpu.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/percpu.h 2016-06-22 21:55:08.034523867 +0200 -@@ -24,6 +24,35 @@ - PERCPU_MODULE_RESERVE) - #endif - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ -+#define get_local_var(var) (*({ \ -+ migrate_disable(); \ -+ this_cpu_ptr(&var); })) -+ -+#define put_local_var(var) do { \ -+ (void)&(var); \ -+ migrate_enable(); \ -+} while (0) -+ -+# define get_local_ptr(var) ({ \ -+ migrate_disable(); \ -+ this_cpu_ptr(var); }) -+ -+# define put_local_ptr(var) do { \ -+ (void)(var); \ -+ migrate_enable(); \ -+} while (0) -+ -+#else -+ -+#define get_local_var(var) get_cpu_var(var) -+#define put_local_var(var) put_cpu_var(var) -+#define get_local_ptr(var) get_cpu_ptr(var) -+#define put_local_ptr(var) put_cpu_ptr(var) -+ -+#endif -+ - /* minimum unit size, also is the maximum supported allocation size */ - #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) - -diff -Nur linux-4.4.13.orig/include/linux/pid.h linux-4.4.13/include/linux/pid.h ---- linux-4.4.13.orig/include/linux/pid.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/pid.h 2016-06-22 21:55:08.034523867 +0200 -@@ -2,6 +2,7 @@ - #define _LINUX_PID_H - - #include -+#include - - enum pid_type - { -diff -Nur linux-4.4.13.orig/include/linux/preempt.h linux-4.4.13/include/linux/preempt.h ---- linux-4.4.13.orig/include/linux/preempt.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/preempt.h 2016-06-22 21:55:08.034523867 +0200 -@@ -50,7 +50,11 @@ - #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) - #define NMI_OFFSET (1UL << NMI_SHIFT) - --#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) -+#ifndef CONFIG_PREEMPT_RT_FULL -+# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) -+#else -+# define SOFTIRQ_DISABLE_OFFSET (0) -+#endif - - /* We use the MSB mostly because its available */ - #define PREEMPT_NEED_RESCHED 0x80000000 -@@ -59,9 +63,15 @@ - #include - - #define hardirq_count() (preempt_count() & HARDIRQ_MASK) --#define softirq_count() (preempt_count() & SOFTIRQ_MASK) - #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ - | NMI_MASK)) -+#ifndef CONFIG_PREEMPT_RT_FULL -+# define softirq_count() (preempt_count() & SOFTIRQ_MASK) -+# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) -+#else -+# define softirq_count() (0UL) -+extern int in_serving_softirq(void); -+#endif - - /* - * Are we doing bottom half or hardware interrupt processing? -@@ -72,7 +82,6 @@ - #define in_irq() (hardirq_count()) - #define in_softirq() (softirq_count()) - #define in_interrupt() (irq_count()) --#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) - - /* - * Are we in NMI context? 
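
get_local_var()/put_local_var() above capture the patch's core substitution for per-CPU data: where mainline pins a task with preempt_disable() (the get_cpu_var() path), RT only forbids migration, so the section stays on its CPU yet remains preemptible. A compile-time sketch of the two mappings, with prints standing in for the real primitives (invented user-space stand-ins, a single global in place of per-CPU storage):

    /* build: cc [-DSIMULATE_RT] local_var_sketch.c */
    #include <stdio.h>

    #ifdef SIMULATE_RT
    # define migrate_disable()  puts("migrate_disable: stay on this CPU, stay preemptible")
    # define migrate_enable()   puts("migrate_enable")
    #else
    # define migrate_disable()  puts("preempt_disable")   /* get_cpu_var() path */
    # define migrate_enable()   puts("preempt_enable")
    #endif

    static int counter;     /* toy stand-in for a per-CPU variable */

    /* get_local_var()-style accessor: disable migration, hand out the slot. */
    static int *get_local_counter(void)
    {
            migrate_disable();
            return &counter;
    }

    static void put_local_counter(void)
    {
            migrate_enable();
    }

    int main(void)
    {
            int *c = get_local_counter();
            (*c)++;
            put_local_counter();
            printf("counter = %d\n", counter);
            return 0;
    }
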
-@@ -91,7 +100,11 @@ - /* - * The preempt_count offset after spin_lock() - */ -+#if !defined(CONFIG_PREEMPT_RT_FULL) - #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET -+#else -+#define PREEMPT_LOCK_OFFSET 0 -+#endif - - /* - * The preempt_count offset needed for things like: -@@ -140,6 +153,20 @@ - #define preempt_count_inc() preempt_count_add(1) - #define preempt_count_dec() preempt_count_sub(1) - -+#ifdef CONFIG_PREEMPT_LAZY -+#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0) -+#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0) -+#define inc_preempt_lazy_count() add_preempt_lazy_count(1) -+#define dec_preempt_lazy_count() sub_preempt_lazy_count(1) -+#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count) -+#else -+#define add_preempt_lazy_count(val) do { } while (0) -+#define sub_preempt_lazy_count(val) do { } while (0) -+#define inc_preempt_lazy_count() do { } while (0) -+#define dec_preempt_lazy_count() do { } while (0) -+#define preempt_lazy_count() (0) -+#endif -+ - #ifdef CONFIG_PREEMPT_COUNT - - #define preempt_disable() \ -@@ -148,13 +175,25 @@ - barrier(); \ - } while (0) - -+#define preempt_lazy_disable() \ -+do { \ -+ inc_preempt_lazy_count(); \ -+ barrier(); \ -+} while (0) -+ - #define sched_preempt_enable_no_resched() \ - do { \ - barrier(); \ - preempt_count_dec(); \ - } while (0) - --#define preempt_enable_no_resched() sched_preempt_enable_no_resched() -+#ifdef CONFIG_PREEMPT_RT_BASE -+# define preempt_enable_no_resched() sched_preempt_enable_no_resched() -+# define preempt_check_resched_rt() preempt_check_resched() -+#else -+# define preempt_enable_no_resched() preempt_enable() -+# define preempt_check_resched_rt() barrier(); -+#endif - - #define preemptible() (preempt_count() == 0 && !irqs_disabled()) - -@@ -179,6 +218,13 @@ - __preempt_schedule(); \ - } while (0) - -+#define preempt_lazy_enable() \ -+do { \ -+ dec_preempt_lazy_count(); \ -+ barrier(); \ -+ preempt_check_resched(); \ -+} while (0) -+ - #else /* !CONFIG_PREEMPT */ - #define preempt_enable() \ - do { \ -@@ -224,6 +270,7 @@ - #define preempt_disable_notrace() barrier() - #define preempt_enable_no_resched_notrace() barrier() - #define preempt_enable_notrace() barrier() -+#define preempt_check_resched_rt() barrier() - #define preemptible() 0 - - #endif /* CONFIG_PREEMPT_COUNT */ -@@ -244,10 +291,31 @@ - } while (0) - #define preempt_fold_need_resched() \ - do { \ -- if (tif_need_resched()) \ -+ if (tif_need_resched_now()) \ - set_preempt_need_resched(); \ - } while (0) - -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define preempt_disable_rt() preempt_disable() -+# define preempt_enable_rt() preempt_enable() -+# define preempt_disable_nort() barrier() -+# define preempt_enable_nort() barrier() -+# ifdef CONFIG_SMP -+ extern void migrate_disable(void); -+ extern void migrate_enable(void); -+# else /* CONFIG_SMP */ -+# define migrate_disable() barrier() -+# define migrate_enable() barrier() -+# endif /* CONFIG_SMP */ -+#else -+# define preempt_disable_rt() barrier() -+# define preempt_enable_rt() barrier() -+# define preempt_disable_nort() preempt_disable() -+# define preempt_enable_nort() preempt_enable() -+# define migrate_disable() preempt_disable() -+# define migrate_enable() preempt_enable() -+#endif -+ - #ifdef CONFIG_PREEMPT_NOTIFIERS - - struct preempt_notifier; -diff -Nur linux-4.4.13.orig/include/linux/printk.h linux-4.4.13/include/linux/printk.h ---- linux-4.4.13.orig/include/linux/printk.h 2016-06-08 03:14:51.000000000 
+0200 -+++ linux-4.4.13/include/linux/printk.h 2016-06-22 21:55:08.034523867 +0200 -@@ -117,9 +117,11 @@ - #ifdef CONFIG_EARLY_PRINTK - extern asmlinkage __printf(1, 2) - void early_printk(const char *fmt, ...); -+extern void printk_kill(void); - #else - static inline __printf(1, 2) __cold - void early_printk(const char *s, ...) { } -+static inline void printk_kill(void) { } - #endif - - typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args); -diff -Nur linux-4.4.13.orig/include/linux/radix-tree.h linux-4.4.13/include/linux/radix-tree.h ---- linux-4.4.13.orig/include/linux/radix-tree.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/radix-tree.h 2016-06-22 21:55:08.034523867 +0200 -@@ -277,8 +277,13 @@ - unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root, - void ***results, unsigned long *indices, - unsigned long first_index, unsigned int max_items); -+#ifndef CONFIG_PREEMPT_RT_FULL - int radix_tree_preload(gfp_t gfp_mask); - int radix_tree_maybe_preload(gfp_t gfp_mask); -+#else -+static inline int radix_tree_preload(gfp_t gm) { return 0; } -+static inline int radix_tree_maybe_preload(gfp_t gfp_mask) { return 0; } -+#endif - void radix_tree_init(void); - void *radix_tree_tag_set(struct radix_tree_root *root, - unsigned long index, unsigned int tag); -@@ -303,7 +308,7 @@ - - static inline void radix_tree_preload_end(void) - { -- preempt_enable(); -+ preempt_enable_nort(); - } - - /** -diff -Nur linux-4.4.13.orig/include/linux/random.h linux-4.4.13/include/linux/random.h ---- linux-4.4.13.orig/include/linux/random.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/random.h 2016-06-22 21:55:08.034523867 +0200 -@@ -20,7 +20,7 @@ - extern void add_device_randomness(const void *, unsigned int); - extern void add_input_randomness(unsigned int type, unsigned int code, - unsigned int value); --extern void add_interrupt_randomness(int irq, int irq_flags); -+extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip); - - extern void get_random_bytes(void *buf, int nbytes); - extern int add_random_ready_callback(struct random_ready_callback *rdy); -diff -Nur linux-4.4.13.orig/include/linux/rbtree.h linux-4.4.13/include/linux/rbtree.h ---- linux-4.4.13.orig/include/linux/rbtree.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/rbtree.h 2016-06-22 21:55:08.034523867 +0200 -@@ -31,7 +31,6 @@ - - #include - #include --#include - - struct rb_node { - unsigned long __rb_parent_color; -@@ -86,14 +85,8 @@ - *rb_link = node; - } - --static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent, -- struct rb_node **rb_link) --{ -- node->__rb_parent_color = (unsigned long)parent; -- node->rb_left = node->rb_right = NULL; -- -- rcu_assign_pointer(*rb_link, node); --} -+void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent, -+ struct rb_node **rb_link); - - #define rb_entry_safe(ptr, type, member) \ - ({ typeof(ptr) ____ptr = (ptr); \ -diff -Nur linux-4.4.13.orig/include/linux/rcupdate.h linux-4.4.13/include/linux/rcupdate.h ---- linux-4.4.13.orig/include/linux/rcupdate.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/rcupdate.h 2016-06-22 21:55:08.034523867 +0200 -@@ -169,6 +169,9 @@ - - #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ - -+#ifdef CONFIG_PREEMPT_RT_FULL -+#define call_rcu_bh call_rcu -+#else - /** - * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. - * @head: structure to be used for queueing the RCU updates. 
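As a hedged sketch of what the CONFIG_PREEMPT_RT_FULL alias of call_rcu_bh() to call_rcu() introduced above means for callers (the structure and function names below are invented, and the snippet is not part of the patch): on RT, local_bh_disable() no longer implies a non-preemptible softirq section, so a separate BH grace period would add nothing, and existing BH-flavor callers keep compiling and working unchanged.

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_node {
	struct rcu_head rcu;
	int payload;
};

static void demo_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_node, rcu));
}

static void demo_retire(struct demo_node *n)
{
	/*
	 * On PREEMPT_RT_FULL this compiles to call_rcu(): the BH grace
	 * period is subsumed by the normal one. On !RT it remains the
	 * softirq-flavor call_rcu_bh().
	 */
	call_rcu_bh(&n->rcu, demo_free_rcu);
}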
-@@ -192,6 +195,7 @@ - */ - void call_rcu_bh(struct rcu_head *head, - rcu_callback_t func); -+#endif - - /** - * call_rcu_sched() - Queue an RCU for invocation after sched grace period. -@@ -292,6 +296,11 @@ - * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. - */ - #define rcu_preempt_depth() (current->rcu_read_lock_nesting) -+#ifndef CONFIG_PREEMPT_RT_FULL -+#define sched_rcu_preempt_depth() rcu_preempt_depth() -+#else -+static inline int sched_rcu_preempt_depth(void) { return 0; } -+#endif - - #else /* #ifdef CONFIG_PREEMPT_RCU */ - -@@ -317,6 +326,8 @@ - return 0; - } - -+#define sched_rcu_preempt_depth() rcu_preempt_depth() -+ - #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ - - /* Internal to kernel */ -@@ -489,7 +500,14 @@ - int debug_lockdep_rcu_enabled(void); - - int rcu_read_lock_held(void); -+#ifdef CONFIG_PREEMPT_RT_FULL -+static inline int rcu_read_lock_bh_held(void) -+{ -+ return rcu_read_lock_held(); -+} -+#else - int rcu_read_lock_bh_held(void); -+#endif - - /** - * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? -@@ -937,10 +955,14 @@ - static inline void rcu_read_lock_bh(void) - { - local_bh_disable(); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ rcu_read_lock(); -+#else - __acquire(RCU_BH); - rcu_lock_acquire(&rcu_bh_lock_map); - RCU_LOCKDEP_WARN(!rcu_is_watching(), - "rcu_read_lock_bh() used illegally while idle"); -+#endif - } - - /* -@@ -950,10 +972,14 @@ - */ - static inline void rcu_read_unlock_bh(void) - { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ rcu_read_unlock(); -+#else - RCU_LOCKDEP_WARN(!rcu_is_watching(), - "rcu_read_unlock_bh() used illegally while idle"); - rcu_lock_release(&rcu_bh_lock_map); - __release(RCU_BH); -+#endif - local_bh_enable(); - } - -diff -Nur linux-4.4.13.orig/include/linux/rcutree.h linux-4.4.13/include/linux/rcutree.h ---- linux-4.4.13.orig/include/linux/rcutree.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/rcutree.h 2016-06-22 21:55:08.034523867 +0200 -@@ -44,7 +44,11 @@ - rcu_note_context_switch(); - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define synchronize_rcu_bh synchronize_rcu -+#else - void synchronize_rcu_bh(void); -+#endif - void synchronize_sched_expedited(void); - void synchronize_rcu_expedited(void); - -@@ -72,7 +76,11 @@ - } - - void rcu_barrier(void); -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define rcu_barrier_bh rcu_barrier -+#else - void rcu_barrier_bh(void); -+#endif - void rcu_barrier_sched(void); - unsigned long get_state_synchronize_rcu(void); - void cond_synchronize_rcu(unsigned long oldstate); -@@ -85,12 +93,10 @@ - unsigned long rcu_batches_started_bh(void); - unsigned long rcu_batches_started_sched(void); - unsigned long rcu_batches_completed(void); --unsigned long rcu_batches_completed_bh(void); - unsigned long rcu_batches_completed_sched(void); - void show_rcu_gp_kthreads(void); - - void rcu_force_quiescent_state(void); --void rcu_bh_force_quiescent_state(void); - void rcu_sched_force_quiescent_state(void); - - void rcu_idle_enter(void); -@@ -105,6 +111,14 @@ - - bool rcu_is_watching(void); - -+#ifndef CONFIG_PREEMPT_RT_FULL -+void rcu_bh_force_quiescent_state(void); -+unsigned long rcu_batches_completed_bh(void); -+#else -+# define rcu_bh_force_quiescent_state rcu_force_quiescent_state -+# define rcu_batches_completed_bh rcu_batches_completed -+#endif -+ - void rcu_all_qs(void); - - #endif /* __LINUX_RCUTREE_H */ -diff -Nur linux-4.4.13.orig/include/linux/rtmutex.h linux-4.4.13/include/linux/rtmutex.h ---- linux-4.4.13.orig/include/linux/rtmutex.h 
2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/rtmutex.h 2016-06-22 21:55:08.034523867 +0200 -@@ -13,11 +13,15 @@ - #define __LINUX_RT_MUTEX_H - - #include -+#include - #include --#include - - extern int max_lock_depth; /* for sysctl */ - -+#ifdef CONFIG_DEBUG_MUTEXES -+#include -+#endif -+ - /** - * The rt_mutex structure - * -@@ -31,8 +35,8 @@ - struct rb_root waiters; - struct rb_node *waiters_leftmost; - struct task_struct *owner; --#ifdef CONFIG_DEBUG_RT_MUTEXES - int save_state; -+#ifdef CONFIG_DEBUG_RT_MUTEXES - const char *name, *file; - int line; - void *magic; -@@ -55,22 +59,33 @@ - # define rt_mutex_debug_check_no_locks_held(task) do { } while (0) - #endif - -+# define rt_mutex_init(mutex) \ -+ do { \ -+ raw_spin_lock_init(&(mutex)->wait_lock); \ -+ __rt_mutex_init(mutex, #mutex); \ -+ } while (0) -+ - #ifdef CONFIG_DEBUG_RT_MUTEXES - # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ - , .name = #mutexname, .file = __FILE__, .line = __LINE__ --# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__) - extern void rt_mutex_debug_task_free(struct task_struct *tsk); - #else - # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) --# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL) - # define rt_mutex_debug_task_free(t) do { } while (0) - #endif - --#define __RT_MUTEX_INITIALIZER(mutexname) \ -- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ -+#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ -+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ - , .waiters = RB_ROOT \ - , .owner = NULL \ -- __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} -+ __DEBUG_RT_MUTEX_INITIALIZER(mutexname) -+ -+#define __RT_MUTEX_INITIALIZER(mutexname) \ -+ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) } -+ -+#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \ -+ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ -+ , .save_state = 1 } - - #define DEFINE_RT_MUTEX(mutexname) \ - struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) -@@ -91,6 +106,7 @@ - - extern void rt_mutex_lock(struct rt_mutex *lock); - extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); -+extern int rt_mutex_lock_killable(struct rt_mutex *lock); - extern int rt_mutex_timed_lock(struct rt_mutex *lock, - struct hrtimer_sleeper *timeout); - -diff -Nur linux-4.4.13.orig/include/linux/rwlock_rt.h linux-4.4.13/include/linux/rwlock_rt.h ---- linux-4.4.13.orig/include/linux/rwlock_rt.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-4.4.13/include/linux/rwlock_rt.h 2016-06-22 21:55:08.034523867 +0200 -@@ -0,0 +1,99 @@ -+#ifndef __LINUX_RWLOCK_RT_H -+#define __LINUX_RWLOCK_RT_H -+ -+#ifndef __LINUX_SPINLOCK_H -+#error Do not include directly. 
Use spinlock.h -+#endif -+ -+#define rwlock_init(rwl) \ -+do { \ -+ static struct lock_class_key __key; \ -+ \ -+ rt_mutex_init(&(rwl)->lock); \ -+ __rt_rwlock_init(rwl, #rwl, &__key); \ -+} while (0) -+ -+extern void __lockfunc rt_write_lock(rwlock_t *rwlock); -+extern void __lockfunc rt_read_lock(rwlock_t *rwlock); -+extern int __lockfunc rt_write_trylock(rwlock_t *rwlock); -+extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags); -+extern int __lockfunc rt_read_trylock(rwlock_t *rwlock); -+extern void __lockfunc rt_write_unlock(rwlock_t *rwlock); -+extern void __lockfunc rt_read_unlock(rwlock_t *rwlock); -+extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock); -+extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock); -+extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key); -+ -+#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock)) -+#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock)) -+ -+#define write_trylock_irqsave(lock, flags) \ -+ __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags)) -+ -+#define read_lock_irqsave(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ flags = rt_read_lock_irqsave(lock); \ -+ } while (0) -+ -+#define write_lock_irqsave(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ flags = rt_write_lock_irqsave(lock); \ -+ } while (0) -+ -+#define read_lock(lock) rt_read_lock(lock) -+ -+#define read_lock_bh(lock) \ -+ do { \ -+ local_bh_disable(); \ -+ rt_read_lock(lock); \ -+ } while (0) -+ -+#define read_lock_irq(lock) read_lock(lock) -+ -+#define write_lock(lock) rt_write_lock(lock) -+ -+#define write_lock_bh(lock) \ -+ do { \ -+ local_bh_disable(); \ -+ rt_write_lock(lock); \ -+ } while (0) -+ -+#define write_lock_irq(lock) write_lock(lock) -+ -+#define read_unlock(lock) rt_read_unlock(lock) -+ -+#define read_unlock_bh(lock) \ -+ do { \ -+ rt_read_unlock(lock); \ -+ local_bh_enable(); \ -+ } while (0) -+ -+#define read_unlock_irq(lock) read_unlock(lock) -+ -+#define write_unlock(lock) rt_write_unlock(lock) -+ -+#define write_unlock_bh(lock) \ -+ do { \ -+ rt_write_unlock(lock); \ -+ local_bh_enable(); \ -+ } while (0) -+ -+#define write_unlock_irq(lock) write_unlock(lock) -+ -+#define read_unlock_irqrestore(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ (void) flags; \ -+ rt_read_unlock(lock); \ -+ } while (0) -+ -+#define write_unlock_irqrestore(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ (void) flags; \ -+ rt_write_unlock(lock); \ -+ } while (0) -+ -+#endif -diff -Nur linux-4.4.13.orig/include/linux/rwlock_types.h linux-4.4.13/include/linux/rwlock_types.h ---- linux-4.4.13.orig/include/linux/rwlock_types.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/rwlock_types.h 2016-06-22 21:55:08.034523867 +0200 -@@ -1,6 +1,10 @@ - #ifndef __LINUX_RWLOCK_TYPES_H - #define __LINUX_RWLOCK_TYPES_H - -+#if !defined(__LINUX_SPINLOCK_TYPES_H) -+# error "Do not include directly, include spinlock_types.h" -+#endif -+ - /* - * include/linux/rwlock_types.h - generic rwlock type definitions - * and initializers -@@ -43,6 +47,7 @@ - RW_DEP_MAP_INIT(lockname) } - #endif - --#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) -+#define DEFINE_RWLOCK(name) \ -+ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) - - #endif /* __LINUX_RWLOCK_TYPES_H */ -diff -Nur linux-4.4.13.orig/include/linux/rwlock_types_rt.h 
linux-4.4.13/include/linux/rwlock_types_rt.h ---- linux-4.4.13.orig/include/linux/rwlock_types_rt.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-4.4.13/include/linux/rwlock_types_rt.h 2016-06-22 21:55:08.034523867 +0200 -@@ -0,0 +1,33 @@ -+#ifndef __LINUX_RWLOCK_TYPES_RT_H -+#define __LINUX_RWLOCK_TYPES_RT_H -+ -+#ifndef __LINUX_SPINLOCK_TYPES_H -+#error "Do not include directly. Include spinlock_types.h instead" -+#endif -+ -+/* -+ * rwlocks - rtmutex which allows single reader recursion -+ */ -+typedef struct { -+ struct rt_mutex lock; -+ int read_depth; -+ unsigned int break_lock; -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ struct lockdep_map dep_map; -+#endif -+} rwlock_t; -+ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } -+#else -+# define RW_DEP_MAP_INIT(lockname) -+#endif -+ -+#define __RW_LOCK_UNLOCKED(name) \ -+ { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \ -+ RW_DEP_MAP_INIT(name) } -+ -+#define DEFINE_RWLOCK(name) \ -+ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) -+ -+#endif -diff -Nur linux-4.4.13.orig/include/linux/rwsem.h linux-4.4.13/include/linux/rwsem.h ---- linux-4.4.13.orig/include/linux/rwsem.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/rwsem.h 2016-06-22 21:55:08.034523867 +0200 -@@ -18,6 +18,10 @@ - #include - #endif - -+#ifdef CONFIG_PREEMPT_RT_FULL -+#include -+#else /* PREEMPT_RT_FULL */ -+ - struct rw_semaphore; - - #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK -@@ -177,4 +181,6 @@ - # define up_read_non_owner(sem) up_read(sem) - #endif - -+#endif /* !PREEMPT_RT_FULL */ -+ - #endif /* _LINUX_RWSEM_H */ -diff -Nur linux-4.4.13.orig/include/linux/rwsem_rt.h linux-4.4.13/include/linux/rwsem_rt.h ---- linux-4.4.13.orig/include/linux/rwsem_rt.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-4.4.13/include/linux/rwsem_rt.h 2016-06-22 21:55:08.034523867 +0200 -@@ -0,0 +1,152 @@ -+#ifndef _LINUX_RWSEM_RT_H -+#define _LINUX_RWSEM_RT_H -+ -+#ifndef _LINUX_RWSEM_H -+#error "Include rwsem.h" -+#endif -+ -+/* -+ * RW-semaphores are a spinlock plus a reader-depth count. -+ * -+ * Note that the semantics are different from the usual -+ * Linux rw-sems, in PREEMPT_RT mode we do not allow -+ * multiple readers to hold the lock at once, we only allow -+ * a read-lock owner to read-lock recursively. This is -+ * better for latency, makes the implementation inherently -+ * fair and makes it simpler as well. 
-+ */ -+ -+#include -+ -+struct rw_semaphore { -+ struct rt_mutex lock; -+ int read_depth; -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ struct lockdep_map dep_map; -+#endif -+}; -+ -+#define __RWSEM_INITIALIZER(name) \ -+ { .lock = __RT_MUTEX_INITIALIZER(name.lock), \ -+ RW_DEP_MAP_INIT(name) } -+ -+#define DECLARE_RWSEM(lockname) \ -+ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) -+ -+extern void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name, -+ struct lock_class_key *key); -+ -+#define __rt_init_rwsem(sem, name, key) \ -+ do { \ -+ rt_mutex_init(&(sem)->lock); \ -+ __rt_rwsem_init((sem), (name), (key));\ -+ } while (0) -+ -+#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key) -+ -+# define rt_init_rwsem(sem) \ -+do { \ -+ static struct lock_class_key __key; \ -+ \ -+ __rt_init_rwsem((sem), #sem, &__key); \ -+} while (0) -+ -+extern void rt_down_write(struct rw_semaphore *rwsem); -+extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass); -+extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass); -+extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem, -+ struct lockdep_map *nest); -+extern void rt__down_read(struct rw_semaphore *rwsem); -+extern void rt_down_read(struct rw_semaphore *rwsem); -+extern int rt_down_write_trylock(struct rw_semaphore *rwsem); -+extern int rt__down_read_trylock(struct rw_semaphore *rwsem); -+extern int rt_down_read_trylock(struct rw_semaphore *rwsem); -+extern void __rt_up_read(struct rw_semaphore *rwsem); -+extern void rt_up_read(struct rw_semaphore *rwsem); -+extern void rt_up_write(struct rw_semaphore *rwsem); -+extern void rt_downgrade_write(struct rw_semaphore *rwsem); -+ -+#define init_rwsem(sem) rt_init_rwsem(sem) -+#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock) -+ -+static inline int rwsem_is_contended(struct rw_semaphore *sem) -+{ -+ /* rt_mutex_has_waiters() */ -+ return !RB_EMPTY_ROOT(&sem->lock.waiters); -+} -+ -+static inline void __down_read(struct rw_semaphore *sem) -+{ -+ rt__down_read(sem); -+} -+ -+static inline void down_read(struct rw_semaphore *sem) -+{ -+ rt_down_read(sem); -+} -+ -+static inline int __down_read_trylock(struct rw_semaphore *sem) -+{ -+ return rt__down_read_trylock(sem); -+} -+ -+static inline int down_read_trylock(struct rw_semaphore *sem) -+{ -+ return rt_down_read_trylock(sem); -+} -+ -+static inline void down_write(struct rw_semaphore *sem) -+{ -+ rt_down_write(sem); -+} -+ -+static inline int down_write_trylock(struct rw_semaphore *sem) -+{ -+ return rt_down_write_trylock(sem); -+} -+ -+static inline void __up_read(struct rw_semaphore *sem) -+{ -+ __rt_up_read(sem); -+} -+ -+static inline void up_read(struct rw_semaphore *sem) -+{ -+ rt_up_read(sem); -+} -+ -+static inline void up_write(struct rw_semaphore *sem) -+{ -+ rt_up_write(sem); -+} -+ -+static inline void downgrade_write(struct rw_semaphore *sem) -+{ -+ rt_downgrade_write(sem); -+} -+ -+static inline void down_read_nested(struct rw_semaphore *sem, int subclass) -+{ -+ return rt_down_read_nested(sem, subclass); -+} -+ -+static inline void down_write_nested(struct rw_semaphore *sem, int subclass) -+{ -+ rt_down_write_nested(sem, subclass); -+} -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+static inline void down_write_nest_lock(struct rw_semaphore *sem, -+ struct rw_semaphore *nest_lock) -+{ -+ rt_down_write_nested_lock(sem, &nest_lock->dep_map); -+} -+ -+#else -+ -+static inline void down_write_nest_lock(struct rw_semaphore *sem, -+ struct rw_semaphore *nest_lock) -+{ -+ 
rt_down_write_nested_lock(sem, NULL); -+} -+#endif -+#endif -diff -Nur linux-4.4.13.orig/include/linux/sched.h linux-4.4.13/include/linux/sched.h ---- linux-4.4.13.orig/include/linux/sched.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/sched.h 2016-06-22 21:55:08.038523867 +0200 -@@ -26,6 +26,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -182,8 +183,6 @@ - static inline void update_cpu_load_nohz(void) { } - #endif - --extern unsigned long get_parent_ip(unsigned long addr); -- - extern void dump_cpu_task(int cpu); - - struct seq_file; -@@ -242,10 +241,7 @@ - TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ - __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) - --#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) - #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) --#define task_is_stopped_or_traced(task) \ -- ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) - #define task_contributes_to_load(task) \ - ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ - (task->flags & PF_FROZEN) == 0 && \ -@@ -311,6 +307,11 @@ - - #endif - -+#define __set_current_state_no_track(state_value) \ -+ do { current->state = (state_value); } while (0) -+#define set_current_state_no_track(state_value) \ -+ set_mb(current->state, (state_value)) -+ - /* Task command name length */ - #define TASK_COMM_LEN 16 - -@@ -970,8 +971,18 @@ - struct wake_q_head name = { WAKE_Q_TAIL, &name.first } - - extern void wake_q_add(struct wake_q_head *head, -- struct task_struct *task); --extern void wake_up_q(struct wake_q_head *head); -+ struct task_struct *task); -+extern void __wake_up_q(struct wake_q_head *head, bool sleeper); -+ -+static inline void wake_up_q(struct wake_q_head *head) -+{ -+ __wake_up_q(head, false); -+} -+ -+static inline void wake_up_q_sleeper(struct wake_q_head *head) -+{ -+ __wake_up_q(head, true); -+} - - /* - * sched-domains (multiprocessor balancing) declarations: -@@ -1379,6 +1390,7 @@ - - struct task_struct { - volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ -+ volatile long saved_state; /* saved state for "spinlock sleepers" */ - void *stack; - atomic_t usage; - unsigned int flags; /* per process flags, defined below */ -@@ -1415,6 +1427,12 @@ - #endif - - unsigned int policy; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ int migrate_disable; -+# ifdef CONFIG_SCHED_DEBUG -+ int migrate_disable_atomic; -+# endif -+#endif - int nr_cpus_allowed; - cpumask_t cpus_allowed; - -@@ -1522,11 +1540,14 @@ - cputime_t gtime; - struct prev_cputime prev_cputime; - #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -- seqlock_t vtime_seqlock; -+ seqcount_t vtime_seqcount; - unsigned long long vtime_snap; - enum { -- VTIME_SLEEPING = 0, -+ /* Task is sleeping or running in a CPU with VTIME inactive */ -+ VTIME_INACTIVE = 0, -+ /* Task runs in userspace in a CPU with VTIME active */ - VTIME_USER, -+ /* Task runs in kernelspace in a CPU with VTIME active */ - VTIME_SYS, - } vtime_snap_whence; - #endif -@@ -1538,6 +1559,9 @@ - - struct task_cputime cputime_expires; - struct list_head cpu_timers[3]; -+#ifdef CONFIG_PREEMPT_RT_BASE -+ struct task_struct *posix_timer_list; -+#endif - - /* process credentials */ - const struct cred __rcu *real_cred; /* objective and real subjective task -@@ -1568,10 +1592,15 @@ - /* signal handlers */ - struct signal_struct *signal; - struct sighand_struct *sighand; -+ struct sigqueue *sigqueue_cache; - - sigset_t blocked, real_blocked; - sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ - struct 
sigpending pending; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ /* TODO: move me into ->restart_block ? */ -+ struct siginfo forced_info; -+#endif - - unsigned long sas_ss_sp; - size_t sas_ss_size; -@@ -1795,6 +1824,12 @@ - unsigned long trace; - /* bitmask and counter of trace recursion */ - unsigned long trace_recursion; -+#ifdef CONFIG_WAKEUP_LATENCY_HIST -+ u64 preempt_timestamp_hist; -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ long timer_offset; -+#endif -+#endif - #endif /* CONFIG_TRACING */ - #ifdef CONFIG_MEMCG - struct mem_cgroup *memcg_in_oom; -@@ -1811,9 +1846,23 @@ - unsigned int sequential_io; - unsigned int sequential_io_avg; - #endif -+#ifdef CONFIG_PREEMPT_RT_BASE -+ struct rcu_head put_rcu; -+ int softirq_nestcnt; -+ unsigned int softirqs_raised; -+#endif -+#ifdef CONFIG_PREEMPT_RT_FULL -+# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32 -+ int kmap_idx; -+ pte_t kmap_pte[KM_TYPE_NR]; -+# endif -+#endif - #ifdef CONFIG_DEBUG_ATOMIC_SLEEP - unsigned long task_state_change; - #endif -+#ifdef CONFIG_PREEMPT_RT_FULL -+ int xmit_recursion; -+#endif - int pagefault_disabled; - /* CPU-specific state of this task */ - struct thread_struct thread; -@@ -1831,9 +1880,6 @@ - # define arch_task_struct_size (sizeof(struct task_struct)) - #endif - --/* Future-safe accessor for struct task_struct's cpus_allowed. */ --#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) -- - #define TNF_MIGRATED 0x01 - #define TNF_NO_GROUP 0x02 - #define TNF_SHARED 0x04 -@@ -2023,6 +2069,15 @@ - extern void free_task(struct task_struct *tsk); - #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) - -+#ifdef CONFIG_PREEMPT_RT_BASE -+extern void __put_task_struct_cb(struct rcu_head *rhp); -+ -+static inline void put_task_struct(struct task_struct *t) -+{ -+ if (atomic_dec_and_test(&t->usage)) -+ call_rcu(&t->put_rcu, __put_task_struct_cb); -+} -+#else - extern void __put_task_struct(struct task_struct *t); - - static inline void put_task_struct(struct task_struct *t) -@@ -2030,6 +2085,7 @@ - if (atomic_dec_and_test(&t->usage)) - __put_task_struct(t); - } -+#endif - - #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN - extern void task_cputime(struct task_struct *t, -@@ -2068,6 +2124,7 @@ - /* - * Per process flags - */ -+#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */ - #define PF_EXITING 0x00000004 /* getting shut down */ - #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ - #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ -@@ -2232,6 +2289,10 @@ - - extern int set_cpus_allowed_ptr(struct task_struct *p, - const struct cpumask *new_mask); -+int migrate_me(void); -+void tell_sched_cpu_down_begin(int cpu); -+void tell_sched_cpu_down_done(int cpu); -+ - #else - static inline void do_set_cpus_allowed(struct task_struct *p, - const struct cpumask *new_mask) -@@ -2244,6 +2305,9 @@ - return -EINVAL; - return 0; - } -+static inline int migrate_me(void) { return 0; } -+static inline void tell_sched_cpu_down_begin(int cpu) { } -+static inline void tell_sched_cpu_down_done(int cpu) { } - #endif - - #ifdef CONFIG_NO_HZ_COMMON -@@ -2453,6 +2517,7 @@ - - extern int wake_up_state(struct task_struct *tsk, unsigned int state); - extern int wake_up_process(struct task_struct *tsk); -+extern int wake_up_lock_sleeper(struct task_struct * tsk); - extern void wake_up_new_task(struct task_struct *tsk); - #ifdef CONFIG_SMP - extern void kick_process(struct task_struct *tsk); -@@ -2576,12 +2641,24 @@ - - /* mmdrop drops the mm and the page tables */ - extern void __mmdrop(struct mm_struct *); -+ - 
static inline void mmdrop(struct mm_struct * mm) - { - if (unlikely(atomic_dec_and_test(&mm->mm_count))) - __mmdrop(mm); - } - -+#ifdef CONFIG_PREEMPT_RT_BASE -+extern void __mmdrop_delayed(struct rcu_head *rhp); -+static inline void mmdrop_delayed(struct mm_struct *mm) -+{ -+ if (atomic_dec_and_test(&mm->mm_count)) -+ call_rcu(&mm->delayed_drop, __mmdrop_delayed); -+} -+#else -+# define mmdrop_delayed(mm) mmdrop(mm) -+#endif -+ - /* mmput gets rid of the mappings and all user-space */ - extern void mmput(struct mm_struct *); - /* Grab a reference to a task's mm, if it is not already going away */ -@@ -2891,6 +2968,43 @@ - return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); - } - -+#ifdef CONFIG_PREEMPT_LAZY -+static inline void set_tsk_need_resched_lazy(struct task_struct *tsk) -+{ -+ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); -+} -+ -+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) -+{ -+ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); -+} -+ -+static inline int test_tsk_need_resched_lazy(struct task_struct *tsk) -+{ -+ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY)); -+} -+ -+static inline int need_resched_lazy(void) -+{ -+ return test_thread_flag(TIF_NEED_RESCHED_LAZY); -+} -+ -+static inline int need_resched_now(void) -+{ -+ return test_thread_flag(TIF_NEED_RESCHED); -+} -+ -+#else -+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { } -+static inline int need_resched_lazy(void) { return 0; } -+ -+static inline int need_resched_now(void) -+{ -+ return test_thread_flag(TIF_NEED_RESCHED); -+} -+ -+#endif -+ - static inline int restart_syscall(void) - { - set_tsk_thread_flag(current, TIF_SIGPENDING); -@@ -2922,6 +3036,51 @@ - return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); - } - -+static inline bool __task_is_stopped_or_traced(struct task_struct *task) -+{ -+ if (task->state & (__TASK_STOPPED | __TASK_TRACED)) -+ return true; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED)) -+ return true; -+#endif -+ return false; -+} -+ -+static inline bool task_is_stopped_or_traced(struct task_struct *task) -+{ -+ bool traced_stopped; -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ unsigned long flags; -+ -+ raw_spin_lock_irqsave(&task->pi_lock, flags); -+ traced_stopped = __task_is_stopped_or_traced(task); -+ raw_spin_unlock_irqrestore(&task->pi_lock, flags); -+#else -+ traced_stopped = __task_is_stopped_or_traced(task); -+#endif -+ return traced_stopped; -+} -+ -+static inline bool task_is_traced(struct task_struct *task) -+{ -+ bool traced = false; -+ -+ if (task->state & __TASK_TRACED) -+ return true; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ /* in case the task is sleeping on tasklist_lock */ -+ raw_spin_lock_irq(&task->pi_lock); -+ if (task->state & __TASK_TRACED) -+ traced = true; -+ else if (task->saved_state & __TASK_TRACED) -+ traced = true; -+ raw_spin_unlock_irq(&task->pi_lock); -+#endif -+ return traced; -+} -+ - /* - * cond_resched() and cond_resched_lock(): latency reduction via - * explicit rescheduling in places that are safe. 
The return -@@ -2943,12 +3102,16 @@ - __cond_resched_lock(lock); \ - }) - -+#ifndef CONFIG_PREEMPT_RT_FULL - extern int __cond_resched_softirq(void); - - #define cond_resched_softirq() ({ \ - ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ - __cond_resched_softirq(); \ - }) -+#else -+# define cond_resched_softirq() cond_resched() -+#endif - - static inline void cond_resched_rcu(void) - { -@@ -3110,6 +3273,31 @@ - - #endif /* CONFIG_SMP */ - -+static inline int __migrate_disabled(struct task_struct *p) -+{ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ return p->migrate_disable; -+#else -+ return 0; -+#endif -+} -+ -+/* Future-safe accessor for struct task_struct's cpus_allowed. */ -+static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p) -+{ -+ if (__migrate_disabled(p)) -+ return cpumask_of(task_cpu(p)); -+ -+ return &p->cpus_allowed; -+} -+ -+static inline int tsk_nr_cpus_allowed(struct task_struct *p) -+{ -+ if (__migrate_disabled(p)) -+ return 1; -+ return p->nr_cpus_allowed; -+} -+ - extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); - extern long sched_getaffinity(pid_t pid, struct cpumask *mask); - -diff -Nur linux-4.4.13.orig/include/linux/seqlock.h linux-4.4.13/include/linux/seqlock.h ---- linux-4.4.13.orig/include/linux/seqlock.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/seqlock.h 2016-06-22 21:55:08.038523867 +0200 -@@ -220,20 +220,30 @@ - return __read_seqcount_retry(s, start); - } - -- -- --static inline void raw_write_seqcount_begin(seqcount_t *s) -+static inline void __raw_write_seqcount_begin(seqcount_t *s) - { - s->sequence++; - smp_wmb(); - } - --static inline void raw_write_seqcount_end(seqcount_t *s) -+static inline void raw_write_seqcount_begin(seqcount_t *s) -+{ -+ preempt_disable_rt(); -+ __raw_write_seqcount_begin(s); -+} -+ -+static inline void __raw_write_seqcount_end(seqcount_t *s) - { - smp_wmb(); - s->sequence++; - } - -+static inline void raw_write_seqcount_end(seqcount_t *s) -+{ -+ __raw_write_seqcount_end(s); -+ preempt_enable_rt(); -+} -+ - /** - * raw_write_seqcount_barrier - do a seq write barrier - * @s: pointer to seqcount_t -@@ -425,10 +435,32 @@ - /* - * Read side functions for starting and finalizing a read side section. - */ -+#ifndef CONFIG_PREEMPT_RT_FULL - static inline unsigned read_seqbegin(const seqlock_t *sl) - { - return read_seqcount_begin(&sl->seqcount); - } -+#else -+/* -+ * Starvation safe read side for RT -+ */ -+static inline unsigned read_seqbegin(seqlock_t *sl) -+{ -+ unsigned ret; -+ -+repeat: -+ ret = ACCESS_ONCE(sl->seqcount.sequence); -+ if (unlikely(ret & 1)) { -+ /* -+ * Take the lock and let the writer proceed (i.e. evtl -+ * boost it), otherwise we could loop here forever. 
-+ */ -+ spin_unlock_wait(&sl->lock); -+ goto repeat; -+ } -+ return ret; -+} -+#endif - - static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) - { -@@ -443,36 +475,36 @@ - static inline void write_seqlock(seqlock_t *sl) - { - spin_lock(&sl->lock); -- write_seqcount_begin(&sl->seqcount); -+ __raw_write_seqcount_begin(&sl->seqcount); - } - - static inline void write_sequnlock(seqlock_t *sl) - { -- write_seqcount_end(&sl->seqcount); -+ __raw_write_seqcount_end(&sl->seqcount); - spin_unlock(&sl->lock); - } - - static inline void write_seqlock_bh(seqlock_t *sl) - { - spin_lock_bh(&sl->lock); -- write_seqcount_begin(&sl->seqcount); -+ __raw_write_seqcount_begin(&sl->seqcount); - } - - static inline void write_sequnlock_bh(seqlock_t *sl) - { -- write_seqcount_end(&sl->seqcount); -+ __raw_write_seqcount_end(&sl->seqcount); - spin_unlock_bh(&sl->lock); - } - - static inline void write_seqlock_irq(seqlock_t *sl) - { - spin_lock_irq(&sl->lock); -- write_seqcount_begin(&sl->seqcount); -+ __raw_write_seqcount_begin(&sl->seqcount); - } - - static inline void write_sequnlock_irq(seqlock_t *sl) - { -- write_seqcount_end(&sl->seqcount); -+ __raw_write_seqcount_end(&sl->seqcount); - spin_unlock_irq(&sl->lock); - } - -@@ -481,7 +513,7 @@ - unsigned long flags; - - spin_lock_irqsave(&sl->lock, flags); -- write_seqcount_begin(&sl->seqcount); -+ __raw_write_seqcount_begin(&sl->seqcount); - return flags; - } - -@@ -491,7 +523,7 @@ - static inline void - write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) - { -- write_seqcount_end(&sl->seqcount); -+ __raw_write_seqcount_end(&sl->seqcount); - spin_unlock_irqrestore(&sl->lock, flags); - } - -diff -Nur linux-4.4.13.orig/include/linux/signal.h linux-4.4.13/include/linux/signal.h ---- linux-4.4.13.orig/include/linux/signal.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/signal.h 2016-06-22 21:55:08.038523867 +0200 -@@ -233,6 +233,7 @@ - } - - extern void flush_sigqueue(struct sigpending *queue); -+extern void flush_task_sigqueue(struct task_struct *tsk); - - /* Test if 'sig' is valid signal. 
Use this instead of testing _NSIG directly */ - static inline int valid_signal(unsigned long sig) -diff -Nur linux-4.4.13.orig/include/linux/skbuff.h linux-4.4.13/include/linux/skbuff.h ---- linux-4.4.13.orig/include/linux/skbuff.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/skbuff.h 2016-06-22 21:55:08.038523867 +0200 -@@ -203,6 +203,7 @@ - - __u32 qlen; - spinlock_t lock; -+ raw_spinlock_t raw_lock; - }; - - struct sk_buff; -@@ -1464,6 +1465,12 @@ - __skb_queue_head_init(list); - } - -+static inline void skb_queue_head_init_raw(struct sk_buff_head *list) -+{ -+ raw_spin_lock_init(&list->raw_lock); -+ __skb_queue_head_init(list); -+} -+ - static inline void skb_queue_head_init_class(struct sk_buff_head *list, - struct lock_class_key *class) - { -diff -Nur linux-4.4.13.orig/include/linux/smp.h linux-4.4.13/include/linux/smp.h ---- linux-4.4.13.orig/include/linux/smp.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/smp.h 2016-06-22 21:55:08.038523867 +0200 -@@ -185,6 +185,9 @@ - #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) - #define put_cpu() preempt_enable() - -+#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); }) -+#define put_cpu_light() migrate_enable() -+ - /* - * Callback to arch code if there's nosmp or maxcpus=0 on the - * boot command line: -diff -Nur linux-4.4.13.orig/include/linux/spinlock_api_smp.h linux-4.4.13/include/linux/spinlock_api_smp.h ---- linux-4.4.13.orig/include/linux/spinlock_api_smp.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/spinlock_api_smp.h 2016-06-22 21:55:08.038523867 +0200 -@@ -189,6 +189,8 @@ - return 0; - } - --#include -+#ifndef CONFIG_PREEMPT_RT_FULL -+# include -+#endif - - #endif /* __LINUX_SPINLOCK_API_SMP_H */ -diff -Nur linux-4.4.13.orig/include/linux/spinlock.h linux-4.4.13/include/linux/spinlock.h ---- linux-4.4.13.orig/include/linux/spinlock.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/spinlock.h 2016-06-22 21:55:08.038523867 +0200 -@@ -271,7 +271,11 @@ - #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock)) - - /* Include rwlock functions */ --#include -+#ifdef CONFIG_PREEMPT_RT_FULL -+# include -+#else -+# include -+#endif - - /* - * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: -@@ -282,6 +286,10 @@ - # include - #endif - -+#ifdef CONFIG_PREEMPT_RT_FULL -+# include -+#else /* PREEMPT_RT_FULL */ -+ - /* - * Map the spin_lock functions to the raw variants for PREEMPT_RT=n - */ -@@ -416,4 +424,6 @@ - #define atomic_dec_and_lock(atomic, lock) \ - __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) - -+#endif /* !PREEMPT_RT_FULL */ -+ - #endif /* __LINUX_SPINLOCK_H */ -diff -Nur linux-4.4.13.orig/include/linux/spinlock_rt.h linux-4.4.13/include/linux/spinlock_rt.h ---- linux-4.4.13.orig/include/linux/spinlock_rt.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-4.4.13/include/linux/spinlock_rt.h 2016-06-22 21:55:08.038523867 +0200 -@@ -0,0 +1,163 @@ -+#ifndef __LINUX_SPINLOCK_RT_H -+#define __LINUX_SPINLOCK_RT_H -+ -+#ifndef __LINUX_SPINLOCK_H -+#error Do not include directly. 
Use spinlock.h -+#endif -+ -+#include -+ -+extern void -+__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key); -+ -+#define spin_lock_init(slock) \ -+do { \ -+ static struct lock_class_key __key; \ -+ \ -+ rt_mutex_init(&(slock)->lock); \ -+ __rt_spin_lock_init(slock, #slock, &__key); \ -+} while (0) -+ -+void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock); -+void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock); -+int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock); -+ -+extern void __lockfunc rt_spin_lock(spinlock_t *lock); -+extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock); -+extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass); -+extern void __lockfunc rt_spin_unlock(spinlock_t *lock); -+extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock); -+extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags); -+extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock); -+extern int __lockfunc rt_spin_trylock(spinlock_t *lock); -+extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock); -+ -+/* -+ * lockdep-less calls, for derived types like rwlock: -+ * (for trylock they can use rt_mutex_trylock() directly. -+ */ -+extern void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock); -+extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock); -+extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock); -+extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock); -+ -+#define spin_lock(lock) rt_spin_lock(lock) -+ -+#define spin_lock_bh(lock) \ -+ do { \ -+ local_bh_disable(); \ -+ rt_spin_lock(lock); \ -+ } while (0) -+ -+#define spin_lock_irq(lock) spin_lock(lock) -+ -+#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock)) -+ -+#define spin_trylock(lock) \ -+({ \ -+ int __locked; \ -+ __locked = spin_do_trylock(lock); \ -+ __locked; \ -+}) -+ -+#ifdef CONFIG_LOCKDEP -+# define spin_lock_nested(lock, subclass) \ -+ do { \ -+ rt_spin_lock_nested(lock, subclass); \ -+ } while (0) -+ -+#define spin_lock_bh_nested(lock, subclass) \ -+ do { \ -+ local_bh_disable(); \ -+ rt_spin_lock_nested(lock, subclass); \ -+ } while (0) -+ -+# define spin_lock_irqsave_nested(lock, flags, subclass) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ flags = 0; \ -+ rt_spin_lock_nested(lock, subclass); \ -+ } while (0) -+#else -+# define spin_lock_nested(lock, subclass) spin_lock(lock) -+# define spin_lock_bh_nested(lock, subclass) spin_lock_bh(lock) -+ -+# define spin_lock_irqsave_nested(lock, flags, subclass) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ flags = 0; \ -+ spin_lock(lock); \ -+ } while (0) -+#endif -+ -+#define spin_lock_irqsave(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ flags = 0; \ -+ spin_lock(lock); \ -+ } while (0) -+ -+static inline unsigned long spin_lock_trace_flags(spinlock_t *lock) -+{ -+ unsigned long flags = 0; -+#ifdef CONFIG_TRACE_IRQFLAGS -+ flags = rt_spin_lock_trace_flags(lock); -+#else -+ spin_lock(lock); /* lock_local */ -+#endif -+ return flags; -+} -+ -+/* FIXME: we need rt_spin_lock_nest_lock */ -+#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0) -+ -+#define spin_unlock(lock) rt_spin_unlock(lock) -+ -+#define spin_unlock_bh(lock) \ -+ do { \ -+ rt_spin_unlock(lock); \ -+ local_bh_enable(); \ -+ } while (0) -+ -+#define spin_unlock_irq(lock) spin_unlock(lock) -+ -+#define spin_unlock_irqrestore(lock, flags) \ -+ do { \ -+ typecheck(unsigned long, flags); \ -+ (void) 
flags; \ -+ spin_unlock(lock); \ -+ } while (0) -+ -+#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock)) -+#define spin_trylock_irq(lock) spin_trylock(lock) -+ -+#define spin_trylock_irqsave(lock, flags) \ -+ rt_spin_trylock_irqsave(lock, &(flags)) -+ -+#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock) -+ -+#ifdef CONFIG_GENERIC_LOCKBREAK -+# define spin_is_contended(lock) ((lock)->break_lock) -+#else -+# define spin_is_contended(lock) (((void)(lock), 0)) -+#endif -+ -+static inline int spin_can_lock(spinlock_t *lock) -+{ -+ return !rt_mutex_is_locked(&lock->lock); -+} -+ -+static inline int spin_is_locked(spinlock_t *lock) -+{ -+ return rt_mutex_is_locked(&lock->lock); -+} -+ -+static inline void assert_spin_locked(spinlock_t *lock) -+{ -+ BUG_ON(!spin_is_locked(lock)); -+} -+ -+#define atomic_dec_and_lock(atomic, lock) \ -+ atomic_dec_and_spin_lock(atomic, lock) -+ -+#endif -diff -Nur linux-4.4.13.orig/include/linux/spinlock_types.h linux-4.4.13/include/linux/spinlock_types.h ---- linux-4.4.13.orig/include/linux/spinlock_types.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/spinlock_types.h 2016-06-22 21:55:08.038523867 +0200 -@@ -9,80 +9,15 @@ - * Released under the General Public License (GPL). - */ - --#if defined(CONFIG_SMP) --# include --#else --# include --#endif -- --#include -- --typedef struct raw_spinlock { -- arch_spinlock_t raw_lock; --#ifdef CONFIG_GENERIC_LOCKBREAK -- unsigned int break_lock; --#endif --#ifdef CONFIG_DEBUG_SPINLOCK -- unsigned int magic, owner_cpu; -- void *owner; --#endif --#ifdef CONFIG_DEBUG_LOCK_ALLOC -- struct lockdep_map dep_map; --#endif --} raw_spinlock_t; -- --#define SPINLOCK_MAGIC 0xdead4ead -- --#define SPINLOCK_OWNER_INIT ((void *)-1L) -- --#ifdef CONFIG_DEBUG_LOCK_ALLOC --# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } --#else --# define SPIN_DEP_MAP_INIT(lockname) --#endif -+#include - --#ifdef CONFIG_DEBUG_SPINLOCK --# define SPIN_DEBUG_INIT(lockname) \ -- .magic = SPINLOCK_MAGIC, \ -- .owner_cpu = -1, \ -- .owner = SPINLOCK_OWNER_INIT, -+#ifndef CONFIG_PREEMPT_RT_FULL -+# include -+# include - #else --# define SPIN_DEBUG_INIT(lockname) -+# include -+# include -+# include - #endif - --#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ -- { \ -- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ -- SPIN_DEBUG_INIT(lockname) \ -- SPIN_DEP_MAP_INIT(lockname) } -- --#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ -- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) -- --#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) -- --typedef struct spinlock { -- union { -- struct raw_spinlock rlock; -- --#ifdef CONFIG_DEBUG_LOCK_ALLOC --# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) -- struct { -- u8 __padding[LOCK_PADSIZE]; -- struct lockdep_map dep_map; -- }; --#endif -- }; --} spinlock_t; -- --#define __SPIN_LOCK_INITIALIZER(lockname) \ -- { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } -- --#define __SPIN_LOCK_UNLOCKED(lockname) \ -- (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) -- --#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) -- --#include -- - #endif /* __LINUX_SPINLOCK_TYPES_H */ -diff -Nur linux-4.4.13.orig/include/linux/spinlock_types_nort.h linux-4.4.13/include/linux/spinlock_types_nort.h ---- linux-4.4.13.orig/include/linux/spinlock_types_nort.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-4.4.13/include/linux/spinlock_types_nort.h 2016-06-22 21:55:08.038523867 +0200 -@@ -0,0 +1,33 @@ -+#ifndef 
__LINUX_SPINLOCK_TYPES_NORT_H -+#define __LINUX_SPINLOCK_TYPES_NORT_H -+ -+#ifndef __LINUX_SPINLOCK_TYPES_H -+#error "Do not include directly. Include spinlock_types.h instead" -+#endif -+ -+/* -+ * The non RT version maps spinlocks to raw_spinlocks -+ */ -+typedef struct spinlock { -+ union { -+ struct raw_spinlock rlock; -+ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) -+ struct { -+ u8 __padding[LOCK_PADSIZE]; -+ struct lockdep_map dep_map; -+ }; -+#endif -+ }; -+} spinlock_t; -+ -+#define __SPIN_LOCK_INITIALIZER(lockname) \ -+ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } -+ -+#define __SPIN_LOCK_UNLOCKED(lockname) \ -+ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) -+ -+#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) -+ -+#endif -diff -Nur linux-4.4.13.orig/include/linux/spinlock_types_raw.h linux-4.4.13/include/linux/spinlock_types_raw.h ---- linux-4.4.13.orig/include/linux/spinlock_types_raw.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-4.4.13/include/linux/spinlock_types_raw.h 2016-06-22 21:55:08.038523867 +0200 -@@ -0,0 +1,56 @@ -+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H -+#define __LINUX_SPINLOCK_TYPES_RAW_H -+ -+#if defined(CONFIG_SMP) -+# include -+#else -+# include -+#endif -+ -+#include -+ -+typedef struct raw_spinlock { -+ arch_spinlock_t raw_lock; -+#ifdef CONFIG_GENERIC_LOCKBREAK -+ unsigned int break_lock; -+#endif -+#ifdef CONFIG_DEBUG_SPINLOCK -+ unsigned int magic, owner_cpu; -+ void *owner; -+#endif -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ struct lockdep_map dep_map; -+#endif -+} raw_spinlock_t; -+ -+#define SPINLOCK_MAGIC 0xdead4ead -+ -+#define SPINLOCK_OWNER_INIT ((void *)-1L) -+ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } -+#else -+# define SPIN_DEP_MAP_INIT(lockname) -+#endif -+ -+#ifdef CONFIG_DEBUG_SPINLOCK -+# define SPIN_DEBUG_INIT(lockname) \ -+ .magic = SPINLOCK_MAGIC, \ -+ .owner_cpu = -1, \ -+ .owner = SPINLOCK_OWNER_INIT, -+#else -+# define SPIN_DEBUG_INIT(lockname) -+#endif -+ -+#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ -+ { \ -+ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ -+ SPIN_DEBUG_INIT(lockname) \ -+ SPIN_DEP_MAP_INIT(lockname) } -+ -+#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ -+ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) -+ -+#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) -+ -+#endif -diff -Nur linux-4.4.13.orig/include/linux/spinlock_types_rt.h linux-4.4.13/include/linux/spinlock_types_rt.h ---- linux-4.4.13.orig/include/linux/spinlock_types_rt.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-4.4.13/include/linux/spinlock_types_rt.h 2016-06-22 21:55:08.038523867 +0200 -@@ -0,0 +1,51 @@ -+#ifndef __LINUX_SPINLOCK_TYPES_RT_H -+#define __LINUX_SPINLOCK_TYPES_RT_H -+ -+#ifndef __LINUX_SPINLOCK_TYPES_H -+#error "Do not include directly. 
Include spinlock_types.h instead" -+#endif -+ -+#include -+ -+/* -+ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field: -+ */ -+typedef struct spinlock { -+ struct rt_mutex lock; -+ unsigned int break_lock; -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ struct lockdep_map dep_map; -+#endif -+} spinlock_t; -+ -+#ifdef CONFIG_DEBUG_RT_MUTEXES -+# define __RT_SPIN_INITIALIZER(name) \ -+ { \ -+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ -+ .save_state = 1, \ -+ .file = __FILE__, \ -+ .line = __LINE__ , \ -+ } -+#else -+# define __RT_SPIN_INITIALIZER(name) \ -+ { \ -+ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ -+ .save_state = 1, \ -+ } -+#endif -+ -+/* -+.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock) -+*/ -+ -+#define __SPIN_LOCK_UNLOCKED(name) \ -+ { .lock = __RT_SPIN_INITIALIZER(name.lock), \ -+ SPIN_DEP_MAP_INIT(name) } -+ -+#define __DEFINE_SPINLOCK(name) \ -+ spinlock_t name = __SPIN_LOCK_UNLOCKED(name) -+ -+#define DEFINE_SPINLOCK(name) \ -+ spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name) -+ -+#endif -diff -Nur linux-4.4.13.orig/include/linux/srcu.h linux-4.4.13/include/linux/srcu.h ---- linux-4.4.13.orig/include/linux/srcu.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/srcu.h 2016-06-22 21:55:08.038523867 +0200 -@@ -84,10 +84,10 @@ - - void process_srcu(struct work_struct *work); - --#define __SRCU_STRUCT_INIT(name) \ -+#define __SRCU_STRUCT_INIT(name, pcpu_name) \ - { \ - .completed = -300, \ -- .per_cpu_ref = &name##_srcu_array, \ -+ .per_cpu_ref = &pcpu_name, \ - .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \ - .running = false, \ - .batch_queue = RCU_BATCH_INIT(name.batch_queue), \ -@@ -104,7 +104,7 @@ - */ - #define __DEFINE_SRCU(name, is_static) \ - static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ -- is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name) -+ is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array) - #define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */) - #define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static) - -diff -Nur linux-4.4.13.orig/include/linux/suspend.h linux-4.4.13/include/linux/suspend.h ---- linux-4.4.13.orig/include/linux/suspend.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/suspend.h 2016-06-22 21:55:08.038523867 +0200 -@@ -194,6 +194,12 @@ - void (*end)(void); - }; - -+#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION) -+extern bool pm_in_action; -+#else -+# define pm_in_action false -+#endif -+ - #ifdef CONFIG_SUSPEND - /** - * suspend_set_ops - set platform dependent suspend operations -diff -Nur linux-4.4.13.orig/include/linux/swait.h linux-4.4.13/include/linux/swait.h ---- linux-4.4.13.orig/include/linux/swait.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-4.4.13/include/linux/swait.h 2016-06-22 21:55:08.038523867 +0200 -@@ -0,0 +1,173 @@ -+#ifndef _LINUX_SWAIT_H -+#define _LINUX_SWAIT_H -+ -+#include -+#include -+#include -+#include -+ -+/* -+ * Simple wait queues -+ * -+ * While these are very similar to the other/complex wait queues (wait.h) the -+ * most important difference is that the simple waitqueue allows for -+ * deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold -+ * times. 
-+ * -+ * In order to make this so, we had to drop a fair number of features of the -+ * other waitqueue code; notably: -+ * -+ * - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue; -+ * all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right -+ * sleeper state. -+ * -+ * - the exclusive mode; because this requires preserving the list order -+ * and this is hard. -+ * -+ * - custom wake functions; because you cannot give any guarantees about -+ * random code. -+ * -+ * As a side effect of this; the data structures are slimmer. -+ * -+ * One would recommend using this wait queue where possible. -+ */ -+ -+struct task_struct; -+ -+struct swait_queue_head { -+ raw_spinlock_t lock; -+ struct list_head task_list; -+}; -+ -+struct swait_queue { -+ struct task_struct *task; -+ struct list_head task_list; -+}; -+ -+#define __SWAITQUEUE_INITIALIZER(name) { \ -+ .task = current, \ -+ .task_list = LIST_HEAD_INIT((name).task_list), \ -+} -+ -+#define DECLARE_SWAITQUEUE(name) \ -+ struct swait_queue name = __SWAITQUEUE_INITIALIZER(name) -+ -+#define __SWAIT_QUEUE_HEAD_INITIALIZER(name) { \ -+ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ -+ .task_list = LIST_HEAD_INIT((name).task_list), \ -+} -+ -+#define DECLARE_SWAIT_QUEUE_HEAD(name) \ -+ struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INITIALIZER(name) -+ -+extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name, -+ struct lock_class_key *key); -+ -+#define init_swait_queue_head(q) \ -+ do { \ -+ static struct lock_class_key __key; \ -+ __init_swait_queue_head((q), #q, &__key); \ -+ } while (0) -+ -+#ifdef CONFIG_LOCKDEP -+# define __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name) \ -+ ({ init_swait_queue_head(&name); name; }) -+# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \ -+ struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name) -+#else -+# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \ -+ DECLARE_SWAIT_QUEUE_HEAD(name) -+#endif -+ -+static inline int swait_active(struct swait_queue_head *q) -+{ -+ return !list_empty(&q->task_list); -+} -+ -+extern void swake_up(struct swait_queue_head *q); -+extern void swake_up_all(struct swait_queue_head *q); -+extern void swake_up_locked(struct swait_queue_head *q); -+extern void swake_up_all_locked(struct swait_queue_head *q); -+ -+extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); -+extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state); -+extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state); -+ -+extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait); -+extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait); -+ -+/* as per ___wait_event() but for swait, therefore "exclusive == 0" */ -+#define ___swait_event(wq, condition, state, ret, cmd) \ -+({ \ -+ struct swait_queue __wait; \ -+ long __ret = ret; \ -+ \ -+ INIT_LIST_HEAD(&__wait.task_list); \ -+ for (;;) { \ -+ long __int = prepare_to_swait_event(&wq, &__wait, state);\ -+ \ -+ if (condition) \ -+ break; \ -+ \ -+ if (___wait_is_interruptible(state) && __int) { \ -+ __ret = __int; \ -+ break; \ -+ } \ -+ \ -+ cmd; \ -+ } \ -+ finish_swait(&wq, &__wait); \ -+ __ret; \ -+}) -+ -+#define __swait_event(wq, condition) \ -+ (void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \ -+ schedule()) -+ -+#define swait_event(wq, condition) \ -+do { \ -+ if (condition) \ -+ break; \ -+ __swait_event(wq, condition); \ -+} 
while (0) -+ -+#define __swait_event_timeout(wq, condition, timeout) \ -+ ___swait_event(wq, ___wait_cond_timeout(condition), \ -+ TASK_UNINTERRUPTIBLE, timeout, \ -+ __ret = schedule_timeout(__ret)) -+ -+#define swait_event_timeout(wq, condition, timeout) \ -+({ \ -+ long __ret = timeout; \ -+ if (!___wait_cond_timeout(condition)) \ -+ __ret = __swait_event_timeout(wq, condition, timeout); \ -+ __ret; \ -+}) -+ -+#define __swait_event_interruptible(wq, condition) \ -+ ___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \ -+ schedule()) -+ -+#define swait_event_interruptible(wq, condition) \ -+({ \ -+ int __ret = 0; \ -+ if (!(condition)) \ -+ __ret = __swait_event_interruptible(wq, condition); \ -+ __ret; \ -+}) -+ -+#define __swait_event_interruptible_timeout(wq, condition, timeout) \ -+ ___swait_event(wq, ___wait_cond_timeout(condition), \ -+ TASK_INTERRUPTIBLE, timeout, \ -+ __ret = schedule_timeout(__ret)) -+ -+#define swait_event_interruptible_timeout(wq, condition, timeout) \ -+({ \ -+ long __ret = timeout; \ -+ if (!___wait_cond_timeout(condition)) \ -+ __ret = __swait_event_interruptible_timeout(wq, \ -+ condition, timeout); \ -+ __ret; \ -+}) -+ -+#endif /* _LINUX_SWAIT_H */ -diff -Nur linux-4.4.13.orig/include/linux/swap.h linux-4.4.13/include/linux/swap.h ---- linux-4.4.13.orig/include/linux/swap.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/swap.h 2016-06-22 21:55:08.038523867 +0200 -@@ -11,6 +11,7 @@ - #include - #include - #include -+#include - #include - - struct notifier_block; -@@ -252,7 +253,8 @@ - void *workingset_eviction(struct address_space *mapping, struct page *page); - bool workingset_refault(void *shadow); - void workingset_activation(struct page *page); --extern struct list_lru workingset_shadow_nodes; -+extern struct list_lru __workingset_shadow_nodes; -+DECLARE_LOCAL_IRQ_LOCK(workingset_shadow_lock); - - static inline unsigned int workingset_node_pages(struct radix_tree_node *node) - { -@@ -296,6 +298,7 @@ - - - /* linux/mm/swap.c */ -+DECLARE_LOCAL_IRQ_LOCK(swapvec_lock); - extern void lru_cache_add(struct page *); - extern void lru_cache_add_anon(struct page *page); - extern void lru_cache_add_file(struct page *page); -diff -Nur linux-4.4.13.orig/include/linux/thread_info.h linux-4.4.13/include/linux/thread_info.h ---- linux-4.4.13.orig/include/linux/thread_info.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/thread_info.h 2016-06-22 21:55:08.038523867 +0200 -@@ -102,7 +102,17 @@ - #define test_thread_flag(flag) \ - test_ti_thread_flag(current_thread_info(), flag) - --#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) -+#ifdef CONFIG_PREEMPT_LAZY -+#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \ -+ test_thread_flag(TIF_NEED_RESCHED_LAZY)) -+#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED)) -+#define tif_need_resched_lazy() test_thread_flag(TIF_NEED_RESCHED_LAZY)) -+ -+#else -+#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) -+#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED) -+#define tif_need_resched_lazy() 0 -+#endif - - #if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK - /* -diff -Nur linux-4.4.13.orig/include/linux/timer.h linux-4.4.13/include/linux/timer.h ---- linux-4.4.13.orig/include/linux/timer.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/timer.h 2016-06-22 21:55:08.038523867 +0200 -@@ -225,7 +225,7 @@ - - extern int try_to_del_timer_sync(struct timer_list *timer); - --#ifdef 
CONFIG_SMP -+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) - extern int del_timer_sync(struct timer_list *timer); - #else - # define del_timer_sync(t) del_timer(t) -diff -Nur linux-4.4.13.orig/include/linux/trace_events.h linux-4.4.13/include/linux/trace_events.h ---- linux-4.4.13.orig/include/linux/trace_events.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/trace_events.h 2016-06-22 21:55:08.038523867 +0200 -@@ -66,6 +66,9 @@ - unsigned char flags; - unsigned char preempt_count; - int pid; -+ unsigned short migrate_disable; -+ unsigned short padding; -+ unsigned char preempt_lazy_count; - }; - - #define TRACE_EVENT_TYPE_MAX \ -diff -Nur linux-4.4.13.orig/include/linux/uaccess.h linux-4.4.13/include/linux/uaccess.h ---- linux-4.4.13.orig/include/linux/uaccess.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/uaccess.h 2016-06-22 21:55:08.038523867 +0200 -@@ -24,6 +24,7 @@ - */ - static inline void pagefault_disable(void) - { -+ migrate_disable(); - pagefault_disabled_inc(); - /* - * make sure to have issued the store before a pagefault -@@ -40,6 +41,7 @@ - */ - barrier(); - pagefault_disabled_dec(); -+ migrate_enable(); - } - - /* -diff -Nur linux-4.4.13.orig/include/linux/uprobes.h linux-4.4.13/include/linux/uprobes.h ---- linux-4.4.13.orig/include/linux/uprobes.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/uprobes.h 2016-06-22 21:55:08.038523867 +0200 -@@ -27,6 +27,7 @@ - #include - #include - #include -+#include - - struct vm_area_struct; - struct mm_struct; -diff -Nur linux-4.4.13.orig/include/linux/vmstat.h linux-4.4.13/include/linux/vmstat.h ---- linux-4.4.13.orig/include/linux/vmstat.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/vmstat.h 2016-06-22 21:55:08.042523867 +0200 -@@ -33,7 +33,9 @@ - */ - static inline void __count_vm_event(enum vm_event_item item) - { -+ preempt_disable_rt(); - raw_cpu_inc(vm_event_states.event[item]); -+ preempt_enable_rt(); - } - - static inline void count_vm_event(enum vm_event_item item) -@@ -43,7 +45,9 @@ - - static inline void __count_vm_events(enum vm_event_item item, long delta) - { -+ preempt_disable_rt(); - raw_cpu_add(vm_event_states.event[item], delta); -+ preempt_enable_rt(); - } - - static inline void count_vm_events(enum vm_event_item item, long delta) -diff -Nur linux-4.4.13.orig/include/linux/wait.h linux-4.4.13/include/linux/wait.h ---- linux-4.4.13.orig/include/linux/wait.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/linux/wait.h 2016-06-22 21:55:08.042523867 +0200 -@@ -8,6 +8,7 @@ - #include - #include - #include -+#include - - typedef struct __wait_queue wait_queue_t; - typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key); -diff -Nur linux-4.4.13.orig/include/linux/work-simple.h linux-4.4.13/include/linux/work-simple.h ---- linux-4.4.13.orig/include/linux/work-simple.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-4.4.13/include/linux/work-simple.h 2016-06-22 21:55:08.042523867 +0200 -@@ -0,0 +1,24 @@ -+#ifndef _LINUX_SWORK_H -+#define _LINUX_SWORK_H -+ -+#include -+ -+struct swork_event { -+ struct list_head item; -+ unsigned long flags; -+ void (*func)(struct swork_event *); -+}; -+ -+static inline void INIT_SWORK(struct swork_event *event, -+ void (*func)(struct swork_event *)) -+{ -+ event->flags = 0; -+ event->func = func; -+} -+ -+bool swork_queue(struct swork_event *sev); -+ -+int swork_get(void); -+void swork_put(void); -+ -+#endif /* _LINUX_SWORK_H */ -diff -Nur 
linux-4.4.13.orig/include/net/dst.h linux-4.4.13/include/net/dst.h ---- linux-4.4.13.orig/include/net/dst.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/net/dst.h 2016-06-22 21:55:08.042523867 +0200 -@@ -437,7 +437,7 @@ - static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n, - struct sk_buff *skb) - { -- const struct hh_cache *hh; -+ struct hh_cache *hh; - - if (dst->pending_confirm) { - unsigned long now = jiffies; -diff -Nur linux-4.4.13.orig/include/net/neighbour.h linux-4.4.13/include/net/neighbour.h ---- linux-4.4.13.orig/include/net/neighbour.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/net/neighbour.h 2016-06-22 21:55:08.042523867 +0200 -@@ -446,7 +446,7 @@ - } - #endif - --static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) -+static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb) - { - unsigned int seq; - int hh_len; -@@ -501,7 +501,7 @@ - - #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb) - --static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n, -+static inline void neigh_ha_snapshot(char *dst, struct neighbour *n, - const struct net_device *dev) - { - unsigned int seq; -diff -Nur linux-4.4.13.orig/include/net/netns/ipv4.h linux-4.4.13/include/net/netns/ipv4.h ---- linux-4.4.13.orig/include/net/netns/ipv4.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/net/netns/ipv4.h 2016-06-22 21:55:08.042523867 +0200 -@@ -70,6 +70,7 @@ - - int sysctl_icmp_echo_ignore_all; - int sysctl_icmp_echo_ignore_broadcasts; -+ int sysctl_icmp_echo_sysrq; - int sysctl_icmp_ignore_bogus_error_responses; - int sysctl_icmp_ratelimit; - int sysctl_icmp_ratemask; -diff -Nur linux-4.4.13.orig/include/trace/events/hist.h linux-4.4.13/include/trace/events/hist.h ---- linux-4.4.13.orig/include/trace/events/hist.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-4.4.13/include/trace/events/hist.h 2016-06-22 21:55:08.042523867 +0200 -@@ -0,0 +1,73 @@ -+#undef TRACE_SYSTEM -+#define TRACE_SYSTEM hist -+ -+#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ) -+#define _TRACE_HIST_H -+ -+#include "latency_hist.h" -+#include -+ -+#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST) -+#define trace_preemptirqsoff_hist(a, b) -+#define trace_preemptirqsoff_hist_rcuidle(a, b) -+#else -+TRACE_EVENT(preemptirqsoff_hist, -+ -+ TP_PROTO(int reason, int starthist), -+ -+ TP_ARGS(reason, starthist), -+ -+ TP_STRUCT__entry( -+ __field(int, reason) -+ __field(int, starthist) -+ ), -+ -+ TP_fast_assign( -+ __entry->reason = reason; -+ __entry->starthist = starthist; -+ ), -+ -+ TP_printk("reason=%s starthist=%s", getaction(__entry->reason), -+ __entry->starthist ? "start" : "stop") -+); -+#endif -+ -+#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST -+#define trace_hrtimer_interrupt(a, b, c, d) -+#else -+TRACE_EVENT(hrtimer_interrupt, -+ -+ TP_PROTO(int cpu, long long offset, struct task_struct *curr, -+ struct task_struct *task), -+ -+ TP_ARGS(cpu, offset, curr, task), -+ -+ TP_STRUCT__entry( -+ __field(int, cpu) -+ __field(long long, offset) -+ __array(char, ccomm, TASK_COMM_LEN) -+ __field(int, cprio) -+ __array(char, tcomm, TASK_COMM_LEN) -+ __field(int, tprio) -+ ), -+ -+ TP_fast_assign( -+ __entry->cpu = cpu; -+ __entry->offset = offset; -+ memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN); -+ __entry->cprio = curr->prio; -+ memcpy(__entry->tcomm, task != NULL ? task->comm : "", -+ task != NULL ? 
TASK_COMM_LEN : 7); -+ __entry->tprio = task != NULL ? task->prio : -1; -+ ), -+ -+ TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]", -+ __entry->cpu, __entry->offset, __entry->ccomm, -+ __entry->cprio, __entry->tcomm, __entry->tprio) -+); -+#endif -+ -+#endif /* _TRACE_HIST_H */ -+ -+/* This part must be outside protection */ -+#include -diff -Nur linux-4.4.13.orig/include/trace/events/latency_hist.h linux-4.4.13/include/trace/events/latency_hist.h ---- linux-4.4.13.orig/include/trace/events/latency_hist.h 1970-01-01 01:00:00.000000000 +0100 -+++ linux-4.4.13/include/trace/events/latency_hist.h 2016-06-22 21:55:08.042523867 +0200 -@@ -0,0 +1,29 @@ -+#ifndef _LATENCY_HIST_H -+#define _LATENCY_HIST_H -+ -+enum hist_action { -+ IRQS_ON, -+ PREEMPT_ON, -+ TRACE_STOP, -+ IRQS_OFF, -+ PREEMPT_OFF, -+ TRACE_START, -+}; -+ -+static char *actions[] = { -+ "IRQS_ON", -+ "PREEMPT_ON", -+ "TRACE_STOP", -+ "IRQS_OFF", -+ "PREEMPT_OFF", -+ "TRACE_START", -+}; -+ -+static inline char *getaction(int action) -+{ -+ if (action >= 0 && action <= sizeof(actions)/sizeof(actions[0])) -+ return actions[action]; -+ return "unknown"; -+} -+ -+#endif /* _LATENCY_HIST_H */ -diff -Nur linux-4.4.13.orig/include/trace/events/writeback.h linux-4.4.13/include/trace/events/writeback.h ---- linux-4.4.13.orig/include/trace/events/writeback.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/include/trace/events/writeback.h 2016-06-22 21:55:08.042523867 +0200 -@@ -134,58 +134,28 @@ - #ifdef CREATE_TRACE_POINTS - #ifdef CONFIG_CGROUP_WRITEBACK - --static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb) -+static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb) - { -- return kernfs_path_len(wb->memcg_css->cgroup->kn) + 1; -+ return wb->memcg_css->cgroup->kn->ino; - } - --static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb) --{ -- struct cgroup *cgrp = wb->memcg_css->cgroup; -- char *path; -- -- path = cgroup_path(cgrp, buf, kernfs_path_len(cgrp->kn) + 1); -- WARN_ON_ONCE(path != buf); --} -- --static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc) --{ -- if (wbc->wb) -- return __trace_wb_cgroup_size(wbc->wb); -- else -- return 2; --} -- --static inline void __trace_wbc_assign_cgroup(char *buf, -- struct writeback_control *wbc) -+static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc) - { - if (wbc->wb) -- __trace_wb_assign_cgroup(buf, wbc->wb); -+ return __trace_wb_assign_cgroup(wbc->wb); - else -- strcpy(buf, "/"); -+ return -1U; - } -- - #else /* CONFIG_CGROUP_WRITEBACK */ - --static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb) --{ -- return 2; --} -- --static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb) --{ -- strcpy(buf, "/"); --} -- --static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc) -+static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb) - { -- return 2; -+ return -1U; - } - --static inline void __trace_wbc_assign_cgroup(char *buf, -- struct writeback_control *wbc) -+static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc) - { -- strcpy(buf, "/"); -+ return -1U; - } - - #endif /* CONFIG_CGROUP_WRITEBACK */ -@@ -201,7 +171,7 @@ - __array(char, name, 32) - __field(unsigned long, ino) - __field(int, sync_mode) -- __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc)) -+ __field(unsigned int, cgroup_ino) - ), - - TP_fast_assign( -@@ -209,14 +179,14 @@ - 
dev_name(inode_to_bdi(inode)->dev), 32); - __entry->ino = inode->i_ino; - __entry->sync_mode = wbc->sync_mode; -- __trace_wbc_assign_cgroup(__get_str(cgroup), wbc); -+ __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); - ), - -- TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup=%s", -+ TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%u", - __entry->name, - __entry->ino, - __entry->sync_mode, -- __get_str(cgroup) -+ __entry->cgroup_ino - ) - ); - -@@ -246,7 +216,7 @@ - __field(int, range_cyclic) - __field(int, for_background) - __field(int, reason) -- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb)) -+ __field(unsigned int, cgroup_ino) - ), - TP_fast_assign( - strncpy(__entry->name, -@@ -258,10 +228,10 @@ - __entry->range_cyclic = work->range_cyclic; - __entry->for_background = work->for_background; - __entry->reason = work->reason; -- __trace_wb_assign_cgroup(__get_str(cgroup), wb); -+ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); - ), - TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d " -- "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup=%s", -+ "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%u", - __entry->name, - MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev), - __entry->nr_pages, -@@ -270,7 +240,7 @@ - __entry->range_cyclic, - __entry->for_background, - __print_symbolic(__entry->reason, WB_WORK_REASON), -- __get_str(cgroup) -+ __entry->cgroup_ino - ) - ); - #define DEFINE_WRITEBACK_WORK_EVENT(name) \ -@@ -300,15 +270,15 @@ - TP_ARGS(wb), - TP_STRUCT__entry( - __array(char, name, 32) -- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb)) -+ __field(unsigned int, cgroup_ino) - ), - TP_fast_assign( - strncpy(__entry->name, dev_name(wb->bdi->dev), 32); -- __trace_wb_assign_cgroup(__get_str(cgroup), wb); -+ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); - ), -- TP_printk("bdi %s: cgroup=%s", -+ TP_printk("bdi %s: cgroup_ino=%u", - __entry->name, -- __get_str(cgroup) -+ __entry->cgroup_ino - ) - ); - #define DEFINE_WRITEBACK_EVENT(name) \ -@@ -347,7 +317,7 @@ - __field(int, range_cyclic) - __field(long, range_start) - __field(long, range_end) -- __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc)) -+ __field(unsigned int, cgroup_ino) - ), - - TP_fast_assign( -@@ -361,12 +331,12 @@ - __entry->range_cyclic = wbc->range_cyclic; - __entry->range_start = (long)wbc->range_start; - __entry->range_end = (long)wbc->range_end; -- __trace_wbc_assign_cgroup(__get_str(cgroup), wbc); -+ __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); - ), - - TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d " - "bgrd=%d reclm=%d cyclic=%d " -- "start=0x%lx end=0x%lx cgroup=%s", -+ "start=0x%lx end=0x%lx cgroup_ino=%u", - __entry->name, - __entry->nr_to_write, - __entry->pages_skipped, -@@ -377,7 +347,7 @@ - __entry->range_cyclic, - __entry->range_start, - __entry->range_end, -- __get_str(cgroup) -+ __entry->cgroup_ino - ) - ) - -@@ -398,7 +368,7 @@ - __field(long, age) - __field(int, moved) - __field(int, reason) -- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb)) -+ __field(unsigned int, cgroup_ino) - ), - TP_fast_assign( - unsigned long *older_than_this = work->older_than_this; -@@ -408,15 +378,15 @@ - (jiffies - *older_than_this) * 1000 / HZ : -1; - __entry->moved = moved; - __entry->reason = work->reason; -- __trace_wb_assign_cgroup(__get_str(cgroup), wb); -+ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); - ), -- TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup=%s", -+ TP_printk("bdi %s: older=%lu age=%ld 
enqueue=%d reason=%s cgroup_ino=%u", - __entry->name, - __entry->older, /* older_than_this in jiffies */ - __entry->age, /* older_than_this in relative milliseconds */ - __entry->moved, - __print_symbolic(__entry->reason, WB_WORK_REASON), -- __get_str(cgroup) -+ __entry->cgroup_ino - ) - ); - -@@ -484,7 +454,7 @@ - __field(unsigned long, dirty_ratelimit) - __field(unsigned long, task_ratelimit) - __field(unsigned long, balanced_dirty_ratelimit) -- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb)) -+ __field(unsigned int, cgroup_ino) - ), - - TP_fast_assign( -@@ -496,13 +466,13 @@ - __entry->task_ratelimit = KBps(task_ratelimit); - __entry->balanced_dirty_ratelimit = - KBps(wb->balanced_dirty_ratelimit); -- __trace_wb_assign_cgroup(__get_str(cgroup), wb); -+ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); - ), - - TP_printk("bdi %s: " - "write_bw=%lu awrite_bw=%lu dirty_rate=%lu " - "dirty_ratelimit=%lu task_ratelimit=%lu " -- "balanced_dirty_ratelimit=%lu cgroup=%s", -+ "balanced_dirty_ratelimit=%lu cgroup_ino=%u", - __entry->bdi, - __entry->write_bw, /* write bandwidth */ - __entry->avg_write_bw, /* avg write bandwidth */ -@@ -510,7 +480,7 @@ - __entry->dirty_ratelimit, /* base ratelimit */ - __entry->task_ratelimit, /* ratelimit with position control */ - __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */ -- __get_str(cgroup) -+ __entry->cgroup_ino - ) - ); - -@@ -548,7 +518,7 @@ - __field( long, pause) - __field(unsigned long, period) - __field( long, think) -- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb)) -+ __field(unsigned int, cgroup_ino) - ), - - TP_fast_assign( -@@ -571,7 +541,7 @@ - __entry->period = period * 1000 / HZ; - __entry->pause = pause * 1000 / HZ; - __entry->paused = (jiffies - start_time) * 1000 / HZ; -- __trace_wb_assign_cgroup(__get_str(cgroup), wb); -+ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); - ), - - -@@ -580,7 +550,7 @@ - "bdi_setpoint=%lu bdi_dirty=%lu " - "dirty_ratelimit=%lu task_ratelimit=%lu " - "dirtied=%u dirtied_pause=%u " -- "paused=%lu pause=%ld period=%lu think=%ld cgroup=%s", -+ "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%u", - __entry->bdi, - __entry->limit, - __entry->setpoint, -@@ -595,7 +565,7 @@ - __entry->pause, /* ms */ - __entry->period, /* ms */ - __entry->think, /* ms */ -- __get_str(cgroup) -+ __entry->cgroup_ino - ) - ); - -@@ -609,8 +579,7 @@ - __field(unsigned long, ino) - __field(unsigned long, state) - __field(unsigned long, dirtied_when) -- __dynamic_array(char, cgroup, -- __trace_wb_cgroup_size(inode_to_wb(inode))) -+ __field(unsigned int, cgroup_ino) - ), - - TP_fast_assign( -@@ -619,16 +588,16 @@ - __entry->ino = inode->i_ino; - __entry->state = inode->i_state; - __entry->dirtied_when = inode->dirtied_when; -- __trace_wb_assign_cgroup(__get_str(cgroup), inode_to_wb(inode)); -+ __entry->cgroup_ino = __trace_wb_assign_cgroup(inode_to_wb(inode)); - ), - -- TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup=%s", -+ TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%u", - __entry->name, - __entry->ino, - show_inode_state(__entry->state), - __entry->dirtied_when, - (jiffies - __entry->dirtied_when) / HZ, -- __get_str(cgroup) -+ __entry->cgroup_ino - ) - ); - -@@ -684,7 +653,7 @@ - __field(unsigned long, writeback_index) - __field(long, nr_to_write) - __field(unsigned long, wrote) -- __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc)) -+ __field(unsigned int, cgroup_ino) - ), - - TP_fast_assign( -@@ -696,11 +665,11 @@ - 
__entry->writeback_index = inode->i_mapping->writeback_index; - __entry->nr_to_write = nr_to_write; - __entry->wrote = nr_to_write - wbc->nr_to_write; -- __trace_wbc_assign_cgroup(__get_str(cgroup), wbc); -+ __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); - ), - - TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu " -- "index=%lu to_write=%ld wrote=%lu cgroup=%s", -+ "index=%lu to_write=%ld wrote=%lu cgroup_ino=%u", - __entry->name, - __entry->ino, - show_inode_state(__entry->state), -@@ -709,7 +678,7 @@ - __entry->writeback_index, - __entry->nr_to_write, - __entry->wrote, -- __get_str(cgroup) -+ __entry->cgroup_ino - ) - ); - -diff -Nur linux-4.4.13.orig/init/Kconfig linux-4.4.13/init/Kconfig ---- linux-4.4.13.orig/init/Kconfig 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/init/Kconfig 2016-06-22 21:55:08.042523867 +0200 -@@ -498,7 +498,7 @@ - - config RCU_EXPERT - bool "Make expert-level adjustments to RCU configuration" -- default n -+ default y if PREEMPT_RT_FULL - help - This option needs to be enabled if you wish to make - expert-level adjustments to RCU configuration. By default, -@@ -614,7 +614,7 @@ - - config RCU_FAST_NO_HZ - bool "Accelerate last non-dyntick-idle CPU's grace periods" -- depends on NO_HZ_COMMON && SMP && RCU_EXPERT -+ depends on NO_HZ_COMMON && SMP && RCU_EXPERT && !PREEMPT_RT_FULL - default n - help - This option permits CPUs to enter dynticks-idle state even if -@@ -641,7 +641,7 @@ - config RCU_BOOST - bool "Enable RCU priority boosting" - depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT -- default n -+ default y if PREEMPT_RT_FULL - help - This option boosts the priority of preempted RCU readers that - block the current preemptible RCU grace period for too long. -@@ -1106,6 +1106,7 @@ - config RT_GROUP_SCHED - bool "Group scheduling for SCHED_RR/FIFO" - depends on CGROUP_SCHED -+ depends on !PREEMPT_RT_FULL - default n - help - This feature lets you explicitly allocate real CPU bandwidth -@@ -1719,6 +1720,7 @@ - - config SLAB - bool "SLAB" -+ depends on !PREEMPT_RT_FULL - help - The regular slab allocator that is established and known to work - well in all environments. It organizes cache hot objects in -@@ -1737,6 +1739,7 @@ - config SLOB - depends on EXPERT - bool "SLOB (Simple Allocator)" -+ depends on !PREEMPT_RT_FULL - help - SLOB replaces the stock allocator with a drastically simpler - allocator. 
SLOB is generally more space efficient but -@@ -1746,7 +1749,7 @@ - - config SLUB_CPU_PARTIAL - default y -- depends on SLUB && SMP -+ depends on SLUB && SMP && !PREEMPT_RT_FULL - bool "SLUB per cpu partial cache" - help - Per cpu partial caches accellerate objects allocation and freeing -diff -Nur linux-4.4.13.orig/init/main.c linux-4.4.13/init/main.c ---- linux-4.4.13.orig/init/main.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/init/main.c 2016-06-22 21:55:08.042523867 +0200 -@@ -530,6 +530,7 @@ - setup_command_line(command_line); - setup_nr_cpu_ids(); - setup_per_cpu_areas(); -+ softirq_early_init(); - smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ - - build_all_zonelists(NULL, NULL); -diff -Nur linux-4.4.13.orig/init/Makefile linux-4.4.13/init/Makefile ---- linux-4.4.13.orig/init/Makefile 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/init/Makefile 2016-06-22 21:55:08.042523867 +0200 -@@ -33,4 +33,4 @@ - include/generated/compile.h: FORCE - @$($(quiet)chk_compile.h) - $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \ -- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)" -+ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)" -diff -Nur linux-4.4.13.orig/ipc/msg.c linux-4.4.13/ipc/msg.c ---- linux-4.4.13.orig/ipc/msg.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/ipc/msg.c 2016-06-22 21:55:08.042523867 +0200 -@@ -183,20 +183,14 @@ - } - } - --static void expunge_all(struct msg_queue *msq, int res) -+static void expunge_all(struct msg_queue *msq, int res, -+ struct wake_q_head *wake_q) - { - struct msg_receiver *msr, *t; - - list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { -- msr->r_msg = NULL; /* initialize expunge ordering */ -- wake_up_process(msr->r_tsk); -- /* -- * Ensure that the wakeup is visible before setting r_msg as -- * the receiving end depends on it: either spinning on a nil, -- * or dealing with -EAGAIN cases. See lockless receive part 1 -- * and 2 in do_msgrcv(). -- */ -- smp_wmb(); /* barrier (B) */ -+ -+ wake_q_add(wake_q, msr->r_tsk); - msr->r_msg = ERR_PTR(res); - } - } -@@ -213,11 +207,13 @@ - { - struct msg_msg *msg, *t; - struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm); -+ WAKE_Q(wake_q); - -- expunge_all(msq, -EIDRM); -+ expunge_all(msq, -EIDRM, &wake_q); - ss_wakeup(&msq->q_senders, 1); - msg_rmid(ns, msq); - ipc_unlock_object(&msq->q_perm); -+ wake_up_q(&wake_q); - rcu_read_unlock(); - - list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) { -@@ -342,6 +338,7 @@ - struct kern_ipc_perm *ipcp; - struct msqid64_ds uninitialized_var(msqid64); - struct msg_queue *msq; -+ WAKE_Q(wake_q); - int err; - - if (cmd == IPC_SET) { -@@ -389,7 +386,7 @@ - /* sleeping receivers might be excluded by - * stricter permissions. - */ -- expunge_all(msq, -EAGAIN); -+ expunge_all(msq, -EAGAIN, &wake_q); - /* sleeping senders might be able to send - * due to a larger queue size. 
- */ -@@ -402,6 +399,7 @@ - - out_unlock0: - ipc_unlock_object(&msq->q_perm); -+ wake_up_q(&wake_q); - out_unlock1: - rcu_read_unlock(); - out_up: -@@ -566,7 +564,8 @@ - return 0; - } - --static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) -+static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg, -+ struct wake_q_head *wake_q) - { - struct msg_receiver *msr, *t; - -@@ -577,27 +576,13 @@ - - list_del(&msr->r_list); - if (msr->r_maxsize < msg->m_ts) { -- /* initialize pipelined send ordering */ -- msr->r_msg = NULL; -- wake_up_process(msr->r_tsk); -- /* barrier (B) see barrier comment below */ -- smp_wmb(); -+ wake_q_add(wake_q, msr->r_tsk); - msr->r_msg = ERR_PTR(-E2BIG); - } else { -- msr->r_msg = NULL; - msq->q_lrpid = task_pid_vnr(msr->r_tsk); - msq->q_rtime = get_seconds(); -- wake_up_process(msr->r_tsk); -- /* -- * Ensure that the wakeup is visible before -- * setting r_msg, as the receiving can otherwise -- * exit - once r_msg is set, the receiver can -- * continue. See lockless receive part 1 and 2 -- * in do_msgrcv(). Barrier (B). -- */ -- smp_wmb(); -+ wake_q_add(wake_q, msr->r_tsk); - msr->r_msg = msg; -- - return 1; - } - } -@@ -613,6 +598,7 @@ - struct msg_msg *msg; - int err; - struct ipc_namespace *ns; -+ WAKE_Q(wake_q); - - ns = current->nsproxy->ipc_ns; - -@@ -698,7 +684,7 @@ - msq->q_lspid = task_tgid_vnr(current); - msq->q_stime = get_seconds(); - -- if (!pipelined_send(msq, msg)) { -+ if (!pipelined_send(msq, msg, &wake_q)) { - /* no one is waiting for this message, enqueue it */ - list_add_tail(&msg->m_list, &msq->q_messages); - msq->q_cbytes += msgsz; -@@ -712,6 +698,7 @@ - - out_unlock0: - ipc_unlock_object(&msq->q_perm); -+ wake_up_q(&wake_q); - out_unlock1: - rcu_read_unlock(); - if (msg != NULL) -@@ -932,57 +919,25 @@ - rcu_read_lock(); - - /* Lockless receive, part 2: -- * Wait until pipelined_send or expunge_all are outside of -- * wake_up_process(). There is a race with exit(), see -- * ipc/mqueue.c for the details. The correct serialization -- * ensures that a receiver cannot continue without the wakeup -- * being visibible _before_ setting r_msg: -+ * The work in pipelined_send() and expunge_all(): -+ * - Set pointer to message -+ * - Queue the receiver task for later wakeup -+ * - Wake up the process after the lock is dropped. - * -- * CPU 0 CPU 1 -- * -- * smp_rmb(); (A) <-- pair -. -- * r_msg> | msr->r_msg = NULL; -- * | wake_up_process(); -- * `------> smp_wmb(); (B) -- * msr->r_msg = msg; -- * -- * Where (A) orders the message value read and where (B) orders -- * the write to the r_msg -- done in both pipelined_send and -- * expunge_all. -+ * Should the process wake up before this wakeup (due to a -+ * signal) it will either see the message and continue … - */ -- for (;;) { -- /* -- * Pairs with writer barrier in pipelined_send -- * or expunge_all. -- */ -- smp_rmb(); /* barrier (A) */ -- msg = (struct msg_msg *)msr_d.r_msg; -- if (msg) -- break; - -- /* -- * The cpu_relax() call is a compiler barrier -- * which forces everything in this loop to be -- * re-loaded. -- */ -- cpu_relax(); -- } -- -- /* Lockless receive, part 3: -- * If there is a message or an error then accept it without -- * locking. -- */ -+ msg = (struct msg_msg *)msr_d.r_msg; - if (msg != ERR_PTR(-EAGAIN)) - goto out_unlock1; - -- /* Lockless receive, part 3: -- * Acquire the queue spinlock. -- */ -+ /* -+ * … or see -EAGAIN, acquire the lock to check the message -+ * again. 
-+ */ - ipc_lock_object(&msq->q_perm); - -- /* Lockless receive, part 4: -- * Repeat test after acquiring the spinlock. -- */ - msg = (struct msg_msg *)msr_d.r_msg; - if (msg != ERR_PTR(-EAGAIN)) - goto out_unlock0; -diff -Nur linux-4.4.13.orig/ipc/sem.c linux-4.4.13/ipc/sem.c ---- linux-4.4.13.orig/ipc/sem.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/ipc/sem.c 2016-06-22 21:55:08.042523867 +0200 -@@ -690,6 +690,13 @@ - static void wake_up_sem_queue_prepare(struct list_head *pt, - struct sem_queue *q, int error) - { -+#ifdef CONFIG_PREEMPT_RT_BASE -+ struct task_struct *p = q->sleeper; -+ get_task_struct(p); -+ q->status = error; -+ wake_up_process(p); -+ put_task_struct(p); -+#else - if (list_empty(pt)) { - /* - * Hold preempt off so that we don't get preempted and have the -@@ -701,6 +708,7 @@ - q->pid = error; - - list_add_tail(&q->list, pt); -+#endif - } - - /** -@@ -714,6 +722,7 @@ - */ - static void wake_up_sem_queue_do(struct list_head *pt) - { -+#ifndef CONFIG_PREEMPT_RT_BASE - struct sem_queue *q, *t; - int did_something; - -@@ -726,6 +735,7 @@ - } - if (did_something) - preempt_enable(); -+#endif - } - - static void unlink_queue(struct sem_array *sma, struct sem_queue *q) -diff -Nur linux-4.4.13.orig/kernel/cgroup.c linux-4.4.13/kernel/cgroup.c ---- linux-4.4.13.orig/kernel/cgroup.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/cgroup.c 2016-06-22 21:55:08.042523867 +0200 -@@ -4737,10 +4737,10 @@ - queue_work(cgroup_destroy_wq, &css->destroy_work); - } - --static void css_release_work_fn(struct work_struct *work) -+static void css_release_work_fn(struct swork_event *sev) - { - struct cgroup_subsys_state *css = -- container_of(work, struct cgroup_subsys_state, destroy_work); -+ container_of(sev, struct cgroup_subsys_state, destroy_swork); - struct cgroup_subsys *ss = css->ss; - struct cgroup *cgrp = css->cgroup; - -@@ -4779,8 +4779,8 @@ - struct cgroup_subsys_state *css = - container_of(ref, struct cgroup_subsys_state, refcnt); - -- INIT_WORK(&css->destroy_work, css_release_work_fn); -- queue_work(cgroup_destroy_wq, &css->destroy_work); -+ INIT_SWORK(&css->destroy_swork, css_release_work_fn); -+ swork_queue(&css->destroy_swork); - } - - static void init_and_link_css(struct cgroup_subsys_state *css, -@@ -5396,6 +5396,7 @@ - */ - cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1); - BUG_ON(!cgroup_destroy_wq); -+ BUG_ON(swork_get()); - - /* - * Used to destroy pidlists and separate to serve as flush domain. 
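
The cgroup hunk above is the consumer pattern for the work-simple.h
interface added earlier in this patch: embed a struct swork_event in the
object, attach a callback with INIT_SWORK(), and defer the actual release
with swork_queue(), after a one-time swork_get() has brought up the helper
thread. Below is a minimal sketch of that pattern; my_object, my_release_fn
and my_subsys_init are hypothetical names, only the swork_* calls are the
real interface from work-simple.h.

	#include <linux/slab.h>
	#include <linux/atomic.h>
	#include <linux/work-simple.h>

	struct my_object {			/* hypothetical example object */
		atomic_t refcnt;
		struct swork_event release_swork;
	};

	/* Runs in the swork helper kthread, i.e. preemptible process
	 * context where sleeping locks are legal even on PREEMPT_RT_FULL. */
	static void my_release_fn(struct swork_event *sev)
	{
		struct my_object *obj =
			container_of(sev, struct my_object, release_swork);

		kfree(obj);
	}

	/* Callable from atomic context: only queues the event, never does
	 * the release work inline. */
	static void my_object_put(struct my_object *obj)
	{
		if (atomic_dec_and_test(&obj->refcnt)) {
			INIT_SWORK(&obj->release_swork, my_release_fn);
			swork_queue(&obj->release_swork);
		}
	}

	static int __init my_subsys_init(void)
	{
		/* Bring up the helper thread once; the cgroup hunk above
		 * does BUG_ON(swork_get()) at init for the same purpose. */
		return swork_get();
	}

swork_get() and swork_put() read like a get/put pair on the shared helper
thread, so a subsystem that can be torn down again would presumably pair
the init call with swork_put(); cgroup never does, since its destroy path
lives for the lifetime of the system.
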
-diff -Nur linux-4.4.13.orig/kernel/cpu.c linux-4.4.13/kernel/cpu.c ---- linux-4.4.13.orig/kernel/cpu.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/cpu.c 2016-06-22 21:55:08.046523867 +0200 -@@ -75,8 +75,8 @@ - #endif - } cpu_hotplug = { - .active_writer = NULL, -- .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq), - .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), -+ .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq), - #ifdef CONFIG_DEBUG_LOCK_ALLOC - .dep_map = {.name = "cpu_hotplug.lock" }, - #endif -@@ -89,6 +89,289 @@ - #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map) - #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map) - -+/** -+ * hotplug_pcp - per cpu hotplug descriptor -+ * @unplug: set when pin_current_cpu() needs to sync tasks -+ * @sync_tsk: the task that waits for tasks to finish pinned sections -+ * @refcount: counter of tasks in pinned sections -+ * @grab_lock: set when the tasks entering pinned sections should wait -+ * @synced: notifier for @sync_tsk to tell cpu_down it's finished -+ * @mutex: the mutex to make tasks wait (used when @grab_lock is true) -+ * @mutex_init: zero if the mutex hasn't been initialized yet. -+ * -+ * Although @unplug and @sync_tsk may point to the same task, the @unplug -+ * is used as a flag and still exists after @sync_tsk has exited and -+ * @sync_tsk set to NULL. -+ */ -+struct hotplug_pcp { -+ struct task_struct *unplug; -+ struct task_struct *sync_tsk; -+ int refcount; -+ int grab_lock; -+ struct completion synced; -+ struct completion unplug_wait; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ /* -+ * Note, on PREEMPT_RT, the hotplug lock must save the state of -+ * the task, otherwise the mutex will cause the task to fail -+ * to sleep when required. (Because it's called from migrate_disable()) -+ * -+ * The spinlock_t on PREEMPT_RT is a mutex that saves the task's -+ * state. -+ */ -+ spinlock_t lock; -+#else -+ struct mutex mutex; -+#endif -+ int mutex_init; -+}; -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+# define hotplug_lock(hp) rt_spin_lock__no_mg(&(hp)->lock) -+# define hotplug_unlock(hp) rt_spin_unlock__no_mg(&(hp)->lock) -+#else -+# define hotplug_lock(hp) mutex_lock(&(hp)->mutex) -+# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex) -+#endif -+ -+static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp); -+ -+/** -+ * pin_current_cpu - Prevent the current cpu from being unplugged -+ * -+ * Lightweight version of get_online_cpus() to prevent cpu from being -+ * unplugged when code runs in a migration disabled region. -+ * -+ * Must be called with preemption disabled (preempt_count = 1)! -+ */ -+void pin_current_cpu(void) -+{ -+ struct hotplug_pcp *hp; -+ int force = 0; -+ -+retry: -+ hp = this_cpu_ptr(&hotplug_pcp); -+ -+ if (!hp->unplug || hp->refcount || force || preempt_count() > 1 || -+ hp->unplug == current) { -+ hp->refcount++; -+ return; -+ } -+ if (hp->grab_lock) { -+ preempt_enable(); -+ hotplug_lock(hp); -+ hotplug_unlock(hp); -+ } else { -+ preempt_enable(); -+ /* -+ * Try to push this task off of this CPU. -+ */ -+ if (!migrate_me()) { -+ preempt_disable(); -+ hp = this_cpu_ptr(&hotplug_pcp); -+ if (!hp->grab_lock) { -+ /* -+ * Just let it continue it's already pinned -+ * or about to sleep. -+ */ -+ force = 1; -+ goto retry; -+ } -+ preempt_enable(); -+ } -+ } -+ preempt_disable(); -+ goto retry; -+} -+ -+/** -+ * unpin_current_cpu - Allow unplug of current cpu -+ * -+ * Must be called with preemption or interrupts disabled! 
-+ */ -+void unpin_current_cpu(void) -+{ -+ struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp); -+ -+ WARN_ON(hp->refcount <= 0); -+ -+ /* This is safe. sync_unplug_thread is pinned to this cpu */ -+ if (!--hp->refcount && hp->unplug && hp->unplug != current) -+ wake_up_process(hp->unplug); -+} -+ -+static void wait_for_pinned_cpus(struct hotplug_pcp *hp) -+{ -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ while (hp->refcount) { -+ schedule_preempt_disabled(); -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ } -+} -+ -+static int sync_unplug_thread(void *data) -+{ -+ struct hotplug_pcp *hp = data; -+ -+ wait_for_completion(&hp->unplug_wait); -+ preempt_disable(); -+ hp->unplug = current; -+ wait_for_pinned_cpus(hp); -+ -+ /* -+ * This thread will synchronize the cpu_down() with threads -+ * that have pinned the CPU. When the pinned CPU count reaches -+ * zero, we inform the cpu_down code to continue to the next step. -+ */ -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ preempt_enable(); -+ complete(&hp->synced); -+ -+ /* -+ * If all succeeds, the next step will need tasks to wait till -+ * the CPU is offline before continuing. To do this, the grab_lock -+ * is set and tasks going into pin_current_cpu() will block on the -+ * mutex. But we still need to wait for those that are already in -+ * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop() -+ * will kick this thread out. -+ */ -+ while (!hp->grab_lock && !kthread_should_stop()) { -+ schedule(); -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ } -+ -+ /* Make sure grab_lock is seen before we see a stale completion */ -+ smp_mb(); -+ -+ /* -+ * Now just before cpu_down() enters stop machine, we need to make -+ * sure all tasks that are in pinned CPU sections are out, and new -+ * tasks will now grab the lock, keeping them from entering pinned -+ * CPU sections. -+ */ -+ if (!kthread_should_stop()) { -+ preempt_disable(); -+ wait_for_pinned_cpus(hp); -+ preempt_enable(); -+ complete(&hp->synced); -+ } -+ -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ while (!kthread_should_stop()) { -+ schedule(); -+ set_current_state(TASK_UNINTERRUPTIBLE); -+ } -+ set_current_state(TASK_RUNNING); -+ -+ /* -+ * Force this thread off this CPU as it's going down and -+ * we don't want any more work on this CPU. -+ */ -+ current->flags &= ~PF_NO_SETAFFINITY; -+ set_cpus_allowed_ptr(current, cpu_present_mask); -+ migrate_me(); -+ return 0; -+} -+ -+static void __cpu_unplug_sync(struct hotplug_pcp *hp) -+{ -+ wake_up_process(hp->sync_tsk); -+ wait_for_completion(&hp->synced); -+} -+ -+static void __cpu_unplug_wait(unsigned int cpu) -+{ -+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); -+ -+ complete(&hp->unplug_wait); -+ wait_for_completion(&hp->synced); -+} -+ -+/* -+ * Start the sync_unplug_thread on the target cpu and wait for it to -+ * complete. 
-+ */ -+static int cpu_unplug_begin(unsigned int cpu) -+{ -+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); -+ int err; -+ -+ /* Protected by cpu_hotplug.lock */ -+ if (!hp->mutex_init) { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ spin_lock_init(&hp->lock); -+#else -+ mutex_init(&hp->mutex); -+#endif -+ hp->mutex_init = 1; -+ } -+ -+ /* Inform the scheduler to migrate tasks off this CPU */ -+ tell_sched_cpu_down_begin(cpu); -+ -+ init_completion(&hp->synced); -+ init_completion(&hp->unplug_wait); -+ -+ hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu); -+ if (IS_ERR(hp->sync_tsk)) { -+ err = PTR_ERR(hp->sync_tsk); -+ hp->sync_tsk = NULL; -+ return err; -+ } -+ kthread_bind(hp->sync_tsk, cpu); -+ -+ /* -+ * Wait for tasks to get out of the pinned sections, -+ * it's still OK if new tasks enter. Some CPU notifiers will -+ * wait for tasks that are going to enter these sections and -+ * we must not have them block. -+ */ -+ wake_up_process(hp->sync_tsk); -+ return 0; -+} -+ -+static void cpu_unplug_sync(unsigned int cpu) -+{ -+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); -+ -+ init_completion(&hp->synced); -+ /* The completion needs to be initialzied before setting grab_lock */ -+ smp_wmb(); -+ -+ /* Grab the mutex before setting grab_lock */ -+ hotplug_lock(hp); -+ hp->grab_lock = 1; -+ -+ /* -+ * The CPU notifiers have been completed. -+ * Wait for tasks to get out of pinned CPU sections and have new -+ * tasks block until the CPU is completely down. -+ */ -+ __cpu_unplug_sync(hp); -+ -+ /* All done with the sync thread */ -+ kthread_stop(hp->sync_tsk); -+ hp->sync_tsk = NULL; -+} -+ -+static void cpu_unplug_done(unsigned int cpu) -+{ -+ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); -+ -+ hp->unplug = NULL; -+ /* Let all tasks know cpu unplug is finished before cleaning up */ -+ smp_wmb(); -+ -+ if (hp->sync_tsk) -+ kthread_stop(hp->sync_tsk); -+ -+ if (hp->grab_lock) { -+ hotplug_unlock(hp); -+ /* protected by cpu_hotplug.lock */ -+ hp->grab_lock = 0; -+ } -+ tell_sched_cpu_down_done(cpu); -+} - - void get_online_cpus(void) - { -@@ -338,13 +621,15 @@ - /* Requires cpu_add_remove_lock to be held */ - static int _cpu_down(unsigned int cpu, int tasks_frozen) - { -- int err, nr_calls = 0; -+ int mycpu, err, nr_calls = 0; - void *hcpu = (void *)(long)cpu; - unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0; - struct take_cpu_down_param tcd_param = { - .mod = mod, - .hcpu = hcpu, - }; -+ cpumask_var_t cpumask; -+ cpumask_var_t cpumask_org; - - if (num_online_cpus() == 1) - return -EBUSY; -@@ -352,7 +637,34 @@ - if (!cpu_online(cpu)) - return -EINVAL; - -+ /* Move the downtaker off the unplug cpu */ -+ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL)) -+ return -ENOMEM; -+ if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) { -+ free_cpumask_var(cpumask); -+ return -ENOMEM; -+ } -+ -+ cpumask_copy(cpumask_org, tsk_cpus_allowed(current)); -+ cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu)); -+ set_cpus_allowed_ptr(current, cpumask); -+ free_cpumask_var(cpumask); -+ migrate_disable(); -+ mycpu = smp_processor_id(); -+ if (mycpu == cpu) { -+ printk(KERN_ERR "Yuck! 
Still on unplug CPU\n!"); -+ migrate_enable(); -+ err = -EBUSY; -+ goto restore_cpus; -+ } -+ migrate_enable(); -+ - cpu_hotplug_begin(); -+ err = cpu_unplug_begin(cpu); -+ if (err) { -+ printk("cpu_unplug_begin(%d) failed\n", cpu); -+ goto out_cancel; -+ } - - err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); - if (err) { -@@ -378,8 +690,12 @@ - else - synchronize_rcu(); - -+ __cpu_unplug_wait(cpu); - smpboot_park_threads(cpu); - -+ /* Notifiers are done. Don't let any more tasks pin this CPU. */ -+ cpu_unplug_sync(cpu); -+ - /* - * Prevent irq alloc/free while the dying cpu reorganizes the - * interrupt affinities. -@@ -424,9 +740,14 @@ - check_for_tasks(cpu); - - out_release: -+ cpu_unplug_done(cpu); -+out_cancel: - cpu_hotplug_done(); - if (!err) - cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu); -+restore_cpus: -+ set_cpus_allowed_ptr(current, cpumask_org); -+ free_cpumask_var(cpumask_org); - return err; - } - -diff -Nur linux-4.4.13.orig/kernel/debug/kdb/kdb_io.c linux-4.4.13/kernel/debug/kdb/kdb_io.c ---- linux-4.4.13.orig/kernel/debug/kdb/kdb_io.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/debug/kdb/kdb_io.c 2016-06-22 21:55:08.046523867 +0200 -@@ -554,7 +554,6 @@ - int linecount; - int colcount; - int logging, saved_loglevel = 0; -- int saved_trap_printk; - int got_printf_lock = 0; - int retlen = 0; - int fnd, len; -@@ -565,8 +564,6 @@ - unsigned long uninitialized_var(flags); - - preempt_disable(); -- saved_trap_printk = kdb_trap_printk; -- kdb_trap_printk = 0; - - /* Serialize kdb_printf if multiple cpus try to write at once. - * But if any cpu goes recursive in kdb, just print the output, -@@ -855,7 +852,6 @@ - } else { - __release(kdb_printf_lock); - } -- kdb_trap_printk = saved_trap_printk; - preempt_enable(); - return retlen; - } -@@ -865,9 +861,11 @@ - va_list ap; - int r; - -+ kdb_trap_printk++; - va_start(ap, fmt); - r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap); - va_end(ap); -+ kdb_trap_printk--; - - return r; - } -diff -Nur linux-4.4.13.orig/kernel/events/core.c linux-4.4.13/kernel/events/core.c ---- linux-4.4.13.orig/kernel/events/core.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/events/core.c 2016-06-22 21:55:08.046523867 +0200 -@@ -802,6 +802,7 @@ - raw_spin_lock_init(&cpuctx->hrtimer_lock); - hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); - timer->function = perf_mux_hrtimer_handler; -+ timer->irqsafe = 1; - } - - static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx) -@@ -7219,6 +7220,7 @@ - - hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - hwc->hrtimer.function = perf_swevent_hrtimer; -+ hwc->hrtimer.irqsafe = 1; - - /* - * Since hrtimers have a fixed rate, we can do a static freq->period -diff -Nur linux-4.4.13.orig/kernel/exit.c linux-4.4.13/kernel/exit.c ---- linux-4.4.13.orig/kernel/exit.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/exit.c 2016-06-22 21:55:08.046523867 +0200 -@@ -144,7 +144,7 @@ - * Do this under ->siglock, we can race with another thread - * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. 
- */ -- flush_sigqueue(&tsk->pending); -+ flush_task_sigqueue(tsk); - tsk->sighand = NULL; - spin_unlock(&sighand->siglock); - -diff -Nur linux-4.4.13.orig/kernel/fork.c linux-4.4.13/kernel/fork.c ---- linux-4.4.13.orig/kernel/fork.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/fork.c 2016-06-22 21:55:08.046523867 +0200 -@@ -108,7 +108,7 @@ - - DEFINE_PER_CPU(unsigned long, process_counts) = 0; - --__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ -+DEFINE_RWLOCK(tasklist_lock); /* outer */ - - #ifdef CONFIG_PROVE_RCU - int lockdep_tasklist_lock_is_held(void) -@@ -244,7 +244,9 @@ - if (atomic_dec_and_test(&sig->sigcnt)) - free_signal_struct(sig); - } -- -+#ifdef CONFIG_PREEMPT_RT_BASE -+static -+#endif - void __put_task_struct(struct task_struct *tsk) - { - WARN_ON(!tsk->exit_state); -@@ -261,7 +263,18 @@ - if (!profile_handoff_task(tsk)) - free_task(tsk); - } -+#ifndef CONFIG_PREEMPT_RT_BASE - EXPORT_SYMBOL_GPL(__put_task_struct); -+#else -+void __put_task_struct_cb(struct rcu_head *rhp) -+{ -+ struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu); -+ -+ __put_task_struct(tsk); -+ -+} -+EXPORT_SYMBOL_GPL(__put_task_struct_cb); -+#endif - - void __init __weak arch_task_cache_init(void) { } - -@@ -689,6 +702,19 @@ - } - EXPORT_SYMBOL_GPL(__mmdrop); - -+#ifdef CONFIG_PREEMPT_RT_BASE -+/* -+ * RCU callback for delayed mm drop. Not strictly rcu, but we don't -+ * want another facility to make this work. -+ */ -+void __mmdrop_delayed(struct rcu_head *rhp) -+{ -+ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop); -+ -+ __mmdrop(mm); -+} -+#endif -+ - /* - * Decrement the use count and release all resources for an mm. - */ -@@ -1218,6 +1244,9 @@ - */ - static void posix_cpu_timers_init(struct task_struct *tsk) - { -+#ifdef CONFIG_PREEMPT_RT_BASE -+ tsk->posix_timer_list = NULL; -+#endif - tsk->cputime_expires.prof_exp = 0; - tsk->cputime_expires.virt_exp = 0; - tsk->cputime_expires.sched_exp = 0; -@@ -1343,15 +1372,16 @@ - spin_lock_init(&p->alloc_lock); - - init_sigpending(&p->pending); -+ p->sigqueue_cache = NULL; - - p->utime = p->stime = p->gtime = 0; - p->utimescaled = p->stimescaled = 0; - prev_cputime_init(&p->prev_cputime); - - #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN -- seqlock_init(&p->vtime_seqlock); -+ seqcount_init(&p->vtime_seqcount); - p->vtime_snap = 0; -- p->vtime_snap_whence = VTIME_SLEEPING; -+ p->vtime_snap_whence = VTIME_INACTIVE; - #endif - - #if defined(SPLIT_RSS_COUNTING) -diff -Nur linux-4.4.13.orig/kernel/futex.c linux-4.4.13/kernel/futex.c ---- linux-4.4.13.orig/kernel/futex.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/futex.c 2016-06-22 21:55:08.046523867 +0200 -@@ -815,7 +815,9 @@ - * task still owns the PI-state: - */ - if (head->next != next) { -+ raw_spin_unlock_irq(&curr->pi_lock); - spin_unlock(&hb->lock); -+ raw_spin_lock_irq(&curr->pi_lock); - continue; - } - -@@ -1210,6 +1212,7 @@ - struct futex_pi_state *pi_state = this->pi_state; - u32 uninitialized_var(curval), newval; - WAKE_Q(wake_q); -+ WAKE_Q(wake_sleeper_q); - bool deboost; - int ret = 0; - -@@ -1223,7 +1226,7 @@ - if (pi_state->owner != current) - return -EINVAL; - -- raw_spin_lock(&pi_state->pi_mutex.wait_lock); -+ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); - new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); - - /* -@@ -1259,24 +1262,25 @@ - ret = -EINVAL; - } - if (ret) { -- raw_spin_unlock(&pi_state->pi_mutex.wait_lock); -+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); - return ret; - } - -- 
raw_spin_lock_irq(&pi_state->owner->pi_lock); -+ raw_spin_lock(&pi_state->owner->pi_lock); - WARN_ON(list_empty(&pi_state->list)); - list_del_init(&pi_state->list); -- raw_spin_unlock_irq(&pi_state->owner->pi_lock); -+ raw_spin_unlock(&pi_state->owner->pi_lock); - -- raw_spin_lock_irq(&new_owner->pi_lock); -+ raw_spin_lock(&new_owner->pi_lock); - WARN_ON(!list_empty(&pi_state->list)); - list_add(&pi_state->list, &new_owner->pi_state_list); - pi_state->owner = new_owner; -- raw_spin_unlock_irq(&new_owner->pi_lock); -+ raw_spin_unlock(&new_owner->pi_lock); - -- raw_spin_unlock(&pi_state->pi_mutex.wait_lock); -+ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); - -- deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q); -+ deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q, -+ &wake_sleeper_q); - - /* - * First unlock HB so the waiter does not spin on it once he got woken -@@ -1286,6 +1290,7 @@ - */ - spin_unlock(&hb->lock); - wake_up_q(&wake_q); -+ wake_up_q_sleeper(&wake_sleeper_q); - if (deboost) - rt_mutex_adjust_prio(current); - -@@ -1822,6 +1827,16 @@ - requeue_pi_wake_futex(this, &key2, hb2); - drop_count++; - continue; -+ } else if (ret == -EAGAIN) { -+ /* -+ * Waiter was woken by timeout or -+ * signal and has set pi_blocked_on to -+ * PI_WAKEUP_INPROGRESS before we -+ * tried to enqueue it on the rtmutex. -+ */ -+ this->pi_state = NULL; -+ free_pi_state(pi_state); -+ continue; - } else if (ret) { - /* -EDEADLK */ - this->pi_state = NULL; -@@ -2139,11 +2154,11 @@ - * we returned due to timeout or signal without taking the - * rt_mutex. Too late. - */ -- raw_spin_lock(&q->pi_state->pi_mutex.wait_lock); -+ raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock); - owner = rt_mutex_owner(&q->pi_state->pi_mutex); - if (!owner) - owner = rt_mutex_next_owner(&q->pi_state->pi_mutex); -- raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock); -+ raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock); - ret = fixup_pi_state_owner(uaddr, q, owner); - goto out; - } -@@ -2691,7 +2706,7 @@ - struct hrtimer_sleeper timeout, *to = NULL; - struct rt_mutex_waiter rt_waiter; - struct rt_mutex *pi_mutex = NULL; -- struct futex_hash_bucket *hb; -+ struct futex_hash_bucket *hb, *hb2; - union futex_key key2 = FUTEX_KEY_INIT; - struct futex_q q = futex_q_init; - int res, ret; -@@ -2716,10 +2731,7 @@ - * The waiter is allocated on our stack, manipulated by the requeue - * code while we sleep on uaddr. - */ -- debug_rt_mutex_init_waiter(&rt_waiter); -- RB_CLEAR_NODE(&rt_waiter.pi_tree_entry); -- RB_CLEAR_NODE(&rt_waiter.tree_entry); -- rt_waiter.task = NULL; -+ rt_mutex_init_waiter(&rt_waiter, false); - - ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE); - if (unlikely(ret != 0)) -@@ -2750,20 +2762,55 @@ - /* Queue the futex_q, drop the hb lock, wait for wakeup. */ - futex_wait_queue_me(hb, &q, to); - -- spin_lock(&hb->lock); -- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); -- spin_unlock(&hb->lock); -- if (ret) -- goto out_put_keys; -+ /* -+ * On RT we must avoid races with requeue and trying to block -+ * on two mutexes (hb->lock and uaddr2's rtmutex) by -+ * serializing access to pi_blocked_on with pi_lock. -+ */ -+ raw_spin_lock_irq(¤t->pi_lock); -+ if (current->pi_blocked_on) { -+ /* -+ * We have been requeued or are in the process of -+ * being requeued. -+ */ -+ raw_spin_unlock_irq(¤t->pi_lock); -+ } else { -+ /* -+ * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS -+ * prevents a concurrent requeue from moving us to the -+ * uaddr2 rtmutex. 
After that we can safely acquire -+ * (and possibly block on) hb->lock. -+ */ -+ current->pi_blocked_on = PI_WAKEUP_INPROGRESS; -+ raw_spin_unlock_irq(¤t->pi_lock); -+ -+ spin_lock(&hb->lock); -+ -+ /* -+ * Clean up pi_blocked_on. We might leak it otherwise -+ * when we succeeded with the hb->lock in the fast -+ * path. -+ */ -+ raw_spin_lock_irq(¤t->pi_lock); -+ current->pi_blocked_on = NULL; -+ raw_spin_unlock_irq(¤t->pi_lock); -+ -+ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to); -+ spin_unlock(&hb->lock); -+ if (ret) -+ goto out_put_keys; -+ } - - /* -- * In order for us to be here, we know our q.key == key2, and since -- * we took the hb->lock above, we also know that futex_requeue() has -- * completed and we no longer have to concern ourselves with a wakeup -- * race with the atomic proxy lock acquisition by the requeue code. The -- * futex_requeue dropped our key1 reference and incremented our key2 -- * reference count. -+ * In order to be here, we have either been requeued, are in -+ * the process of being requeued, or requeue successfully -+ * acquired uaddr2 on our behalf. If pi_blocked_on was -+ * non-null above, we may be racing with a requeue. Do not -+ * rely on q->lock_ptr to be hb2->lock until after blocking on -+ * hb->lock or hb2->lock. The futex_requeue dropped our key1 -+ * reference and incremented our key2 reference count. - */ -+ hb2 = hash_futex(&key2); - - /* Check if the requeue code acquired the second futex for us. */ - if (!q.rt_waiter) { -@@ -2772,14 +2819,15 @@ - * did a lock-steal - fix up the PI-state in that case. - */ - if (q.pi_state && (q.pi_state->owner != current)) { -- spin_lock(q.lock_ptr); -+ spin_lock(&hb2->lock); -+ BUG_ON(&hb2->lock != q.lock_ptr); - ret = fixup_pi_state_owner(uaddr2, &q, current); - /* - * Drop the reference to the pi state which - * the requeue_pi() code acquired for us. - */ - free_pi_state(q.pi_state); -- spin_unlock(q.lock_ptr); -+ spin_unlock(&hb2->lock); - } - } else { - /* -@@ -2792,7 +2840,8 @@ - ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter); - debug_rt_mutex_free_waiter(&rt_waiter); - -- spin_lock(q.lock_ptr); -+ spin_lock(&hb2->lock); -+ BUG_ON(&hb2->lock != q.lock_ptr); - /* - * Fixup the pi_state owner and possibly acquire the lock if we - * haven't already. -diff -Nur linux-4.4.13.orig/kernel/irq/handle.c linux-4.4.13/kernel/irq/handle.c ---- linux-4.4.13.orig/kernel/irq/handle.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/irq/handle.c 2016-06-22 21:55:08.046523867 +0200 -@@ -134,6 +134,8 @@ - - irqreturn_t handle_irq_event_percpu(struct irq_desc *desc) - { -+ struct pt_regs *regs = get_irq_regs(); -+ u64 ip = regs ? 
instruction_pointer(regs) : 0; - irqreturn_t retval = IRQ_NONE; - unsigned int flags = 0, irq = desc->irq_data.irq; - struct irqaction *action = desc->action; -@@ -176,7 +178,11 @@ - action = action->next; - } - -- add_interrupt_randomness(irq, flags); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ desc->random_ip = ip; -+#else -+ add_interrupt_randomness(irq, flags, ip); -+#endif - - if (!noirqdebug) - note_interrupt(desc, retval); -diff -Nur linux-4.4.13.orig/kernel/irq/irqdesc.c linux-4.4.13/kernel/irq/irqdesc.c ---- linux-4.4.13.orig/kernel/irq/irqdesc.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/irq/irqdesc.c 2016-06-22 21:55:08.046523867 +0200 -@@ -24,10 +24,27 @@ - static struct lock_class_key irq_desc_lock_class; - - #if defined(CONFIG_SMP) -+static int __init irq_affinity_setup(char *str) -+{ -+ zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); -+ cpulist_parse(str, irq_default_affinity); -+ /* -+ * Set at least the boot cpu. We don't want to end up with -+ * bugreports caused by random comandline masks -+ */ -+ cpumask_set_cpu(smp_processor_id(), irq_default_affinity); -+ return 1; -+} -+__setup("irqaffinity=", irq_affinity_setup); -+ - static void __init init_irq_default_affinity(void) - { -- alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); -- cpumask_setall(irq_default_affinity); -+#ifdef CONFIG_CPUMASK_OFFSTACK -+ if (!irq_default_affinity) -+ zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT); -+#endif -+ if (cpumask_empty(irq_default_affinity)) -+ cpumask_setall(irq_default_affinity); - } - #else - static void __init init_irq_default_affinity(void) -diff -Nur linux-4.4.13.orig/kernel/irq/manage.c linux-4.4.13/kernel/irq/manage.c ---- linux-4.4.13.orig/kernel/irq/manage.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/irq/manage.c 2016-06-22 21:55:08.046523867 +0200 -@@ -22,6 +22,7 @@ - #include "internals.h" - - #ifdef CONFIG_IRQ_FORCED_THREADING -+# ifndef CONFIG_PREEMPT_RT_BASE - __read_mostly bool force_irqthreads; - - static int __init setup_forced_irqthreads(char *arg) -@@ -30,6 +31,7 @@ - return 0; - } - early_param("threadirqs", setup_forced_irqthreads); -+# endif - #endif - - static void __synchronize_hardirq(struct irq_desc *desc) -@@ -181,6 +183,62 @@ - irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } - #endif - -+#ifdef CONFIG_PREEMPT_RT_FULL -+static void _irq_affinity_notify(struct irq_affinity_notify *notify); -+static struct task_struct *set_affinity_helper; -+static LIST_HEAD(affinity_list); -+static DEFINE_RAW_SPINLOCK(affinity_list_lock); -+ -+static int set_affinity_thread(void *unused) -+{ -+ while (1) { -+ struct irq_affinity_notify *notify; -+ int empty; -+ -+ set_current_state(TASK_INTERRUPTIBLE); -+ -+ raw_spin_lock_irq(&affinity_list_lock); -+ empty = list_empty(&affinity_list); -+ raw_spin_unlock_irq(&affinity_list_lock); -+ -+ if (empty) -+ schedule(); -+ if (kthread_should_stop()) -+ break; -+ set_current_state(TASK_RUNNING); -+try_next: -+ notify = NULL; -+ -+ raw_spin_lock_irq(&affinity_list_lock); -+ if (!list_empty(&affinity_list)) { -+ notify = list_first_entry(&affinity_list, -+ struct irq_affinity_notify, list); -+ list_del_init(¬ify->list); -+ } -+ raw_spin_unlock_irq(&affinity_list_lock); -+ -+ if (!notify) -+ continue; -+ _irq_affinity_notify(notify); -+ goto try_next; -+ } -+ return 0; -+} -+ -+static void init_helper_thread(void) -+{ -+ if (set_affinity_helper) -+ return; -+ set_affinity_helper = kthread_run(set_affinity_thread, NULL, -+ "affinity-cb"); -+ 
WARN_ON(IS_ERR(set_affinity_helper)); -+} -+#else -+ -+static inline void init_helper_thread(void) { } -+ -+#endif -+ - int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, - bool force) - { -@@ -220,7 +278,17 @@ - - if (desc->affinity_notify) { - kref_get(&desc->affinity_notify->kref); -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ raw_spin_lock(&affinity_list_lock); -+ if (list_empty(&desc->affinity_notify->list)) -+ list_add_tail(&affinity_list, -+ &desc->affinity_notify->list); -+ raw_spin_unlock(&affinity_list_lock); -+ wake_up_process(set_affinity_helper); -+#else - schedule_work(&desc->affinity_notify->work); -+#endif - } - irqd_set(data, IRQD_AFFINITY_SET); - -@@ -258,10 +326,8 @@ - } - EXPORT_SYMBOL_GPL(irq_set_affinity_hint); - --static void irq_affinity_notify(struct work_struct *work) -+static void _irq_affinity_notify(struct irq_affinity_notify *notify) - { -- struct irq_affinity_notify *notify = -- container_of(work, struct irq_affinity_notify, work); - struct irq_desc *desc = irq_to_desc(notify->irq); - cpumask_var_t cpumask; - unsigned long flags; -@@ -283,6 +349,13 @@ - kref_put(¬ify->kref, notify->release); - } - -+static void irq_affinity_notify(struct work_struct *work) -+{ -+ struct irq_affinity_notify *notify = -+ container_of(work, struct irq_affinity_notify, work); -+ _irq_affinity_notify(notify); -+} -+ - /** - * irq_set_affinity_notifier - control notification of IRQ affinity changes - * @irq: Interrupt for which to enable/disable notification -@@ -312,6 +385,8 @@ - notify->irq = irq; - kref_init(¬ify->kref); - INIT_WORK(¬ify->work, irq_affinity_notify); -+ INIT_LIST_HEAD(¬ify->list); -+ init_helper_thread(); - } - - raw_spin_lock_irqsave(&desc->lock, flags); -@@ -865,7 +940,15 @@ - local_bh_disable(); - ret = action->thread_fn(action->irq, action->dev_id); - irq_finalize_oneshot(desc, action); -- local_bh_enable(); -+ /* -+ * Interrupts which have real time requirements can be set up -+ * to avoid softirq processing in the thread handler. This is -+ * safe as these interrupts do not raise soft interrupts. -+ */ -+ if (irq_settings_no_softirq_call(desc)) -+ _local_bh_enable(); -+ else -+ local_bh_enable(); - return ret; - } - -@@ -962,6 +1045,12 @@ - if (action_ret == IRQ_WAKE_THREAD) - irq_wake_secondary(desc, action); - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ migrate_disable(); -+ add_interrupt_randomness(action->irq, 0, -+ desc->random_ip ^ (unsigned long) action); -+ migrate_enable(); -+#endif - wake_threads_waitq(desc); - } - -@@ -1315,6 +1404,9 @@ - irqd_set(&desc->irq_data, IRQD_NO_BALANCING); - } - -+ if (new->flags & IRQF_NO_SOFTIRQ_CALL) -+ irq_settings_set_no_softirq_call(desc); -+ - /* Set default affinity mask once everything is setup */ - setup_affinity(desc, mask); - -@@ -1968,7 +2060,7 @@ - * This call sets the internal irqchip state of an interrupt, - * depending on the value of @which. - * -- * This function should be called with preemption disabled if the -+ * This function should be called with migration disabled if the - * interrupt controller has per-cpu registers. 
- */ - int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, -diff -Nur linux-4.4.13.orig/kernel/irq/settings.h linux-4.4.13/kernel/irq/settings.h ---- linux-4.4.13.orig/kernel/irq/settings.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/irq/settings.h 2016-06-22 21:55:08.046523867 +0200 -@@ -16,6 +16,7 @@ - _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID, - _IRQ_IS_POLLED = IRQ_IS_POLLED, - _IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY, -+ _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL, - _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, - }; - -@@ -30,6 +31,7 @@ - #define IRQ_PER_CPU_DEVID GOT_YOU_MORON - #define IRQ_IS_POLLED GOT_YOU_MORON - #define IRQ_DISABLE_UNLAZY GOT_YOU_MORON -+#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON - #undef IRQF_MODIFY_MASK - #define IRQF_MODIFY_MASK GOT_YOU_MORON - -@@ -40,6 +42,16 @@ - desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK); - } - -+static inline bool irq_settings_no_softirq_call(struct irq_desc *desc) -+{ -+ return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL; -+} -+ -+static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc) -+{ -+ desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL; -+} -+ - static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) - { - return desc->status_use_accessors & _IRQ_PER_CPU; -diff -Nur linux-4.4.13.orig/kernel/irq/spurious.c linux-4.4.13/kernel/irq/spurious.c ---- linux-4.4.13.orig/kernel/irq/spurious.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/irq/spurious.c 2016-06-22 21:55:08.050523867 +0200 -@@ -444,6 +444,10 @@ - - static int __init irqfixup_setup(char *str) - { -+#ifdef CONFIG_PREEMPT_RT_BASE -+ pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n"); -+ return 1; -+#endif - irqfixup = 1; - printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n"); - printk(KERN_WARNING "This may impact system performance.\n"); -@@ -456,6 +460,10 @@ - - static int __init irqpoll_setup(char *str) - { -+#ifdef CONFIG_PREEMPT_RT_BASE -+ pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n"); -+ return 1; -+#endif - irqfixup = 2; - printk(KERN_WARNING "Misrouted IRQ fixup and polling support " - "enabled\n"); -diff -Nur linux-4.4.13.orig/kernel/irq_work.c linux-4.4.13/kernel/irq_work.c ---- linux-4.4.13.orig/kernel/irq_work.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/irq_work.c 2016-06-22 21:55:08.050523867 +0200 -@@ -17,6 +17,7 @@ - #include - #include - #include -+#include - #include - - -@@ -65,6 +66,8 @@ - */ - bool irq_work_queue_on(struct irq_work *work, int cpu) - { -+ struct llist_head *list; -+ - /* All work should have been flushed before going offline */ - WARN_ON_ONCE(cpu_is_offline(cpu)); - -@@ -75,7 +78,12 @@ - if (!irq_work_claim(work)) - return false; - -- if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) -+ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ)) -+ list = &per_cpu(lazy_list, cpu); -+ else -+ list = &per_cpu(raised_list, cpu); -+ -+ if (llist_add(&work->llnode, list)) - arch_send_call_function_single_ipi(cpu); - - return true; -@@ -86,6 +94,9 @@ - /* Enqueue the irq work @work on the current CPU */ - bool irq_work_queue(struct irq_work *work) - { -+ struct llist_head *list; -+ bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL); -+ - /* Only queue if not already pending */ - if (!irq_work_claim(work)) - return false; -@@ -93,13 +104,15 @@ - /* Queue the entry and raise the IPI if needed. 
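/*
 * Editorial sketch, not part of the patch: given the routing above, an
 * ordinary irq_work item on PREEMPT_RT_FULL is diverted to the per-cpu
 * lazy_list and runs from the timer softirq; flagging it with
 * IRQ_WORK_HARD_IRQ (added elsewhere in this series) keeps it in hard
 * interrupt context.  The "demo" names are hypothetical.
 */
#include <linux/irq_work.h>

static void demo_work_fn(struct irq_work *work)
{
        /* keep this minimal: it may run in hard interrupt context */
}

static struct irq_work demo_work = {
        .flags = IRQ_WORK_HARD_IRQ,     /* opt out of the lazy list on RT */
        .func  = demo_work_fn,
};

static void demo_poke(void)
{
        irq_work_queue(&demo_work);     /* safe to call from any context */
}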
*/ - preempt_disable(); - -- /* If the work is "lazy", handle it from next tick if any */ -- if (work->flags & IRQ_WORK_LAZY) { -- if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) && -- tick_nohz_tick_stopped()) -- arch_irq_work_raise(); -- } else { -- if (llist_add(&work->llnode, this_cpu_ptr(&raised_list))) -+ lazy_work = work->flags & IRQ_WORK_LAZY; -+ -+ if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ))) -+ list = this_cpu_ptr(&lazy_list); -+ else -+ list = this_cpu_ptr(&raised_list); -+ -+ if (llist_add(&work->llnode, list)) { -+ if (!lazy_work || tick_nohz_tick_stopped()) - arch_irq_work_raise(); - } - -@@ -116,9 +129,8 @@ - raised = this_cpu_ptr(&raised_list); - lazy = this_cpu_ptr(&lazy_list); - -- if (llist_empty(raised) || arch_irq_work_has_interrupt()) -- if (llist_empty(lazy)) -- return false; -+ if (llist_empty(raised) && llist_empty(lazy)) -+ return false; - - /* All work should have been flushed before going offline */ - WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); -@@ -132,7 +144,7 @@ - struct irq_work *work; - struct llist_node *llnode; - -- BUG_ON(!irqs_disabled()); -+ BUG_ON_NONRT(!irqs_disabled()); - - if (llist_empty(list)) - return; -@@ -169,7 +181,16 @@ - void irq_work_run(void) - { - irq_work_run_list(this_cpu_ptr(&raised_list)); -- irq_work_run_list(this_cpu_ptr(&lazy_list)); -+ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) { -+ /* -+ * NOTE: we raise softirq via IPI for safety, -+ * and execute in irq_work_tick() to move the -+ * overhead from hard to soft irq context. -+ */ -+ if (!llist_empty(this_cpu_ptr(&lazy_list))) -+ raise_softirq(TIMER_SOFTIRQ); -+ } else -+ irq_work_run_list(this_cpu_ptr(&lazy_list)); - } - EXPORT_SYMBOL_GPL(irq_work_run); - -@@ -179,8 +200,17 @@ - - if (!llist_empty(raised) && !arch_irq_work_has_interrupt()) - irq_work_run_list(raised); -+ -+ if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) -+ irq_work_run_list(this_cpu_ptr(&lazy_list)); -+} -+ -+#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL) -+void irq_work_tick_soft(void) -+{ - irq_work_run_list(this_cpu_ptr(&lazy_list)); - } -+#endif - - /* - * Synchronize against the irq_work @entry, ensures the entry is not -diff -Nur linux-4.4.13.orig/kernel/Kconfig.locks linux-4.4.13/kernel/Kconfig.locks ---- linux-4.4.13.orig/kernel/Kconfig.locks 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/Kconfig.locks 2016-06-22 21:55:08.050523867 +0200 -@@ -225,11 +225,11 @@ - - config MUTEX_SPIN_ON_OWNER - def_bool y -- depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW -+ depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL - - config RWSEM_SPIN_ON_OWNER - def_bool y -- depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW -+ depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL - - config LOCK_SPIN_ON_OWNER - def_bool y -diff -Nur linux-4.4.13.orig/kernel/Kconfig.preempt linux-4.4.13/kernel/Kconfig.preempt ---- linux-4.4.13.orig/kernel/Kconfig.preempt 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/Kconfig.preempt 2016-06-22 21:55:08.050523867 +0200 -@@ -1,3 +1,16 @@ -+config PREEMPT -+ bool -+ select PREEMPT_COUNT -+ -+config PREEMPT_RT_BASE -+ bool -+ select PREEMPT -+ -+config HAVE_PREEMPT_LAZY -+ bool -+ -+config PREEMPT_LAZY -+ def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL - - choice - prompt "Preemption Model" -@@ -33,9 +46,9 @@ - - Select this if you are building a kernel for a desktop system. 
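/*
 * Editorial sketch, not part of the patch: the Kconfig changes above are
 * consumed from C roughly like this.  The helper is hypothetical; note
 * that the depends-clause already guarantees the second test.
 */
#include <linux/kconfig.h>

static inline bool demo_may_spin_on_owner(void)
{
        /* optimistic owner spinning is configured out on RT */
        return IS_ENABLED(CONFIG_MUTEX_SPIN_ON_OWNER) &&
               !IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
}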
- --config PREEMPT -+config PREEMPT__LL - bool "Preemptible Kernel (Low-Latency Desktop)" -- select PREEMPT_COUNT -+ select PREEMPT - select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK - help - This option reduces the latency of the kernel by making -@@ -52,6 +65,22 @@ - embedded system with latency requirements in the milliseconds - range. - -+config PREEMPT_RTB -+ bool "Preemptible Kernel (Basic RT)" -+ select PREEMPT_RT_BASE -+ help -+ This option is basically the same as (Low-Latency Desktop) but -+ enables changes which are preliminary for the full preemptible -+ RT kernel. -+ -+config PREEMPT_RT_FULL -+ bool "Fully Preemptible Kernel (RT)" -+ depends on IRQ_FORCED_THREADING -+ select PREEMPT_RT_BASE -+ select PREEMPT_RCU -+ help -+ All and everything -+ - endchoice - - config PREEMPT_COUNT -diff -Nur linux-4.4.13.orig/kernel/ksysfs.c linux-4.4.13/kernel/ksysfs.c ---- linux-4.4.13.orig/kernel/ksysfs.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/ksysfs.c 2016-06-22 21:55:08.050523867 +0200 -@@ -136,6 +136,15 @@ - - #endif /* CONFIG_KEXEC_CORE */ - -+#if defined(CONFIG_PREEMPT_RT_FULL) -+static ssize_t realtime_show(struct kobject *kobj, -+ struct kobj_attribute *attr, char *buf) -+{ -+ return sprintf(buf, "%d\n", 1); -+} -+KERNEL_ATTR_RO(realtime); -+#endif -+ - /* whether file capabilities are enabled */ - static ssize_t fscaps_show(struct kobject *kobj, - struct kobj_attribute *attr, char *buf) -@@ -203,6 +212,9 @@ - &vmcoreinfo_attr.attr, - #endif - &rcu_expedited_attr.attr, -+#ifdef CONFIG_PREEMPT_RT_FULL -+ &realtime_attr.attr, -+#endif - NULL - }; - -diff -Nur linux-4.4.13.orig/kernel/locking/lglock.c linux-4.4.13/kernel/locking/lglock.c ---- linux-4.4.13.orig/kernel/locking/lglock.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/locking/lglock.c 2016-06-22 21:55:08.050523867 +0200 -@@ -4,6 +4,15 @@ - #include - #include - -+#ifndef CONFIG_PREEMPT_RT_FULL -+# define lg_lock_ptr arch_spinlock_t -+# define lg_do_lock(l) arch_spin_lock(l) -+# define lg_do_unlock(l) arch_spin_unlock(l) -+#else -+# define lg_lock_ptr struct rt_mutex -+# define lg_do_lock(l) __rt_spin_lock__no_mg(l) -+# define lg_do_unlock(l) __rt_spin_unlock(l) -+#endif - /* - * Note there is no uninit, so lglocks cannot be defined in - * modules (but it's fine to use them from there) -@@ -12,51 +21,60 @@ - - void lg_lock_init(struct lglock *lg, char *name) - { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ int i; -+ -+ for_each_possible_cpu(i) { -+ struct rt_mutex *lock = per_cpu_ptr(lg->lock, i); -+ -+ rt_mutex_init(lock); -+ } -+#endif - LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0); - } - EXPORT_SYMBOL(lg_lock_init); - - void lg_local_lock(struct lglock *lg) - { -- arch_spinlock_t *lock; -+ lg_lock_ptr *lock; - -- preempt_disable(); -+ migrate_disable(); - lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); - lock = this_cpu_ptr(lg->lock); -- arch_spin_lock(lock); -+ lg_do_lock(lock); - } - EXPORT_SYMBOL(lg_local_lock); - - void lg_local_unlock(struct lglock *lg) - { -- arch_spinlock_t *lock; -+ lg_lock_ptr *lock; - - lock_release(&lg->lock_dep_map, 1, _RET_IP_); - lock = this_cpu_ptr(lg->lock); -- arch_spin_unlock(lock); -- preempt_enable(); -+ lg_do_unlock(lock); -+ migrate_enable(); - } - EXPORT_SYMBOL(lg_local_unlock); - - void lg_local_lock_cpu(struct lglock *lg, int cpu) - { -- arch_spinlock_t *lock; -+ lg_lock_ptr *lock; - -- preempt_disable(); -+ preempt_disable_nort(); - lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); - lock = 
per_cpu_ptr(lg->lock, cpu); -- arch_spin_lock(lock); -+ lg_do_lock(lock); - } - EXPORT_SYMBOL(lg_local_lock_cpu); - - void lg_local_unlock_cpu(struct lglock *lg, int cpu) - { -- arch_spinlock_t *lock; -+ lg_lock_ptr *lock; - - lock_release(&lg->lock_dep_map, 1, _RET_IP_); - lock = per_cpu_ptr(lg->lock, cpu); -- arch_spin_unlock(lock); -- preempt_enable(); -+ lg_do_unlock(lock); -+ preempt_enable_nort(); - } - EXPORT_SYMBOL(lg_local_unlock_cpu); - -@@ -68,30 +86,30 @@ - if (cpu2 < cpu1) - swap(cpu1, cpu2); - -- preempt_disable(); -+ preempt_disable_nort(); - lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); -- arch_spin_lock(per_cpu_ptr(lg->lock, cpu1)); -- arch_spin_lock(per_cpu_ptr(lg->lock, cpu2)); -+ lg_do_lock(per_cpu_ptr(lg->lock, cpu1)); -+ lg_do_lock(per_cpu_ptr(lg->lock, cpu2)); - } - - void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2) - { - lock_release(&lg->lock_dep_map, 1, _RET_IP_); -- arch_spin_unlock(per_cpu_ptr(lg->lock, cpu1)); -- arch_spin_unlock(per_cpu_ptr(lg->lock, cpu2)); -- preempt_enable(); -+ lg_do_unlock(per_cpu_ptr(lg->lock, cpu1)); -+ lg_do_unlock(per_cpu_ptr(lg->lock, cpu2)); -+ preempt_enable_nort(); - } - - void lg_global_lock(struct lglock *lg) - { - int i; - -- preempt_disable(); -+ preempt_disable_nort(); - lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); - for_each_possible_cpu(i) { -- arch_spinlock_t *lock; -+ lg_lock_ptr *lock; - lock = per_cpu_ptr(lg->lock, i); -- arch_spin_lock(lock); -+ lg_do_lock(lock); - } - } - EXPORT_SYMBOL(lg_global_lock); -@@ -102,10 +120,35 @@ - - lock_release(&lg->lock_dep_map, 1, _RET_IP_); - for_each_possible_cpu(i) { -- arch_spinlock_t *lock; -+ lg_lock_ptr *lock; - lock = per_cpu_ptr(lg->lock, i); -- arch_spin_unlock(lock); -+ lg_do_unlock(lock); - } -- preempt_enable(); -+ preempt_enable_nort(); - } - EXPORT_SYMBOL(lg_global_unlock); -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+/* -+ * HACK: If you use this, you get to keep the pieces. -+ * Used in queue_stop_cpus_work() when stop machinery -+ * is called from inactive CPU, so we can't schedule. -+ */ -+# define lg_do_trylock_relax(l) \ -+ do { \ -+ while (!__rt_spin_trylock(l)) \ -+ cpu_relax(); \ -+ } while (0) -+ -+void lg_global_trylock_relax(struct lglock *lg) -+{ -+ int i; -+ -+ lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); -+ for_each_possible_cpu(i) { -+ lg_lock_ptr *lock; -+ lock = per_cpu_ptr(lg->lock, i); -+ lg_do_trylock_relax(lock); -+ } -+} -+#endif -diff -Nur linux-4.4.13.orig/kernel/locking/lockdep.c linux-4.4.13/kernel/locking/lockdep.c ---- linux-4.4.13.orig/kernel/locking/lockdep.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/locking/lockdep.c 2016-06-22 21:55:08.050523867 +0200 -@@ -3525,6 +3525,7 @@ - } - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* - * We dont accurately track softirq state in e.g. 
- * hardirq contexts (such as on 4KSTACKS), so only -@@ -3539,6 +3540,7 @@ - DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); - } - } -+#endif - - if (!debug_locks) - print_irqtrace_events(current); -diff -Nur linux-4.4.13.orig/kernel/locking/locktorture.c linux-4.4.13/kernel/locking/locktorture.c ---- linux-4.4.13.orig/kernel/locking/locktorture.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/locking/locktorture.c 2016-06-22 21:55:08.050523867 +0200 -@@ -26,7 +26,6 @@ - #include - #include - #include --#include - #include - #include - #include -diff -Nur linux-4.4.13.orig/kernel/locking/Makefile linux-4.4.13/kernel/locking/Makefile ---- linux-4.4.13.orig/kernel/locking/Makefile 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/locking/Makefile 2016-06-22 21:55:08.050523867 +0200 -@@ -1,5 +1,5 @@ - --obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o -+obj-y += semaphore.o percpu-rwsem.o - - ifdef CONFIG_FUNCTION_TRACER - CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE) -@@ -8,7 +8,11 @@ - CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE) - endif - -+ifneq ($(CONFIG_PREEMPT_RT_FULL),y) -+obj-y += mutex.o - obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o -+obj-y += rwsem.o -+endif - obj-$(CONFIG_LOCKDEP) += lockdep.o - ifeq ($(CONFIG_PROC_FS),y) - obj-$(CONFIG_LOCKDEP) += lockdep_proc.o -@@ -22,7 +26,10 @@ - obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o - obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o - obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o -+ifneq ($(CONFIG_PREEMPT_RT_FULL),y) - obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o - obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o -+endif -+obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o - obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o - obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o -diff -Nur linux-4.4.13.orig/kernel/locking/rt.c linux-4.4.13/kernel/locking/rt.c ---- linux-4.4.13.orig/kernel/locking/rt.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-4.4.13/kernel/locking/rt.c 2016-06-22 21:55:08.050523867 +0200 -@@ -0,0 +1,474 @@ -+/* -+ * kernel/rt.c -+ * -+ * Real-Time Preemption Support -+ * -+ * started by Ingo Molnar: -+ * -+ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar -+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner -+ * -+ * historic credit for proving that Linux spinlocks can be implemented via -+ * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow -+ * and others) who prototyped it on 2.4 and did lots of comparative -+ * research and analysis; TimeSys, for proving that you can implement a -+ * fully preemptible kernel via the use of IRQ threading and mutexes; -+ * Bill Huey for persuasively arguing on lkml that the mutex model is the -+ * right one; and to MontaVista, who ported pmutexes to 2.6. -+ * -+ * This code is a from-scratch implementation and is not based on pmutexes, -+ * but the idea of converting spinlocks to mutexes is used here too. -+ * -+ * lock debugging, locking tree, deadlock detection: -+ * -+ * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey -+ * Released under the General Public License (GPL). -+ * -+ * Includes portions of the generic R/W semaphore implementation from: -+ * -+ * Copyright (c) 2001 David Howells (dhowells@redhat.com). 
-+ * - Derived partially from idea by Andrea Arcangeli -+ * - Derived also from comments by Linus -+ * -+ * Pending ownership of locks and ownership stealing: -+ * -+ * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt -+ * -+ * (also by Steven Rostedt) -+ * - Converted single pi_lock to individual task locks. -+ * -+ * By Esben Nielsen: -+ * Doing priority inheritance with help of the scheduler. -+ * -+ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner -+ * - major rework based on Esben Nielsens initial patch -+ * - replaced thread_info references by task_struct refs -+ * - removed task->pending_owner dependency -+ * - BKL drop/reacquire for semaphore style locks to avoid deadlocks -+ * in the scheduler return path as discussed with Steven Rostedt -+ * -+ * Copyright (C) 2006, Kihon Technologies Inc. -+ * Steven Rostedt -+ * - debugged and patched Thomas Gleixner's rework. -+ * - added back the cmpxchg to the rework. -+ * - turned atomic require back on for SMP. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "rtmutex_common.h" -+ -+/* -+ * struct mutex functions -+ */ -+void __mutex_do_init(struct mutex *mutex, const char *name, -+ struct lock_class_key *key) -+{ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ /* -+ * Make sure we are not reinitializing a held lock: -+ */ -+ debug_check_no_locks_freed((void *)mutex, sizeof(*mutex)); -+ lockdep_init_map(&mutex->dep_map, name, key, 0); -+#endif -+ mutex->lock.save_state = 0; -+} -+EXPORT_SYMBOL(__mutex_do_init); -+ -+void __lockfunc _mutex_lock(struct mutex *lock) -+{ -+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); -+ rt_mutex_lock(&lock->lock); -+} -+EXPORT_SYMBOL(_mutex_lock); -+ -+int __lockfunc _mutex_lock_interruptible(struct mutex *lock) -+{ -+ int ret; -+ -+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); -+ ret = rt_mutex_lock_interruptible(&lock->lock); -+ if (ret) -+ mutex_release(&lock->dep_map, 1, _RET_IP_); -+ return ret; -+} -+EXPORT_SYMBOL(_mutex_lock_interruptible); -+ -+int __lockfunc _mutex_lock_killable(struct mutex *lock) -+{ -+ int ret; -+ -+ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); -+ ret = rt_mutex_lock_killable(&lock->lock); -+ if (ret) -+ mutex_release(&lock->dep_map, 1, _RET_IP_); -+ return ret; -+} -+EXPORT_SYMBOL(_mutex_lock_killable); -+ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass) -+{ -+ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); -+ rt_mutex_lock(&lock->lock); -+} -+EXPORT_SYMBOL(_mutex_lock_nested); -+ -+void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) -+{ -+ mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_); -+ rt_mutex_lock(&lock->lock); -+} -+EXPORT_SYMBOL(_mutex_lock_nest_lock); -+ -+int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass) -+{ -+ int ret; -+ -+ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); -+ ret = rt_mutex_lock_interruptible(&lock->lock); -+ if (ret) -+ mutex_release(&lock->dep_map, 1, _RET_IP_); -+ return ret; -+} -+EXPORT_SYMBOL(_mutex_lock_interruptible_nested); -+ -+int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass) -+{ -+ int ret; -+ -+ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); -+ ret = rt_mutex_lock_killable(&lock->lock); -+ if (ret) -+ mutex_release(&lock->dep_map, 1, _RET_IP_); -+ return ret; -+} -+EXPORT_SYMBOL(_mutex_lock_killable_nested); -+#endif -+ -+int 
__lockfunc _mutex_trylock(struct mutex *lock) -+{ -+ int ret = rt_mutex_trylock(&lock->lock); -+ -+ if (ret) -+ mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); -+ -+ return ret; -+} -+EXPORT_SYMBOL(_mutex_trylock); -+ -+void __lockfunc _mutex_unlock(struct mutex *lock) -+{ -+ mutex_release(&lock->dep_map, 1, _RET_IP_); -+ rt_mutex_unlock(&lock->lock); -+} -+EXPORT_SYMBOL(_mutex_unlock); -+ -+/* -+ * rwlock_t functions -+ */ -+int __lockfunc rt_write_trylock(rwlock_t *rwlock) -+{ -+ int ret; -+ -+ migrate_disable(); -+ ret = rt_mutex_trylock(&rwlock->lock); -+ if (ret) -+ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); -+ else -+ migrate_enable(); -+ -+ return ret; -+} -+EXPORT_SYMBOL(rt_write_trylock); -+ -+int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags) -+{ -+ int ret; -+ -+ *flags = 0; -+ ret = rt_write_trylock(rwlock); -+ return ret; -+} -+EXPORT_SYMBOL(rt_write_trylock_irqsave); -+ -+int __lockfunc rt_read_trylock(rwlock_t *rwlock) -+{ -+ struct rt_mutex *lock = &rwlock->lock; -+ int ret = 1; -+ -+ /* -+ * recursive read locks succeed when current owns the lock, -+ * but not when read_depth == 0 which means that the lock is -+ * write locked. -+ */ -+ if (rt_mutex_owner(lock) != current) { -+ migrate_disable(); -+ ret = rt_mutex_trylock(lock); -+ if (ret) -+ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); -+ else -+ migrate_enable(); -+ -+ } else if (!rwlock->read_depth) { -+ ret = 0; -+ } -+ -+ if (ret) -+ rwlock->read_depth++; -+ -+ return ret; -+} -+EXPORT_SYMBOL(rt_read_trylock); -+ -+void __lockfunc rt_write_lock(rwlock_t *rwlock) -+{ -+ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); -+ __rt_spin_lock(&rwlock->lock); -+} -+EXPORT_SYMBOL(rt_write_lock); -+ -+void __lockfunc rt_read_lock(rwlock_t *rwlock) -+{ -+ struct rt_mutex *lock = &rwlock->lock; -+ -+ -+ /* -+ * recursive read locks succeed when current owns the lock -+ */ -+ if (rt_mutex_owner(lock) != current) { -+ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); -+ __rt_spin_lock(lock); -+ } -+ rwlock->read_depth++; -+} -+ -+EXPORT_SYMBOL(rt_read_lock); -+ -+void __lockfunc rt_write_unlock(rwlock_t *rwlock) -+{ -+ /* NOTE: we always pass in '1' for nested, for simplicity */ -+ rwlock_release(&rwlock->dep_map, 1, _RET_IP_); -+ __rt_spin_unlock(&rwlock->lock); -+ migrate_enable(); -+} -+EXPORT_SYMBOL(rt_write_unlock); -+ -+void __lockfunc rt_read_unlock(rwlock_t *rwlock) -+{ -+ /* Release the lock only when read_depth is down to 0 */ -+ if (--rwlock->read_depth == 0) { -+ rwlock_release(&rwlock->dep_map, 1, _RET_IP_); -+ __rt_spin_unlock(&rwlock->lock); -+ migrate_enable(); -+ } -+} -+EXPORT_SYMBOL(rt_read_unlock); -+ -+unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock) -+{ -+ rt_write_lock(rwlock); -+ -+ return 0; -+} -+EXPORT_SYMBOL(rt_write_lock_irqsave); -+ -+unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock) -+{ -+ rt_read_lock(rwlock); -+ -+ return 0; -+} -+EXPORT_SYMBOL(rt_read_lock_irqsave); -+ -+void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key) -+{ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ /* -+ * Make sure we are not reinitializing a held lock: -+ */ -+ debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock)); -+ lockdep_init_map(&rwlock->dep_map, name, key, 0); -+#endif -+ rwlock->lock.save_state = 1; -+ rwlock->read_depth = 0; -+} -+EXPORT_SYMBOL(__rt_rwlock_init); -+ -+/* -+ * rw_semaphores -+ */ -+ -+void rt_up_write(struct rw_semaphore *rwsem) -+{ -+ rwsem_release(&rwsem->dep_map, 1, _RET_IP_); -+ 
rt_mutex_unlock(&rwsem->lock); -+} -+EXPORT_SYMBOL(rt_up_write); -+ -+void __rt_up_read(struct rw_semaphore *rwsem) -+{ -+ if (--rwsem->read_depth == 0) -+ rt_mutex_unlock(&rwsem->lock); -+} -+ -+void rt_up_read(struct rw_semaphore *rwsem) -+{ -+ rwsem_release(&rwsem->dep_map, 1, _RET_IP_); -+ __rt_up_read(rwsem); -+} -+EXPORT_SYMBOL(rt_up_read); -+ -+/* -+ * downgrade a write lock into a read lock -+ * - just wake up any readers at the front of the queue -+ */ -+void rt_downgrade_write(struct rw_semaphore *rwsem) -+{ -+ BUG_ON(rt_mutex_owner(&rwsem->lock) != current); -+ rwsem->read_depth = 1; -+} -+EXPORT_SYMBOL(rt_downgrade_write); -+ -+int rt_down_write_trylock(struct rw_semaphore *rwsem) -+{ -+ int ret = rt_mutex_trylock(&rwsem->lock); -+ -+ if (ret) -+ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); -+ return ret; -+} -+EXPORT_SYMBOL(rt_down_write_trylock); -+ -+void rt_down_write(struct rw_semaphore *rwsem) -+{ -+ rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_); -+ rt_mutex_lock(&rwsem->lock); -+} -+EXPORT_SYMBOL(rt_down_write); -+ -+void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass) -+{ -+ rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_); -+ rt_mutex_lock(&rwsem->lock); -+} -+EXPORT_SYMBOL(rt_down_write_nested); -+ -+void rt_down_write_nested_lock(struct rw_semaphore *rwsem, -+ struct lockdep_map *nest) -+{ -+ rwsem_acquire_nest(&rwsem->dep_map, 0, 0, nest, _RET_IP_); -+ rt_mutex_lock(&rwsem->lock); -+} -+EXPORT_SYMBOL(rt_down_write_nested_lock); -+ -+int rt__down_read_trylock(struct rw_semaphore *rwsem) -+{ -+ struct rt_mutex *lock = &rwsem->lock; -+ int ret = 1; -+ -+ /* -+ * recursive read locks succeed when current owns the rwsem, -+ * but not when read_depth == 0 which means that the rwsem is -+ * write locked. -+ */ -+ if (rt_mutex_owner(lock) != current) -+ ret = rt_mutex_trylock(&rwsem->lock); -+ else if (!rwsem->read_depth) -+ ret = 0; -+ -+ if (ret) -+ rwsem->read_depth++; -+ return ret; -+ -+} -+ -+int rt_down_read_trylock(struct rw_semaphore *rwsem) -+{ -+ int ret; -+ -+ ret = rt__down_read_trylock(rwsem); -+ if (ret) -+ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); -+ -+ return ret; -+} -+EXPORT_SYMBOL(rt_down_read_trylock); -+ -+void rt__down_read(struct rw_semaphore *rwsem) -+{ -+ struct rt_mutex *lock = &rwsem->lock; -+ -+ if (rt_mutex_owner(lock) != current) -+ rt_mutex_lock(&rwsem->lock); -+ rwsem->read_depth++; -+} -+EXPORT_SYMBOL(rt__down_read); -+ -+static void __rt_down_read(struct rw_semaphore *rwsem, int subclass) -+{ -+ rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_); -+ rt__down_read(rwsem); -+} -+ -+void rt_down_read(struct rw_semaphore *rwsem) -+{ -+ __rt_down_read(rwsem, 0); -+} -+EXPORT_SYMBOL(rt_down_read); -+ -+void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass) -+{ -+ __rt_down_read(rwsem, subclass); -+} -+EXPORT_SYMBOL(rt_down_read_nested); -+ -+void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name, -+ struct lock_class_key *key) -+{ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ /* -+ * Make sure we are not reinitializing a held lock: -+ */ -+ debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem)); -+ lockdep_init_map(&rwsem->dep_map, name, key, 0); -+#endif -+ rwsem->read_depth = 0; -+ rwsem->lock.save_state = 0; -+} -+EXPORT_SYMBOL(__rt_rwsem_init); -+ -+/** -+ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0 -+ * @cnt: the atomic which we are to dec -+ * @lock: the mutex to return holding if we dec to 0 -+ * -+ * return true and hold lock if we dec to 0, return false 
otherwise -+ */ -+int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) -+{ -+ /* dec if we can't possibly hit 0 */ -+ if (atomic_add_unless(cnt, -1, 1)) -+ return 0; -+ /* we might hit 0, so take the lock */ -+ mutex_lock(lock); -+ if (!atomic_dec_and_test(cnt)) { -+ /* when we actually did the dec, we didn't hit 0 */ -+ mutex_unlock(lock); -+ return 0; -+ } -+ /* we hit 0, and we hold the lock */ -+ return 1; -+} -+EXPORT_SYMBOL(atomic_dec_and_mutex_lock); -diff -Nur linux-4.4.13.orig/kernel/locking/rtmutex.c linux-4.4.13/kernel/locking/rtmutex.c ---- linux-4.4.13.orig/kernel/locking/rtmutex.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/locking/rtmutex.c 2016-06-22 21:55:08.050523867 +0200 -@@ -7,6 +7,11 @@ - * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner - * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt - * Copyright (C) 2006 Esben Nielsen -+ * Adaptive Spinlocks: -+ * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich, -+ * and Peter Morreale, -+ * Adaptive Spinlocks simplification: -+ * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt - * - * See Documentation/locking/rt-mutex-design.txt for details. - */ -@@ -16,6 +21,7 @@ - #include - #include - #include -+#include - - #include "rtmutex_common.h" - -@@ -69,6 +75,12 @@ - clear_rt_mutex_waiters(lock); - } - -+static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) -+{ -+ return waiter && waiter != PI_WAKEUP_INPROGRESS && -+ waiter != PI_REQUEUE_INPROGRESS; -+} -+ - /* - * We can speed up the acquire/release, if there's no debugging state to be - * set up. -@@ -99,13 +111,14 @@ - * 2) Drop lock->wait_lock - * 3) Try to unlock the lock with cmpxchg - */ --static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock) -+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, -+ unsigned long flags) - __releases(lock->wait_lock) - { - struct task_struct *owner = rt_mutex_owner(lock); - - clear_rt_mutex_waiters(lock); -- raw_spin_unlock(&lock->wait_lock); -+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); - /* - * If a new waiter comes in between the unlock and the cmpxchg - * we have two situations: -@@ -147,11 +160,12 @@ - /* - * Simple slow path only version: lock->owner is protected by lock->wait_lock. - */ --static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock) -+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, -+ unsigned long flags) - __releases(lock->wait_lock) - { - lock->owner = NULL; -- raw_spin_unlock(&lock->wait_lock); -+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); - return true; - } - #endif -@@ -348,6 +362,14 @@ - return debug_rt_mutex_detect_deadlock(waiter, chwalk); - } - -+static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter) -+{ -+ if (waiter->savestate) -+ wake_up_lock_sleeper(waiter->task); -+ else -+ wake_up_process(waiter->task); -+} -+ - /* - * Max number of times we'll walk the boosting chain: - */ -@@ -355,7 +377,8 @@ - - static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) - { -- return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; -+ return rt_mutex_real_waiter(p->pi_blocked_on) ? -+ p->pi_blocked_on->lock : NULL; - } - - /* -@@ -433,7 +456,6 @@ - int ret = 0, depth = 0; - struct rt_mutex *lock; - bool detect_deadlock; -- unsigned long flags; - bool requeue = true; - - detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk); -@@ -476,7 +498,7 @@ - /* - * [1] Task cannot go away as we did a get_task() before ! 
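/*
 * Editorial sketch, not part of the patch: canonical use of the
 * atomic_dec_and_mutex_lock() shown above, for an object whose teardown
 * is serialized by a mutex.  struct demo_obj and demo_teardown() are
 * hypothetical.
 */
#include <linux/atomic.h>
#include <linux/mutex.h>

struct demo_obj {
        atomic_t        refcnt;
        struct mutex    lock;
};

static void demo_teardown(struct demo_obj *obj)
{
        /* release resources; hypothetical */
}

static void demo_put(struct demo_obj *obj)
{
        if (!atomic_dec_and_mutex_lock(&obj->refcnt, &obj->lock))
                return;                 /* other references remain */
        /* the refcount hit zero and obj->lock is now held */
        demo_teardown(obj);
        mutex_unlock(&obj->lock);
}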
- */ -- raw_spin_lock_irqsave(&task->pi_lock, flags); -+ raw_spin_lock_irq(&task->pi_lock); - - /* - * [2] Get the waiter on which @task is blocked on. -@@ -492,7 +514,7 @@ - * reached or the state of the chain has changed while we - * dropped the locks. - */ -- if (!waiter) -+ if (!rt_mutex_real_waiter(waiter)) - goto out_unlock_pi; - - /* -@@ -560,7 +582,7 @@ - * operations. - */ - if (!raw_spin_trylock(&lock->wait_lock)) { -- raw_spin_unlock_irqrestore(&task->pi_lock, flags); -+ raw_spin_unlock_irq(&task->pi_lock); - cpu_relax(); - goto retry; - } -@@ -591,7 +613,7 @@ - /* - * No requeue[7] here. Just release @task [8] - */ -- raw_spin_unlock_irqrestore(&task->pi_lock, flags); -+ raw_spin_unlock(&task->pi_lock); - put_task_struct(task); - - /* -@@ -599,14 +621,14 @@ - * If there is no owner of the lock, end of chain. - */ - if (!rt_mutex_owner(lock)) { -- raw_spin_unlock(&lock->wait_lock); -+ raw_spin_unlock_irq(&lock->wait_lock); - return 0; - } - - /* [10] Grab the next task, i.e. owner of @lock */ - task = rt_mutex_owner(lock); - get_task_struct(task); -- raw_spin_lock_irqsave(&task->pi_lock, flags); -+ raw_spin_lock(&task->pi_lock); - - /* - * No requeue [11] here. We just do deadlock detection. -@@ -621,8 +643,8 @@ - top_waiter = rt_mutex_top_waiter(lock); - - /* [13] Drop locks */ -- raw_spin_unlock_irqrestore(&task->pi_lock, flags); -- raw_spin_unlock(&lock->wait_lock); -+ raw_spin_unlock(&task->pi_lock); -+ raw_spin_unlock_irq(&lock->wait_lock); - - /* If owner is not blocked, end of chain. */ - if (!next_lock) -@@ -643,7 +665,7 @@ - rt_mutex_enqueue(lock, waiter); - - /* [8] Release the task */ -- raw_spin_unlock_irqrestore(&task->pi_lock, flags); -+ raw_spin_unlock(&task->pi_lock); - put_task_struct(task); - - /* -@@ -654,21 +676,24 @@ - * follow here. This is the end of the chain we are walking. - */ - if (!rt_mutex_owner(lock)) { -+ struct rt_mutex_waiter *lock_top_waiter; -+ - /* - * If the requeue [7] above changed the top waiter, - * then we need to wake the new top waiter up to try - * to get the lock. - */ -- if (prerequeue_top_waiter != rt_mutex_top_waiter(lock)) -- wake_up_process(rt_mutex_top_waiter(lock)->task); -- raw_spin_unlock(&lock->wait_lock); -+ lock_top_waiter = rt_mutex_top_waiter(lock); -+ if (prerequeue_top_waiter != lock_top_waiter) -+ rt_mutex_wake_waiter(lock_top_waiter); -+ raw_spin_unlock_irq(&lock->wait_lock); - return 0; - } - - /* [10] Grab the next task, i.e. 
the owner of @lock */ - task = rt_mutex_owner(lock); - get_task_struct(task); -- raw_spin_lock_irqsave(&task->pi_lock, flags); -+ raw_spin_lock(&task->pi_lock); - - /* [11] requeue the pi waiters if necessary */ - if (waiter == rt_mutex_top_waiter(lock)) { -@@ -722,8 +747,8 @@ - top_waiter = rt_mutex_top_waiter(lock); - - /* [13] Drop the locks */ -- raw_spin_unlock_irqrestore(&task->pi_lock, flags); -- raw_spin_unlock(&lock->wait_lock); -+ raw_spin_unlock(&task->pi_lock); -+ raw_spin_unlock_irq(&lock->wait_lock); - - /* - * Make the actual exit decisions [12], based on the stored -@@ -746,28 +771,46 @@ - goto again; - - out_unlock_pi: -- raw_spin_unlock_irqrestore(&task->pi_lock, flags); -+ raw_spin_unlock_irq(&task->pi_lock); - out_put_task: - put_task_struct(task); - - return ret; - } - -+ -+#define STEAL_NORMAL 0 -+#define STEAL_LATERAL 1 -+ -+/* -+ * Note that RT tasks are excluded from lateral-steals to prevent the -+ * introduction of an unbounded latency -+ */ -+static inline int lock_is_stealable(struct task_struct *task, -+ struct task_struct *pendowner, int mode) -+{ -+ if (mode == STEAL_NORMAL || rt_task(task)) { -+ if (task->prio >= pendowner->prio) -+ return 0; -+ } else if (task->prio > pendowner->prio) -+ return 0; -+ return 1; -+} -+ - /* - * Try to take an rt-mutex - * -- * Must be called with lock->wait_lock held. -+ * Must be called with lock->wait_lock held and interrupts disabled - * - * @lock: The lock to be acquired. - * @task: The task which wants to acquire the lock - * @waiter: The waiter that is queued to the lock's wait tree if the - * callsite called task_blocked_on_lock(), otherwise NULL - */ --static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, -- struct rt_mutex_waiter *waiter) -+static int __try_to_take_rt_mutex(struct rt_mutex *lock, -+ struct task_struct *task, -+ struct rt_mutex_waiter *waiter, int mode) - { -- unsigned long flags; -- - /* - * Before testing whether we can acquire @lock, we set the - * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all -@@ -803,8 +846,10 @@ - * If waiter is not the highest priority waiter of - * @lock, give up. - */ -- if (waiter != rt_mutex_top_waiter(lock)) -+ if (waiter != rt_mutex_top_waiter(lock)) { -+ /* XXX lock_is_stealable() ? */ - return 0; -+ } - - /* - * We can acquire the lock. Remove the waiter from the -@@ -822,14 +867,10 @@ - * not need to be dequeued. - */ - if (rt_mutex_has_waiters(lock)) { -- /* -- * If @task->prio is greater than or equal to -- * the top waiter priority (kernel view), -- * @task lost. -- */ -- if (task->prio >= rt_mutex_top_waiter(lock)->prio) -- return 0; -+ struct task_struct *pown = rt_mutex_top_waiter(lock)->task; - -+ if (task != pown && !lock_is_stealable(task, pown, mode)) -+ return 0; - /* - * The current top waiter stays enqueued. We - * don't have to change anything in the lock -@@ -852,7 +893,7 @@ - * case, but conditionals are more expensive than a redundant - * store. - */ -- raw_spin_lock_irqsave(&task->pi_lock, flags); -+ raw_spin_lock(&task->pi_lock); - task->pi_blocked_on = NULL; - /* - * Finish the lock acquisition. @task is the new owner. If -@@ -861,7 +902,7 @@ - */ - if (rt_mutex_has_waiters(lock)) - rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock)); -- raw_spin_unlock_irqrestore(&task->pi_lock, flags); -+ raw_spin_unlock(&task->pi_lock); - - takeit: - /* We got the lock. 
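/*
 * Editorial sketch, not part of the patch: the stealing rule coded above
 * in plain form.  Lower ->prio means higher priority; STEAL_LATERAL lets
 * an equal-priority non-RT task take the lock, while STEAL_NORMAL (and
 * any RT task) needs strictly higher priority.  Illustration only.
 */
static int demo_is_stealable(int task_prio, int top_waiter_prio,
                             int lateral, int task_is_rt)
{
        if (!lateral || task_is_rt)
                return task_prio < top_waiter_prio;     /* strictly higher */
        return task_prio <= top_waiter_prio;            /* equal suffices */
}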
*/ -@@ -878,12 +919,405 @@ - return 1; - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+/* -+ * preemptible spin_lock functions: -+ */ -+static inline void rt_spin_lock_fastlock(struct rt_mutex *lock, -+ void (*slowfn)(struct rt_mutex *lock, -+ bool mg_off), -+ bool do_mig_dis) -+{ -+ might_sleep_no_state_check(); -+ -+ if (do_mig_dis) -+ migrate_disable(); -+ -+ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) -+ rt_mutex_deadlock_account_lock(lock, current); -+ else -+ slowfn(lock, do_mig_dis); -+} -+ -+static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock, -+ void (*slowfn)(struct rt_mutex *lock)) -+{ -+ if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) -+ rt_mutex_deadlock_account_unlock(current); -+ else -+ slowfn(lock); -+} -+#ifdef CONFIG_SMP -+/* -+ * Note that owner is a speculative pointer and dereferencing relies -+ * on rcu_read_lock() and the check against the lock owner. -+ */ -+static int adaptive_wait(struct rt_mutex *lock, -+ struct task_struct *owner) -+{ -+ int res = 0; -+ -+ rcu_read_lock(); -+ for (;;) { -+ if (owner != rt_mutex_owner(lock)) -+ break; -+ /* -+ * Ensure that owner->on_cpu is dereferenced _after_ -+ * checking the above to be valid. -+ */ -+ barrier(); -+ if (!owner->on_cpu) { -+ res = 1; -+ break; -+ } -+ cpu_relax(); -+ } -+ rcu_read_unlock(); -+ return res; -+} -+#else -+static int adaptive_wait(struct rt_mutex *lock, -+ struct task_struct *orig_owner) -+{ -+ return 1; -+} -+#endif -+ -+static int task_blocks_on_rt_mutex(struct rt_mutex *lock, -+ struct rt_mutex_waiter *waiter, -+ struct task_struct *task, -+ enum rtmutex_chainwalk chwalk); -+/* -+ * Slow path lock function spin_lock style: this variant is very -+ * careful not to miss any non-lock wakeups. -+ * -+ * We store the current state under p->pi_lock in p->saved_state and -+ * the try_to_wake_up() code handles this accordingly. -+ */ -+static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock, -+ bool mg_off) -+{ -+ struct task_struct *lock_owner, *self = current; -+ struct rt_mutex_waiter waiter, *top_waiter; -+ unsigned long flags; -+ int ret; -+ -+ rt_mutex_init_waiter(&waiter, true); -+ -+ raw_spin_lock_irqsave(&lock->wait_lock, flags); -+ -+ if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) { -+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); -+ return; -+ } -+ -+ BUG_ON(rt_mutex_owner(lock) == self); -+ -+ /* -+ * We save whatever state the task is in and we'll restore it -+ * after acquiring the lock taking real wakeups into account -+ * as well. We are serialized via pi_lock against wakeups. See -+ * try_to_wake_up(). -+ */ -+ raw_spin_lock(&self->pi_lock); -+ self->saved_state = self->state; -+ __set_current_state_no_track(TASK_UNINTERRUPTIBLE); -+ raw_spin_unlock(&self->pi_lock); -+ -+ ret = task_blocks_on_rt_mutex(lock, &waiter, self, RT_MUTEX_MIN_CHAINWALK); -+ BUG_ON(ret); -+ -+ for (;;) { -+ /* Try to acquire the lock again. 
*/ -+ if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL)) -+ break; -+ -+ top_waiter = rt_mutex_top_waiter(lock); -+ lock_owner = rt_mutex_owner(lock); -+ -+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); -+ -+ debug_rt_mutex_print_deadlock(&waiter); -+ -+ if (top_waiter != &waiter || adaptive_wait(lock, lock_owner)) { -+ if (mg_off) -+ migrate_enable(); -+ schedule(); -+ if (mg_off) -+ migrate_disable(); -+ } -+ -+ raw_spin_lock_irqsave(&lock->wait_lock, flags); -+ -+ raw_spin_lock(&self->pi_lock); -+ __set_current_state_no_track(TASK_UNINTERRUPTIBLE); -+ raw_spin_unlock(&self->pi_lock); -+ } -+ -+ /* -+ * Restore the task state to current->saved_state. We set it -+ * to the original state above and the try_to_wake_up() code -+ * has possibly updated it when a real (non-rtmutex) wakeup -+ * happened while we were blocked. Clear saved_state so -+ * try_to_wakeup() does not get confused. -+ */ -+ raw_spin_lock(&self->pi_lock); -+ __set_current_state_no_track(self->saved_state); -+ self->saved_state = TASK_RUNNING; -+ raw_spin_unlock(&self->pi_lock); -+ -+ /* -+ * try_to_take_rt_mutex() sets the waiter bit -+ * unconditionally. We might have to fix that up: -+ */ -+ fixup_rt_mutex_waiters(lock); -+ -+ BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock)); -+ BUG_ON(!RB_EMPTY_NODE(&waiter.tree_entry)); -+ -+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); -+ -+ debug_rt_mutex_free_waiter(&waiter); -+} -+ -+static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, -+ struct wake_q_head *wake_sleeper_q, -+ struct rt_mutex *lock); -+/* -+ * Slow path to release a rt_mutex spin_lock style -+ */ -+static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock) -+{ -+ unsigned long flags; -+ WAKE_Q(wake_q); -+ WAKE_Q(wake_sleeper_q); -+ -+ raw_spin_lock_irqsave(&lock->wait_lock, flags); -+ -+ debug_rt_mutex_unlock(lock); -+ -+ rt_mutex_deadlock_account_unlock(current); -+ -+ if (!rt_mutex_has_waiters(lock)) { -+ lock->owner = NULL; -+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); -+ return; -+ } -+ -+ mark_wakeup_next_waiter(&wake_q, &wake_sleeper_q, lock); -+ -+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); -+ wake_up_q(&wake_q); -+ wake_up_q_sleeper(&wake_sleeper_q); -+ -+ /* Undo pi boosting.when necessary */ -+ rt_mutex_adjust_prio(current); -+} -+ -+void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock) -+{ -+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, false); -+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); -+} -+EXPORT_SYMBOL(rt_spin_lock__no_mg); -+ -+void __lockfunc rt_spin_lock(spinlock_t *lock) -+{ -+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true); -+ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); -+} -+EXPORT_SYMBOL(rt_spin_lock); -+ -+void __lockfunc __rt_spin_lock(struct rt_mutex *lock) -+{ -+ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, true); -+} -+EXPORT_SYMBOL(__rt_spin_lock); -+ -+void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock) -+{ -+ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, false); -+} -+EXPORT_SYMBOL(__rt_spin_lock__no_mg); -+ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass) -+{ -+ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); -+ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true); -+} -+EXPORT_SYMBOL(rt_spin_lock_nested); -+#endif -+ -+void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock) -+{ -+ /* NOTE: we always pass in '1' for nested, for simplicity */ -+ 
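/*
 * Editorial sketch, not part of the patch: on PREEMPT_RT_FULL an
 * ordinary spinlock_t section maps onto the rt_spin_lock()/
 * rt_spin_unlock() calls defined here, so it may sleep under contention
 * while migrate_disable() keeps it on one CPU.  Hypothetical demo:
 */
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);
static unsigned long demo_count;

static void demo_inc(void)
{
        spin_lock(&demo_lock);          /* may block instead of spin on RT */
        demo_count++;
        spin_unlock(&demo_lock);        /* also re-enables migration on RT */
}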
spin_release(&lock->dep_map, 1, _RET_IP_); -+ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock); -+} -+EXPORT_SYMBOL(rt_spin_unlock__no_mg); -+ -+void __lockfunc rt_spin_unlock(spinlock_t *lock) -+{ -+ /* NOTE: we always pass in '1' for nested, for simplicity */ -+ spin_release(&lock->dep_map, 1, _RET_IP_); -+ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock); -+ migrate_enable(); -+} -+EXPORT_SYMBOL(rt_spin_unlock); -+ -+void __lockfunc __rt_spin_unlock(struct rt_mutex *lock) -+{ -+ rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock); -+} -+EXPORT_SYMBOL(__rt_spin_unlock); -+ -+/* -+ * Wait for the lock to get unlocked: instead of polling for an unlock -+ * (like raw spinlocks do), we lock and unlock, to force the kernel to -+ * schedule if there's contention: -+ */ -+void __lockfunc rt_spin_unlock_wait(spinlock_t *lock) -+{ -+ spin_lock(lock); -+ spin_unlock(lock); -+} -+EXPORT_SYMBOL(rt_spin_unlock_wait); -+ -+int __lockfunc __rt_spin_trylock(struct rt_mutex *lock) -+{ -+ return rt_mutex_trylock(lock); -+} -+ -+int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock) -+{ -+ int ret; -+ -+ ret = rt_mutex_trylock(&lock->lock); -+ if (ret) -+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); -+ return ret; -+} -+EXPORT_SYMBOL(rt_spin_trylock__no_mg); -+ -+int __lockfunc rt_spin_trylock(spinlock_t *lock) -+{ -+ int ret; -+ -+ migrate_disable(); -+ ret = rt_mutex_trylock(&lock->lock); -+ if (ret) -+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); -+ else -+ migrate_enable(); -+ return ret; -+} -+EXPORT_SYMBOL(rt_spin_trylock); -+ -+int __lockfunc rt_spin_trylock_bh(spinlock_t *lock) -+{ -+ int ret; -+ -+ local_bh_disable(); -+ ret = rt_mutex_trylock(&lock->lock); -+ if (ret) { -+ migrate_disable(); -+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); -+ } else -+ local_bh_enable(); -+ return ret; -+} -+EXPORT_SYMBOL(rt_spin_trylock_bh); -+ -+int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags) -+{ -+ int ret; -+ -+ *flags = 0; -+ ret = rt_mutex_trylock(&lock->lock); -+ if (ret) { -+ migrate_disable(); -+ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); -+ } -+ return ret; -+} -+EXPORT_SYMBOL(rt_spin_trylock_irqsave); -+ -+int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock) -+{ -+ /* Subtract 1 from counter unless that drops it to 0 (ie. 
it was 1) */ -+ if (atomic_add_unless(atomic, -1, 1)) -+ return 0; -+ rt_spin_lock(lock); -+ if (atomic_dec_and_test(atomic)) -+ return 1; -+ rt_spin_unlock(lock); -+ return 0; -+} -+EXPORT_SYMBOL(atomic_dec_and_spin_lock); -+ -+ void -+__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key) -+{ -+#ifdef CONFIG_DEBUG_LOCK_ALLOC -+ /* -+ * Make sure we are not reinitializing a held lock: -+ */ -+ debug_check_no_locks_freed((void *)lock, sizeof(*lock)); -+ lockdep_init_map(&lock->dep_map, name, key, 0); -+#endif -+} -+EXPORT_SYMBOL(__rt_spin_lock_init); -+ -+#endif /* PREEMPT_RT_FULL */ -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ static inline int __sched -+__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) -+{ -+ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock); -+ struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx); -+ -+ if (!hold_ctx) -+ return 0; -+ -+ if (unlikely(ctx == hold_ctx)) -+ return -EALREADY; -+ -+ if (ctx->stamp - hold_ctx->stamp <= LONG_MAX && -+ (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) { -+#ifdef CONFIG_DEBUG_MUTEXES -+ DEBUG_LOCKS_WARN_ON(ctx->contending_lock); -+ ctx->contending_lock = ww; -+#endif -+ return -EDEADLK; -+ } -+ -+ return 0; -+} -+#else -+ static inline int __sched -+__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) -+{ -+ BUG(); -+ return 0; -+} -+ -+#endif -+ -+static inline int -+try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, -+ struct rt_mutex_waiter *waiter) -+{ -+ return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL); -+} -+ - /* - * Task blocks on lock. - * - * Prepare waiter and propagate pi chain - * -- * This must be called with lock->wait_lock held. -+ * This must be called with lock->wait_lock held and interrupts disabled - */ - static int task_blocks_on_rt_mutex(struct rt_mutex *lock, - struct rt_mutex_waiter *waiter, -@@ -894,7 +1328,6 @@ - struct rt_mutex_waiter *top_waiter = waiter; - struct rt_mutex *next_lock; - int chain_walk = 0, res; -- unsigned long flags; - - /* - * Early deadlock detection. We really don't want the task to -@@ -908,7 +1341,24 @@ - if (owner == task) - return -EDEADLK; - -- raw_spin_lock_irqsave(&task->pi_lock, flags); -+ raw_spin_lock(&task->pi_lock); -+ -+ /* -+ * In the case of futex requeue PI, this will be a proxy -+ * lock. The task will wake unaware that it is enqueueed on -+ * this lock. Avoid blocking on two locks and corrupting -+ * pi_blocked_on via the PI_WAKEUP_INPROGRESS -+ * flag. futex_wait_requeue_pi() sets this when it wakes up -+ * before requeue (due to a signal or timeout). Do not enqueue -+ * the task if PI_WAKEUP_INPROGRESS is set. 
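/*
 * Editorial sketch, not part of the patch: the stamp check above
 * preserves the usual ww_mutex contract on RT: -EALREADY when a context
 * re-acquires its own lock, -EDEADLK for the younger transaction, which
 * must back off.  demo_lock_pair() is hypothetical.
 */
#include <linux/ww_mutex.h>

static int demo_lock_pair(struct ww_mutex *a, struct ww_mutex *b,
                          struct ww_acquire_ctx *ctx)
{
        int ret;

        ret = ww_mutex_lock(a, ctx);
        if (ret)
                return ret;
        ret = ww_mutex_lock(b, ctx);
        if (ret == -EDEADLK)
                ww_mutex_unlock(a);     /* younger context backs off */
        return ret;
}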
-+ */ -+ if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) { -+ raw_spin_unlock(&task->pi_lock); -+ return -EAGAIN; -+ } -+ -+ BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)); -+ - __rt_mutex_adjust_prio(task); - waiter->task = task; - waiter->lock = lock; -@@ -921,18 +1371,18 @@ - - task->pi_blocked_on = waiter; - -- raw_spin_unlock_irqrestore(&task->pi_lock, flags); -+ raw_spin_unlock(&task->pi_lock); - - if (!owner) - return 0; - -- raw_spin_lock_irqsave(&owner->pi_lock, flags); -+ raw_spin_lock(&owner->pi_lock); - if (waiter == rt_mutex_top_waiter(lock)) { - rt_mutex_dequeue_pi(owner, top_waiter); - rt_mutex_enqueue_pi(owner, waiter); - - __rt_mutex_adjust_prio(owner); -- if (owner->pi_blocked_on) -+ if (rt_mutex_real_waiter(owner->pi_blocked_on)) - chain_walk = 1; - } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) { - chain_walk = 1; -@@ -941,7 +1391,7 @@ - /* Store the lock on which owner is blocked or NULL */ - next_lock = task_blocked_on_lock(owner); - -- raw_spin_unlock_irqrestore(&owner->pi_lock, flags); -+ raw_spin_unlock(&owner->pi_lock); - /* - * Even if full deadlock detection is on, if the owner is not - * blocked itself, we can avoid finding this out in the chain -@@ -957,12 +1407,12 @@ - */ - get_task_struct(owner); - -- raw_spin_unlock(&lock->wait_lock); -+ raw_spin_unlock_irq(&lock->wait_lock); - - res = rt_mutex_adjust_prio_chain(owner, chwalk, lock, - next_lock, waiter, task); - -- raw_spin_lock(&lock->wait_lock); -+ raw_spin_lock_irq(&lock->wait_lock); - - return res; - } -@@ -971,15 +1421,15 @@ - * Remove the top waiter from the current tasks pi waiter tree and - * queue it up. - * -- * Called with lock->wait_lock held. -+ * Called with lock->wait_lock held and interrupts disabled. - */ - static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, -+ struct wake_q_head *wake_sleeper_q, - struct rt_mutex *lock) - { - struct rt_mutex_waiter *waiter; -- unsigned long flags; - -- raw_spin_lock_irqsave(¤t->pi_lock, flags); -+ raw_spin_lock(¤t->pi_lock); - - waiter = rt_mutex_top_waiter(lock); - -@@ -1001,15 +1451,18 @@ - */ - lock->owner = (void *) RT_MUTEX_HAS_WAITERS; - -- raw_spin_unlock_irqrestore(¤t->pi_lock, flags); -+ raw_spin_unlock(¤t->pi_lock); - -- wake_q_add(wake_q, waiter->task); -+ if (waiter->savestate) -+ wake_q_add(wake_sleeper_q, waiter->task); -+ else -+ wake_q_add(wake_q, waiter->task); - } - - /* - * Remove a waiter from a lock and give up - * -- * Must be called with lock->wait_lock held and -+ * Must be called with lock->wait_lock held and interrupts disabled. I must - * have just failed to try_to_take_rt_mutex(). 
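/*
 * Editorial sketch, not part of the patch: the wake_q pattern extended
 * above defers wakeups until wait_lock is dropped; the series adds a
 * second queue for saved-state ("sleeper") waiters.  Base pattern, with
 * hypothetical tasks:
 */
#include <linux/sched.h>

static void demo_wake_later(struct task_struct *t1, struct task_struct *t2)
{
        WAKE_Q(wq);                     /* on-stack deferred wake queue */

        wake_q_add(&wq, t1);
        wake_q_add(&wq, t2);
        /* ... drop the lock protecting the wait list here ... */
        wake_up_q(&wq);                 /* perform the deferred wakeups */
}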
- */ - static void remove_waiter(struct rt_mutex *lock, -@@ -1017,13 +1470,12 @@ - { - bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); - struct task_struct *owner = rt_mutex_owner(lock); -- struct rt_mutex *next_lock; -- unsigned long flags; -+ struct rt_mutex *next_lock = NULL; - -- raw_spin_lock_irqsave(¤t->pi_lock, flags); -+ raw_spin_lock(¤t->pi_lock); - rt_mutex_dequeue(lock, waiter); - current->pi_blocked_on = NULL; -- raw_spin_unlock_irqrestore(¤t->pi_lock, flags); -+ raw_spin_unlock(¤t->pi_lock); - - /* - * Only update priority if the waiter was the highest priority -@@ -1032,7 +1484,7 @@ - if (!owner || !is_top_waiter) - return; - -- raw_spin_lock_irqsave(&owner->pi_lock, flags); -+ raw_spin_lock(&owner->pi_lock); - - rt_mutex_dequeue_pi(owner, waiter); - -@@ -1042,9 +1494,10 @@ - __rt_mutex_adjust_prio(owner); - - /* Store the lock on which owner is blocked or NULL */ -- next_lock = task_blocked_on_lock(owner); -+ if (rt_mutex_real_waiter(owner->pi_blocked_on)) -+ next_lock = task_blocked_on_lock(owner); - -- raw_spin_unlock_irqrestore(&owner->pi_lock, flags); -+ raw_spin_unlock(&owner->pi_lock); - - /* - * Don't walk the chain, if the owner task is not blocked -@@ -1056,12 +1509,12 @@ - /* gets dropped in rt_mutex_adjust_prio_chain()! */ - get_task_struct(owner); - -- raw_spin_unlock(&lock->wait_lock); -+ raw_spin_unlock_irq(&lock->wait_lock); - - rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock, - next_lock, NULL, current); - -- raw_spin_lock(&lock->wait_lock); -+ raw_spin_lock_irq(&lock->wait_lock); - } - - /* -@@ -1078,17 +1531,17 @@ - raw_spin_lock_irqsave(&task->pi_lock, flags); - - waiter = task->pi_blocked_on; -- if (!waiter || (waiter->prio == task->prio && -+ if (!rt_mutex_real_waiter(waiter) || (waiter->prio == task->prio && - !dl_prio(task->prio))) { - raw_spin_unlock_irqrestore(&task->pi_lock, flags); - return; - } - next_lock = waiter->lock; -- raw_spin_unlock_irqrestore(&task->pi_lock, flags); - - /* gets dropped in rt_mutex_adjust_prio_chain()! */ - get_task_struct(task); - -+ raw_spin_unlock_irqrestore(&task->pi_lock, flags); - rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL, - next_lock, NULL, task); - } -@@ -1097,16 +1550,17 @@ - * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop - * @lock: the rt_mutex to take - * @state: the state the task should block in (TASK_INTERRUPTIBLE -- * or TASK_UNINTERRUPTIBLE) -+ * or TASK_UNINTERRUPTIBLE) - * @timeout: the pre-initialized and started timer, or NULL for none - * @waiter: the pre-initialized rt_mutex_waiter - * -- * lock->wait_lock must be held by the caller. 
-+ * Must be called with lock->wait_lock held and interrupts disabled - */ - static int __sched - __rt_mutex_slowlock(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, -- struct rt_mutex_waiter *waiter) -+ struct rt_mutex_waiter *waiter, -+ struct ww_acquire_ctx *ww_ctx) - { - int ret = 0; - -@@ -1129,13 +1583,19 @@ - break; - } - -- raw_spin_unlock(&lock->wait_lock); -+ if (ww_ctx && ww_ctx->acquired > 0) { -+ ret = __mutex_lock_check_stamp(lock, ww_ctx); -+ if (ret) -+ break; -+ } -+ -+ raw_spin_unlock_irq(&lock->wait_lock); - - debug_rt_mutex_print_deadlock(waiter); - - schedule(); - -- raw_spin_lock(&lock->wait_lock); -+ raw_spin_lock_irq(&lock->wait_lock); - set_current_state(state); - } - -@@ -1163,26 +1623,112 @@ - } - } - -+static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww, -+ struct ww_acquire_ctx *ww_ctx) -+{ -+#ifdef CONFIG_DEBUG_MUTEXES -+ /* -+ * If this WARN_ON triggers, you used ww_mutex_lock to acquire, -+ * but released with a normal mutex_unlock in this call. -+ * -+ * This should never happen, always use ww_mutex_unlock. -+ */ -+ DEBUG_LOCKS_WARN_ON(ww->ctx); -+ -+ /* -+ * Not quite done after calling ww_acquire_done() ? -+ */ -+ DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire); -+ -+ if (ww_ctx->contending_lock) { -+ /* -+ * After -EDEADLK you tried to -+ * acquire a different ww_mutex? Bad! -+ */ -+ DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww); -+ -+ /* -+ * You called ww_mutex_lock after receiving -EDEADLK, -+ * but 'forgot' to unlock everything else first? -+ */ -+ DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0); -+ ww_ctx->contending_lock = NULL; -+ } -+ -+ /* -+ * Naughty, using a different class will lead to undefined behavior! -+ */ -+ DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class); -+#endif -+ ww_ctx->acquired++; -+} -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+static void ww_mutex_account_lock(struct rt_mutex *lock, -+ struct ww_acquire_ctx *ww_ctx) -+{ -+ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock); -+ struct rt_mutex_waiter *waiter, *n; -+ -+ /* -+ * This branch gets optimized out for the common case, -+ * and is only important for ww_mutex_lock. -+ */ -+ ww_mutex_lock_acquired(ww, ww_ctx); -+ ww->ctx = ww_ctx; -+ -+ /* -+ * Give any possible sleeping processes the chance to wake up, -+ * so they can recheck if they have to back off. -+ */ -+ rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters, -+ tree_entry) { -+ /* XXX debug rt mutex waiter wakeup */ -+ -+ BUG_ON(waiter->lock != lock); -+ rt_mutex_wake_waiter(waiter); -+ } -+} -+ -+#else -+ -+static void ww_mutex_account_lock(struct rt_mutex *lock, -+ struct ww_acquire_ctx *ww_ctx) -+{ -+ BUG(); -+} -+#endif -+ - /* - * Slow path lock function: - */ - static int __sched - rt_mutex_slowlock(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, -- enum rtmutex_chainwalk chwalk) -+ enum rtmutex_chainwalk chwalk, -+ struct ww_acquire_ctx *ww_ctx) - { - struct rt_mutex_waiter waiter; -+ unsigned long flags; - int ret = 0; - -- debug_rt_mutex_init_waiter(&waiter); -- RB_CLEAR_NODE(&waiter.pi_tree_entry); -- RB_CLEAR_NODE(&waiter.tree_entry); -+ rt_mutex_init_waiter(&waiter, false); - -- raw_spin_lock(&lock->wait_lock); -+ /* -+ * Technically we could use raw_spin_[un]lock_irq() here, but this can -+ * be called in early boot if the cmpxchg() fast path is disabled -+ * (debug, no architecture support). In this case we will acquire the -+ * rtmutex with lock->wait_lock held. 
But we cannot unconditionally -+ * enable interrupts in that early boot case. So we need to use the -+ * irqsave/restore variants. -+ */ -+ raw_spin_lock_irqsave(&lock->wait_lock, flags); - - /* Try to acquire the lock again: */ - if (try_to_take_rt_mutex(lock, current, NULL)) { -- raw_spin_unlock(&lock->wait_lock); -+ if (ww_ctx) -+ ww_mutex_account_lock(lock, ww_ctx); -+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); - return 0; - } - -@@ -1196,13 +1742,23 @@ - - if (likely(!ret)) - /* sleep on the mutex */ -- ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); -+ ret = __rt_mutex_slowlock(lock, state, timeout, &waiter, -+ ww_ctx); -+ else if (ww_ctx) { -+ /* ww_mutex received EDEADLK, let it become EALREADY */ -+ ret = __mutex_lock_check_stamp(lock, ww_ctx); -+ BUG_ON(!ret); -+ } - - if (unlikely(ret)) { - __set_current_state(TASK_RUNNING); - if (rt_mutex_has_waiters(lock)) - remove_waiter(lock, &waiter); -- rt_mutex_handle_deadlock(ret, chwalk, &waiter); -+ /* ww_mutex want to report EDEADLK/EALREADY, let them */ -+ if (!ww_ctx) -+ rt_mutex_handle_deadlock(ret, chwalk, &waiter); -+ } else if (ww_ctx) { -+ ww_mutex_account_lock(lock, ww_ctx); - } - - /* -@@ -1211,7 +1767,7 @@ - */ - fixup_rt_mutex_waiters(lock); - -- raw_spin_unlock(&lock->wait_lock); -+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); - - /* Remove pending timer: */ - if (unlikely(timeout)) -@@ -1227,6 +1783,7 @@ - */ - static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) - { -+ unsigned long flags; - int ret; - - /* -@@ -1238,10 +1795,10 @@ - return 0; - - /* -- * The mutex has currently no owner. Lock the wait lock and -- * try to acquire the lock. -+ * The mutex has currently no owner. Lock the wait lock and try to -+ * acquire the lock. We use irqsave here to support early boot calls. - */ -- raw_spin_lock(&lock->wait_lock); -+ raw_spin_lock_irqsave(&lock->wait_lock, flags); - - ret = try_to_take_rt_mutex(lock, current, NULL); - -@@ -1251,7 +1808,7 @@ - */ - fixup_rt_mutex_waiters(lock); - -- raw_spin_unlock(&lock->wait_lock); -+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); - - return ret; - } -@@ -1261,9 +1818,13 @@ - * Return whether the current task needs to undo a potential priority boosting. - */ - static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, -- struct wake_q_head *wake_q) -+ struct wake_q_head *wake_q, -+ struct wake_q_head *wake_sleeper_q) - { -- raw_spin_lock(&lock->wait_lock); -+ unsigned long flags; -+ -+ /* irqsave required to support early boot calls */ -+ raw_spin_lock_irqsave(&lock->wait_lock, flags); - - debug_rt_mutex_unlock(lock); - -@@ -1302,10 +1863,10 @@ - */ - while (!rt_mutex_has_waiters(lock)) { - /* Drops lock->wait_lock ! */ -- if (unlock_rt_mutex_safe(lock) == true) -+ if (unlock_rt_mutex_safe(lock, flags) == true) - return false; - /* Relock the rtmutex and try again */ -- raw_spin_lock(&lock->wait_lock); -+ raw_spin_lock_irqsave(&lock->wait_lock, flags); - } - - /* -@@ -1314,9 +1875,9 @@ - * - * Queue the next waiter for wakeup once we release the wait_lock. 
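Aside: __rt_mutex_slowlock() above is the classic wait loop: try to take the lock, bail on signal or timeout, otherwise drop the (now irq-off) wait_lock, schedule, retake, and retry. A rough userspace analogue using a condition variable, which folds the unlock/schedule/relock step into pthread_cond_wait(); all names are hypothetical:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t wait_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t waiters = PTHREAD_COND_INITIALIZER;
static bool lock_free = true;     /* stands in for the rtmutex owner field */
static bool got_signal = false;   /* signal_pending() analogue */

static int slowlock_sketch(void)
{
    int ret = 0;

    pthread_mutex_lock(&wait_lock);
    for (;;) {
        if (lock_free) {          /* try_to_take_rt_mutex() analogue */
            lock_free = false;
            break;
        }
        if (got_signal) {         /* would return -EINTR */
            ret = -1;
            break;
        }
        /* drop wait_lock, schedule(), retake wait_lock */
        pthread_cond_wait(&waiters, &wait_lock);
    }
    pthread_mutex_unlock(&wait_lock);
    return ret;
}

int main(void)
{
    printf("slowlock returned %d\n", slowlock_sketch());
    return 0;
}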
- */ -- mark_wakeup_next_waiter(wake_q, lock); -+ mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock); - -- raw_spin_unlock(&lock->wait_lock); -+ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); - - /* check PI boosting */ - return true; -@@ -1330,31 +1891,36 @@ - */ - static inline int - rt_mutex_fastlock(struct rt_mutex *lock, int state, -+ struct ww_acquire_ctx *ww_ctx, - int (*slowfn)(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, -- enum rtmutex_chainwalk chwalk)) -+ enum rtmutex_chainwalk chwalk, -+ struct ww_acquire_ctx *ww_ctx)) - { - if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { - rt_mutex_deadlock_account_lock(lock, current); - return 0; - } else -- return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); -+ return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, -+ ww_ctx); - } - - static inline int - rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, - enum rtmutex_chainwalk chwalk, -+ struct ww_acquire_ctx *ww_ctx, - int (*slowfn)(struct rt_mutex *lock, int state, - struct hrtimer_sleeper *timeout, -- enum rtmutex_chainwalk chwalk)) -+ enum rtmutex_chainwalk chwalk, -+ struct ww_acquire_ctx *ww_ctx)) - { - if (chwalk == RT_MUTEX_MIN_CHAINWALK && - likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { - rt_mutex_deadlock_account_lock(lock, current); - return 0; - } else -- return slowfn(lock, state, timeout, chwalk); -+ return slowfn(lock, state, timeout, chwalk, ww_ctx); - } - - static inline int -@@ -1371,17 +1937,20 @@ - static inline void - rt_mutex_fastunlock(struct rt_mutex *lock, - bool (*slowfn)(struct rt_mutex *lock, -- struct wake_q_head *wqh)) -+ struct wake_q_head *wqh, -+ struct wake_q_head *wq_sleeper)) - { - WAKE_Q(wake_q); -+ WAKE_Q(wake_sleeper_q); - - if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) { - rt_mutex_deadlock_account_unlock(current); - - } else { -- bool deboost = slowfn(lock, &wake_q); -+ bool deboost = slowfn(lock, &wake_q, &wake_sleeper_q); - - wake_up_q(&wake_q); -+ wake_up_q_sleeper(&wake_sleeper_q); - - /* Undo pi boosting if necessary: */ - if (deboost) -@@ -1398,7 +1967,7 @@ - { - might_sleep(); - -- rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); -+ rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, NULL, rt_mutex_slowlock); - } - EXPORT_SYMBOL_GPL(rt_mutex_lock); - -@@ -1415,7 +1984,7 @@ - { - might_sleep(); - -- return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock); -+ return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, NULL, rt_mutex_slowlock); - } - EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); - -@@ -1428,11 +1997,30 @@ - might_sleep(); - - return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, -- RT_MUTEX_FULL_CHAINWALK, -+ RT_MUTEX_FULL_CHAINWALK, NULL, - rt_mutex_slowlock); - } - - /** -+ * rt_mutex_lock_killable - lock a rt_mutex killable -+ * -+ * @lock: the rt_mutex to be locked -+ * @detect_deadlock: deadlock detection on/off -+ * -+ * Returns: -+ * 0 on success -+ * -EINTR when interrupted by a signal -+ * -EDEADLK when the lock would deadlock (when deadlock detection is on) -+ */ -+int __sched rt_mutex_lock_killable(struct rt_mutex *lock) -+{ -+ might_sleep(); -+ -+ return rt_mutex_fastlock(lock, TASK_KILLABLE, NULL, rt_mutex_slowlock); -+} -+EXPORT_SYMBOL_GPL(rt_mutex_lock_killable); -+ -+/** - * rt_mutex_timed_lock - lock a rt_mutex interruptible - * the timeout structure is provided - * by the caller -@@ -1452,6 +2040,7 @@ - - return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, 
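/*
 * Aside: a userspace sketch of the cmpxchg fast path used by
 * rt_mutex_fastlock()/rt_mutex_fastunlock() above. An uncontended lock
 * is a single NULL -> current transition on the owner field; unlock is
 * the reverse, falling back to the slow path when that fails. All names
 * below are hypothetical analogues, not the kernel implementation.
 */
#include <stdatomic.h>
#include <stdio.h>

struct rt_mutex_sketch { _Atomic(void *) owner; };

static int fastlock(struct rt_mutex_sketch *m, void *self)
{
    void *expected = NULL;

    /* fast path: uncontended NULL -> self transition, no wait_lock */
    if (atomic_compare_exchange_strong_explicit(&m->owner, &expected, self,
                                                memory_order_acquire,
                                                memory_order_relaxed))
        return 0;
    /* slow path would enqueue a waiter; elided in this sketch */
    return -1;
}

static void fastunlock(struct rt_mutex_sketch *m, void *self)
{
    void *expected = self;

    /* fast path: self -> NULL; a failure means waiters exist */
    atomic_compare_exchange_strong_explicit(&m->owner, &expected, NULL,
                                            memory_order_release,
                                            memory_order_relaxed);
}

int main(void)
{
    struct rt_mutex_sketch m = { .owner = NULL };
    int me;

    if (fastlock(&m, &me) == 0)
        puts("took fast path");
    fastunlock(&m, &me);
    return 0;
}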
timeout, - RT_MUTEX_MIN_CHAINWALK, -+ NULL, - rt_mutex_slowlock); - } - EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); -@@ -1469,7 +2058,11 @@ - */ - int __sched rt_mutex_trylock(struct rt_mutex *lock) - { -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (WARN_ON_ONCE(in_irq() || in_nmi())) -+#else - if (WARN_ON(in_irq() || in_nmi() || in_serving_softirq())) -+#endif - return 0; - - return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); -@@ -1495,13 +2088,14 @@ - * required or not. - */ - bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock, -- struct wake_q_head *wqh) -+ struct wake_q_head *wqh, -+ struct wake_q_head *wq_sleeper) - { - if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) { - rt_mutex_deadlock_account_unlock(current); - return false; - } -- return rt_mutex_slowunlock(lock, wqh); -+ return rt_mutex_slowunlock(lock, wqh, wq_sleeper); - } - - /** -@@ -1534,13 +2128,12 @@ - void __rt_mutex_init(struct rt_mutex *lock, const char *name) - { - lock->owner = NULL; -- raw_spin_lock_init(&lock->wait_lock); - lock->waiters = RB_ROOT; - lock->waiters_leftmost = NULL; - - debug_rt_mutex_init(lock, name); - } --EXPORT_SYMBOL_GPL(__rt_mutex_init); -+EXPORT_SYMBOL(__rt_mutex_init); - - /** - * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a -@@ -1555,7 +2148,7 @@ - void rt_mutex_init_proxy_locked(struct rt_mutex *lock, - struct task_struct *proxy_owner) - { -- __rt_mutex_init(lock, NULL); -+ rt_mutex_init(lock); - debug_rt_mutex_proxy_lock(lock, proxy_owner); - rt_mutex_set_owner(lock, proxy_owner); - rt_mutex_deadlock_account_lock(lock, proxy_owner); -@@ -1596,13 +2189,42 @@ - { - int ret; - -- raw_spin_lock(&lock->wait_lock); -+ raw_spin_lock_irq(&lock->wait_lock); - - if (try_to_take_rt_mutex(lock, task, NULL)) { -- raw_spin_unlock(&lock->wait_lock); -+ raw_spin_unlock_irq(&lock->wait_lock); - return 1; - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ /* -+ * In PREEMPT_RT there's an added race. -+ * If the task, that we are about to requeue, times out, -+ * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue -+ * to skip this task. But right after the task sets -+ * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then -+ * block on the spin_lock(&hb->lock), which in RT is an rtmutex. -+ * This will replace the PI_WAKEUP_INPROGRESS with the actual -+ * lock that it blocks on. We *must not* place this task -+ * on this proxy lock in that case. -+ * -+ * To prevent this race, we first take the task's pi_lock -+ * and check if it has updated its pi_blocked_on. If it has, -+ * we assume that it woke up and we return -EAGAIN. -+ * Otherwise, we set the task's pi_blocked_on to -+ * PI_REQUEUE_INPROGRESS, so that if the task is waking up -+ * it will know that we are in the process of requeuing it. 
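Aside: the PI_WAKEUP_INPROGRESS/PI_REQUEUE_INPROGRESS markers described above are small sentinel values stored in pi_blocked_on so each side of the race can see the other's progress. A compressed userspace sketch follows; the kernel does this under pi_lock, while the sketch folds the locked check-and-set into a single compare-and-swap, and the names are hypothetical:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define WAKEUP_INPROGRESS  ((void *)1)  /* mirrors PI_WAKEUP_INPROGRESS */
#define REQUEUE_INPROGRESS ((void *)2)  /* mirrors PI_REQUEUE_INPROGRESS */

struct task_sketch { _Atomic(void *) blocked_on; };

static int real_waiter(void *w)
{
    /* sentinels are small non-NULL values, never real waiter pointers */
    return w && (uintptr_t)w > 2;
}

/* Requeue side: back off (-EAGAIN analogue) if the task already marked
 * itself as waking up or blocked somewhere else in the meantime. */
static int start_requeue(struct task_sketch *t)
{
    void *expected = NULL;

    if (!atomic_compare_exchange_strong(&t->blocked_on, &expected,
                                        REQUEUE_INPROGRESS))
        return -1;      /* lost the race, caller must not requeue */
    return 0;
}

int main(void)
{
    struct task_sketch t = { .blocked_on = NULL };

    printf("requeue %s\n", start_requeue(&t) ? "raced" : "started");
    printf("real waiter? %d\n", real_waiter(WAKEUP_INPROGRESS));
    return 0;
}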
-+ */ -+ raw_spin_lock(&task->pi_lock); -+ if (task->pi_blocked_on) { -+ raw_spin_unlock(&task->pi_lock); -+ raw_spin_unlock_irq(&lock->wait_lock); -+ return -EAGAIN; -+ } -+ task->pi_blocked_on = PI_REQUEUE_INPROGRESS; -+ raw_spin_unlock(&task->pi_lock); -+#endif -+ - /* We enforce deadlock detection for futexes */ - ret = task_blocks_on_rt_mutex(lock, waiter, task, - RT_MUTEX_FULL_CHAINWALK); -@@ -1617,10 +2239,10 @@ - ret = 0; - } - -- if (unlikely(ret)) -+ if (ret && rt_mutex_has_waiters(lock)) - remove_waiter(lock, waiter); - -- raw_spin_unlock(&lock->wait_lock); -+ raw_spin_unlock_irq(&lock->wait_lock); - - debug_rt_mutex_print_deadlock(waiter); - -@@ -1668,12 +2290,12 @@ - { - int ret; - -- raw_spin_lock(&lock->wait_lock); -+ raw_spin_lock_irq(&lock->wait_lock); - - set_current_state(TASK_INTERRUPTIBLE); - - /* sleep on the mutex */ -- ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); -+ ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL); - - if (unlikely(ret)) - remove_waiter(lock, waiter); -@@ -1684,7 +2306,93 @@ - */ - fixup_rt_mutex_waiters(lock); - -- raw_spin_unlock(&lock->wait_lock); -+ raw_spin_unlock_irq(&lock->wait_lock); - - return ret; - } -+ -+static inline int -+ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) -+{ -+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH -+ unsigned tmp; -+ -+ if (ctx->deadlock_inject_countdown-- == 0) { -+ tmp = ctx->deadlock_inject_interval; -+ if (tmp > UINT_MAX/4) -+ tmp = UINT_MAX; -+ else -+ tmp = tmp*2 + tmp + tmp/2; -+ -+ ctx->deadlock_inject_interval = tmp; -+ ctx->deadlock_inject_countdown = tmp; -+ ctx->contending_lock = lock; -+ -+ ww_mutex_unlock(lock); -+ -+ return -EDEADLK; -+ } -+#endif -+ -+ return 0; -+} -+ -+#ifdef CONFIG_PREEMPT_RT_FULL -+int __sched -+__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx) -+{ -+ int ret; -+ -+ might_sleep(); -+ -+ mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_); -+ ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx); -+ if (ret) -+ mutex_release(&lock->base.dep_map, 1, _RET_IP_); -+ else if (!ret && ww_ctx->acquired > 1) -+ return ww_mutex_deadlock_injection(lock, ww_ctx); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible); -+ -+int __sched -+__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx) -+{ -+ int ret; -+ -+ might_sleep(); -+ -+ mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_); -+ ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx); -+ if (ret) -+ mutex_release(&lock->base.dep_map, 1, _RET_IP_); -+ else if (!ret && ww_ctx->acquired > 1) -+ return ww_mutex_deadlock_injection(lock, ww_ctx); -+ -+ return ret; -+} -+EXPORT_SYMBOL_GPL(__ww_mutex_lock); -+ -+void __sched ww_mutex_unlock(struct ww_mutex *lock) -+{ -+ int nest = !!lock->ctx; -+ -+ /* -+ * The unlocking fastpath is the 0->1 transition from 'locked' -+ * into 'unlocked' state: -+ */ -+ if (nest) { -+#ifdef CONFIG_DEBUG_MUTEXES -+ DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired); -+#endif -+ if (lock->ctx->acquired > 0) -+ lock->ctx->acquired--; -+ lock->ctx = NULL; -+ } -+ -+ mutex_release(&lock->base.dep_map, nest, _RET_IP_); -+ rt_mutex_unlock(&lock->base.lock); -+} -+EXPORT_SYMBOL(ww_mutex_unlock); -+#endif -diff -Nur linux-4.4.13.orig/kernel/locking/rtmutex_common.h linux-4.4.13/kernel/locking/rtmutex_common.h ---- linux-4.4.13.orig/kernel/locking/rtmutex_common.h 2016-06-08 03:14:51.000000000 +0200 
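Aside: ww_mutex_deadlock_injection() above grows the injection interval by 3.5x in integer arithmetic (tmp*2 + tmp + tmp/2), clamping to UINT_MAX once it could overflow. That update rule in isolation, runnable:

#include <limits.h>
#include <stdio.h>

int main(void)
{
    unsigned tmp = 1;

    for (int i = 0; i < 8; i++) {
        if (tmp > UINT_MAX / 4)
            tmp = UINT_MAX;              /* clamp before overflow */
        else
            tmp = tmp * 2 + tmp + tmp / 2; /* 3.5x, integer math */
        printf("interval after injection %d: %u\n", i + 1, tmp);
    }
    return 0;
}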
-+++ linux-4.4.13/kernel/locking/rtmutex_common.h 2016-06-22 21:55:08.050523867 +0200 -@@ -27,6 +27,7 @@ - struct rb_node pi_tree_entry; - struct task_struct *task; - struct rt_mutex *lock; -+ bool savestate; - #ifdef CONFIG_DEBUG_RT_MUTEXES - unsigned long ip; - struct pid *deadlock_task_pid; -@@ -97,6 +98,9 @@ - /* - * PI-futex support (proxy locking functions, etc.): - */ -+#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1) -+#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2) -+ - extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); - extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, - struct task_struct *proxy_owner); -@@ -110,7 +114,8 @@ - struct rt_mutex_waiter *waiter); - extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to); - extern bool rt_mutex_futex_unlock(struct rt_mutex *lock, -- struct wake_q_head *wqh); -+ struct wake_q_head *wqh, -+ struct wake_q_head *wq_sleeper); - extern void rt_mutex_adjust_prio(struct task_struct *task); - - #ifdef CONFIG_DEBUG_RT_MUTEXES -@@ -119,4 +124,14 @@ - # include "rtmutex.h" - #endif - -+static inline void -+rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate) -+{ -+ debug_rt_mutex_init_waiter(waiter); -+ waiter->task = NULL; -+ waiter->savestate = savestate; -+ RB_CLEAR_NODE(&waiter->pi_tree_entry); -+ RB_CLEAR_NODE(&waiter->tree_entry); -+} -+ - #endif -diff -Nur linux-4.4.13.orig/kernel/locking/spinlock.c linux-4.4.13/kernel/locking/spinlock.c ---- linux-4.4.13.orig/kernel/locking/spinlock.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/locking/spinlock.c 2016-06-22 21:55:08.050523867 +0200 -@@ -124,8 +124,11 @@ - * __[spin|read|write]_lock_bh() - */ - BUILD_LOCK_OPS(spin, raw_spinlock); -+ -+#ifndef CONFIG_PREEMPT_RT_FULL - BUILD_LOCK_OPS(read, rwlock); - BUILD_LOCK_OPS(write, rwlock); -+#endif - - #endif - -@@ -209,6 +212,8 @@ - EXPORT_SYMBOL(_raw_spin_unlock_bh); - #endif - -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - #ifndef CONFIG_INLINE_READ_TRYLOCK - int __lockfunc _raw_read_trylock(rwlock_t *lock) - { -@@ -353,6 +358,8 @@ - EXPORT_SYMBOL(_raw_write_unlock_bh); - #endif - -+#endif /* !PREEMPT_RT_FULL */ -+ - #ifdef CONFIG_DEBUG_LOCK_ALLOC - - void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) -diff -Nur linux-4.4.13.orig/kernel/locking/spinlock_debug.c linux-4.4.13/kernel/locking/spinlock_debug.c ---- linux-4.4.13.orig/kernel/locking/spinlock_debug.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/locking/spinlock_debug.c 2016-06-22 21:55:08.050523867 +0200 -@@ -31,6 +31,7 @@ - - EXPORT_SYMBOL(__raw_spin_lock_init); - -+#ifndef CONFIG_PREEMPT_RT_FULL - void __rwlock_init(rwlock_t *lock, const char *name, - struct lock_class_key *key) - { -@@ -48,6 +49,7 @@ - } - - EXPORT_SYMBOL(__rwlock_init); -+#endif - - static void spin_dump(raw_spinlock_t *lock, const char *msg) - { -@@ -159,6 +161,7 @@ - arch_spin_unlock(&lock->raw_lock); - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - static void rwlock_bug(rwlock_t *lock, const char *msg) - { - if (!debug_locks_off()) -@@ -300,3 +303,5 @@ - debug_write_unlock(lock); - arch_write_unlock(&lock->raw_lock); - } -+ -+#endif -diff -Nur linux-4.4.13.orig/kernel/panic.c linux-4.4.13/kernel/panic.c ---- linux-4.4.13.orig/kernel/panic.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/panic.c 2016-06-22 21:55:08.050523867 +0200 -@@ -61,6 +61,37 @@ - cpu_relax(); - } - -+/* -+ * Stop ourselves in NMI context if another CPU has already panicked. 
Arch code -+ * may override this to prepare for crash dumping, e.g. save regs info. -+ */ -+void __weak nmi_panic_self_stop(struct pt_regs *regs) -+{ -+ panic_smp_self_stop(); -+} -+ -+atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID); -+ -+/* -+ * A variant of panic() called from NMI context. We return if we've already -+ * panicked on this CPU. If another CPU already panicked, loop in -+ * nmi_panic_self_stop() which can provide architecture dependent code such -+ * as saving register state for crash dump. -+ */ -+void nmi_panic(struct pt_regs *regs, const char *msg) -+{ -+ int old_cpu, cpu; -+ -+ cpu = raw_smp_processor_id(); -+ old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu); -+ -+ if (old_cpu == PANIC_CPU_INVALID) -+ panic("%s", msg); -+ else if (old_cpu != cpu) -+ nmi_panic_self_stop(regs); -+} -+EXPORT_SYMBOL(nmi_panic); -+ - /** - * panic - halt the system - * @fmt: The text string to print -@@ -71,17 +102,17 @@ - */ - void panic(const char *fmt, ...) - { -- static DEFINE_SPINLOCK(panic_lock); - static char buf[1024]; - va_list args; - long i, i_next = 0; - int state = 0; -+ int old_cpu, this_cpu; - - /* - * Disable local interrupts. This will prevent panic_smp_self_stop - * from deadlocking the first cpu that invokes the panic, since - * there is nothing to prevent an interrupt handler (that runs -- * after the panic_lock is acquired) from invoking panic again. -+ * after setting panic_cpu) from invoking panic() again. - */ - local_irq_disable(); - -@@ -94,8 +125,16 @@ - * multiple parallel invocations of panic, all other CPUs either - * stop themself or will wait until they are stopped by the 1st CPU - * with smp_send_stop(). -+ * -+ * `old_cpu == PANIC_CPU_INVALID' means this is the 1st CPU which -+ * comes here, so go ahead. -+ * `old_cpu == this_cpu' means we came from nmi_panic() which sets -+ * panic_cpu to this CPU. In this case, this is also the 1st CPU. 
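Aside: the panic_cpu protocol above elects exactly one panicking CPU with a single cmpxchg: the winner runs panic(), a re-entry on the same CPU (an NMI during panic) returns, and every other CPU self-stops. A runnable userspace model of that election, with hypothetical names:

#include <stdatomic.h>
#include <stdio.h>

#define PANIC_CPU_INVALID -1

static atomic_int panic_cpu = ATOMIC_VAR_INIT(PANIC_CPU_INVALID);

static void nmi_panic_sketch(int cpu)
{
    int old = PANIC_CPU_INVALID;

    if (atomic_compare_exchange_strong(&panic_cpu, &old, cpu))
        printf("cpu %d wins, runs panic()\n", cpu);
    else if (old != cpu)
        printf("cpu %d loses, would self-stop\n", cpu);
    else
        printf("cpu %d re-entered from NMI, returns\n", cpu);
}

int main(void)
{
    nmi_panic_sketch(0);    /* wins */
    nmi_panic_sketch(1);    /* loses */
    nmi_panic_sketch(0);    /* same CPU again: returns */
    return 0;
}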
- */ -- if (!spin_trylock(&panic_lock)) -+ this_cpu = raw_smp_processor_id(); -+ old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu); -+ -+ if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu) - panic_smp_self_stop(); - - console_verbose(); -@@ -400,9 +439,11 @@ - - static int init_oops_id(void) - { -+#ifndef CONFIG_PREEMPT_RT_FULL - if (!oops_id) - get_random_bytes(&oops_id, sizeof(oops_id)); - else -+#endif - oops_id++; - - return 0; -diff -Nur linux-4.4.13.orig/kernel/power/hibernate.c linux-4.4.13/kernel/power/hibernate.c ---- linux-4.4.13.orig/kernel/power/hibernate.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/power/hibernate.c 2016-06-22 21:55:08.054523867 +0200 -@@ -285,6 +285,8 @@ - - local_irq_disable(); - -+ system_state = SYSTEM_SUSPEND; -+ - error = syscore_suspend(); - if (error) { - printk(KERN_ERR "PM: Some system devices failed to power down, " -@@ -314,6 +316,7 @@ - syscore_resume(); - - Enable_irqs: -+ system_state = SYSTEM_RUNNING; - local_irq_enable(); - - Enable_cpus: -@@ -438,6 +441,7 @@ - goto Enable_cpus; - - local_irq_disable(); -+ system_state = SYSTEM_SUSPEND; - - error = syscore_suspend(); - if (error) -@@ -471,6 +475,7 @@ - syscore_resume(); - - Enable_irqs: -+ system_state = SYSTEM_RUNNING; - local_irq_enable(); - - Enable_cpus: -@@ -556,6 +561,7 @@ - goto Enable_cpus; - - local_irq_disable(); -+ system_state = SYSTEM_SUSPEND; - syscore_suspend(); - if (pm_wakeup_pending()) { - error = -EAGAIN; -@@ -568,6 +574,7 @@ - - Power_up: - syscore_resume(); -+ system_state = SYSTEM_RUNNING; - local_irq_enable(); - - Enable_cpus: -@@ -642,6 +649,10 @@ - cpu_relax(); - } - -+#ifndef CONFIG_SUSPEND -+bool pm_in_action; -+#endif -+ - /** - * hibernate - Carry out system hibernation, including saving the image. - */ -@@ -654,6 +665,8 @@ - return -EPERM; - } - -+ pm_in_action = true; -+ - lock_system_sleep(); - /* The snapshot device should not be opened while we're running */ - if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { -@@ -719,6 +732,7 @@ - atomic_inc(&snapshot_device_available); - Unlock: - unlock_system_sleep(); -+ pm_in_action = false; - return error; - } - -diff -Nur linux-4.4.13.orig/kernel/power/suspend.c linux-4.4.13/kernel/power/suspend.c ---- linux-4.4.13.orig/kernel/power/suspend.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/power/suspend.c 2016-06-22 21:55:08.054523867 +0200 -@@ -359,6 +359,8 @@ - arch_suspend_disable_irqs(); - BUG_ON(!irqs_disabled()); - -+ system_state = SYSTEM_SUSPEND; -+ - error = syscore_suspend(); - if (!error) { - *wakeup = pm_wakeup_pending(); -@@ -375,6 +377,8 @@ - syscore_resume(); - } - -+ system_state = SYSTEM_RUNNING; -+ - arch_suspend_enable_irqs(); - BUG_ON(irqs_disabled()); - -@@ -518,6 +522,8 @@ - return error; - } - -+bool pm_in_action; -+ - /** - * pm_suspend - Externally visible function for suspending the system. - * @state: System sleep state to enter. 
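Aside: the hibernate/suspend hunks above bracket the low-level path with system_state = SYSTEM_SUSPEND ... SYSTEM_RUNNING, presumably so state-sensitive code can detect that it is running in the suspend path; the consumers of that state are not shown in this patch. The bracketing pattern itself, as a small sketch with hypothetical names:

#include <stdio.h>

enum system_states { SYSTEM_RUNNING, SYSTEM_SUSPEND } system_state_sketch;

static int syscore_suspend_sketch(void) { return 0; /* pretend success */ }
static void syscore_resume_sketch(void) { }

static int suspend_enter_sketch(void)
{
    int error;

    system_state_sketch = SYSTEM_SUSPEND;  /* mark the deep suspend path */
    error = syscore_suspend_sketch();
    if (!error)
        syscore_resume_sketch();
    system_state_sketch = SYSTEM_RUNNING;  /* restore on both paths */
    return error;
}

int main(void)
{
    printf("suspend returned %d\n", suspend_enter_sketch());
    return 0;
}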
-@@ -532,6 +538,8 @@ - if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX) - return -EINVAL; - -+ pm_in_action = true; -+ - error = enter_state(state); - if (error) { - suspend_stats.fail++; -@@ -539,6 +547,7 @@ - } else { - suspend_stats.success++; - } -+ pm_in_action = false; - return error; - } - EXPORT_SYMBOL(pm_suspend); -diff -Nur linux-4.4.13.orig/kernel/printk/printk.c linux-4.4.13/kernel/printk/printk.c ---- linux-4.4.13.orig/kernel/printk/printk.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/printk/printk.c 2016-06-22 21:55:08.054523867 +0200 -@@ -241,6 +241,65 @@ - */ - static DEFINE_RAW_SPINLOCK(logbuf_lock); - -+#ifdef CONFIG_EARLY_PRINTK -+struct console *early_console; -+ -+static void early_vprintk(const char *fmt, va_list ap) -+{ -+ if (early_console) { -+ char buf[512]; -+ int n = vscnprintf(buf, sizeof(buf), fmt, ap); -+ -+ early_console->write(early_console, buf, n); -+ } -+} -+ -+asmlinkage void early_printk(const char *fmt, ...) -+{ -+ va_list ap; -+ -+ va_start(ap, fmt); -+ early_vprintk(fmt, ap); -+ va_end(ap); -+} -+ -+/* -+ * This is independent of any log levels - a global -+ * kill switch that turns off all of printk. -+ * -+ * Used by the NMI watchdog if early-printk is enabled. -+ */ -+static bool __read_mostly printk_killswitch; -+ -+static int __init force_early_printk_setup(char *str) -+{ -+ printk_killswitch = true; -+ return 0; -+} -+early_param("force_early_printk", force_early_printk_setup); -+ -+void printk_kill(void) -+{ -+ printk_killswitch = true; -+} -+ -+#ifdef CONFIG_PRINTK -+static int forced_early_printk(const char *fmt, va_list ap) -+{ -+ if (!printk_killswitch) -+ return 0; -+ early_vprintk(fmt, ap); -+ return 1; -+} -+#endif -+ -+#else -+static inline int forced_early_printk(const char *fmt, va_list ap) -+{ -+ return 0; -+} -+#endif -+ - #ifdef CONFIG_PRINTK - DECLARE_WAIT_QUEUE_HEAD(log_wait); - /* the next printk record to read by syslog(READ) or /proc/kmsg */ -@@ -1203,6 +1262,7 @@ - { - char *text; - int len = 0; -+ int attempts = 0; - - text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL); - if (!text) -@@ -1214,7 +1274,14 @@ - u64 seq; - u32 idx; - enum log_flags prev; -- -+ int num_msg; -+try_again: -+ attempts++; -+ if (attempts > 10) { -+ len = -EBUSY; -+ goto out; -+ } -+ num_msg = 0; - if (clear_seq < log_first_seq) { - /* messages are gone, move to first available one */ - clear_seq = log_first_seq; -@@ -1235,6 +1302,14 @@ - prev = msg->flags; - idx = log_next(idx); - seq++; -+ num_msg++; -+ if (num_msg > 5) { -+ num_msg = 0; -+ raw_spin_unlock_irq(&logbuf_lock); -+ raw_spin_lock_irq(&logbuf_lock); -+ if (clear_seq < log_first_seq) -+ goto try_again; -+ } - } - - /* move first record forward until length fits into the buffer */ -@@ -1248,6 +1323,14 @@ - prev = msg->flags; - idx = log_next(idx); - seq++; -+ num_msg++; -+ if (num_msg > 5) { -+ num_msg = 0; -+ raw_spin_unlock_irq(&logbuf_lock); -+ raw_spin_lock_irq(&logbuf_lock); -+ if (clear_seq < log_first_seq) -+ goto try_again; -+ } - } - - /* last message fitting into this dump */ -@@ -1288,6 +1371,7 @@ - clear_seq = log_next_seq; - clear_idx = log_next_idx; - } -+out: - raw_spin_unlock_irq(&logbuf_lock); - - kfree(text); -@@ -1443,6 +1527,12 @@ - if (!console_drivers) - return; - -+ if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) { -+ if (in_irq() || in_nmi()) -+ return; -+ } -+ -+ migrate_disable(); - for_each_console(con) { - if (exclusive_console && con != exclusive_console) - continue; -@@ -1458,6 +1548,7 @@ - else - con->write(con, text, len); - } -+ 
migrate_enable(); - } - - /* -@@ -1518,6 +1609,15 @@ - static int console_trylock_for_printk(void) - { - unsigned int cpu = smp_processor_id(); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ int lock = !early_boot_irqs_disabled && (preempt_count() == 0) && -+ !irqs_disabled(); -+#else -+ int lock = 1; -+#endif -+ -+ if (!lock) -+ return 0; - - if (!console_trylock()) - return 0; -@@ -1672,6 +1772,13 @@ - /* cpu currently holding logbuf_lock in this function */ - static unsigned int logbuf_cpu = UINT_MAX; - -+ /* -+ * Fall back to early_printk if a debugging subsystem has -+ * killed printk output -+ */ -+ if (unlikely(forced_early_printk(fmt, args))) -+ return 1; -+ - if (level == LOGLEVEL_SCHED) { - level = LOGLEVEL_DEFAULT; - in_sched = true; -@@ -1813,8 +1920,7 @@ - * console_sem which would prevent anyone from printing to - * console - */ -- preempt_disable(); -- -+ migrate_disable(); - /* - * Try to acquire and then immediately release the console - * semaphore. The release will print out buffers and wake up -@@ -1822,7 +1928,7 @@ - */ - if (console_trylock_for_printk()) - console_unlock(); -- preempt_enable(); -+ migrate_enable(); - lockdep_on(); - } - -@@ -1961,26 +2067,6 @@ - - #endif /* CONFIG_PRINTK */ - --#ifdef CONFIG_EARLY_PRINTK --struct console *early_console; -- --asmlinkage __visible void early_printk(const char *fmt, ...) --{ -- va_list ap; -- char buf[512]; -- int n; -- -- if (!early_console) -- return; -- -- va_start(ap, fmt); -- n = vscnprintf(buf, sizeof(buf), fmt, ap); -- va_end(ap); -- -- early_console->write(early_console, buf, n); --} --#endif -- - static int __add_preferred_console(char *name, int idx, char *options, - char *brl_options) - { -@@ -2202,11 +2288,16 @@ - goto out; - - len = cont_print_text(text, size); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ raw_spin_unlock_irqrestore(&logbuf_lock, flags); -+ call_console_drivers(cont.level, NULL, 0, text, len); -+#else - raw_spin_unlock(&logbuf_lock); - stop_critical_timings(); - call_console_drivers(cont.level, NULL, 0, text, len); - start_critical_timings(); - local_irq_restore(flags); -+#endif - return; - out: - raw_spin_unlock_irqrestore(&logbuf_lock, flags); -@@ -2316,13 +2407,17 @@ - console_idx = log_next(console_idx); - console_seq++; - console_prev = msg->flags; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ raw_spin_unlock_irqrestore(&logbuf_lock, flags); -+ call_console_drivers(level, ext_text, ext_len, text, len); -+#else - raw_spin_unlock(&logbuf_lock); - - stop_critical_timings(); /* don't trace print latency */ - call_console_drivers(level, ext_text, ext_len, text, len); - start_critical_timings(); - local_irq_restore(flags); -- -+#endif - if (do_cond_resched) - cond_resched(); - } -@@ -2374,6 +2469,11 @@ - { - struct console *c; - -+ if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) { -+ if (in_irq() || in_nmi()) -+ return; -+ } -+ - /* - * console_unblank can no longer be called in interrupt context unless - * oops_in_progress is set to 1.. 
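Aside: the printk rework above adds a global killswitch that, once set (for instance by a debugging facility), diverts all output to the raw early console instead of the normal locking path. A userspace model of early_vprintk() plus that diversion hook; the buffer size mirrors the patch, but the code is only an analogue with hypothetical names:

#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static bool printk_killswitch;  /* force_early_printk analogue */

/* early_vprintk() analogue: format into a fixed buffer and push the
 * bytes straight at the "console" (fd 2), no buffering, no locks. */
static void early_vprint(const char *fmt, va_list ap)
{
    char buf[512];
    int n = vsnprintf(buf, sizeof(buf), fmt, ap);

    if (n > (int)sizeof(buf) - 1)
        n = sizeof(buf) - 1;     /* vsnprintf reports would-be length */
    if (n > 0)
        write(2, buf, (size_t)n);
}

/* vprintk_emit() analogue: the killswitch diverts everything to the
 * early console before the normal (lock-taking) path is entered. */
static int log_message(const char *fmt, ...)
{
    va_list ap;
    int forced = printk_killswitch;

    va_start(ap, fmt);
    if (forced)
        early_vprint(fmt, ap);
    else
        vprintf(fmt, ap);
    va_end(ap);
    return forced;
}

int main(void)
{
    log_message("normal path %d\n", 1);
    printk_killswitch = true;
    log_message("early path %d\n", 2);
    return 0;
}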
-diff -Nur linux-4.4.13.orig/kernel/ptrace.c linux-4.4.13/kernel/ptrace.c ---- linux-4.4.13.orig/kernel/ptrace.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/ptrace.c 2016-06-22 21:55:08.054523867 +0200 -@@ -129,7 +129,14 @@ - - spin_lock_irq(&task->sighand->siglock); - if (task_is_traced(task) && !__fatal_signal_pending(task)) { -- task->state = __TASK_TRACED; -+ unsigned long flags; -+ -+ raw_spin_lock_irqsave(&task->pi_lock, flags); -+ if (task->state & __TASK_TRACED) -+ task->state = __TASK_TRACED; -+ else -+ task->saved_state = __TASK_TRACED; -+ raw_spin_unlock_irqrestore(&task->pi_lock, flags); - ret = true; - } - spin_unlock_irq(&task->sighand->siglock); -diff -Nur linux-4.4.13.orig/kernel/rcu/rcutorture.c linux-4.4.13/kernel/rcu/rcutorture.c ---- linux-4.4.13.orig/kernel/rcu/rcutorture.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/rcu/rcutorture.c 2016-06-22 21:55:08.054523867 +0200 -@@ -390,6 +390,7 @@ - .name = "rcu" - }; - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* - * Definitions for rcu_bh torture testing. - */ -@@ -429,6 +430,12 @@ - .name = "rcu_bh" - }; - -+#else -+static struct rcu_torture_ops rcu_bh_ops = { -+ .ttype = INVALID_RCU_FLAVOR, -+}; -+#endif -+ - /* - * Don't even think about trying any of these in real life!!! - * The names includes "busted", and they really means it! -diff -Nur linux-4.4.13.orig/kernel/rcu/tree.c linux-4.4.13/kernel/rcu/tree.c ---- linux-4.4.13.orig/kernel/rcu/tree.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/rcu/tree.c 2016-06-22 21:55:08.054523867 +0200 -@@ -56,6 +56,11 @@ - #include - #include - #include -+#include -+#include -+#include -+#include -+#include "../time/tick-internal.h" - - #include "tree.h" - #include "rcu.h" -@@ -266,6 +271,19 @@ - } - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+static void rcu_preempt_qs(void); -+ -+void rcu_bh_qs(void) -+{ -+ unsigned long flags; -+ -+ /* Callers to this function, rcu_preempt_qs(), must disable irqs. */ -+ local_irq_save(flags); -+ rcu_preempt_qs(); -+ local_irq_restore(flags); -+} -+#else - void rcu_bh_qs(void) - { - if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) { -@@ -275,6 +293,7 @@ - __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false); - } - } -+#endif - - static DEFINE_PER_CPU(int, rcu_sched_qs_mask); - -@@ -435,11 +454,13 @@ - /* - * Return the number of RCU BH batches started thus far for debug & stats. - */ -+#ifndef CONFIG_PREEMPT_RT_FULL - unsigned long rcu_batches_started_bh(void) - { - return rcu_bh_state.gpnum; - } - EXPORT_SYMBOL_GPL(rcu_batches_started_bh); -+#endif - - /* - * Return the number of RCU batches completed thus far for debug & stats. -@@ -459,6 +480,7 @@ - } - EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* - * Return the number of RCU BH batches completed thus far for debug & stats. - */ -@@ -486,6 +508,13 @@ - } - EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); - -+#else -+void rcu_force_quiescent_state(void) -+{ -+} -+EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); -+#endif -+ - /* - * Force a quiescent state for RCU-sched. 
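Aside: the ptrace hunk above must not clobber a task state that an RT sleeping lock has already saved, so it writes either ->state or ->saved_state depending on what the task is currently doing, all under pi_lock. A simplified userspace sketch; the kernel tests a state bitmask, this sketch a plain value, and all names are hypothetical:

#include <pthread.h>
#include <stdio.h>

#define TASK_RUNNING  0
#define TASK_TRACED   1
#define TASK_SLEEPING 2  /* stand-in for an RT "sleeping spinlock" state */

struct task_sketch {
    pthread_mutex_t pi_lock;
    int state;
    int saved_state;
};

/* Only overwrite ->state if the task really is in the expected state;
 * otherwise park the value in ->saved_state for later restoration. */
static void set_traced(struct task_sketch *t)
{
    pthread_mutex_lock(&t->pi_lock);
    if (t->state == TASK_TRACED)
        t->state = TASK_TRACED;
    else
        t->saved_state = TASK_TRACED;
    pthread_mutex_unlock(&t->pi_lock);
}

int main(void)
{
    struct task_sketch t = { PTHREAD_MUTEX_INITIALIZER, TASK_SLEEPING, 0 };

    set_traced(&t);
    printf("state=%d saved_state=%d\n", t.state, t.saved_state);
    return 0;
}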
- */ -@@ -536,9 +565,11 @@ - case RCU_FLAVOR: - rsp = rcu_state_p; - break; -+#ifndef CONFIG_PREEMPT_RT_FULL - case RCU_BH_FLAVOR: - rsp = &rcu_bh_state; - break; -+#endif - case RCU_SCHED_FLAVOR: - rsp = &rcu_sched_state; - break; -@@ -1590,7 +1621,6 @@ - int needmore; - struct rcu_data *rdp = this_cpu_ptr(rsp->rda); - -- rcu_nocb_gp_cleanup(rsp, rnp); - rnp->need_future_gp[c & 0x1] = 0; - needmore = rnp->need_future_gp[(c + 1) & 0x1]; - trace_rcu_future_gp(rnp, rdp, c, -@@ -1611,7 +1641,7 @@ - !READ_ONCE(rsp->gp_flags) || - !rsp->gp_kthread) - return; -- wake_up(&rsp->gp_wq); -+ swake_up(&rsp->gp_wq); - } - - /* -@@ -1991,6 +2021,7 @@ - int nocb = 0; - struct rcu_data *rdp; - struct rcu_node *rnp = rcu_get_root(rsp); -+ struct swait_queue_head *sq; - - WRITE_ONCE(rsp->gp_activity, jiffies); - raw_spin_lock_irq(&rnp->lock); -@@ -2029,7 +2060,9 @@ - needgp = __note_gp_changes(rsp, rnp, rdp) || needgp; - /* smp_mb() provided by prior unlock-lock pair. */ - nocb += rcu_future_gp_cleanup(rsp, rnp); -+ sq = rcu_nocb_gp_get(rnp); - raw_spin_unlock_irq(&rnp->lock); -+ rcu_nocb_gp_cleanup(sq); - cond_resched_rcu_qs(); - WRITE_ONCE(rsp->gp_activity, jiffies); - rcu_gp_slow(rsp, gp_cleanup_delay); -@@ -2076,7 +2109,7 @@ - READ_ONCE(rsp->gpnum), - TPS("reqwait")); - rsp->gp_state = RCU_GP_WAIT_GPS; -- wait_event_interruptible(rsp->gp_wq, -+ swait_event_interruptible(rsp->gp_wq, - READ_ONCE(rsp->gp_flags) & - RCU_GP_FLAG_INIT); - rsp->gp_state = RCU_GP_DONE_GPS; -@@ -2106,7 +2139,7 @@ - READ_ONCE(rsp->gpnum), - TPS("fqswait")); - rsp->gp_state = RCU_GP_WAIT_FQS; -- ret = wait_event_interruptible_timeout(rsp->gp_wq, -+ ret = swait_event_interruptible_timeout(rsp->gp_wq, - rcu_gp_fqs_check_wake(rsp, &gf), j); - rsp->gp_state = RCU_GP_DOING_FQS; - /* Locking provides needed memory barriers. */ -@@ -2230,7 +2263,7 @@ - WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); - WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); - raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags); -- rcu_gp_kthread_wake(rsp); -+ swake_up(&rsp->gp_wq); /* Memory barrier implied by swake_up() path. */ - } - - /* -@@ -2891,7 +2924,7 @@ - } - WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); - raw_spin_unlock_irqrestore(&rnp_old->lock, flags); -- rcu_gp_kthread_wake(rsp); -+ swake_up(&rsp->gp_wq); /* Memory barrier implied by swake_up() path. */ - } - - /* -@@ -2934,18 +2967,17 @@ - /* - * Do RCU core processing for the current CPU. - */ --static void rcu_process_callbacks(struct softirq_action *unused) -+static void rcu_process_callbacks(void) - { - struct rcu_state *rsp; - - if (cpu_is_offline(smp_processor_id())) - return; -- trace_rcu_utilization(TPS("Start RCU core")); - for_each_rcu_flavor(rsp) - __rcu_process_callbacks(rsp); -- trace_rcu_utilization(TPS("End RCU core")); - } - -+static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); - /* - * Schedule RCU callback invocation. 
If the specified type of RCU - * does not support RCU priority boosting, just do a direct call, -@@ -2957,18 +2989,105 @@ - { - if (unlikely(!READ_ONCE(rcu_scheduler_fully_active))) - return; -- if (likely(!rsp->boost)) { -- rcu_do_batch(rsp, rdp); -+ rcu_do_batch(rsp, rdp); -+} -+ -+static void rcu_wake_cond(struct task_struct *t, int status) -+{ -+ /* -+ * If the thread is yielding, only wake it when this -+ * is invoked from idle -+ */ -+ if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current))) -+ wake_up_process(t); -+} -+ -+/* -+ * Wake up this CPU's rcuc kthread to do RCU core processing. -+ */ -+static void invoke_rcu_core(void) -+{ -+ unsigned long flags; -+ struct task_struct *t; -+ -+ if (!cpu_online(smp_processor_id())) - return; -+ local_irq_save(flags); -+ __this_cpu_write(rcu_cpu_has_work, 1); -+ t = __this_cpu_read(rcu_cpu_kthread_task); -+ if (t != NULL && current != t) -+ rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status)); -+ local_irq_restore(flags); -+} -+ -+static void rcu_cpu_kthread_park(unsigned int cpu) -+{ -+ per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; -+} -+ -+static int rcu_cpu_kthread_should_run(unsigned int cpu) -+{ -+ return __this_cpu_read(rcu_cpu_has_work); -+} -+ -+/* -+ * Per-CPU kernel thread that invokes RCU callbacks. This replaces the -+ * RCU softirq used in flavors and configurations of RCU that do not -+ * support RCU priority boosting. -+ */ -+static void rcu_cpu_kthread(unsigned int cpu) -+{ -+ unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); -+ char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); -+ int spincnt; -+ -+ for (spincnt = 0; spincnt < 10; spincnt++) { -+ trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); -+ local_bh_disable(); -+ *statusp = RCU_KTHREAD_RUNNING; -+ this_cpu_inc(rcu_cpu_kthread_loops); -+ local_irq_disable(); -+ work = *workp; -+ *workp = 0; -+ local_irq_enable(); -+ if (work) -+ rcu_process_callbacks(); -+ local_bh_enable(); -+ if (*workp == 0) { -+ trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); -+ *statusp = RCU_KTHREAD_WAITING; -+ return; -+ } - } -- invoke_rcu_callbacks_kthread(); -+ *statusp = RCU_KTHREAD_YIELDING; -+ trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); -+ schedule_timeout_interruptible(2); -+ trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); -+ *statusp = RCU_KTHREAD_WAITING; - } - --static void invoke_rcu_core(void) -+static struct smp_hotplug_thread rcu_cpu_thread_spec = { -+ .store = &rcu_cpu_kthread_task, -+ .thread_should_run = rcu_cpu_kthread_should_run, -+ .thread_fn = rcu_cpu_kthread, -+ .thread_comm = "rcuc/%u", -+ .setup = rcu_cpu_kthread_setup, -+ .park = rcu_cpu_kthread_park, -+}; -+ -+/* -+ * Spawn per-CPU RCU core processing kthreads. -+ */ -+static int __init rcu_spawn_core_kthreads(void) - { -- if (cpu_online(smp_processor_id())) -- raise_softirq(RCU_SOFTIRQ); -+ int cpu; -+ -+ for_each_possible_cpu(cpu) -+ per_cpu(rcu_cpu_has_work, cpu) = 0; -+ BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); -+ return 0; - } -+early_initcall(rcu_spawn_core_kthreads); - - /* - * Handle any core-RCU processing required by a call_rcu() invocation. -@@ -3114,6 +3233,7 @@ - } - EXPORT_SYMBOL_GPL(call_rcu_sched); - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* - * Queue an RCU callback for invocation after a quicker grace period. - */ -@@ -3122,6 +3242,7 @@ - __call_rcu(head, func, &rcu_bh_state, -1, 0); - } - EXPORT_SYMBOL_GPL(call_rcu_bh); -+#endif - - /* - * Queue an RCU callback for lazy invocation after a grace period. 
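Aside: the rcuc kthread introduced above drains callback work in a bounded loop and deliberately yields after a number of passes instead of hogging the CPU. A userspace analogue of that drain-then-yield loop, assuming POSIX threads; names are hypothetical:

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int has_work = ATOMIC_VAR_INIT(1);

static void process_callbacks(void) { puts("ran callbacks"); }

static void *rcuc_sketch(void *arg)
{
    (void)arg;
    for (int spincnt = 0; spincnt < 10; spincnt++) {
        int work = atomic_exchange(&has_work, 0); /* claim pending work */

        if (work)
            process_callbacks();
        if (!atomic_load(&has_work))
            return NULL;        /* nothing left: back to waiting */
    }
    sched_yield();              /* "YIELDING": give other tasks a turn */
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, rcuc_sketch, NULL);
    pthread_join(t, NULL);
    return 0;
}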
-@@ -3213,6 +3334,7 @@ - } - EXPORT_SYMBOL_GPL(synchronize_sched); - -+#ifndef CONFIG_PREEMPT_RT_FULL - /** - * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. - * -@@ -3239,6 +3361,7 @@ - wait_rcu_gp(call_rcu_bh); - } - EXPORT_SYMBOL_GPL(synchronize_rcu_bh); -+#endif - - /** - * get_state_synchronize_rcu - Snapshot current RCU state -@@ -3524,7 +3647,7 @@ - raw_spin_unlock_irqrestore(&rnp->lock, flags); - if (wake) { - smp_mb(); /* EGP done before wake_up(). */ -- wake_up(&rsp->expedited_wq); -+ swake_up(&rsp->expedited_wq); - } - break; - } -@@ -3781,7 +3904,7 @@ - jiffies_start = jiffies; - - for (;;) { -- ret = wait_event_interruptible_timeout( -+ ret = swait_event_timeout( - rsp->expedited_wq, - sync_rcu_preempt_exp_done(rnp_root), - jiffies_stall); -@@ -3789,7 +3912,7 @@ - return; - if (ret < 0) { - /* Hit a signal, disable CPU stall warnings. */ -- wait_event(rsp->expedited_wq, -+ swait_event(rsp->expedited_wq, - sync_rcu_preempt_exp_done(rnp_root)); - return; - } -@@ -4101,6 +4224,7 @@ - mutex_unlock(&rsp->barrier_mutex); - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - /** - * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. - */ -@@ -4109,6 +4233,7 @@ - _rcu_barrier(&rcu_bh_state); - } - EXPORT_SYMBOL_GPL(rcu_barrier_bh); -+#endif - - /** - * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. -@@ -4455,8 +4580,8 @@ - } - } - -- init_waitqueue_head(&rsp->gp_wq); -- init_waitqueue_head(&rsp->expedited_wq); -+ init_swait_queue_head(&rsp->gp_wq); -+ init_swait_queue_head(&rsp->expedited_wq); - rnp = rsp->level[rcu_num_lvls - 1]; - for_each_possible_cpu(i) { - while (i > rnp->grphi) -@@ -4576,12 +4701,13 @@ - - rcu_bootup_announce(); - rcu_init_geometry(); -+#ifndef CONFIG_PREEMPT_RT_FULL - rcu_init_one(&rcu_bh_state, &rcu_bh_data); -+#endif - rcu_init_one(&rcu_sched_state, &rcu_sched_data); - if (dump_tree) - rcu_dump_rcu_node_tree(&rcu_sched_state); - __rcu_init_preempt(); -- open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); - - /* - * We don't need protection against CPU-hotplug here because -diff -Nur linux-4.4.13.orig/kernel/rcu/tree.h linux-4.4.13/kernel/rcu/tree.h ---- linux-4.4.13.orig/kernel/rcu/tree.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/rcu/tree.h 2016-06-22 21:55:08.054523867 +0200 -@@ -27,6 +27,7 @@ - #include - #include - #include -+#include - #include - - /* -@@ -241,7 +242,7 @@ - /* Refused to boost: not sure why, though. */ - /* This can happen due to race conditions. */ - #ifdef CONFIG_RCU_NOCB_CPU -- wait_queue_head_t nocb_gp_wq[2]; -+ struct swait_queue_head nocb_gp_wq[2]; - /* Place for rcu_nocb_kthread() to wait GP. */ - #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ - int need_future_gp[2]; -@@ -393,7 +394,7 @@ - atomic_long_t nocb_q_count_lazy; /* invocation (all stages). */ - struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */ - struct rcu_head **nocb_follower_tail; -- wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */ -+ struct swait_queue_head nocb_wq; /* For nocb kthreads to sleep on. */ - struct task_struct *nocb_kthread; - int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */ - -@@ -472,7 +473,7 @@ - unsigned long gpnum; /* Current gp number. */ - unsigned long completed; /* # of last completed gp. */ - struct task_struct *gp_kthread; /* Task for grace periods. */ -- wait_queue_head_t gp_wq; /* Where GP task waits. */ -+ struct swait_queue_head gp_wq; /* Where GP task waits. */ - short gp_flags; /* Commands for GP task. 
*/ - short gp_state; /* GP kthread sleep state. */ - -@@ -504,7 +505,7 @@ - atomic_long_t expedited_workdone3; /* # done by others #3. */ - atomic_long_t expedited_normal; /* # fallbacks to normal. */ - atomic_t expedited_need_qs; /* # CPUs left to check in. */ -- wait_queue_head_t expedited_wq; /* Wait for check-ins. */ -+ struct swait_queue_head expedited_wq; /* Wait for check-ins. */ - int ncpus_snap; /* # CPUs seen last time. */ - - unsigned long jiffies_force_qs; /* Time at which to invoke */ -@@ -556,18 +557,18 @@ - */ - extern struct rcu_state rcu_sched_state; - -+#ifndef CONFIG_PREEMPT_RT_FULL - extern struct rcu_state rcu_bh_state; -+#endif - - #ifdef CONFIG_PREEMPT_RCU - extern struct rcu_state rcu_preempt_state; - #endif /* #ifdef CONFIG_PREEMPT_RCU */ - --#ifdef CONFIG_RCU_BOOST - DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); - DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu); - DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); - DECLARE_PER_CPU(char, rcu_cpu_has_work); --#endif /* #ifdef CONFIG_RCU_BOOST */ - - #ifndef RCU_TREE_NONCORE - -@@ -587,10 +588,9 @@ - static void __init __rcu_init_preempt(void); - static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); - static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); --static void invoke_rcu_callbacks_kthread(void); - static bool rcu_is_callbacks_kthread(void); -+static void rcu_cpu_kthread_setup(unsigned int cpu); - #ifdef CONFIG_RCU_BOOST --static void rcu_preempt_do_callbacks(void); - static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, - struct rcu_node *rnp); - #endif /* #ifdef CONFIG_RCU_BOOST */ -@@ -607,7 +607,8 @@ - static void increment_cpu_stall_ticks(void); - static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu); - static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq); --static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp); -+static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp); -+static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq); - static void rcu_init_one_nocb(struct rcu_node *rnp); - static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp, - bool lazy, unsigned long flags); -diff -Nur linux-4.4.13.orig/kernel/rcu/tree_plugin.h linux-4.4.13/kernel/rcu/tree_plugin.h ---- linux-4.4.13.orig/kernel/rcu/tree_plugin.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/rcu/tree_plugin.h 2016-06-22 21:55:08.054523867 +0200 -@@ -24,25 +24,10 @@ - * Paul E. McKenney - */ - --#include --#include --#include --#include --#include "../time/tick-internal.h" -- - #ifdef CONFIG_RCU_BOOST - - #include "../locking/rtmutex_common.h" - --/* -- * Control variables for per-CPU and per-rcu_node kthreads. These -- * handle all flavors of RCU. -- */ --static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); --DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); --DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); --DEFINE_PER_CPU(char, rcu_cpu_has_work); -- - #else /* #ifdef CONFIG_RCU_BOOST */ - - /* -@@ -55,6 +40,14 @@ - - #endif /* #else #ifdef CONFIG_RCU_BOOST */ - -+/* -+ * Control variables for per-CPU and per-rcu_node kthreads. These -+ * handle all flavors of RCU. -+ */ -+DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); -+DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); -+DEFINE_PER_CPU(char, rcu_cpu_has_work); -+ - #ifdef CONFIG_RCU_NOCB_CPU - static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. 
*/ - static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */ -@@ -432,7 +425,7 @@ - } - - /* Hardware IRQ handlers cannot block, complain if they get here. */ -- if (in_irq() || in_serving_softirq()) { -+ if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) { - lockdep_rcu_suspicious(__FILE__, __LINE__, - "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n"); - pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n", -@@ -645,15 +638,6 @@ - t->rcu_read_unlock_special.b.need_qs = true; - } - --#ifdef CONFIG_RCU_BOOST -- --static void rcu_preempt_do_callbacks(void) --{ -- rcu_do_batch(rcu_state_p, this_cpu_ptr(rcu_data_p)); --} -- --#endif /* #ifdef CONFIG_RCU_BOOST */ -- - /* - * Queue a preemptible-RCU callback for invocation after a grace period. - */ -@@ -930,6 +914,19 @@ - - #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ - -+/* -+ * If boosting, set rcuc kthreads to realtime priority. -+ */ -+static void rcu_cpu_kthread_setup(unsigned int cpu) -+{ -+#ifdef CONFIG_RCU_BOOST -+ struct sched_param sp; -+ -+ sp.sched_priority = kthread_prio; -+ sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); -+#endif /* #ifdef CONFIG_RCU_BOOST */ -+} -+ - #ifdef CONFIG_RCU_BOOST - - #include "../locking/rtmutex_common.h" -@@ -961,16 +958,6 @@ - - #endif /* #else #ifdef CONFIG_RCU_TRACE */ - --static void rcu_wake_cond(struct task_struct *t, int status) --{ -- /* -- * If the thread is yielding, only wake it when this -- * is invoked from idle -- */ -- if (status != RCU_KTHREAD_YIELDING || is_idle_task(current)) -- wake_up_process(t); --} -- - /* - * Carry out RCU priority boosting on the task indicated by ->exp_tasks - * or ->boost_tasks, advancing the pointer to the next task in the -@@ -1115,23 +1102,6 @@ - } - - /* -- * Wake up the per-CPU kthread to invoke RCU callbacks. -- */ --static void invoke_rcu_callbacks_kthread(void) --{ -- unsigned long flags; -- -- local_irq_save(flags); -- __this_cpu_write(rcu_cpu_has_work, 1); -- if (__this_cpu_read(rcu_cpu_kthread_task) != NULL && -- current != __this_cpu_read(rcu_cpu_kthread_task)) { -- rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task), -- __this_cpu_read(rcu_cpu_kthread_status)); -- } -- local_irq_restore(flags); --} -- --/* - * Is the current CPU running the RCU-callbacks kthread? - * Caller must have preemption disabled. - */ -@@ -1186,67 +1156,6 @@ - return 0; - } - --static void rcu_kthread_do_work(void) --{ -- rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data)); -- rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data)); -- rcu_preempt_do_callbacks(); --} -- --static void rcu_cpu_kthread_setup(unsigned int cpu) --{ -- struct sched_param sp; -- -- sp.sched_priority = kthread_prio; -- sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); --} -- --static void rcu_cpu_kthread_park(unsigned int cpu) --{ -- per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; --} -- --static int rcu_cpu_kthread_should_run(unsigned int cpu) --{ -- return __this_cpu_read(rcu_cpu_has_work); --} -- --/* -- * Per-CPU kernel thread that invokes RCU callbacks. This replaces the -- * RCU softirq used in flavors and configurations of RCU that do not -- * support RCU priority boosting. 
-- */ --static void rcu_cpu_kthread(unsigned int cpu) --{ -- unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); -- char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); -- int spincnt; -- -- for (spincnt = 0; spincnt < 10; spincnt++) { -- trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); -- local_bh_disable(); -- *statusp = RCU_KTHREAD_RUNNING; -- this_cpu_inc(rcu_cpu_kthread_loops); -- local_irq_disable(); -- work = *workp; -- *workp = 0; -- local_irq_enable(); -- if (work) -- rcu_kthread_do_work(); -- local_bh_enable(); -- if (*workp == 0) { -- trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); -- *statusp = RCU_KTHREAD_WAITING; -- return; -- } -- } -- *statusp = RCU_KTHREAD_YIELDING; -- trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); -- schedule_timeout_interruptible(2); -- trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); -- *statusp = RCU_KTHREAD_WAITING; --} -- - /* - * Set the per-rcu_node kthread's affinity to cover all CPUs that are - * served by the rcu_node in question. The CPU hotplug lock is still -@@ -1276,26 +1185,12 @@ - free_cpumask_var(cm); - } - --static struct smp_hotplug_thread rcu_cpu_thread_spec = { -- .store = &rcu_cpu_kthread_task, -- .thread_should_run = rcu_cpu_kthread_should_run, -- .thread_fn = rcu_cpu_kthread, -- .thread_comm = "rcuc/%u", -- .setup = rcu_cpu_kthread_setup, -- .park = rcu_cpu_kthread_park, --}; -- - /* - * Spawn boost kthreads -- called as soon as the scheduler is running. - */ - static void __init rcu_spawn_boost_kthreads(void) - { - struct rcu_node *rnp; -- int cpu; -- -- for_each_possible_cpu(cpu) -- per_cpu(rcu_cpu_has_work, cpu) = 0; -- BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); - rcu_for_each_leaf_node(rcu_state_p, rnp) - (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); - } -@@ -1318,11 +1213,6 @@ - raw_spin_unlock_irqrestore(&rnp->lock, flags); - } - --static void invoke_rcu_callbacks_kthread(void) --{ -- WARN_ON_ONCE(1); --} -- - static bool rcu_is_callbacks_kthread(void) - { - return false; -@@ -1346,7 +1236,7 @@ - - #endif /* #else #ifdef CONFIG_RCU_BOOST */ - --#if !defined(CONFIG_RCU_FAST_NO_HZ) -+#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) - - /* - * Check to see if any future RCU-related work will need to be done -@@ -1363,7 +1253,9 @@ - return IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL) - ? 0 : rcu_cpu_has_callbacks(NULL); - } -+#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */ - -+#if !defined(CONFIG_RCU_FAST_NO_HZ) - /* - * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up - * after it. -@@ -1459,6 +1351,8 @@ - return cbs_ready; - } - -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - /* - * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready - * to invoke. If the CPU has callbacks, try to advance them. Tell the -@@ -1504,6 +1398,7 @@ - *nextevt = basemono + dj * TICK_NSEC; - return 0; - } -+#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */ - - /* - * Prepare a CPU for idle from an RCU perspective. The first major task -@@ -1822,9 +1717,9 @@ - * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended - * grace period. 
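Aside: the rcu_nocb_gp_get()/rcu_nocb_gp_cleanup() split above exists so the wait queue is looked up under the node lock but the wakeup happens after the lock is dropped, keeping woken waiters from immediately contending on it. The same split in a userspace sketch with hypothetical names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rnp_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t gp_wq[2] = { PTHREAD_COND_INITIALIZER,
                                   PTHREAD_COND_INITIALIZER };
static unsigned long completed;

static void gp_cleanup_sketch(void)
{
    pthread_cond_t *sq;

    pthread_mutex_lock(&rnp_lock);
    completed++;
    sq = &gp_wq[completed & 0x1];   /* rcu_nocb_gp_get() analogue */
    pthread_mutex_unlock(&rnp_lock);
    pthread_cond_broadcast(sq);     /* rcu_nocb_gp_cleanup() analogue */
}

int main(void)
{
    gp_cleanup_sketch();
    printf("completed=%lu\n", completed);
    return 0;
}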
- */ --static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) -+static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq) - { -- wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]); -+ swake_up_all(sq); - } - - /* -@@ -1840,10 +1735,15 @@ - rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq; - } - -+static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp) -+{ -+ return &rnp->nocb_gp_wq[rnp->completed & 0x1]; -+} -+ - static void rcu_init_one_nocb(struct rcu_node *rnp) - { -- init_waitqueue_head(&rnp->nocb_gp_wq[0]); -- init_waitqueue_head(&rnp->nocb_gp_wq[1]); -+ init_swait_queue_head(&rnp->nocb_gp_wq[0]); -+ init_swait_queue_head(&rnp->nocb_gp_wq[1]); - } - - #ifndef CONFIG_RCU_NOCB_CPU_ALL -@@ -1868,7 +1768,7 @@ - if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) { - /* Prior smp_mb__after_atomic() orders against prior enqueue. */ - WRITE_ONCE(rdp_leader->nocb_leader_sleep, false); -- wake_up(&rdp_leader->nocb_wq); -+ swake_up(&rdp_leader->nocb_wq); - } - } - -@@ -2081,7 +1981,7 @@ - */ - trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait")); - for (;;) { -- wait_event_interruptible( -+ swait_event_interruptible( - rnp->nocb_gp_wq[c & 0x1], - (d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c))); - if (likely(d)) -@@ -2109,7 +2009,7 @@ - /* Wait for callbacks to appear. */ - if (!rcu_nocb_poll) { - trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep"); -- wait_event_interruptible(my_rdp->nocb_wq, -+ swait_event_interruptible(my_rdp->nocb_wq, - !READ_ONCE(my_rdp->nocb_leader_sleep)); - /* Memory barrier handled by smp_mb() calls below and repoll. */ - } else if (firsttime) { -@@ -2184,7 +2084,7 @@ - * List was empty, wake up the follower. - * Memory barriers supplied by atomic_long_add(). - */ -- wake_up(&rdp->nocb_wq); -+ swake_up(&rdp->nocb_wq); - } - } - -@@ -2205,7 +2105,7 @@ - if (!rcu_nocb_poll) { - trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, - "FollowerSleep"); -- wait_event_interruptible(rdp->nocb_wq, -+ swait_event_interruptible(rdp->nocb_wq, - READ_ONCE(rdp->nocb_follower_head)); - } else if (firsttime) { - /* Don't drown trace log with "Poll"! */ -@@ -2364,7 +2264,7 @@ - static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) - { - rdp->nocb_tail = &rdp->nocb_head; -- init_waitqueue_head(&rdp->nocb_wq); -+ init_swait_queue_head(&rdp->nocb_wq); - rdp->nocb_follower_tail = &rdp->nocb_follower_head; - } - -@@ -2514,7 +2414,7 @@ - return false; - } - --static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) -+static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq) - { - } - -@@ -2522,6 +2422,11 @@ - { - } - -+static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp) -+{ -+ return NULL; -+} -+ - static void rcu_init_one_nocb(struct rcu_node *rnp) - { - } -diff -Nur linux-4.4.13.orig/kernel/rcu/update.c linux-4.4.13/kernel/rcu/update.c ---- linux-4.4.13.orig/kernel/rcu/update.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/rcu/update.c 2016-06-22 21:55:08.054523867 +0200 -@@ -276,6 +276,7 @@ - } - EXPORT_SYMBOL_GPL(rcu_read_lock_held); - -+#ifndef CONFIG_PREEMPT_RT_FULL - /** - * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section? 
- * -@@ -302,6 +303,7 @@ - return in_softirq() || irqs_disabled(); - } - EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); -+#endif - - #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ - -diff -Nur linux-4.4.13.orig/kernel/relay.c linux-4.4.13/kernel/relay.c ---- linux-4.4.13.orig/kernel/relay.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/relay.c 2016-06-22 21:55:08.058523867 +0200 -@@ -336,6 +336,10 @@ - { - struct rchan_buf *buf = (struct rchan_buf *)data; - wake_up_interruptible(&buf->read_wait); -+ /* -+ * Stupid polling for now: -+ */ -+ mod_timer(&buf->timer, jiffies + 1); - } - - /** -@@ -353,6 +357,7 @@ - init_waitqueue_head(&buf->read_wait); - kref_init(&buf->kref); - setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf); -+ mod_timer(&buf->timer, jiffies + 1); - } else - del_timer_sync(&buf->timer); - -@@ -736,15 +741,6 @@ - else - buf->early_bytes += buf->chan->subbuf_size - - buf->padding[old_subbuf]; -- smp_mb(); -- if (waitqueue_active(&buf->read_wait)) -- /* -- * Calling wake_up_interruptible() from here -- * will deadlock if we happen to be logging -- * from the scheduler (trying to re-grab -- * rq->lock), so defer it. -- */ -- mod_timer(&buf->timer, jiffies + 1); - } - - old = buf->data; -diff -Nur linux-4.4.13.orig/kernel/sched/completion.c linux-4.4.13/kernel/sched/completion.c ---- linux-4.4.13.orig/kernel/sched/completion.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/sched/completion.c 2016-06-22 21:55:08.058523867 +0200 -@@ -30,10 +30,10 @@ - { - unsigned long flags; - -- spin_lock_irqsave(&x->wait.lock, flags); -+ raw_spin_lock_irqsave(&x->wait.lock, flags); - x->done++; -- __wake_up_locked(&x->wait, TASK_NORMAL, 1); -- spin_unlock_irqrestore(&x->wait.lock, flags); -+ swake_up_locked(&x->wait); -+ raw_spin_unlock_irqrestore(&x->wait.lock, flags); - } - EXPORT_SYMBOL(complete); - -@@ -50,10 +50,10 @@ - { - unsigned long flags; - -- spin_lock_irqsave(&x->wait.lock, flags); -+ raw_spin_lock_irqsave(&x->wait.lock, flags); - x->done += UINT_MAX/2; -- __wake_up_locked(&x->wait, TASK_NORMAL, 0); -- spin_unlock_irqrestore(&x->wait.lock, flags); -+ swake_up_all_locked(&x->wait); -+ raw_spin_unlock_irqrestore(&x->wait.lock, flags); - } - EXPORT_SYMBOL(complete_all); - -@@ -62,20 +62,20 @@ - long (*action)(long), long timeout, int state) - { - if (!x->done) { -- DECLARE_WAITQUEUE(wait, current); -+ DECLARE_SWAITQUEUE(wait); - -- __add_wait_queue_tail_exclusive(&x->wait, &wait); -+ __prepare_to_swait(&x->wait, &wait); - do { - if (signal_pending_state(state, current)) { - timeout = -ERESTARTSYS; - break; - } - __set_current_state(state); -- spin_unlock_irq(&x->wait.lock); -+ raw_spin_unlock_irq(&x->wait.lock); - timeout = action(timeout); -- spin_lock_irq(&x->wait.lock); -+ raw_spin_lock_irq(&x->wait.lock); - } while (!x->done && timeout); -- __remove_wait_queue(&x->wait, &wait); -+ __finish_swait(&x->wait, &wait); - if (!x->done) - return timeout; - } -@@ -89,9 +89,9 @@ - { - might_sleep(); - -- spin_lock_irq(&x->wait.lock); -+ raw_spin_lock_irq(&x->wait.lock); - timeout = do_wait_for_common(x, action, timeout, state); -- spin_unlock_irq(&x->wait.lock); -+ raw_spin_unlock_irq(&x->wait.lock); - return timeout; - } - -@@ -277,12 +277,12 @@ - if (!READ_ONCE(x->done)) - return 0; - -- spin_lock_irqsave(&x->wait.lock, flags); -+ raw_spin_lock_irqsave(&x->wait.lock, flags); - if (!x->done) - ret = 0; - else - x->done--; -- spin_unlock_irqrestore(&x->wait.lock, flags); -+ raw_spin_unlock_irqrestore(&x->wait.lock, flags); - return ret; - } - 
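/*
 * Aside: a userspace model of the converted completion above: a counter
 * plus a wait queue behind one lock, where complete() bumps the counter
 * and wakes a waiter. The patch's point is swapping the kernel waitqueue
 * for a raw-lock "swait" queue; the semantics sketched here are
 * unchanged. Hypothetical names, POSIX threads.
 */
#include <pthread.h>
#include <stdio.h>

struct completion_sketch {
    pthread_mutex_t lock;
    pthread_cond_t wait;
    unsigned int done;
};

static void complete_sketch(struct completion_sketch *x)
{
    pthread_mutex_lock(&x->lock);
    x->done++;
    pthread_cond_signal(&x->wait);   /* swake_up_locked() analogue */
    pthread_mutex_unlock(&x->lock);
}

static void wait_for_completion_sketch(struct completion_sketch *x)
{
    pthread_mutex_lock(&x->lock);
    while (!x->done)
        pthread_cond_wait(&x->wait, &x->lock);
    x->done--;
    pthread_mutex_unlock(&x->lock);
}

int main(void)
{
    struct completion_sketch x = { PTHREAD_MUTEX_INITIALIZER,
                                   PTHREAD_COND_INITIALIZER, 0 };

    complete_sketch(&x);
    wait_for_completion_sketch(&x);
    puts("completed");
    return 0;
}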
EXPORT_SYMBOL(try_wait_for_completion); -@@ -311,7 +311,7 @@ - * after it's acquired the lock. - */ - smp_rmb(); -- spin_unlock_wait(&x->wait.lock); -+ raw_spin_unlock_wait(&x->wait.lock); - return true; - } - EXPORT_SYMBOL(completion_done); -diff -Nur linux-4.4.13.orig/kernel/sched/core.c linux-4.4.13/kernel/sched/core.c ---- linux-4.4.13.orig/kernel/sched/core.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/sched/core.c 2016-06-22 21:55:08.058523867 +0200 -@@ -260,7 +260,11 @@ - * Number of tasks to iterate in a single balance run. - * Limited because this is done with IRQs disabled. - */ -+#ifndef CONFIG_PREEMPT_RT_FULL - const_debug unsigned int sysctl_sched_nr_migrate = 32; -+#else -+const_debug unsigned int sysctl_sched_nr_migrate = 8; -+#endif - - /* - * period over which we average the RT time consumption, measured -@@ -438,6 +442,7 @@ - - hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - rq->hrtick_timer.function = hrtick; -+ rq->hrtick_timer.irqsafe = 1; - } - #else /* CONFIG_SCHED_HRTICK */ - static inline void hrtick_clear(struct rq *rq) -@@ -542,7 +547,7 @@ - head->lastp = &node->next; - } - --void wake_up_q(struct wake_q_head *head) -+void __wake_up_q(struct wake_q_head *head, bool sleeper) - { - struct wake_q_node *node = head->first; - -@@ -559,7 +564,10 @@ - * wake_up_process() implies a wmb() to pair with the queueing - * in wake_q_add() so as not to miss wakeups. - */ -- wake_up_process(task); -+ if (sleeper) -+ wake_up_lock_sleeper(task); -+ else -+ wake_up_process(task); - put_task_struct(task); - } - } -@@ -595,6 +603,38 @@ - trace_sched_wake_idle_without_ipi(cpu); - } - -+#ifdef CONFIG_PREEMPT_LAZY -+void resched_curr_lazy(struct rq *rq) -+{ -+ struct task_struct *curr = rq->curr; -+ int cpu; -+ -+ if (!sched_feat(PREEMPT_LAZY)) { -+ resched_curr(rq); -+ return; -+ } -+ -+ lockdep_assert_held(&rq->lock); -+ -+ if (test_tsk_need_resched(curr)) -+ return; -+ -+ if (test_tsk_need_resched_lazy(curr)) -+ return; -+ -+ set_tsk_need_resched_lazy(curr); -+ -+ cpu = cpu_of(rq); -+ if (cpu == smp_processor_id()) -+ return; -+ -+ /* NEED_RESCHED_LAZY must be visible before we test polling */ -+ smp_mb(); -+ if (!tsk_is_polling(curr)) -+ smp_send_reschedule(cpu); -+} -+#endif -+ - void resched_cpu(int cpu) - { - struct rq *rq = cpu_rq(cpu); -@@ -618,11 +658,14 @@ - */ - int get_nohz_timer_target(void) - { -- int i, cpu = smp_processor_id(); -+ int i, cpu; - struct sched_domain *sd; - -+ preempt_disable_rt(); -+ cpu = smp_processor_id(); -+ - if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu)) -- return cpu; -+ goto preempt_en_rt; - - rcu_read_lock(); - for_each_domain(cpu, sd) { -@@ -638,6 +681,8 @@ - cpu = housekeeping_any_cpu(); - unlock: - rcu_read_unlock(); -+preempt_en_rt: -+ preempt_enable_rt(); - return cpu; - } - /* -@@ -1171,6 +1216,11 @@ - - lockdep_assert_held(&p->pi_lock); - -+ if (__migrate_disabled(p)) { -+ cpumask_copy(&p->cpus_allowed, new_mask); -+ return; -+ } -+ - queued = task_on_rq_queued(p); - running = task_current(rq, p); - -@@ -1193,6 +1243,84 @@ - enqueue_task(rq, p, ENQUEUE_RESTORE); - } - -+static DEFINE_PER_CPU(struct cpumask, sched_cpumasks); -+static DEFINE_MUTEX(sched_down_mutex); -+static cpumask_t sched_down_cpumask; -+ -+void tell_sched_cpu_down_begin(int cpu) -+{ -+ mutex_lock(&sched_down_mutex); -+ cpumask_set_cpu(cpu, &sched_down_cpumask); -+ mutex_unlock(&sched_down_mutex); -+} -+ -+void tell_sched_cpu_down_done(int cpu) -+{ -+ mutex_lock(&sched_down_mutex); -+ cpumask_clear_cpu(cpu, &sched_down_cpumask); -+ 
mutex_unlock(&sched_down_mutex); -+} -+ -+/** -+ * migrate_me - try to move the current task off this cpu -+ * -+ * Used by the pin_current_cpu() code to try to get tasks -+ * to move off the current CPU as it is going down. -+ * It will only move the task if the task isn't pinned to -+ * the CPU (with migrate_disable, affinity or NO_SETAFFINITY) -+ * and the task has to be in a RUNNING state. Otherwise the -+ * movement of the task will wake it up (change its state -+ * to running) when the task did not expect it. -+ * -+ * Returns 1 if it succeeded in moving the current task -+ * 0 otherwise. -+ */ -+int migrate_me(void) -+{ -+ struct task_struct *p = current; -+ struct migration_arg arg; -+ struct cpumask *cpumask; -+ struct cpumask *mask; -+ unsigned long flags; -+ unsigned int dest_cpu; -+ struct rq *rq; -+ -+ /* -+ * We can not migrate tasks bounded to a CPU or tasks not -+ * running. The movement of the task will wake it up. -+ */ -+ if (p->flags & PF_NO_SETAFFINITY || p->state) -+ return 0; -+ -+ mutex_lock(&sched_down_mutex); -+ rq = task_rq_lock(p, &flags); -+ -+ cpumask = this_cpu_ptr(&sched_cpumasks); -+ mask = &p->cpus_allowed; -+ -+ cpumask_andnot(cpumask, mask, &sched_down_cpumask); -+ -+ if (!cpumask_weight(cpumask)) { -+ /* It's only on this CPU? */ -+ task_rq_unlock(rq, p, &flags); -+ mutex_unlock(&sched_down_mutex); -+ return 0; -+ } -+ -+ dest_cpu = cpumask_any_and(cpu_active_mask, cpumask); -+ -+ arg.task = p; -+ arg.dest_cpu = dest_cpu; -+ -+ task_rq_unlock(rq, p, &flags); -+ -+ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); -+ tlb_migrate_finish(p->mm); -+ mutex_unlock(&sched_down_mutex); -+ -+ return 1; -+} -+ - /* - * Change a given task's CPU affinity. Migrate the thread to a - * proper CPU and schedule it away if the CPU it's executing on -@@ -1232,7 +1360,7 @@ - do_set_cpus_allowed(p, new_mask); - - /* Can the task run on the task's current CPU? If so, we're done */ -- if (cpumask_test_cpu(task_cpu(p), new_mask)) -+ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p)) - goto out; - - dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); -@@ -1408,6 +1536,18 @@ - return ret; - } - -+static bool check_task_state(struct task_struct *p, long match_state) -+{ -+ bool match = false; -+ -+ raw_spin_lock_irq(&p->pi_lock); -+ if (p->state == match_state || p->saved_state == match_state) -+ match = true; -+ raw_spin_unlock_irq(&p->pi_lock); -+ -+ return match; -+} -+ - /* - * wait_task_inactive - wait for a thread to unschedule. - * -@@ -1452,7 +1592,7 @@ - * is actually now running somewhere else! 
- */ - while (task_running(rq, p)) { -- if (match_state && unlikely(p->state != match_state)) -+ if (match_state && !check_task_state(p, match_state)) - return 0; - cpu_relax(); - } -@@ -1467,7 +1607,8 @@ - running = task_running(rq, p); - queued = task_on_rq_queued(p); - ncsw = 0; -- if (!match_state || p->state == match_state) -+ if (!match_state || p->state == match_state || -+ p->saved_state == match_state) - ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ - task_rq_unlock(rq, p, &flags); - -@@ -1624,7 +1765,7 @@ - { - lockdep_assert_held(&p->pi_lock); - -- if (p->nr_cpus_allowed > 1) -+ if (tsk_nr_cpus_allowed(p) > 1) - cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); - - /* -@@ -1704,10 +1845,6 @@ - { - activate_task(rq, p, en_flags); - p->on_rq = TASK_ON_RQ_QUEUED; -- -- /* if a worker is waking up, notify workqueue */ -- if (p->flags & PF_WQ_WORKER) -- wq_worker_waking_up(p, cpu_of(rq)); - } - - /* -@@ -1934,8 +2071,27 @@ - */ - smp_mb__before_spinlock(); - raw_spin_lock_irqsave(&p->pi_lock, flags); -- if (!(p->state & state)) -+ if (!(p->state & state)) { -+ /* -+ * The task might be running due to a spinlock sleeper -+ * wakeup. Check the saved state and set it to running -+ * if the wakeup condition is true. -+ */ -+ if (!(wake_flags & WF_LOCK_SLEEPER)) { -+ if (p->saved_state & state) { -+ p->saved_state = TASK_RUNNING; -+ success = 1; -+ } -+ } - goto out; -+ } -+ -+ /* -+ * If this is a regular wakeup, then we can unconditionally -+ * clear the saved state of a "lock sleeper". -+ */ -+ if (!(wake_flags & WF_LOCK_SLEEPER)) -+ p->saved_state = TASK_RUNNING; - - trace_sched_waking(p); - -@@ -2005,52 +2161,6 @@ - } - - /** -- * try_to_wake_up_local - try to wake up a local task with rq lock held -- * @p: the thread to be awakened -- * -- * Put @p on the run-queue if it's not already there. The caller must -- * ensure that this_rq() is locked, @p is bound to this_rq() and not -- * the current task. -- */ --static void try_to_wake_up_local(struct task_struct *p) --{ -- struct rq *rq = task_rq(p); -- -- if (WARN_ON_ONCE(rq != this_rq()) || -- WARN_ON_ONCE(p == current)) -- return; -- -- lockdep_assert_held(&rq->lock); -- -- if (!raw_spin_trylock(&p->pi_lock)) { -- /* -- * This is OK, because current is on_cpu, which avoids it being -- * picked for load-balance and preemption/IRQs are still -- * disabled avoiding further scheduler activity on it and we've -- * not yet picked a replacement task. -- */ -- lockdep_unpin_lock(&rq->lock); -- raw_spin_unlock(&rq->lock); -- raw_spin_lock(&p->pi_lock); -- raw_spin_lock(&rq->lock); -- lockdep_pin_lock(&rq->lock); -- } -- -- if (!(p->state & TASK_NORMAL)) -- goto out; -- -- trace_sched_waking(p); -- -- if (!task_on_rq_queued(p)) -- ttwu_activate(rq, p, ENQUEUE_WAKEUP); -- -- ttwu_do_wakeup(rq, p, 0); -- ttwu_stat(p, smp_processor_id(), 0); --out: -- raw_spin_unlock(&p->pi_lock); --} -- --/** - * wake_up_process - Wake up a specific process - * @p: The process to be woken up. - * -@@ -2068,6 +2178,18 @@ - } - EXPORT_SYMBOL(wake_up_process); - -+/** -+ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock" -+ * @p: The process to be woken up. -+ * -+ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate -+ * the nature of the wakeup. 
-+ */ -+int wake_up_lock_sleeper(struct task_struct *p) -+{ -+ return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER); -+} -+ - int wake_up_state(struct task_struct *p, unsigned int state) - { - return try_to_wake_up(p, state, 0); -@@ -2254,6 +2376,9 @@ - p->on_cpu = 0; - #endif - init_task_preempt_count(p); -+#ifdef CONFIG_HAVE_PREEMPT_LAZY -+ task_thread_info(p)->preempt_lazy_count = 0; -+#endif - #ifdef CONFIG_SMP - plist_node_init(&p->pushable_tasks, MAX_PRIO); - RB_CLEAR_NODE(&p->pushable_dl_tasks); -@@ -2578,8 +2703,12 @@ - finish_arch_post_lock_switch(); - - fire_sched_in_preempt_notifiers(current); -+ /* -+ * We use mmdrop_delayed() here so we don't have to do the -+ * full __mmdrop() when we are the last user. -+ */ - if (mm) -- mmdrop(mm); -+ mmdrop_delayed(mm); - if (unlikely(prev_state == TASK_DEAD)) { - if (prev->sched_class->task_dead) - prev->sched_class->task_dead(prev); -@@ -2910,16 +3039,6 @@ - } - #endif - --notrace unsigned long get_parent_ip(unsigned long addr) --{ -- if (in_lock_functions(addr)) { -- addr = CALLER_ADDR2; -- if (in_lock_functions(addr)) -- addr = CALLER_ADDR3; -- } -- return addr; --} -- - #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ - defined(CONFIG_PREEMPT_TRACER)) - -@@ -2941,7 +3060,7 @@ - PREEMPT_MASK - 10); - #endif - if (preempt_count() == val) { -- unsigned long ip = get_parent_ip(CALLER_ADDR1); -+ unsigned long ip = get_lock_parent_ip(); - #ifdef CONFIG_DEBUG_PREEMPT - current->preempt_disable_ip = ip; - #endif -@@ -2968,7 +3087,7 @@ - #endif - - if (preempt_count() == val) -- trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); -+ trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); - __preempt_count_sub(val); - } - EXPORT_SYMBOL(preempt_count_sub); -@@ -3022,6 +3141,77 @@ - schedstat_inc(this_rq(), sched_count); - } - -+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP) -+ -+void migrate_disable(void) -+{ -+ struct task_struct *p = current; -+ -+ if (in_atomic() || irqs_disabled()) { -+#ifdef CONFIG_SCHED_DEBUG -+ p->migrate_disable_atomic++; -+#endif -+ return; -+ } -+ -+#ifdef CONFIG_SCHED_DEBUG -+ if (unlikely(p->migrate_disable_atomic)) { -+ tracing_off(); -+ WARN_ON_ONCE(1); -+ } -+#endif -+ -+ if (p->migrate_disable) { -+ p->migrate_disable++; -+ return; -+ } -+ -+ preempt_disable(); -+ preempt_lazy_disable(); -+ pin_current_cpu(); -+ p->migrate_disable = 1; -+ preempt_enable(); -+} -+EXPORT_SYMBOL(migrate_disable); -+ -+void migrate_enable(void) -+{ -+ struct task_struct *p = current; -+ -+ if (in_atomic() || irqs_disabled()) { -+#ifdef CONFIG_SCHED_DEBUG -+ p->migrate_disable_atomic--; -+#endif -+ return; -+ } -+ -+#ifdef CONFIG_SCHED_DEBUG -+ if (unlikely(p->migrate_disable_atomic)) { -+ tracing_off(); -+ WARN_ON_ONCE(1); -+ } -+#endif -+ WARN_ON_ONCE(p->migrate_disable <= 0); -+ -+ if (p->migrate_disable > 1) { -+ p->migrate_disable--; -+ return; -+ } -+ -+ preempt_disable(); -+ /* -+ * Clearing migrate_disable causes tsk_cpus_allowed to -+ * show the tasks original cpu affinity. -+ */ -+ p->migrate_disable = 0; -+ -+ unpin_current_cpu(); -+ preempt_enable(); -+ preempt_lazy_enable(); -+} -+EXPORT_SYMBOL(migrate_enable); -+#endif -+ - /* - * Pick up the highest-prio task: - */ -@@ -3146,19 +3336,6 @@ - } else { - deactivate_task(rq, prev, DEQUEUE_SLEEP); - prev->on_rq = 0; -- -- /* -- * If a worker went to sleep, notify and ask workqueue -- * whether it wants to wake up a task to maintain -- * concurrency. 
-- */ -- if (prev->flags & PF_WQ_WORKER) { -- struct task_struct *to_wakeup; -- -- to_wakeup = wq_worker_sleeping(prev, cpu); -- if (to_wakeup) -- try_to_wake_up_local(to_wakeup); -- } - } - switch_count = &prev->nvcsw; - } -@@ -3168,6 +3345,7 @@ - - next = pick_next_task(rq, prev); - clear_tsk_need_resched(prev); -+ clear_tsk_need_resched_lazy(prev); - clear_preempt_need_resched(); - rq->clock_skip_update = 0; - -@@ -3189,9 +3367,20 @@ - - static inline void sched_submit_work(struct task_struct *tsk) - { -- if (!tsk->state || tsk_is_pi_blocked(tsk)) -+ if (!tsk->state) - return; - /* -+ * If a worker went to sleep, notify and ask workqueue whether -+ * it wants to wake up a task to maintain concurrency. -+ */ -+ if (tsk->flags & PF_WQ_WORKER) -+ wq_worker_sleeping(tsk); -+ -+ -+ if (tsk_is_pi_blocked(tsk)) -+ return; -+ -+ /* - * If we are going to sleep and we have plugged IO queued, - * make sure to submit it to avoid deadlocks. - */ -@@ -3199,6 +3388,12 @@ - blk_schedule_flush_plug(tsk); - } - -+static void sched_update_worker(struct task_struct *tsk) -+{ -+ if (tsk->flags & PF_WQ_WORKER) -+ wq_worker_running(tsk); -+} -+ - asmlinkage __visible void __sched schedule(void) - { - struct task_struct *tsk = current; -@@ -3209,6 +3404,7 @@ - __schedule(false); - sched_preempt_enable_no_resched(); - } while (need_resched()); -+ sched_update_worker(tsk); - } - EXPORT_SYMBOL(schedule); - -@@ -3257,6 +3453,30 @@ - } while (need_resched()); - } - -+#ifdef CONFIG_PREEMPT_LAZY -+/* -+ * If TIF_NEED_RESCHED is then we allow to be scheduled away since this is -+ * set by a RT task. Oterwise we try to avoid beeing scheduled out as long as -+ * preempt_lazy_count counter >0. -+ */ -+static __always_inline int preemptible_lazy(void) -+{ -+ if (test_thread_flag(TIF_NEED_RESCHED)) -+ return 1; -+ if (current_thread_info()->preempt_lazy_count) -+ return 0; -+ return 1; -+} -+ -+#else -+ -+static int preemptible_lazy(void) -+{ -+ return 1; -+} -+ -+#endif -+ - #ifdef CONFIG_PREEMPT - /* - * this is the entry point to schedule() from in-kernel preemption -@@ -3271,6 +3491,8 @@ - */ - if (likely(!preemptible())) - return; -+ if (!preemptible_lazy()) -+ return; - - preempt_schedule_common(); - } -@@ -3297,6 +3519,8 @@ - - if (likely(!preemptible())) - return; -+ if (!preemptible_lazy()) -+ return; - - do { - preempt_disable_notrace(); -@@ -3306,7 +3530,16 @@ - * an infinite recursion. - */ - prev_ctx = exception_enter(); -+ /* -+ * The add/subtract must not be traced by the function -+ * tracer. But we still want to account for the -+ * preempt off latency tracer. Since the _notrace versions -+ * of add/subtract skip the accounting for latency tracer -+ * we must force it manually. -+ */ -+ start_critical_timings(); - __schedule(true); -+ stop_critical_timings(); - exception_exit(prev_ctx); - - preempt_enable_no_resched_notrace(); -@@ -4650,6 +4883,7 @@ - } - EXPORT_SYMBOL(__cond_resched_lock); - -+#ifndef CONFIG_PREEMPT_RT_FULL - int __sched __cond_resched_softirq(void) - { - BUG_ON(!in_softirq()); -@@ -4663,6 +4897,7 @@ - return 0; - } - EXPORT_SYMBOL(__cond_resched_softirq); -+#endif - - /** - * yield - yield the current processor to other threads. -@@ -5027,7 +5262,9 @@ - - /* Set the preempt count _outside_ the spinlocks! 
*/ - init_idle_preempt_count(idle, cpu); -- -+#ifdef CONFIG_HAVE_PREEMPT_LAZY -+ task_thread_info(idle)->preempt_lazy_count = 0; -+#endif - /* - * The idle tasks have their own, simple scheduling class: - */ -@@ -5168,6 +5405,8 @@ - #endif /* CONFIG_NUMA_BALANCING */ - - #ifdef CONFIG_HOTPLUG_CPU -+static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm); -+ - /* - * Ensures that the idle task is using init_mm right before its cpu goes - * offline. -@@ -5182,7 +5421,11 @@ - switch_mm(mm, &init_mm, current); - finish_arch_post_lock_switch(); - } -- mmdrop(mm); -+ /* -+ * Defer the cleanup to an alive cpu. On RT we can neither -+ * call mmdrop() nor mmdrop_delayed() from here. -+ */ -+ per_cpu(idle_last_mm, smp_processor_id()) = mm; - } - - /* -@@ -5555,6 +5798,10 @@ - - case CPU_DEAD: - calc_load_migrate(rq); -+ if (per_cpu(idle_last_mm, cpu)) { -+ mmdrop(per_cpu(idle_last_mm, cpu)); -+ per_cpu(idle_last_mm, cpu) = NULL; -+ } - break; - #endif - } -@@ -7538,7 +7785,7 @@ - #ifdef CONFIG_DEBUG_ATOMIC_SLEEP - static inline int preempt_count_equals(int preempt_offset) - { -- int nested = preempt_count() + rcu_preempt_depth(); -+ int nested = preempt_count() + sched_rcu_preempt_depth(); - - return (nested == preempt_offset); - } -diff -Nur linux-4.4.13.orig/kernel/sched/cpudeadline.c linux-4.4.13/kernel/sched/cpudeadline.c ---- linux-4.4.13.orig/kernel/sched/cpudeadline.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/sched/cpudeadline.c 2016-06-22 21:55:08.058523867 +0200 -@@ -103,10 +103,10 @@ - const struct sched_dl_entity *dl_se = &p->dl; - - if (later_mask && -- cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) { -+ cpumask_and(later_mask, cp->free_cpus, tsk_cpus_allowed(p))) { - best_cpu = cpumask_any(later_mask); - goto out; -- } else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) && -+ } else if (cpumask_test_cpu(cpudl_maximum(cp), tsk_cpus_allowed(p)) && - dl_time_before(dl_se->deadline, cp->elements[0].dl)) { - best_cpu = cpudl_maximum(cp); - if (later_mask) -diff -Nur linux-4.4.13.orig/kernel/sched/cpupri.c linux-4.4.13/kernel/sched/cpupri.c ---- linux-4.4.13.orig/kernel/sched/cpupri.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/sched/cpupri.c 2016-06-22 21:55:08.058523867 +0200 -@@ -103,11 +103,11 @@ - if (skip) - continue; - -- if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) -+ if (cpumask_any_and(tsk_cpus_allowed(p), vec->mask) >= nr_cpu_ids) - continue; - - if (lowest_mask) { -- cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); -+ cpumask_and(lowest_mask, tsk_cpus_allowed(p), vec->mask); - - /* - * We have to ensure that we have at least one bit -diff -Nur linux-4.4.13.orig/kernel/sched/cputime.c linux-4.4.13/kernel/sched/cputime.c ---- linux-4.4.13.orig/kernel/sched/cputime.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/sched/cputime.c 2016-06-22 21:55:08.058523867 +0200 -@@ -680,7 +680,7 @@ - { - unsigned long long delta = vtime_delta(tsk); - -- WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING); -+ WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE); - tsk->vtime_snap += delta; - - /* CHECKME: always safe to convert nsecs to cputime? 
*/ -@@ -696,37 +696,37 @@ - - void vtime_account_system(struct task_struct *tsk) - { -- write_seqlock(&tsk->vtime_seqlock); -+ write_seqcount_begin(&tsk->vtime_seqcount); - __vtime_account_system(tsk); -- write_sequnlock(&tsk->vtime_seqlock); -+ write_seqcount_end(&tsk->vtime_seqcount); - } - - void vtime_gen_account_irq_exit(struct task_struct *tsk) - { -- write_seqlock(&tsk->vtime_seqlock); -+ write_seqcount_begin(&tsk->vtime_seqcount); - __vtime_account_system(tsk); - if (context_tracking_in_user()) - tsk->vtime_snap_whence = VTIME_USER; -- write_sequnlock(&tsk->vtime_seqlock); -+ write_seqcount_end(&tsk->vtime_seqcount); - } - - void vtime_account_user(struct task_struct *tsk) - { - cputime_t delta_cpu; - -- write_seqlock(&tsk->vtime_seqlock); -+ write_seqcount_begin(&tsk->vtime_seqcount); - delta_cpu = get_vtime_delta(tsk); - tsk->vtime_snap_whence = VTIME_SYS; - account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu)); -- write_sequnlock(&tsk->vtime_seqlock); -+ write_seqcount_end(&tsk->vtime_seqcount); - } - - void vtime_user_enter(struct task_struct *tsk) - { -- write_seqlock(&tsk->vtime_seqlock); -+ write_seqcount_begin(&tsk->vtime_seqcount); - __vtime_account_system(tsk); - tsk->vtime_snap_whence = VTIME_USER; -- write_sequnlock(&tsk->vtime_seqlock); -+ write_seqcount_end(&tsk->vtime_seqcount); - } - - void vtime_guest_enter(struct task_struct *tsk) -@@ -738,19 +738,19 @@ - * synchronization against the reader (task_gtime()) - * that can thus safely catch up with a tickless delta. - */ -- write_seqlock(&tsk->vtime_seqlock); -+ write_seqcount_begin(&tsk->vtime_seqcount); - __vtime_account_system(tsk); - current->flags |= PF_VCPU; -- write_sequnlock(&tsk->vtime_seqlock); -+ write_seqcount_end(&tsk->vtime_seqcount); - } - EXPORT_SYMBOL_GPL(vtime_guest_enter); - - void vtime_guest_exit(struct task_struct *tsk) - { -- write_seqlock(&tsk->vtime_seqlock); -+ write_seqcount_begin(&tsk->vtime_seqcount); - __vtime_account_system(tsk); - current->flags &= ~PF_VCPU; -- write_sequnlock(&tsk->vtime_seqlock); -+ write_seqcount_end(&tsk->vtime_seqcount); - } - EXPORT_SYMBOL_GPL(vtime_guest_exit); - -@@ -763,24 +763,26 @@ - - void arch_vtime_task_switch(struct task_struct *prev) - { -- write_seqlock(&prev->vtime_seqlock); -- prev->vtime_snap_whence = VTIME_SLEEPING; -- write_sequnlock(&prev->vtime_seqlock); -+ write_seqcount_begin(&prev->vtime_seqcount); -+ prev->vtime_snap_whence = VTIME_INACTIVE; -+ write_seqcount_end(&prev->vtime_seqcount); - -- write_seqlock(¤t->vtime_seqlock); -+ write_seqcount_begin(¤t->vtime_seqcount); - current->vtime_snap_whence = VTIME_SYS; - current->vtime_snap = sched_clock_cpu(smp_processor_id()); -- write_sequnlock(¤t->vtime_seqlock); -+ write_seqcount_end(¤t->vtime_seqcount); - } - - void vtime_init_idle(struct task_struct *t, int cpu) - { - unsigned long flags; - -- write_seqlock_irqsave(&t->vtime_seqlock, flags); -+ local_irq_save(flags); -+ write_seqcount_begin(&t->vtime_seqcount); - t->vtime_snap_whence = VTIME_SYS; - t->vtime_snap = sched_clock_cpu(cpu); -- write_sequnlock_irqrestore(&t->vtime_seqlock, flags); -+ write_seqcount_end(&t->vtime_seqcount); -+ local_irq_restore(flags); - } - - cputime_t task_gtime(struct task_struct *t) -@@ -792,13 +794,13 @@ - return t->gtime; - - do { -- seq = read_seqbegin(&t->vtime_seqlock); -+ seq = read_seqcount_begin(&t->vtime_seqcount); - - gtime = t->gtime; - if (t->flags & PF_VCPU) - gtime += vtime_delta(t); - -- } while (read_seqretry(&t->vtime_seqlock, seq)); -+ } while (read_seqcount_retry(&t->vtime_seqcount, 
seq)); - - return gtime; - } -@@ -821,7 +823,7 @@ - *udelta = 0; - *sdelta = 0; - -- seq = read_seqbegin(&t->vtime_seqlock); -+ seq = read_seqcount_begin(&t->vtime_seqcount); - - if (u_dst) - *u_dst = *u_src; -@@ -829,7 +831,7 @@ - *s_dst = *s_src; - - /* Task is sleeping, nothing to add */ -- if (t->vtime_snap_whence == VTIME_SLEEPING || -+ if (t->vtime_snap_whence == VTIME_INACTIVE || - is_idle_task(t)) - continue; - -@@ -845,7 +847,7 @@ - if (t->vtime_snap_whence == VTIME_SYS) - *sdelta = delta; - } -- } while (read_seqretry(&t->vtime_seqlock, seq)); -+ } while (read_seqcount_retry(&t->vtime_seqcount, seq)); - } - - -diff -Nur linux-4.4.13.orig/kernel/sched/deadline.c linux-4.4.13/kernel/sched/deadline.c ---- linux-4.4.13.orig/kernel/sched/deadline.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/sched/deadline.c 2016-06-22 21:55:08.058523867 +0200 -@@ -134,7 +134,7 @@ - { - struct task_struct *p = dl_task_of(dl_se); - -- if (p->nr_cpus_allowed > 1) -+ if (tsk_nr_cpus_allowed(p) > 1) - dl_rq->dl_nr_migratory++; - - update_dl_migration(dl_rq); -@@ -144,7 +144,7 @@ - { - struct task_struct *p = dl_task_of(dl_se); - -- if (p->nr_cpus_allowed > 1) -+ if (tsk_nr_cpus_allowed(p) > 1) - dl_rq->dl_nr_migratory--; - - update_dl_migration(dl_rq); -@@ -697,6 +697,7 @@ - - hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - timer->function = dl_task_timer; -+ timer->irqsafe = 1; - } - - static -@@ -989,7 +990,7 @@ - - enqueue_dl_entity(&p->dl, pi_se, flags); - -- if (!task_current(rq, p) && p->nr_cpus_allowed > 1) -+ if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1) - enqueue_pushable_dl_task(rq, p); - } - -@@ -1067,9 +1068,9 @@ - * try to make it stay here, it might be important. - */ - if (unlikely(dl_task(curr)) && -- (curr->nr_cpus_allowed < 2 || -+ (tsk_nr_cpus_allowed(curr) < 2 || - !dl_entity_preempt(&p->dl, &curr->dl)) && -- (p->nr_cpus_allowed > 1)) { -+ (tsk_nr_cpus_allowed(p) > 1)) { - int target = find_later_rq(p); - - if (target != -1 && -@@ -1090,7 +1091,7 @@ - * Current can't be migrated, useless to reschedule, - * let's hope p can move out. - */ -- if (rq->curr->nr_cpus_allowed == 1 || -+ if (tsk_nr_cpus_allowed(rq->curr) == 1 || - cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1) - return; - -@@ -1098,7 +1099,7 @@ - * p is migratable, so let's not schedule it and - * see if it is pushed or pulled somewhere else. 
- */ -- if (p->nr_cpus_allowed != 1 && -+ if (tsk_nr_cpus_allowed(p) != 1 && - cpudl_find(&rq->rd->cpudl, p, NULL) != -1) - return; - -@@ -1212,7 +1213,7 @@ - { - update_curr_dl(rq); - -- if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) -+ if (on_dl_rq(&p->dl) && tsk_nr_cpus_allowed(p) > 1) - enqueue_pushable_dl_task(rq, p); - } - -@@ -1335,7 +1336,7 @@ - if (unlikely(!later_mask)) - return -1; - -- if (task->nr_cpus_allowed == 1) -+ if (tsk_nr_cpus_allowed(task) == 1) - return -1; - - /* -@@ -1441,7 +1442,7 @@ - if (double_lock_balance(rq, later_rq)) { - if (unlikely(task_rq(task) != rq || - !cpumask_test_cpu(later_rq->cpu, -- &task->cpus_allowed) || -+ tsk_cpus_allowed(task)) || - task_running(rq, task) || - !task_on_rq_queued(task))) { - double_unlock_balance(rq, later_rq); -@@ -1480,7 +1481,7 @@ - - BUG_ON(rq->cpu != task_cpu(p)); - BUG_ON(task_current(rq, p)); -- BUG_ON(p->nr_cpus_allowed <= 1); -+ BUG_ON(tsk_nr_cpus_allowed(p) <= 1); - - BUG_ON(!task_on_rq_queued(p)); - BUG_ON(!dl_task(p)); -@@ -1519,7 +1520,7 @@ - */ - if (dl_task(rq->curr) && - dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) && -- rq->curr->nr_cpus_allowed > 1) { -+ tsk_nr_cpus_allowed(rq->curr) > 1) { - resched_curr(rq); - return 0; - } -@@ -1666,9 +1667,9 @@ - { - if (!task_running(rq, p) && - !test_tsk_need_resched(rq->curr) && -- p->nr_cpus_allowed > 1 && -+ tsk_nr_cpus_allowed(p) > 1 && - dl_task(rq->curr) && -- (rq->curr->nr_cpus_allowed < 2 || -+ (tsk_nr_cpus_allowed(rq->curr) < 2 || - !dl_entity_preempt(&p->dl, &rq->curr->dl))) { - push_dl_tasks(rq); - } -@@ -1769,7 +1770,7 @@ - { - if (task_on_rq_queued(p) && rq->curr != p) { - #ifdef CONFIG_SMP -- if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) -+ if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded) - queue_push_tasks(rq); - #else - if (dl_task(rq->curr)) -diff -Nur linux-4.4.13.orig/kernel/sched/debug.c linux-4.4.13/kernel/sched/debug.c ---- linux-4.4.13.orig/kernel/sched/debug.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/sched/debug.c 2016-06-22 21:55:08.058523867 +0200 -@@ -251,6 +251,9 @@ - P(rt_throttled); - PN(rt_time); - PN(rt_runtime); -+#ifdef CONFIG_SMP -+ P(rt_nr_migratory); -+#endif - - #undef PN - #undef P -@@ -635,6 +638,10 @@ - #endif - P(policy); - P(prio); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ P(migrate_disable); -+#endif -+ P(nr_cpus_allowed); - #undef PN - #undef __PN - #undef P -diff -Nur linux-4.4.13.orig/kernel/sched/fair.c linux-4.4.13/kernel/sched/fair.c ---- linux-4.4.13.orig/kernel/sched/fair.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/sched/fair.c 2016-06-22 21:55:08.058523867 +0200 -@@ -3135,7 +3135,7 @@ - ideal_runtime = sched_slice(cfs_rq, curr); - delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; - if (delta_exec > ideal_runtime) { -- resched_curr(rq_of(cfs_rq)); -+ resched_curr_lazy(rq_of(cfs_rq)); - /* - * The current task ran long enough, ensure it doesn't get - * re-elected due to buddy favours. -@@ -3159,7 +3159,7 @@ - return; - - if (delta > ideal_runtime) -- resched_curr(rq_of(cfs_rq)); -+ resched_curr_lazy(rq_of(cfs_rq)); - } - - static void -@@ -3299,7 +3299,7 @@ - * validating it and just reschedule. 
- */ - if (queued) { -- resched_curr(rq_of(cfs_rq)); -+ resched_curr_lazy(rq_of(cfs_rq)); - return; - } - /* -@@ -3481,7 +3481,7 @@ - * hierarchy can be throttled - */ - if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) -- resched_curr(rq_of(cfs_rq)); -+ resched_curr_lazy(rq_of(cfs_rq)); - } - - static __always_inline -@@ -4093,7 +4093,7 @@ - - if (delta < 0) { - if (rq->curr == p) -- resched_curr(rq); -+ resched_curr_lazy(rq); - return; - } - hrtick_start(rq, delta); -@@ -5177,7 +5177,7 @@ - return; - - preempt: -- resched_curr(rq); -+ resched_curr_lazy(rq); - /* - * Only set the backward buddy when the current task is still - * on the rq. This can happen when a wakeup gets interleaved -@@ -7928,7 +7928,7 @@ - * 'current' within the tree based on its new key value. - */ - swap(curr->vruntime, se->vruntime); -- resched_curr(rq); -+ resched_curr_lazy(rq); - } - - se->vruntime -= cfs_rq->min_vruntime; -@@ -7953,7 +7953,7 @@ - */ - if (rq->curr == p) { - if (p->prio > oldprio) -- resched_curr(rq); -+ resched_curr_lazy(rq); - } else - check_preempt_curr(rq, p, 0); - } -diff -Nur linux-4.4.13.orig/kernel/sched/features.h linux-4.4.13/kernel/sched/features.h ---- linux-4.4.13.orig/kernel/sched/features.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/sched/features.h 2016-06-22 21:55:08.062523867 +0200 -@@ -45,11 +45,19 @@ - */ - SCHED_FEAT(NONTASK_CAPACITY, true) - -+#ifdef CONFIG_PREEMPT_RT_FULL -+SCHED_FEAT(TTWU_QUEUE, false) -+# ifdef CONFIG_PREEMPT_LAZY -+SCHED_FEAT(PREEMPT_LAZY, true) -+# endif -+#else -+ - /* - * Queue remote wakeups on the target CPU and process them - * using the scheduler IPI. Reduces rq->lock contention/bounces. - */ - SCHED_FEAT(TTWU_QUEUE, true) -+#endif - - #ifdef HAVE_RT_PUSH_IPI - /* -diff -Nur linux-4.4.13.orig/kernel/sched/Makefile linux-4.4.13/kernel/sched/Makefile ---- linux-4.4.13.orig/kernel/sched/Makefile 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/sched/Makefile 2016-06-22 21:55:08.062523867 +0200 -@@ -13,7 +13,7 @@ - - obj-y += core.o loadavg.o clock.o cputime.o - obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o --obj-y += wait.o completion.o idle.o -+obj-y += wait.o swait.o work-simple.o completion.o idle.o - obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o - obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o - obj-$(CONFIG_SCHEDSTATS) += stats.o -diff -Nur linux-4.4.13.orig/kernel/sched/rt.c linux-4.4.13/kernel/sched/rt.c ---- linux-4.4.13.orig/kernel/sched/rt.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/sched/rt.c 2016-06-22 21:55:08.062523867 +0200 -@@ -47,6 +47,7 @@ - - hrtimer_init(&rt_b->rt_period_timer, - CLOCK_MONOTONIC, HRTIMER_MODE_REL); -+ rt_b->rt_period_timer.irqsafe = 1; - rt_b->rt_period_timer.function = sched_rt_period_timer; - } - -@@ -93,6 +94,7 @@ - rt_rq->push_cpu = nr_cpu_ids; - raw_spin_lock_init(&rt_rq->push_lock); - init_irq_work(&rt_rq->push_work, push_irq_work_func); -+ rt_rq->push_work.flags |= IRQ_WORK_HARD_IRQ; - #endif - #endif /* CONFIG_SMP */ - /* We start is dequeued state, because no RT tasks are queued */ -@@ -326,7 +328,7 @@ - rt_rq = &rq_of_rt_rq(rt_rq)->rt; - - rt_rq->rt_nr_total++; -- if (p->nr_cpus_allowed > 1) -+ if (tsk_nr_cpus_allowed(p) > 1) - rt_rq->rt_nr_migratory++; - - update_rt_migration(rt_rq); -@@ -343,7 +345,7 @@ - rt_rq = &rq_of_rt_rq(rt_rq)->rt; - - rt_rq->rt_nr_total--; -- if (p->nr_cpus_allowed > 1) -+ if (tsk_nr_cpus_allowed(p) > 1) - rt_rq->rt_nr_migratory--; - - update_rt_migration(rt_rq); -@@ -1262,7 +1264,7 @@ - - enqueue_rt_entity(rt_se, 
flags & ENQUEUE_HEAD); - -- if (!task_current(rq, p) && p->nr_cpus_allowed > 1) -+ if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1) - enqueue_pushable_task(rq, p); - } - -@@ -1351,7 +1353,7 @@ - * will have to sort it out. - */ - if (curr && unlikely(rt_task(curr)) && -- (curr->nr_cpus_allowed < 2 || -+ (tsk_nr_cpus_allowed(curr) < 2 || - curr->prio <= p->prio)) { - int target = find_lowest_rq(p); - -@@ -1375,7 +1377,7 @@ - * Current can't be migrated, useless to reschedule, - * let's hope p can move out. - */ -- if (rq->curr->nr_cpus_allowed == 1 || -+ if (tsk_nr_cpus_allowed(rq->curr) == 1 || - !cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) - return; - -@@ -1383,7 +1385,7 @@ - * p is migratable, so let's not schedule it and - * see if it is pushed or pulled somewhere else. - */ -- if (p->nr_cpus_allowed != 1 -+ if (tsk_nr_cpus_allowed(p) != 1 - && cpupri_find(&rq->rd->cpupri, p, NULL)) - return; - -@@ -1517,7 +1519,7 @@ - * The previous task needs to be made eligible for pushing - * if it is still active - */ -- if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) -+ if (on_rt_rq(&p->rt) && tsk_nr_cpus_allowed(p) > 1) - enqueue_pushable_task(rq, p); - } - -@@ -1567,7 +1569,7 @@ - if (unlikely(!lowest_mask)) - return -1; - -- if (task->nr_cpus_allowed == 1) -+ if (tsk_nr_cpus_allowed(task) == 1) - return -1; /* No other targets possible */ - - if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask)) -@@ -1699,7 +1701,7 @@ - - BUG_ON(rq->cpu != task_cpu(p)); - BUG_ON(task_current(rq, p)); -- BUG_ON(p->nr_cpus_allowed <= 1); -+ BUG_ON(tsk_nr_cpus_allowed(p) <= 1); - - BUG_ON(!task_on_rq_queued(p)); - BUG_ON(!rt_task(p)); -@@ -2059,9 +2061,9 @@ - { - if (!task_running(rq, p) && - !test_tsk_need_resched(rq->curr) && -- p->nr_cpus_allowed > 1 && -+ tsk_nr_cpus_allowed(p) > 1 && - (dl_task(rq->curr) || rt_task(rq->curr)) && -- (rq->curr->nr_cpus_allowed < 2 || -+ (tsk_nr_cpus_allowed(rq->curr) < 2 || - rq->curr->prio <= p->prio)) - push_rt_tasks(rq); - } -@@ -2134,7 +2136,7 @@ - */ - if (task_on_rq_queued(p) && rq->curr != p) { - #ifdef CONFIG_SMP -- if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) -+ if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded) - queue_push_tasks(rq); - #else - if (p->prio < rq->curr->prio) -diff -Nur linux-4.4.13.orig/kernel/sched/sched.h linux-4.4.13/kernel/sched/sched.h ---- linux-4.4.13.orig/kernel/sched/sched.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/sched/sched.h 2016-06-22 21:55:08.062523867 +0200 -@@ -1100,6 +1100,7 @@ - #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ - #define WF_FORK 0x02 /* child wakeup after fork */ - #define WF_MIGRATED 0x4 /* internal use, task got migrated */ -+#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */ - - /* - * To aid in avoiding the subversion of "niceness" due to uneven distribution -@@ -1299,6 +1300,15 @@ - extern void resched_curr(struct rq *rq); - extern void resched_cpu(int cpu); - -+#ifdef CONFIG_PREEMPT_LAZY -+extern void resched_curr_lazy(struct rq *rq); -+#else -+static inline void resched_curr_lazy(struct rq *rq) -+{ -+ resched_curr(rq); -+} -+#endif -+ - extern struct rt_bandwidth def_rt_bandwidth; - extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); - -diff -Nur linux-4.4.13.orig/kernel/sched/swait.c linux-4.4.13/kernel/sched/swait.c ---- linux-4.4.13.orig/kernel/sched/swait.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-4.4.13/kernel/sched/swait.c 2016-06-22 21:55:08.062523867 +0200 -@@ -0,0 +1,143 @@ -+#include -+#include 
-+#include -+ -+void __init_swait_queue_head(struct swait_queue_head *q, const char *name, -+ struct lock_class_key *key) -+{ -+ raw_spin_lock_init(&q->lock); -+ lockdep_set_class_and_name(&q->lock, key, name); -+ INIT_LIST_HEAD(&q->task_list); -+} -+EXPORT_SYMBOL(__init_swait_queue_head); -+ -+/* -+ * The thing about the wake_up_state() return value; I think we can ignore it. -+ * -+ * If for some reason it would return 0, that means the previously waiting -+ * task is already running, so it will observe condition true (or has already). -+ */ -+void swake_up_locked(struct swait_queue_head *q) -+{ -+ struct swait_queue *curr; -+ -+ if (list_empty(&q->task_list)) -+ return; -+ -+ curr = list_first_entry(&q->task_list, typeof(*curr), task_list); -+ wake_up_process(curr->task); -+ list_del_init(&curr->task_list); -+} -+EXPORT_SYMBOL(swake_up_locked); -+ -+void swake_up_all_locked(struct swait_queue_head *q) -+{ -+ struct swait_queue *curr; -+ int wakes = 0; -+ -+ while (!list_empty(&q->task_list)) { -+ -+ curr = list_first_entry(&q->task_list, typeof(*curr), -+ task_list); -+ wake_up_process(curr->task); -+ list_del_init(&curr->task_list); -+ wakes++; -+ } -+ if (pm_in_action) -+ return; -+ WARN(wakes > 2, "complate_all() with %d waiters\n", wakes); -+} -+EXPORT_SYMBOL(swake_up_all_locked); -+ -+void swake_up(struct swait_queue_head *q) -+{ -+ unsigned long flags; -+ -+ if (!swait_active(q)) -+ return; -+ -+ raw_spin_lock_irqsave(&q->lock, flags); -+ swake_up_locked(q); -+ raw_spin_unlock_irqrestore(&q->lock, flags); -+} -+EXPORT_SYMBOL(swake_up); -+ -+/* -+ * Does not allow usage from IRQ disabled, since we must be able to -+ * release IRQs to guarantee bounded hold time. -+ */ -+void swake_up_all(struct swait_queue_head *q) -+{ -+ struct swait_queue *curr; -+ LIST_HEAD(tmp); -+ -+ if (!swait_active(q)) -+ return; -+ -+ raw_spin_lock_irq(&q->lock); -+ list_splice_init(&q->task_list, &tmp); -+ while (!list_empty(&tmp)) { -+ curr = list_first_entry(&tmp, typeof(*curr), task_list); -+ -+ wake_up_state(curr->task, TASK_NORMAL); -+ list_del_init(&curr->task_list); -+ -+ if (list_empty(&tmp)) -+ break; -+ -+ raw_spin_unlock_irq(&q->lock); -+ raw_spin_lock_irq(&q->lock); -+ } -+ raw_spin_unlock_irq(&q->lock); -+} -+EXPORT_SYMBOL(swake_up_all); -+ -+void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait) -+{ -+ wait->task = current; -+ if (list_empty(&wait->task_list)) -+ list_add(&wait->task_list, &q->task_list); -+} -+ -+void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state) -+{ -+ unsigned long flags; -+ -+ raw_spin_lock_irqsave(&q->lock, flags); -+ __prepare_to_swait(q, wait); -+ set_current_state(state); -+ raw_spin_unlock_irqrestore(&q->lock, flags); -+} -+EXPORT_SYMBOL(prepare_to_swait); -+ -+long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state) -+{ -+ if (signal_pending_state(state, current)) -+ return -ERESTARTSYS; -+ -+ prepare_to_swait(q, wait, state); -+ -+ return 0; -+} -+EXPORT_SYMBOL(prepare_to_swait_event); -+ -+void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait) -+{ -+ __set_current_state(TASK_RUNNING); -+ if (!list_empty(&wait->task_list)) -+ list_del_init(&wait->task_list); -+} -+ -+void finish_swait(struct swait_queue_head *q, struct swait_queue *wait) -+{ -+ unsigned long flags; -+ -+ __set_current_state(TASK_RUNNING); -+ -+ if (!list_empty_careful(&wait->task_list)) { -+ raw_spin_lock_irqsave(&q->lock, flags); -+ list_del_init(&wait->task_list); -+ 
raw_spin_unlock_irqrestore(&q->lock, flags); -+ } -+} -+EXPORT_SYMBOL(finish_swait); -diff -Nur linux-4.4.13.orig/kernel/sched/work-simple.c linux-4.4.13/kernel/sched/work-simple.c ---- linux-4.4.13.orig/kernel/sched/work-simple.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-4.4.13/kernel/sched/work-simple.c 2016-06-22 21:55:08.062523867 +0200 -@@ -0,0 +1,173 @@ -+/* -+ * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de -+ * -+ * Provides a framework for enqueuing callbacks from irq context -+ * PREEMPT_RT_FULL safe. The callbacks are executed in kthread context. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define SWORK_EVENT_PENDING (1 << 0) -+ -+static DEFINE_MUTEX(worker_mutex); -+static struct sworker *glob_worker; -+ -+struct sworker { -+ struct list_head events; -+ struct swait_queue_head wq; -+ -+ raw_spinlock_t lock; -+ -+ struct task_struct *task; -+ int refs; -+}; -+ -+static bool swork_readable(struct sworker *worker) -+{ -+ bool r; -+ -+ if (kthread_should_stop()) -+ return true; -+ -+ raw_spin_lock_irq(&worker->lock); -+ r = !list_empty(&worker->events); -+ raw_spin_unlock_irq(&worker->lock); -+ -+ return r; -+} -+ -+static int swork_kthread(void *arg) -+{ -+ struct sworker *worker = arg; -+ -+ for (;;) { -+ swait_event_interruptible(worker->wq, -+ swork_readable(worker)); -+ if (kthread_should_stop()) -+ break; -+ -+ raw_spin_lock_irq(&worker->lock); -+ while (!list_empty(&worker->events)) { -+ struct swork_event *sev; -+ -+ sev = list_first_entry(&worker->events, -+ struct swork_event, item); -+ list_del(&sev->item); -+ raw_spin_unlock_irq(&worker->lock); -+ -+ WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING, -+ &sev->flags)); -+ sev->func(sev); -+ raw_spin_lock_irq(&worker->lock); -+ } -+ raw_spin_unlock_irq(&worker->lock); -+ } -+ return 0; -+} -+ -+static struct sworker *swork_create(void) -+{ -+ struct sworker *worker; -+ -+ worker = kzalloc(sizeof(*worker), GFP_KERNEL); -+ if (!worker) -+ return ERR_PTR(-ENOMEM); -+ -+ INIT_LIST_HEAD(&worker->events); -+ raw_spin_lock_init(&worker->lock); -+ init_swait_queue_head(&worker->wq); -+ -+ worker->task = kthread_run(swork_kthread, worker, "kswork"); -+ if (IS_ERR(worker->task)) { -+ kfree(worker); -+ return ERR_PTR(-ENOMEM); -+ } -+ -+ return worker; -+} -+ -+static void swork_destroy(struct sworker *worker) -+{ -+ kthread_stop(worker->task); -+ -+ WARN_ON(!list_empty(&worker->events)); -+ kfree(worker); -+} -+ -+/** -+ * swork_queue - queue swork -+ * -+ * Returns %false if @work was already on a queue, %true otherwise. -+ * -+ * The work is queued and processed on a random CPU -+ */ -+bool swork_queue(struct swork_event *sev) -+{ -+ unsigned long flags; -+ -+ if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags)) -+ return false; -+ -+ raw_spin_lock_irqsave(&glob_worker->lock, flags); -+ list_add_tail(&sev->item, &glob_worker->events); -+ raw_spin_unlock_irqrestore(&glob_worker->lock, flags); -+ -+ swake_up(&glob_worker->wq); -+ return true; -+} -+EXPORT_SYMBOL_GPL(swork_queue); -+ -+/** -+ * swork_get - get an instance of the sworker -+ * -+ * Returns an negative error code if the initialization if the worker did not -+ * work, %0 otherwise. 
-+ * -+ */ -+int swork_get(void) -+{ -+ struct sworker *worker; -+ -+ mutex_lock(&worker_mutex); -+ if (!glob_worker) { -+ worker = swork_create(); -+ if (IS_ERR(worker)) { -+ mutex_unlock(&worker_mutex); -+ return -ENOMEM; -+ } -+ -+ glob_worker = worker; -+ } -+ -+ glob_worker->refs++; -+ mutex_unlock(&worker_mutex); -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(swork_get); -+ -+/** -+ * swork_put - puts an instance of the sworker -+ * -+ * Will destroy the sworker thread. This function must not be called until all -+ * queued events have been completed. -+ */ -+void swork_put(void) -+{ -+ mutex_lock(&worker_mutex); -+ -+ glob_worker->refs--; -+ if (glob_worker->refs > 0) -+ goto out; -+ -+ swork_destroy(glob_worker); -+ glob_worker = NULL; -+out: -+ mutex_unlock(&worker_mutex); -+} -+EXPORT_SYMBOL_GPL(swork_put); -diff -Nur linux-4.4.13.orig/kernel/signal.c linux-4.4.13/kernel/signal.c ---- linux-4.4.13.orig/kernel/signal.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/signal.c 2016-06-22 21:55:08.062523867 +0200 -@@ -14,6 +14,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -352,13 +353,30 @@ - return false; - } - -+static inline struct sigqueue *get_task_cache(struct task_struct *t) -+{ -+ struct sigqueue *q = t->sigqueue_cache; -+ -+ if (cmpxchg(&t->sigqueue_cache, q, NULL) != q) -+ return NULL; -+ return q; -+} -+ -+static inline int put_task_cache(struct task_struct *t, struct sigqueue *q) -+{ -+ if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL) -+ return 0; -+ return 1; -+} -+ - /* - * allocate a new signal queue record - * - this may be called without locks if and only if t == current, otherwise an - * appropriate lock must be held to stop the target task from exiting - */ - static struct sigqueue * --__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) -+__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags, -+ int override_rlimit, int fromslab) - { - struct sigqueue *q = NULL; - struct user_struct *user; -@@ -375,7 +393,10 @@ - if (override_rlimit || - atomic_read(&user->sigpending) <= - task_rlimit(t, RLIMIT_SIGPENDING)) { -- q = kmem_cache_alloc(sigqueue_cachep, flags); -+ if (!fromslab) -+ q = get_task_cache(t); -+ if (!q) -+ q = kmem_cache_alloc(sigqueue_cachep, flags); - } else { - print_dropped_signal(sig); - } -@@ -392,6 +413,13 @@ - return q; - } - -+static struct sigqueue * -+__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, -+ int override_rlimit) -+{ -+ return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0); -+} -+ - static void __sigqueue_free(struct sigqueue *q) - { - if (q->flags & SIGQUEUE_PREALLOC) -@@ -401,6 +429,21 @@ - kmem_cache_free(sigqueue_cachep, q); - } - -+static void sigqueue_free_current(struct sigqueue *q) -+{ -+ struct user_struct *up; -+ -+ if (q->flags & SIGQUEUE_PREALLOC) -+ return; -+ -+ up = q->user; -+ if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) { -+ atomic_dec(&up->sigpending); -+ free_uid(up); -+ } else -+ __sigqueue_free(q); -+} -+ - void flush_sigqueue(struct sigpending *queue) - { - struct sigqueue *q; -@@ -414,6 +457,21 @@ - } - - /* -+ * Called from __exit_signal. Flush tsk->pending and -+ * tsk->sigqueue_cache -+ */ -+void flush_task_sigqueue(struct task_struct *tsk) -+{ -+ struct sigqueue *q; -+ -+ flush_sigqueue(&tsk->pending); -+ -+ q = get_task_cache(tsk); -+ if (q) -+ kmem_cache_free(sigqueue_cachep, q); -+} -+ -+/* - * Flush all pending signals for this kthread. 
- */ - void flush_signals(struct task_struct *t) -@@ -525,7 +583,7 @@ - still_pending: - list_del_init(&first->list); - copy_siginfo(info, &first->info); -- __sigqueue_free(first); -+ sigqueue_free_current(first); - } else { - /* - * Ok, it wasn't in the queue. This must be -@@ -560,6 +618,8 @@ - { - int signr; - -+ WARN_ON_ONCE(tsk != current); -+ - /* We only dequeue private signals from ourselves, we don't let - * signalfd steal them - */ -@@ -1156,8 +1216,8 @@ - * We don't want to have recursive SIGSEGV's etc, for example, - * that is why we also clear SIGNAL_UNKILLABLE. - */ --int --force_sig_info(int sig, struct siginfo *info, struct task_struct *t) -+static int -+do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t) - { - unsigned long int flags; - int ret, blocked, ignored; -@@ -1182,6 +1242,39 @@ - return ret; - } - -+int force_sig_info(int sig, struct siginfo *info, struct task_struct *t) -+{ -+/* -+ * On some archs, PREEMPT_RT has to delay sending a signal from a trap -+ * since it can not enable preemption, and the signal code's spin_locks -+ * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will -+ * send the signal on exit of the trap. -+ */ -+#ifdef ARCH_RT_DELAYS_SIGNAL_SEND -+ if (in_atomic()) { -+ if (WARN_ON_ONCE(t != current)) -+ return 0; -+ if (WARN_ON_ONCE(t->forced_info.si_signo)) -+ return 0; -+ -+ if (is_si_special(info)) { -+ WARN_ON_ONCE(info != SEND_SIG_PRIV); -+ t->forced_info.si_signo = sig; -+ t->forced_info.si_errno = 0; -+ t->forced_info.si_code = SI_KERNEL; -+ t->forced_info.si_pid = 0; -+ t->forced_info.si_uid = 0; -+ } else { -+ t->forced_info = *info; -+ } -+ -+ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); -+ return 0; -+ } -+#endif -+ return do_force_sig_info(sig, info, t); -+} -+ - /* - * Nuke all other threads in the group. - */ -@@ -1216,12 +1309,12 @@ - * Disable interrupts early to avoid deadlocks. - * See rcu_read_unlock() comment header for details. - */ -- local_irq_save(*flags); -+ local_irq_save_nort(*flags); - rcu_read_lock(); - sighand = rcu_dereference(tsk->sighand); - if (unlikely(sighand == NULL)) { - rcu_read_unlock(); -- local_irq_restore(*flags); -+ local_irq_restore_nort(*flags); - break; - } - /* -@@ -1242,7 +1335,7 @@ - } - spin_unlock(&sighand->siglock); - rcu_read_unlock(); -- local_irq_restore(*flags); -+ local_irq_restore_nort(*flags); - } - - return sighand; -@@ -1485,7 +1578,8 @@ - */ - struct sigqueue *sigqueue_alloc(void) - { -- struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); -+ /* Preallocated sigqueue objects always from the slabcache ! */ -+ struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1); - - if (q) - q->flags |= SIGQUEUE_PREALLOC; -@@ -1846,15 +1940,7 @@ - if (gstop_done && ptrace_reparented(current)) - do_notify_parent_cldstop(current, false, why); - -- /* -- * Don't want to allow preemption here, because -- * sys_ptrace() needs this task to be inactive. -- * -- * XXX: implement read_unlock_no_resched(). 
-- */ -- preempt_disable(); - read_unlock(&tasklist_lock); -- preempt_enable_no_resched(); - freezable_schedule(); - } else { - /* -diff -Nur linux-4.4.13.orig/kernel/softirq.c linux-4.4.13/kernel/softirq.c ---- linux-4.4.13.orig/kernel/softirq.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/softirq.c 2016-06-22 21:55:08.062523867 +0200 -@@ -21,10 +21,12 @@ - #include - #include - #include -+#include - #include - #include - #include - #include -+#include - #include - - #define CREATE_TRACE_POINTS -@@ -56,12 +58,108 @@ - static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp; - - DEFINE_PER_CPU(struct task_struct *, ksoftirqd); -+#ifdef CONFIG_PREEMPT_RT_FULL -+#define TIMER_SOFTIRQS ((1 << TIMER_SOFTIRQ) | (1 << HRTIMER_SOFTIRQ)) -+DEFINE_PER_CPU(struct task_struct *, ktimer_softirqd); -+#endif - - const char * const softirq_to_name[NR_SOFTIRQS] = { - "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", - "TASKLET", "SCHED", "HRTIMER", "RCU" - }; - -+#ifdef CONFIG_NO_HZ_COMMON -+# ifdef CONFIG_PREEMPT_RT_FULL -+ -+struct softirq_runner { -+ struct task_struct *runner[NR_SOFTIRQS]; -+}; -+ -+static DEFINE_PER_CPU(struct softirq_runner, softirq_runners); -+ -+static inline void softirq_set_runner(unsigned int sirq) -+{ -+ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); -+ -+ sr->runner[sirq] = current; -+} -+ -+static inline void softirq_clr_runner(unsigned int sirq) -+{ -+ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); -+ -+ sr->runner[sirq] = NULL; -+} -+ -+/* -+ * On preempt-rt a softirq running context might be blocked on a -+ * lock. There might be no other runnable task on this CPU because the -+ * lock owner runs on some other CPU. So we have to go into idle with -+ * the pending bit set. Therefor we need to check this otherwise we -+ * warn about false positives which confuses users and defeats the -+ * whole purpose of this test. -+ * -+ * This code is called with interrupts disabled. -+ */ -+void softirq_check_pending_idle(void) -+{ -+ static int rate_limit; -+ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); -+ u32 warnpending; -+ int i; -+ -+ if (rate_limit >= 10) -+ return; -+ -+ warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK; -+ for (i = 0; i < NR_SOFTIRQS; i++) { -+ struct task_struct *tsk = sr->runner[i]; -+ -+ /* -+ * The wakeup code in rtmutex.c wakes up the task -+ * _before_ it sets pi_blocked_on to NULL under -+ * tsk->pi_lock. So we need to check for both: state -+ * and pi_blocked_on. 
-+ */ -+ if (tsk) { -+ raw_spin_lock(&tsk->pi_lock); -+ if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) { -+ /* Clear all bits pending in that task */ -+ warnpending &= ~(tsk->softirqs_raised); -+ warnpending &= ~(1 << i); -+ } -+ raw_spin_unlock(&tsk->pi_lock); -+ } -+ } -+ -+ if (warnpending) { -+ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", -+ warnpending); -+ rate_limit++; -+ } -+} -+# else -+/* -+ * On !PREEMPT_RT we just printk rate limited: -+ */ -+void softirq_check_pending_idle(void) -+{ -+ static int rate_limit; -+ -+ if (rate_limit < 10 && -+ (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { -+ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", -+ local_softirq_pending()); -+ rate_limit++; -+ } -+} -+# endif -+ -+#else /* !CONFIG_NO_HZ_COMMON */ -+static inline void softirq_set_runner(unsigned int sirq) { } -+static inline void softirq_clr_runner(unsigned int sirq) { } -+#endif -+ - /* - * we cannot loop indefinitely here to avoid userspace starvation, - * but we also don't want to introduce a worst case 1/HZ latency -@@ -77,6 +175,79 @@ - wake_up_process(tsk); - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+static void wakeup_timer_softirqd(void) -+{ -+ /* Interrupts are disabled: no need to stop preemption */ -+ struct task_struct *tsk = __this_cpu_read(ktimer_softirqd); -+ -+ if (tsk && tsk->state != TASK_RUNNING) -+ wake_up_process(tsk); -+} -+#endif -+ -+static void handle_softirq(unsigned int vec_nr) -+{ -+ struct softirq_action *h = softirq_vec + vec_nr; -+ int prev_count; -+ -+ prev_count = preempt_count(); -+ -+ kstat_incr_softirqs_this_cpu(vec_nr); -+ -+ trace_softirq_entry(vec_nr); -+ h->action(h); -+ trace_softirq_exit(vec_nr); -+ if (unlikely(prev_count != preempt_count())) { -+ pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", -+ vec_nr, softirq_to_name[vec_nr], h->action, -+ prev_count, preempt_count()); -+ preempt_count_set(prev_count); -+ } -+} -+ -+#ifndef CONFIG_PREEMPT_RT_FULL -+static inline int ksoftirqd_softirq_pending(void) -+{ -+ return local_softirq_pending(); -+} -+ -+static void handle_pending_softirqs(u32 pending) -+{ -+ struct softirq_action *h = softirq_vec; -+ int softirq_bit; -+ -+ local_irq_enable(); -+ -+ h = softirq_vec; -+ -+ while ((softirq_bit = ffs(pending))) { -+ unsigned int vec_nr; -+ -+ h += softirq_bit - 1; -+ vec_nr = h - softirq_vec; -+ handle_softirq(vec_nr); -+ -+ h++; -+ pending >>= softirq_bit; -+ } -+ -+ rcu_bh_qs(); -+ local_irq_disable(); -+} -+ -+static void run_ksoftirqd(unsigned int cpu) -+{ -+ local_irq_disable(); -+ if (ksoftirqd_softirq_pending()) { -+ __do_softirq(); -+ local_irq_enable(); -+ cond_resched_rcu_qs(); -+ return; -+ } -+ local_irq_enable(); -+} -+ - /* - * preempt_count and SOFTIRQ_OFFSET usage: - * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving -@@ -116,9 +287,9 @@ - - if (preempt_count() == cnt) { - #ifdef CONFIG_DEBUG_PREEMPT -- current->preempt_disable_ip = get_parent_ip(CALLER_ADDR1); -+ current->preempt_disable_ip = get_lock_parent_ip(); - #endif -- trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); -+ trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip()); - } - } - EXPORT_SYMBOL(__local_bh_disable_ip); -@@ -232,10 +403,8 @@ - unsigned long end = jiffies + MAX_SOFTIRQ_TIME; - unsigned long old_flags = current->flags; - int max_restart = MAX_SOFTIRQ_RESTART; -- struct softirq_action *h; - bool in_hardirq; - __u32 pending; -- int softirq_bit; - - /* - * Mask out PF_MEMALLOC s current task context is borrowed for the -@@ 
-254,36 +423,7 @@ - /* Reset the pending bitmask before enabling irqs */ - set_softirq_pending(0); - -- local_irq_enable(); -- -- h = softirq_vec; -- -- while ((softirq_bit = ffs(pending))) { -- unsigned int vec_nr; -- int prev_count; -- -- h += softirq_bit - 1; -- -- vec_nr = h - softirq_vec; -- prev_count = preempt_count(); -- -- kstat_incr_softirqs_this_cpu(vec_nr); -- -- trace_softirq_entry(vec_nr); -- h->action(h); -- trace_softirq_exit(vec_nr); -- if (unlikely(prev_count != preempt_count())) { -- pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", -- vec_nr, softirq_to_name[vec_nr], h->action, -- prev_count, preempt_count()); -- preempt_count_set(prev_count); -- } -- h++; -- pending >>= softirq_bit; -- } -- -- rcu_bh_qs(); -- local_irq_disable(); -+ handle_pending_softirqs(pending); - - pending = local_softirq_pending(); - if (pending) { -@@ -320,6 +460,310 @@ - } - - /* -+ * This function must run with irqs disabled! -+ */ -+void raise_softirq_irqoff(unsigned int nr) -+{ -+ __raise_softirq_irqoff(nr); -+ -+ /* -+ * If we're in an interrupt or softirq, we're done -+ * (this also catches softirq-disabled code). We will -+ * actually run the softirq once we return from -+ * the irq or softirq. -+ * -+ * Otherwise we wake up ksoftirqd to make sure we -+ * schedule the softirq soon. -+ */ -+ if (!in_interrupt()) -+ wakeup_softirqd(); -+} -+ -+void __raise_softirq_irqoff(unsigned int nr) -+{ -+ trace_softirq_raise(nr); -+ or_softirq_pending(1UL << nr); -+} -+ -+static inline void local_bh_disable_nort(void) { local_bh_disable(); } -+static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } -+static void ksoftirqd_set_sched_params(unsigned int cpu) { } -+ -+#else /* !PREEMPT_RT_FULL */ -+ -+/* -+ * On RT we serialize softirq execution with a cpu local lock per softirq -+ */ -+static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks); -+ -+void __init softirq_early_init(void) -+{ -+ int i; -+ -+ for (i = 0; i < NR_SOFTIRQS; i++) -+ local_irq_lock_init(local_softirq_locks[i]); -+} -+ -+static void lock_softirq(int which) -+{ -+ local_lock(local_softirq_locks[which]); -+} -+ -+static void unlock_softirq(int which) -+{ -+ local_unlock(local_softirq_locks[which]); -+} -+ -+static void do_single_softirq(int which) -+{ -+ unsigned long old_flags = current->flags; -+ -+ current->flags &= ~PF_MEMALLOC; -+ vtime_account_irq_enter(current); -+ current->flags |= PF_IN_SOFTIRQ; -+ lockdep_softirq_enter(); -+ local_irq_enable(); -+ handle_softirq(which); -+ local_irq_disable(); -+ lockdep_softirq_exit(); -+ current->flags &= ~PF_IN_SOFTIRQ; -+ vtime_account_irq_enter(current); -+ tsk_restore_flags(current, old_flags, PF_MEMALLOC); -+} -+ -+/* -+ * Called with interrupts disabled. Process softirqs which were raised -+ * in current context (or on behalf of ksoftirqd). -+ */ -+static void do_current_softirqs(void) -+{ -+ while (current->softirqs_raised) { -+ int i = __ffs(current->softirqs_raised); -+ unsigned int pending, mask = (1U << i); -+ -+ current->softirqs_raised &= ~mask; -+ local_irq_enable(); -+ -+ /* -+ * If the lock is contended, we boost the owner to -+ * process the softirq or leave the critical section -+ * now. -+ */ -+ lock_softirq(i); -+ local_irq_disable(); -+ softirq_set_runner(i); -+ /* -+ * Check with the local_softirq_pending() bits, -+ * whether we need to process this still or if someone -+ * else took care of it. 
-+ */ -+ pending = local_softirq_pending(); -+ if (pending & mask) { -+ set_softirq_pending(pending & ~mask); -+ do_single_softirq(i); -+ } -+ softirq_clr_runner(i); -+ WARN_ON(current->softirq_nestcnt != 1); -+ local_irq_enable(); -+ unlock_softirq(i); -+ local_irq_disable(); -+ } -+} -+ -+void __local_bh_disable(void) -+{ -+ if (++current->softirq_nestcnt == 1) -+ migrate_disable(); -+} -+EXPORT_SYMBOL(__local_bh_disable); -+ -+void __local_bh_enable(void) -+{ -+ if (WARN_ON(current->softirq_nestcnt == 0)) -+ return; -+ -+ local_irq_disable(); -+ if (current->softirq_nestcnt == 1 && current->softirqs_raised) -+ do_current_softirqs(); -+ local_irq_enable(); -+ -+ if (--current->softirq_nestcnt == 0) -+ migrate_enable(); -+} -+EXPORT_SYMBOL(__local_bh_enable); -+ -+void _local_bh_enable(void) -+{ -+ if (WARN_ON(current->softirq_nestcnt == 0)) -+ return; -+ if (--current->softirq_nestcnt == 0) -+ migrate_enable(); -+} -+EXPORT_SYMBOL(_local_bh_enable); -+ -+int in_serving_softirq(void) -+{ -+ return current->flags & PF_IN_SOFTIRQ; -+} -+EXPORT_SYMBOL(in_serving_softirq); -+ -+/* Called with preemption disabled */ -+static void run_ksoftirqd(unsigned int cpu) -+{ -+ local_irq_disable(); -+ current->softirq_nestcnt++; -+ -+ do_current_softirqs(); -+ current->softirq_nestcnt--; -+ local_irq_enable(); -+ cond_resched_rcu_qs(); -+} -+ -+/* -+ * Called from netif_rx_ni(). Preemption enabled, but migration -+ * disabled. So the cpu can't go away under us. -+ */ -+void thread_do_softirq(void) -+{ -+ if (!in_serving_softirq() && current->softirqs_raised) { -+ current->softirq_nestcnt++; -+ do_current_softirqs(); -+ current->softirq_nestcnt--; -+ } -+} -+ -+static void do_raise_softirq_irqoff(unsigned int nr) -+{ -+ unsigned int mask; -+ -+ mask = 1UL << nr; -+ -+ trace_softirq_raise(nr); -+ or_softirq_pending(mask); -+ -+ /* -+ * If we are not in a hard interrupt and inside a bh disabled -+ * region, we simply raise the flag on current. local_bh_enable() -+ * will make sure that the softirq is executed. Otherwise we -+ * delegate it to ksoftirqd. -+ */ -+ if (!in_irq() && current->softirq_nestcnt) -+ current->softirqs_raised |= mask; -+ else if (!__this_cpu_read(ksoftirqd) || !__this_cpu_read(ktimer_softirqd)) -+ return; -+ -+ if (mask & TIMER_SOFTIRQS) -+ __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask; -+ else -+ __this_cpu_read(ksoftirqd)->softirqs_raised |= mask; -+} -+ -+static void wakeup_proper_softirq(unsigned int nr) -+{ -+ if ((1UL << nr) & TIMER_SOFTIRQS) -+ wakeup_timer_softirqd(); -+ else -+ wakeup_softirqd(); -+} -+ -+ -+void __raise_softirq_irqoff(unsigned int nr) -+{ -+ do_raise_softirq_irqoff(nr); -+ if (!in_irq() && !current->softirq_nestcnt) -+ wakeup_proper_softirq(nr); -+} -+ -+/* -+ * Same as __raise_softirq_irqoff() but will process them in ksoftirqd -+ */ -+void __raise_softirq_irqoff_ksoft(unsigned int nr) -+{ -+ unsigned int mask; -+ -+ if (WARN_ON_ONCE(!__this_cpu_read(ksoftirqd) || -+ !__this_cpu_read(ktimer_softirqd))) -+ return; -+ mask = 1UL << nr; -+ -+ trace_softirq_raise(nr); -+ or_softirq_pending(mask); -+ if (mask & TIMER_SOFTIRQS) -+ __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask; -+ else -+ __this_cpu_read(ksoftirqd)->softirqs_raised |= mask; -+ wakeup_proper_softirq(nr); -+} -+ -+/* -+ * This function must run with irqs disabled! -+ */ -+void raise_softirq_irqoff(unsigned int nr) -+{ -+ do_raise_softirq_irqoff(nr); -+ -+ /* -+ * If we're in an hard interrupt we let irq return code deal -+ * with the wakeup of ksoftirqd. 
-+ */ -+ if (in_irq()) -+ return; -+ /* -+ * If we are in thread context but outside of a bh disabled -+ * region, we need to wake ksoftirqd as well. -+ * -+ * CHECKME: Some of the places which do that could be wrapped -+ * into local_bh_disable/enable pairs. Though it's unclear -+ * whether this is worth the effort. To find those places just -+ * raise a WARN() if the condition is met. -+ */ -+ if (!current->softirq_nestcnt) -+ wakeup_proper_softirq(nr); -+} -+ -+static inline int ksoftirqd_softirq_pending(void) -+{ -+ return current->softirqs_raised; -+} -+ -+static inline void local_bh_disable_nort(void) { } -+static inline void _local_bh_enable_nort(void) { } -+ -+static inline void ksoftirqd_set_sched_params(unsigned int cpu) -+{ -+ /* Take over all but timer pending softirqs when starting */ -+ local_irq_disable(); -+ current->softirqs_raised = local_softirq_pending() & ~TIMER_SOFTIRQS; -+ local_irq_enable(); -+} -+ -+static inline void ktimer_softirqd_set_sched_params(unsigned int cpu) -+{ -+ struct sched_param param = { .sched_priority = 1 }; -+ -+ sched_setscheduler(current, SCHED_FIFO, ¶m); -+ -+ /* Take over timer pending softirqs when starting */ -+ local_irq_disable(); -+ current->softirqs_raised = local_softirq_pending() & TIMER_SOFTIRQS; -+ local_irq_enable(); -+} -+ -+static inline void ktimer_softirqd_clr_sched_params(unsigned int cpu, -+ bool online) -+{ -+ struct sched_param param = { .sched_priority = 0 }; -+ -+ sched_setscheduler(current, SCHED_NORMAL, ¶m); -+} -+ -+static int ktimer_softirqd_should_run(unsigned int cpu) -+{ -+ return current->softirqs_raised; -+} -+ -+#endif /* PREEMPT_RT_FULL */ -+/* - * Enter an interrupt context. - */ - void irq_enter(void) -@@ -330,9 +774,9 @@ - * Prevent raise_softirq from needlessly waking up ksoftirqd - * here, as softirq will be serviced on return from interrupt. - */ -- local_bh_disable(); -+ local_bh_disable_nort(); - tick_irq_enter(); -- _local_bh_enable(); -+ _local_bh_enable_nort(); - } - - __irq_enter(); -@@ -340,6 +784,7 @@ - - static inline void invoke_softirq(void) - { -+#ifndef CONFIG_PREEMPT_RT_FULL - if (!force_irqthreads) { - #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK - /* -@@ -359,6 +804,18 @@ - } else { - wakeup_softirqd(); - } -+#else /* PREEMPT_RT_FULL */ -+ unsigned long flags; -+ -+ local_irq_save(flags); -+ if (__this_cpu_read(ksoftirqd) && -+ __this_cpu_read(ksoftirqd)->softirqs_raised) -+ wakeup_softirqd(); -+ if (__this_cpu_read(ktimer_softirqd) && -+ __this_cpu_read(ktimer_softirqd)->softirqs_raised) -+ wakeup_timer_softirqd(); -+ local_irq_restore(flags); -+#endif - } - - static inline void tick_irq_exit(void) -@@ -395,26 +852,6 @@ - trace_hardirq_exit(); /* must be last! */ - } - --/* -- * This function must run with irqs disabled! -- */ --inline void raise_softirq_irqoff(unsigned int nr) --{ -- __raise_softirq_irqoff(nr); -- -- /* -- * If we're in an interrupt or softirq, we're done -- * (this also catches softirq-disabled code). We will -- * actually run the softirq once we return from -- * the irq or softirq. -- * -- * Otherwise we wake up ksoftirqd to make sure we -- * schedule the softirq soon. 
-- */
-- if (!in_interrupt())
-- wakeup_softirqd();
--}
--
- void raise_softirq(unsigned int nr)
- {
- unsigned long flags;
-@@ -424,12 +861,6 @@
- local_irq_restore(flags);
- }
- 
--void __raise_softirq_irqoff(unsigned int nr)
--{
-- trace_softirq_raise(nr);
-- or_softirq_pending(1UL << nr);
--}
--
- void open_softirq(int nr, void (*action)(struct softirq_action *))
- {
- softirq_vec[nr].action = action;
-@@ -446,15 +877,45 @@
- static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec);
- static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec);
- 
-+static void inline
-+__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr)
-+{
-+ if (tasklet_trylock(t)) {
-+again:
-+ /* We may have been preempted before tasklet_trylock
-+ * and __tasklet_action may have already run.
-+ * So double check the sched bit while the tasklet
-+ * is locked before adding it to the list.
-+ */
-+ if (test_bit(TASKLET_STATE_SCHED, &t->state)) {
-+ t->next = NULL;
-+ *head->tail = t;
-+ head->tail = &(t->next);
-+ raise_softirq_irqoff(nr);
-+ tasklet_unlock(t);
-+ } else {
-+ /* This is subtle. If we hit the corner case above,
-+ * it is possible that we get preempted right here,
-+ * and another task has successfully called
-+ * tasklet_schedule(), then this function, and
-+ * failed on the trylock. Thus we must be sure
-+ * before releasing the tasklet lock, that the
-+ * SCHED_BIT is clear. Otherwise the tasklet
-+ * may get its SCHED_BIT set, but not added to the
-+ * list.
-+ */
-+ if (!tasklet_tryunlock(t))
-+ goto again;
-+ }
-+ }
-+}
-+
- void __tasklet_schedule(struct tasklet_struct *t)
- {
- unsigned long flags;
- 
- local_irq_save(flags);
-- t->next = NULL;
-- *__this_cpu_read(tasklet_vec.tail) = t;
-- __this_cpu_write(tasklet_vec.tail, &(t->next));
-- raise_softirq_irqoff(TASKLET_SOFTIRQ);
-+ __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ);
- local_irq_restore(flags);
- }
- EXPORT_SYMBOL(__tasklet_schedule);
-@@ -464,10 +925,7 @@
- unsigned long flags;
- 
- local_irq_save(flags);
-- t->next = NULL;
-- *__this_cpu_read(tasklet_hi_vec.tail) = t;
-- __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
-- raise_softirq_irqoff(HI_SOFTIRQ);
-+ __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ);
- local_irq_restore(flags);
- }
- EXPORT_SYMBOL(__tasklet_hi_schedule);
-@@ -476,82 +934,122 @@
- {
- BUG_ON(!irqs_disabled());
- 
-- t->next = __this_cpu_read(tasklet_hi_vec.head);
-- __this_cpu_write(tasklet_hi_vec.head, t);
-- __raise_softirq_irqoff(HI_SOFTIRQ);
-+ __tasklet_hi_schedule(t);
- }
- EXPORT_SYMBOL(__tasklet_hi_schedule_first);
- 
--static void tasklet_action(struct softirq_action *a)
-+void tasklet_enable(struct tasklet_struct *t)
- {
-- struct tasklet_struct *list;
-+ if (!atomic_dec_and_test(&t->count))
-+ return;
-+ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state))
-+ tasklet_schedule(t);
-+}
-+EXPORT_SYMBOL(tasklet_enable);
- 
-- local_irq_disable();
-- list = __this_cpu_read(tasklet_vec.head);
-- __this_cpu_write(tasklet_vec.head, NULL);
-- __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
-- local_irq_enable();
-+static void __tasklet_action(struct softirq_action *a,
-+ struct tasklet_struct *list)
-+{
-+ int loops = 1000000;
- 
- while (list) {
- struct tasklet_struct *t = list;
- 
- list = list->next;
- 
-- if (tasklet_trylock(t)) {
-- if (!atomic_read(&t->count)) {
-- if (!test_and_clear_bit(TASKLET_STATE_SCHED,
-- &t->state))
-- BUG();
-- t->func(t->data);
-- tasklet_unlock(t);
-- continue;
-- }
-- tasklet_unlock(t);
-+ /*
-+ * Should always succeed - after a tasklet got on the
-+ * list (after getting the SCHED bit set from 0 to 1),
-+ * nothing but the tasklet softirq it got queued to can
-+ * lock it:
-+ */
-+ if (!tasklet_trylock(t)) {
-+ WARN_ON(1);
-+ continue;
- }
- 
-- local_irq_disable();
- t->next = NULL;
-- *__this_cpu_read(tasklet_vec.tail) = t;
-- __this_cpu_write(tasklet_vec.tail, &(t->next));
-- __raise_softirq_irqoff(TASKLET_SOFTIRQ);
-- local_irq_enable();
-+
-+ /*
-+ * If we cannot handle the tasklet because it's disabled,
-+ * mark it as pending. tasklet_enable() will later
-+ * re-schedule the tasklet.
-+ */
-+ if (unlikely(atomic_read(&t->count))) {
-+out_disabled:
-+ /* implicit unlock: */
-+ wmb();
-+ t->state = TASKLET_STATEF_PENDING;
-+ continue;
-+ }
-+
-+ /*
-+ * After this point on the tasklet might be rescheduled
-+ * on another CPU, but it can only be added to another
-+ * CPU's tasklet list if we unlock the tasklet (which we
-+ * don't do yet).
-+ */
-+ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
-+ WARN_ON(1);
-+
-+again:
-+ t->func(t->data);
-+
-+ /*
-+ * Try to unlock the tasklet. We must use cmpxchg, because
-+ * another CPU might have scheduled or disabled the tasklet.
-+ * We only allow the STATE_RUN -> 0 transition here.
-+ */
-+ while (!tasklet_tryunlock(t)) {
-+ /*
-+ * If it got disabled meanwhile, bail out:
-+ */
-+ if (atomic_read(&t->count))
-+ goto out_disabled;
-+ /*
-+ * If it got scheduled meanwhile, re-execute
-+ * the tasklet function:
-+ */
-+ if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state))
-+ goto again;
-+ if (!--loops) {
-+ printk("hm, tasklet state: %08lx\n", t->state);
-+ WARN_ON(1);
-+ tasklet_unlock(t);
-+ break;
-+ }
-+ }
- }
- }
- 
-+static void tasklet_action(struct softirq_action *a)
-+{
-+ struct tasklet_struct *list;
-+
-+ local_irq_disable();
-+
-+ list = __this_cpu_read(tasklet_vec.head);
-+ __this_cpu_write(tasklet_vec.head, NULL);
-+ __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head));
-+
-+ local_irq_enable();
-+
-+ __tasklet_action(a, list);
-+}
-+
- static void tasklet_hi_action(struct softirq_action *a)
- {
- struct tasklet_struct *list;
- 
- local_irq_disable();
-+
- list = __this_cpu_read(tasklet_hi_vec.head);
- __this_cpu_write(tasklet_hi_vec.head, NULL);
- __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head));
-- local_irq_enable();
- 
-- while (list) {
-- struct tasklet_struct *t = list;
--
-- list = list->next;
--
-- if (tasklet_trylock(t)) {
-- if (!atomic_read(&t->count)) {
-- if (!test_and_clear_bit(TASKLET_STATE_SCHED,
-- &t->state))
-- BUG();
-- t->func(t->data);
-- tasklet_unlock(t);
-- continue;
-- }
-- tasklet_unlock(t);
-- }
-+ local_irq_enable();
- 
-- local_irq_disable();
-- t->next = NULL;
-- *__this_cpu_read(tasklet_hi_vec.tail) = t;
-- __this_cpu_write(tasklet_hi_vec.tail, &(t->next));
-- __raise_softirq_irqoff(HI_SOFTIRQ);
-- local_irq_enable();
-- }
-+ __tasklet_action(a, list);
- }
- 
- void tasklet_init(struct tasklet_struct *t,
-@@ -572,7 +1070,7 @@
- 
- while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) {
- do {
-- yield();
-+ msleep(1);
- } while (test_bit(TASKLET_STATE_SCHED, &t->state));
- }
- tasklet_unlock_wait(t);
-@@ -646,25 +1144,26 @@
- open_softirq(HI_SOFTIRQ, tasklet_hi_action);
- }
- 
--static int ksoftirqd_should_run(unsigned int cpu)
--{
-- return local_softirq_pending();
--}
--
--static void run_ksoftirqd(unsigned int cpu)
-+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
-+void 
tasklet_unlock_wait(struct tasklet_struct *t) - { -- local_irq_disable(); -- if (local_softirq_pending()) { -+ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { - /* -- * We can safely run softirq on inline stack, as we are not deep -- * in the task stack here. -+ * Hack for now to avoid this busy-loop: - */ -- __do_softirq(); -- local_irq_enable(); -- cond_resched_rcu_qs(); -- return; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ msleep(1); -+#else -+ barrier(); -+#endif - } -- local_irq_enable(); -+} -+EXPORT_SYMBOL(tasklet_unlock_wait); -+#endif -+ -+static int ksoftirqd_should_run(unsigned int cpu) -+{ -+ return ksoftirqd_softirq_pending(); - } - - #ifdef CONFIG_HOTPLUG_CPU -@@ -746,16 +1245,31 @@ - - static struct smp_hotplug_thread softirq_threads = { - .store = &ksoftirqd, -+ .setup = ksoftirqd_set_sched_params, - .thread_should_run = ksoftirqd_should_run, - .thread_fn = run_ksoftirqd, - .thread_comm = "ksoftirqd/%u", - }; - -+#ifdef CONFIG_PREEMPT_RT_FULL -+static struct smp_hotplug_thread softirq_timer_threads = { -+ .store = &ktimer_softirqd, -+ .setup = ktimer_softirqd_set_sched_params, -+ .cleanup = ktimer_softirqd_clr_sched_params, -+ .thread_should_run = ktimer_softirqd_should_run, -+ .thread_fn = run_ksoftirqd, -+ .thread_comm = "ktimersoftd/%u", -+}; -+#endif -+ - static __init int spawn_ksoftirqd(void) - { - register_cpu_notifier(&cpu_nfb); - - BUG_ON(smpboot_register_percpu_thread(&softirq_threads)); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ BUG_ON(smpboot_register_percpu_thread(&softirq_timer_threads)); -+#endif - - return 0; - } -diff -Nur linux-4.4.13.orig/kernel/stop_machine.c linux-4.4.13/kernel/stop_machine.c ---- linux-4.4.13.orig/kernel/stop_machine.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/stop_machine.c 2016-06-22 21:55:08.062523867 +0200 -@@ -37,7 +37,7 @@ - struct cpu_stopper { - struct task_struct *thread; - -- spinlock_t lock; -+ raw_spinlock_t lock; - bool enabled; /* is this stopper enabled? 
*/ - struct list_head works; /* list of pending works */ - -@@ -86,12 +86,12 @@ - struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); - unsigned long flags; - -- spin_lock_irqsave(&stopper->lock, flags); -+ raw_spin_lock_irqsave(&stopper->lock, flags); - if (stopper->enabled) - __cpu_stop_queue_work(stopper, work); - else - cpu_stop_signal_done(work->done, false); -- spin_unlock_irqrestore(&stopper->lock, flags); -+ raw_spin_unlock_irqrestore(&stopper->lock, flags); - } - - /** -@@ -224,8 +224,8 @@ - int err; - - lg_double_lock(&stop_cpus_lock, cpu1, cpu2); -- spin_lock_irq(&stopper1->lock); -- spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING); -+ raw_spin_lock_irq(&stopper1->lock); -+ raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING); - - err = -ENOENT; - if (!stopper1->enabled || !stopper2->enabled) -@@ -235,8 +235,8 @@ - __cpu_stop_queue_work(stopper1, work1); - __cpu_stop_queue_work(stopper2, work2); - unlock: -- spin_unlock(&stopper2->lock); -- spin_unlock_irq(&stopper1->lock); -+ raw_spin_unlock(&stopper2->lock); -+ raw_spin_unlock_irq(&stopper1->lock); - lg_double_unlock(&stop_cpus_lock, cpu1, cpu2); - - return err; -@@ -258,7 +258,7 @@ - struct cpu_stop_work work1, work2; - struct multi_stop_data msdata; - -- preempt_disable(); -+ preempt_disable_nort(); - msdata = (struct multi_stop_data){ - .fn = fn, - .data = arg, -@@ -278,11 +278,11 @@ - if (cpu1 > cpu2) - swap(cpu1, cpu2); - if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2)) { -- preempt_enable(); -+ preempt_enable_nort(); - return -ENOENT; - } - -- preempt_enable(); -+ preempt_enable_nort(); - - wait_for_completion(&done.completion); - -@@ -315,17 +315,20 @@ - - static void queue_stop_cpus_work(const struct cpumask *cpumask, - cpu_stop_fn_t fn, void *arg, -- struct cpu_stop_done *done) -+ struct cpu_stop_done *done, bool inactive) - { - struct cpu_stop_work *work; - unsigned int cpu; - - /* -- * Disable preemption while queueing to avoid getting -- * preempted by a stopper which might wait for other stoppers -- * to enter @fn which can lead to deadlock. -+ * Make sure that all work is queued on all cpus before -+ * any of the cpus can execute it. - */ -- lg_global_lock(&stop_cpus_lock); -+ if (!inactive) -+ lg_global_lock(&stop_cpus_lock); -+ else -+ lg_global_trylock_relax(&stop_cpus_lock); -+ - for_each_cpu(cpu, cpumask) { - work = &per_cpu(cpu_stopper.stop_work, cpu); - work->fn = fn; -@@ -342,7 +345,7 @@ - struct cpu_stop_done done; - - cpu_stop_init_done(&done, cpumask_weight(cpumask)); -- queue_stop_cpus_work(cpumask, fn, arg, &done); -+ queue_stop_cpus_work(cpumask, fn, arg, &done, false); - wait_for_completion(&done.completion); - return done.executed ? 
done.ret : -ENOENT;
- }
-@@ -422,9 +425,9 @@
- unsigned long flags;
- int run;
- 
-- spin_lock_irqsave(&stopper->lock, flags);
-+ raw_spin_lock_irqsave(&stopper->lock, flags);
- run = !list_empty(&stopper->works);
-- spin_unlock_irqrestore(&stopper->lock, flags);
-+ raw_spin_unlock_irqrestore(&stopper->lock, flags);
- return run;
- }
- 
-@@ -436,13 +439,13 @@
- 
- repeat:
- work = NULL;
-- spin_lock_irq(&stopper->lock);
-+ raw_spin_lock_irq(&stopper->lock);
- if (!list_empty(&stopper->works)) {
- work = list_first_entry(&stopper->works,
- struct cpu_stop_work, list);
- list_del_init(&work->list);
- }
-- spin_unlock_irq(&stopper->lock);
-+ raw_spin_unlock_irq(&stopper->lock);
- 
- if (work) {
- cpu_stop_fn_t fn = work->fn;
-@@ -450,6 +453,16 @@
- struct cpu_stop_done *done = work->done;
- char ksym_buf[KSYM_NAME_LEN] __maybe_unused;
- 
-+ /*
-+ * Wait until the stopper finished scheduling on all
-+ * cpus
-+ */
-+ lg_global_lock(&stop_cpus_lock);
-+ /*
-+ * Let other cpu threads continue as well
-+ */
-+ lg_global_unlock(&stop_cpus_lock);
-+
- /* cpu stop callbacks are not allowed to sleep */
- preempt_disable();
- 
-@@ -520,10 +533,12 @@
- for_each_possible_cpu(cpu) {
- struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu);
- 
-- spin_lock_init(&stopper->lock);
-+ raw_spin_lock_init(&stopper->lock);
- INIT_LIST_HEAD(&stopper->works);
- }
- 
-+ lg_lock_init(&stop_cpus_lock, "stop_cpus_lock");
-+
- BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads));
- stop_machine_unpark(raw_smp_processor_id());
- stop_machine_initialized = true;
-@@ -620,7 +635,7 @@
- set_state(&msdata, MULTI_STOP_PREPARE);
- cpu_stop_init_done(&done, num_active_cpus());
- queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata,
-- &done);
-+ &done, true);
- ret = multi_cpu_stop(&msdata);
- 
- /* Busy wait for completion. */
-diff -Nur linux-4.4.13.orig/kernel/time/hrtimer.c linux-4.4.13/kernel/time/hrtimer.c
---- linux-4.4.13.orig/kernel/time/hrtimer.c 2016-06-08 03:14:51.000000000 +0200
-+++ linux-4.4.13/kernel/time/hrtimer.c 2016-06-22 21:55:08.062523867 +0200
-@@ -48,11 +48,13 @@
- #include 
- #include 
- #include 
-+#include 
- #include 
- 
- #include 
- 
- #include 
-+#include 
- 
- #include "tick-internal.h"
- 
-@@ -712,6 +714,44 @@
- 
- static DECLARE_WORK(hrtimer_work, clock_was_set_work);
- 
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+/*
-+ * RT can not call schedule_work from real interrupt context.
-+ * Need to make a thread to do the real work.
-+ */
-+static struct task_struct *clock_set_delay_thread;
-+static bool do_clock_set_delay;
-+
-+static int run_clock_set_delay(void *ignore)
-+{
-+ while (!kthread_should_stop()) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ if (do_clock_set_delay) {
-+ do_clock_set_delay = false;
-+ schedule_work(&hrtimer_work);
-+ }
-+ schedule();
-+ }
-+ __set_current_state(TASK_RUNNING);
-+ return 0;
-+}
-+
-+void clock_was_set_delayed(void)
-+{
-+ do_clock_set_delay = true;
-+ /* Make visible before waking up process */
-+ smp_wmb();
-+ wake_up_process(clock_set_delay_thread);
-+}
-+
-+static __init int create_clock_set_delay_thread(void)
-+{
-+ clock_set_delay_thread = kthread_run(run_clock_set_delay, NULL, "kclksetdelayd");
-+ BUG_ON(!clock_set_delay_thread);
-+ return 0;
-+}
-+early_initcall(create_clock_set_delay_thread);
-+#else /* PREEMPT_RT_FULL */
- /*
- * Called from timekeeping and resume code to reprogram the hrtimer
- * interrupt device on all cpus. 
-@@ -720,6 +760,7 @@ - { - schedule_work(&hrtimer_work); - } -+#endif - - #else - -@@ -729,11 +770,8 @@ - static inline void hrtimer_switch_to_hres(void) { } - static inline void - hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { } --static inline int hrtimer_reprogram(struct hrtimer *timer, -- struct hrtimer_clock_base *base) --{ -- return 0; --} -+static inline void hrtimer_reprogram(struct hrtimer *timer, -+ struct hrtimer_clock_base *base) { } - static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { } - static inline void retrigger_next_event(void *arg) { } - -@@ -865,6 +903,32 @@ - } - EXPORT_SYMBOL_GPL(hrtimer_forward); - -+#ifdef CONFIG_PREEMPT_RT_BASE -+# define wake_up_timer_waiters(b) wake_up(&(b)->wait) -+ -+/** -+ * hrtimer_wait_for_timer - Wait for a running timer -+ * -+ * @timer: timer to wait for -+ * -+ * The function waits in case the timers callback function is -+ * currently executed on the waitqueue of the timer base. The -+ * waitqueue is woken up after the timer callback function has -+ * finished execution. -+ */ -+void hrtimer_wait_for_timer(const struct hrtimer *timer) -+{ -+ struct hrtimer_clock_base *base = timer->base; -+ -+ if (base && base->cpu_base && !timer->irqsafe) -+ wait_event(base->cpu_base->wait, -+ !(hrtimer_callback_running(timer))); -+} -+ -+#else -+# define wake_up_timer_waiters(b) do { } while (0) -+#endif -+ - /* - * enqueue_hrtimer - internal function to (re)start a timer - * -@@ -906,6 +970,11 @@ - if (!(state & HRTIMER_STATE_ENQUEUED)) - return; - -+ if (unlikely(!list_empty(&timer->cb_entry))) { -+ list_del_init(&timer->cb_entry); -+ return; -+ } -+ - if (!timerqueue_del(&base->active, &timer->node)) - cpu_base->active_bases &= ~(1 << base->index); - -@@ -1001,7 +1070,16 @@ - new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED); - - timer_stats_hrtimer_set_start_info(timer); -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ { -+ ktime_t now = new_base->get_time(); - -+ if (ktime_to_ns(tim) < ktime_to_ns(now)) -+ timer->praecox = now; -+ else -+ timer->praecox = ktime_set(0, 0); -+ } -+#endif - leftmost = enqueue_hrtimer(timer, new_base); - if (!leftmost) - goto unlock; -@@ -1073,7 +1151,7 @@ - - if (ret >= 0) - return ret; -- cpu_relax(); -+ hrtimer_wait_for_timer(timer); - } - } - EXPORT_SYMBOL_GPL(hrtimer_cancel); -@@ -1137,6 +1215,7 @@ - - base = hrtimer_clockid_to_base(clock_id); - timer->base = &cpu_base->clock_base[base]; -+ INIT_LIST_HEAD(&timer->cb_entry); - timerqueue_init(&timer->node); - - #ifdef CONFIG_TIMER_STATS -@@ -1177,6 +1256,7 @@ - seq = raw_read_seqcount_begin(&cpu_base->seq); - - if (timer->state != HRTIMER_STATE_INACTIVE || -+ cpu_base->running_soft == timer || - cpu_base->running == timer) - return true; - -@@ -1275,10 +1355,112 @@ - cpu_base->running = NULL; - } - -+#ifdef CONFIG_PREEMPT_RT_BASE -+static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer, -+ struct hrtimer_clock_base *base) -+{ -+ int leftmost; -+ -+ if (restart != HRTIMER_NORESTART && -+ !(timer->state & HRTIMER_STATE_ENQUEUED)) { -+ -+ leftmost = enqueue_hrtimer(timer, base); -+ if (!leftmost) -+ return; -+#ifdef CONFIG_HIGH_RES_TIMERS -+ if (!hrtimer_is_hres_active(timer)) { -+ /* -+ * Kick to reschedule the next tick to handle the new timer -+ * on dynticks target. 
-+ */
-+ if (base->cpu_base->nohz_active)
-+ wake_up_nohz_cpu(base->cpu_base->cpu);
-+ } else {
-+
-+ hrtimer_reprogram(timer, base);
-+ }
-+#endif
-+ }
-+}
-+
-+/*
-+ * The changes in mainline which removed the callback modes from
-+ * hrtimer are not yet working with -rt. The non wakeup_process()
-+ * based callbacks which involve sleeping locks need to be treated
-+ * separately.
-+ */
-+static void hrtimer_rt_run_pending(void)
-+{
-+ enum hrtimer_restart (*fn)(struct hrtimer *);
-+ struct hrtimer_cpu_base *cpu_base;
-+ struct hrtimer_clock_base *base;
-+ struct hrtimer *timer;
-+ int index, restart;
-+
-+ local_irq_disable();
-+ cpu_base = &per_cpu(hrtimer_bases, smp_processor_id());
-+
-+ raw_spin_lock(&cpu_base->lock);
-+
-+ for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
-+ base = &cpu_base->clock_base[index];
-+
-+ while (!list_empty(&base->expired)) {
-+ timer = list_first_entry(&base->expired,
-+ struct hrtimer, cb_entry);
-+
-+ /*
-+ * Same as the above __run_hrtimer function,
-+ * except that we run with interrupts enabled.
-+ */
-+ debug_deactivate(timer);
-+ cpu_base->running_soft = timer;
-+ raw_write_seqcount_barrier(&cpu_base->seq);
-+
-+ __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
-+ timer_stats_account_hrtimer(timer);
-+ fn = timer->function;
-+
-+ raw_spin_unlock_irq(&cpu_base->lock);
-+ restart = fn(timer);
-+ raw_spin_lock_irq(&cpu_base->lock);
-+
-+ hrtimer_rt_reprogram(restart, timer, base);
-+ raw_write_seqcount_barrier(&cpu_base->seq);
-+
-+ WARN_ON_ONCE(cpu_base->running_soft != timer);
-+ cpu_base->running_soft = NULL;
-+ }
-+ }
-+
-+ raw_spin_unlock_irq(&cpu_base->lock);
-+
-+ wake_up_timer_waiters(cpu_base);
-+}
-+
-+static int hrtimer_rt_defer(struct hrtimer *timer)
-+{
-+ if (timer->irqsafe)
-+ return 0;
-+
-+ __remove_hrtimer(timer, timer->base, timer->state, 0);
-+ list_add_tail(&timer->cb_entry, &timer->base->expired);
-+ return 1;
-+}
-+
-+#else
-+
-+static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; }
-+
-+#endif
-+
-+static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
-+
- static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
- {
- struct hrtimer_clock_base *base = cpu_base->clock_base;
- unsigned int active = cpu_base->active_bases;
-+ int raise = 0;
- 
- for (; active; base++, active >>= 1) {
- struct timerqueue_node *node;
-@@ -1294,6 +1476,15 @@
- 
- timer = container_of(node, struct hrtimer, node);
- 
-+ trace_hrtimer_interrupt(raw_smp_processor_id(),
-+ ktime_to_ns(ktime_sub(ktime_to_ns(timer->praecox) ?
-+ timer->praecox : hrtimer_get_expires(timer),
-+ basenow)),
-+ current,
-+ timer->function == hrtimer_wakeup ? 
-+ container_of(timer, struct hrtimer_sleeper, -+ timer)->task : NULL); -+ - /* - * The immediate goal for using the softexpires is - * minimizing wakeups, not running timers at the -@@ -1309,9 +1500,14 @@ - if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) - break; - -- __run_hrtimer(cpu_base, base, timer, &basenow); -+ if (!hrtimer_rt_defer(timer)) -+ __run_hrtimer(cpu_base, base, timer, &basenow); -+ else -+ raise = 1; - } - } -+ if (raise) -+ raise_softirq_irqoff(HRTIMER_SOFTIRQ); - } - - #ifdef CONFIG_HIGH_RES_TIMERS -@@ -1474,16 +1670,18 @@ - void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) - { - sl->timer.function = hrtimer_wakeup; -+ sl->timer.irqsafe = 1; - sl->task = task; - } - EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); - --static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode) -+static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode, -+ unsigned long state) - { - hrtimer_init_sleeper(t, current); - - do { -- set_current_state(TASK_INTERRUPTIBLE); -+ set_current_state(state); - hrtimer_start_expires(&t->timer, mode); - - if (likely(t->task)) -@@ -1525,7 +1723,8 @@ - HRTIMER_MODE_ABS); - hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); - -- if (do_nanosleep(&t, HRTIMER_MODE_ABS)) -+ /* cpu_chill() does not care about restart state. */ -+ if (do_nanosleep(&t, HRTIMER_MODE_ABS, TASK_INTERRUPTIBLE)) - goto out; - - rmtp = restart->nanosleep.rmtp; -@@ -1542,8 +1741,10 @@ - return ret; - } - --long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, -- const enum hrtimer_mode mode, const clockid_t clockid) -+static long -+__hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, -+ const enum hrtimer_mode mode, const clockid_t clockid, -+ unsigned long state) - { - struct restart_block *restart; - struct hrtimer_sleeper t; -@@ -1556,7 +1757,7 @@ - - hrtimer_init_on_stack(&t.timer, clockid, mode); - hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack); -- if (do_nanosleep(&t, mode)) -+ if (do_nanosleep(&t, mode, state)) - goto out; - - /* Absolute timers do not update the rmtp value and restart: */ -@@ -1583,6 +1784,12 @@ - return ret; - } - -+long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, -+ const enum hrtimer_mode mode, const clockid_t clockid) -+{ -+ return __hrtimer_nanosleep(rqtp, rmtp, mode, clockid, TASK_INTERRUPTIBLE); -+} -+ - SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp, - struct timespec __user *, rmtp) - { -@@ -1597,6 +1804,26 @@ - return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC); - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+/* -+ * Sleep for 1 ms in hope whoever holds what we want will let it go. 
-+ */ -+void cpu_chill(void) -+{ -+ struct timespec tu = { -+ .tv_nsec = NSEC_PER_MSEC, -+ }; -+ unsigned int freeze_flag = current->flags & PF_NOFREEZE; -+ -+ current->flags |= PF_NOFREEZE; -+ __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC, -+ TASK_UNINTERRUPTIBLE); -+ if (!freeze_flag) -+ current->flags &= ~PF_NOFREEZE; -+} -+EXPORT_SYMBOL(cpu_chill); -+#endif -+ - /* - * Functions related to boot-time initialization: - */ -@@ -1608,10 +1835,14 @@ - for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { - cpu_base->clock_base[i].cpu_base = cpu_base; - timerqueue_init_head(&cpu_base->clock_base[i].active); -+ INIT_LIST_HEAD(&cpu_base->clock_base[i].expired); - } - - cpu_base->cpu = cpu; - hrtimer_init_hres(cpu_base); -+#ifdef CONFIG_PREEMPT_RT_BASE -+ init_waitqueue_head(&cpu_base->wait); -+#endif - } - - #ifdef CONFIG_HOTPLUG_CPU -@@ -1709,11 +1940,21 @@ - .notifier_call = hrtimer_cpu_notify, - }; - -+#ifdef CONFIG_PREEMPT_RT_BASE -+static void run_hrtimer_softirq(struct softirq_action *h) -+{ -+ hrtimer_rt_run_pending(); -+} -+#endif -+ - void __init hrtimers_init(void) - { - hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, - (void *)(long)smp_processor_id()); - register_cpu_notifier(&hrtimers_nb); -+#ifdef CONFIG_PREEMPT_RT_BASE -+ open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq); -+#endif - } - - /** -diff -Nur linux-4.4.13.orig/kernel/time/itimer.c linux-4.4.13/kernel/time/itimer.c ---- linux-4.4.13.orig/kernel/time/itimer.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/time/itimer.c 2016-06-22 21:55:08.062523867 +0200 -@@ -213,6 +213,7 @@ - /* We are sharing ->siglock with it_real_fn() */ - if (hrtimer_try_to_cancel(timer) < 0) { - spin_unlock_irq(&tsk->sighand->siglock); -+ hrtimer_wait_for_timer(&tsk->signal->real_timer); - goto again; - } - expires = timeval_to_ktime(value->it_value); -diff -Nur linux-4.4.13.orig/kernel/time/jiffies.c linux-4.4.13/kernel/time/jiffies.c ---- linux-4.4.13.orig/kernel/time/jiffies.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/time/jiffies.c 2016-06-22 21:55:08.062523867 +0200 -@@ -74,7 +74,8 @@ - .max_cycles = 10, - }; - --__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock); -+__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock); -+__cacheline_aligned_in_smp seqcount_t jiffies_seq; - - #if (BITS_PER_LONG < 64) - u64 get_jiffies_64(void) -@@ -83,9 +84,9 @@ - u64 ret; - - do { -- seq = read_seqbegin(&jiffies_lock); -+ seq = read_seqcount_begin(&jiffies_seq); - ret = jiffies_64; -- } while (read_seqretry(&jiffies_lock, seq)); -+ } while (read_seqcount_retry(&jiffies_seq, seq)); - return ret; - } - EXPORT_SYMBOL(get_jiffies_64); -diff -Nur linux-4.4.13.orig/kernel/time/ntp.c linux-4.4.13/kernel/time/ntp.c ---- linux-4.4.13.orig/kernel/time/ntp.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/time/ntp.c 2016-06-22 21:55:08.062523867 +0200 -@@ -10,6 +10,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -562,10 +563,52 @@ - &sync_cmos_work, timespec64_to_jiffies(&next)); - } - -+#ifdef CONFIG_PREEMPT_RT_FULL -+/* -+ * RT can not call schedule_delayed_work from real interrupt context. -+ * Need to make a thread to do the real work. 
-+ */ -+static struct task_struct *cmos_delay_thread; -+static bool do_cmos_delay; -+ -+static int run_cmos_delay(void *ignore) -+{ -+ while (!kthread_should_stop()) { -+ set_current_state(TASK_INTERRUPTIBLE); -+ if (do_cmos_delay) { -+ do_cmos_delay = false; -+ queue_delayed_work(system_power_efficient_wq, -+ &sync_cmos_work, 0); -+ } -+ schedule(); -+ } -+ __set_current_state(TASK_RUNNING); -+ return 0; -+} -+ -+void ntp_notify_cmos_timer(void) -+{ -+ do_cmos_delay = true; -+ /* Make visible before waking up process */ -+ smp_wmb(); -+ wake_up_process(cmos_delay_thread); -+} -+ -+static __init int create_cmos_delay_thread(void) -+{ -+ cmos_delay_thread = kthread_run(run_cmos_delay, NULL, "kcmosdelayd"); -+ BUG_ON(!cmos_delay_thread); -+ return 0; -+} -+early_initcall(create_cmos_delay_thread); -+ -+#else -+ - void ntp_notify_cmos_timer(void) - { - queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0); - } -+#endif /* CONFIG_PREEMPT_RT_FULL */ - - #else - void ntp_notify_cmos_timer(void) { } -diff -Nur linux-4.4.13.orig/kernel/time/posix-cpu-timers.c linux-4.4.13/kernel/time/posix-cpu-timers.c ---- linux-4.4.13.orig/kernel/time/posix-cpu-timers.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/time/posix-cpu-timers.c 2016-06-22 21:55:08.066523867 +0200 -@@ -3,6 +3,7 @@ - */ - - #include -+#include - #include - #include - #include -@@ -650,7 +651,7 @@ - /* - * Disarm any old timer after extracting its expiry time. - */ -- WARN_ON_ONCE(!irqs_disabled()); -+ WARN_ON_ONCE_NONRT(!irqs_disabled()); - - ret = 0; - old_incr = timer->it.cpu.incr; -@@ -1091,7 +1092,7 @@ - /* - * Now re-arm for the new expiry time. - */ -- WARN_ON_ONCE(!irqs_disabled()); -+ WARN_ON_ONCE_NONRT(!irqs_disabled()); - arm_timer(timer); - unlock_task_sighand(p, &flags); - -@@ -1182,13 +1183,13 @@ - * already updated our counts. We need to check if any timers fire now. - * Interrupts are disabled. 
- */
--void run_posix_cpu_timers(struct task_struct *tsk)
-+static void __run_posix_cpu_timers(struct task_struct *tsk)
- {
- LIST_HEAD(firing);
- struct k_itimer *timer, *next;
- unsigned long flags;
- 
-- WARN_ON_ONCE(!irqs_disabled());
-+ WARN_ON_ONCE_NONRT(!irqs_disabled());
- 
- /*
- * The fast path checks that there are no expired thread or thread
-@@ -1242,6 +1243,190 @@
- }
- }
- 
-+#ifdef CONFIG_PREEMPT_RT_BASE
-+#include 
-+#include 
-+DEFINE_PER_CPU(struct task_struct *, posix_timer_task);
-+DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist);
-+
-+static int posix_cpu_timers_thread(void *data)
-+{
-+ int cpu = (long)data;
-+
-+ BUG_ON(per_cpu(posix_timer_task,cpu) != current);
-+
-+ while (!kthread_should_stop()) {
-+ struct task_struct *tsk = NULL;
-+ struct task_struct *next = NULL;
-+
-+ if (cpu_is_offline(cpu))
-+ goto wait_to_die;
-+
-+ /* grab task list */
-+ raw_local_irq_disable();
-+ tsk = per_cpu(posix_timer_tasklist, cpu);
-+ per_cpu(posix_timer_tasklist, cpu) = NULL;
-+ raw_local_irq_enable();
-+
-+ /* it's possible the list is empty, just return */
-+ if (!tsk) {
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ schedule();
-+ __set_current_state(TASK_RUNNING);
-+ continue;
-+ }
-+
-+ /* Process task list */
-+ while (1) {
-+ /* save next */
-+ next = tsk->posix_timer_list;
-+
-+ /* run the task timers, clear its ptr and
-+ * unreference it
-+ */
-+ __run_posix_cpu_timers(tsk);
-+ tsk->posix_timer_list = NULL;
-+ put_task_struct(tsk);
-+
-+ /* check if this is the last on the list */
-+ if (next == tsk)
-+ break;
-+ tsk = next;
-+ }
-+ }
-+ return 0;
-+
-+wait_to_die:
-+ /* Wait for kthread_stop */
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ while (!kthread_should_stop()) {
-+ schedule();
-+ set_current_state(TASK_INTERRUPTIBLE);
-+ }
-+ __set_current_state(TASK_RUNNING);
-+ return 0;
-+}
-+
-+static inline int __fastpath_timer_check(struct task_struct *tsk)
-+{
-+ /* tsk == current, ensure it is safe to use ->signal/sighand */
-+ if (unlikely(tsk->exit_state))
-+ return 0;
-+
-+ if (!task_cputime_zero(&tsk->cputime_expires))
-+ return 1;
-+
-+ if (!task_cputime_zero(&tsk->signal->cputime_expires))
-+ return 1;
-+
-+ return 0;
-+}
-+
-+void run_posix_cpu_timers(struct task_struct *tsk)
-+{
-+ unsigned long cpu = smp_processor_id();
-+ struct task_struct *tasklist;
-+
-+ BUG_ON(!irqs_disabled());
-+ if(!per_cpu(posix_timer_task, cpu))
-+ return;
-+ /* get per-cpu references */
-+ tasklist = per_cpu(posix_timer_tasklist, cpu);
-+
-+ /* check to see if we're already queued */
-+ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) {
-+ get_task_struct(tsk);
-+ if (tasklist) {
-+ tsk->posix_timer_list = tasklist;
-+ } else {
-+ /*
-+ * The list is terminated by a self-pointing
-+ * task_struct
-+ */
-+ tsk->posix_timer_list = tsk;
-+ }
-+ per_cpu(posix_timer_tasklist, cpu) = tsk;
-+
-+ wake_up_process(per_cpu(posix_timer_task, cpu));
-+ }
-+}
-+
-+/*
-+ * posix_cpu_thread_call - callback that gets triggered when a CPU is added.
-+ * Here we can start up the necessary posix timer thread for the new CPU. 
-+ */
-+static int posix_cpu_thread_call(struct notifier_block *nfb,
-+ unsigned long action, void *hcpu)
-+{
-+ int cpu = (long)hcpu;
-+ struct task_struct *p;
-+ struct sched_param param;
-+
-+ switch (action) {
-+ case CPU_UP_PREPARE:
-+ p = kthread_create(posix_cpu_timers_thread, hcpu,
-+ "posixcputmr/%d",cpu);
-+ if (IS_ERR(p))
-+ return NOTIFY_BAD;
-+ p->flags |= PF_NOFREEZE;
-+ kthread_bind(p, cpu);
-+ /* Must be high prio to avoid getting starved */
-+ param.sched_priority = MAX_RT_PRIO-1;
-+ sched_setscheduler(p, SCHED_FIFO, &param);
-+ per_cpu(posix_timer_task,cpu) = p;
-+ break;
-+ case CPU_ONLINE:
-+ /* Strictly unnecessary, as first user will wake it. */
-+ wake_up_process(per_cpu(posix_timer_task,cpu));
-+ break;
-+#ifdef CONFIG_HOTPLUG_CPU
-+ case CPU_UP_CANCELED:
-+ /* Unbind it from offline cpu so it can run. Fall thru. */
-+ kthread_bind(per_cpu(posix_timer_task, cpu),
-+ cpumask_any(cpu_online_mask));
-+ kthread_stop(per_cpu(posix_timer_task,cpu));
-+ per_cpu(posix_timer_task,cpu) = NULL;
-+ break;
-+ case CPU_DEAD:
-+ kthread_stop(per_cpu(posix_timer_task,cpu));
-+ per_cpu(posix_timer_task,cpu) = NULL;
-+ break;
-+#endif
-+ }
-+ return NOTIFY_OK;
-+}
-+
-+/* Register at highest priority so that task migration (migrate_all_tasks)
-+ * happens before everything else.
-+ */
-+static struct notifier_block posix_cpu_thread_notifier = {
-+ .notifier_call = posix_cpu_thread_call,
-+ .priority = 10
-+};
-+
-+static int __init posix_cpu_thread_init(void)
-+{
-+ void *hcpu = (void *)(long)smp_processor_id();
-+ /* Start one for boot CPU. */
-+ unsigned long cpu;
-+
-+ /* init the per-cpu posix_timer_tasklets */
-+ for_each_possible_cpu(cpu)
-+ per_cpu(posix_timer_tasklist, cpu) = NULL;
-+
-+ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu);
-+ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu);
-+ register_cpu_notifier(&posix_cpu_thread_notifier);
-+ return 0;
-+}
-+early_initcall(posix_cpu_thread_init);
-+#else /* CONFIG_PREEMPT_RT_BASE */
-+void run_posix_cpu_timers(struct task_struct *tsk)
-+{
-+ __run_posix_cpu_timers(tsk);
-+}
-+#endif /* CONFIG_PREEMPT_RT_BASE */
-+
- /*
- * Set one of the process-wide special case CPU timers or RLIMIT_CPU.
- * The tsk->sighand->siglock must be held by the caller.
-diff -Nur linux-4.4.13.orig/kernel/time/posix-timers.c linux-4.4.13/kernel/time/posix-timers.c
---- linux-4.4.13.orig/kernel/time/posix-timers.c 2016-06-08 03:14:51.000000000 +0200
-+++ linux-4.4.13/kernel/time/posix-timers.c 2016-06-22 21:55:08.066523867 +0200
-@@ -506,6 +506,7 @@
- static struct pid *good_sigevent(sigevent_t * event)
- {
- struct task_struct *rtn = current->group_leader;
-+ int sig = event->sigev_signo;
- 
- if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
- (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
-@@ -514,7 +515,8 @@
- return NULL;
- 
- if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) &&
-- ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX)))
-+ (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) ||
-+ sig_kernel_coredump(sig)))
- return NULL;
- 
- return task_pid(rtn);
-@@ -826,6 +828,20 @@
- return overrun;
- }
- 
-+/*
-+ * Protected by RCU!
-+ */
-+static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr)
-+{
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ if (kc->timer_set == common_timer_set)
-+ hrtimer_wait_for_timer(&timr->it.real.timer);
-+ else
-+ /* FIXME: Whacky hack for posix-cpu-timers */
-+ schedule_timeout(1);
-+#endif
-+}
-+
- /* Set a POSIX.1b interval timer. 
*/
- /* timr->it_lock is taken. */
- static int
-@@ -903,6 +919,7 @@
- if (!timr)
- return -EINVAL;
- 
-+ rcu_read_lock();
- kc = clockid_to_kclock(timr->it_clock);
- if (WARN_ON_ONCE(!kc || !kc->timer_set))
- error = -EINVAL;
-@@ -911,9 +928,12 @@
- 
- unlock_timer(timr, flag);
- if (error == TIMER_RETRY) {
-+ timer_wait_for_callback(kc, timr);
- rtn = NULL; // We already got the old time...
-+ rcu_read_unlock();
- goto retry;
- }
-+ rcu_read_unlock();
- 
- if (old_setting && !error &&
- copy_to_user(old_setting, &old_spec, sizeof (old_spec)))
-@@ -951,10 +971,15 @@
- if (!timer)
- return -EINVAL;
- 
-+ rcu_read_lock();
- if (timer_delete_hook(timer) == TIMER_RETRY) {
- unlock_timer(timer, flags);
-+ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
-+ timer);
-+ rcu_read_unlock();
- goto retry_delete;
- }
-+ rcu_read_unlock();
- 
- spin_lock(&current->sighand->siglock);
- list_del(&timer->list);
-@@ -980,8 +1005,18 @@
- retry_delete:
- spin_lock_irqsave(&timer->it_lock, flags);
- 
-+ /* On RT we can race with a deletion */
-+ if (!timer->it_signal) {
-+ unlock_timer(timer, flags);
-+ return;
-+ }
-+
- if (timer_delete_hook(timer) == TIMER_RETRY) {
-+ rcu_read_lock();
- unlock_timer(timer, flags);
-+ timer_wait_for_callback(clockid_to_kclock(timer->it_clock),
-+ timer);
-+ rcu_read_unlock();
- goto retry_delete;
- }
- list_del(&timer->list);
-diff -Nur linux-4.4.13.orig/kernel/time/tick-broadcast-hrtimer.c linux-4.4.13/kernel/time/tick-broadcast-hrtimer.c
---- linux-4.4.13.orig/kernel/time/tick-broadcast-hrtimer.c 2016-06-08 03:14:51.000000000 +0200
-+++ linux-4.4.13/kernel/time/tick-broadcast-hrtimer.c 2016-06-22 21:55:08.066523867 +0200
-@@ -106,5 +106,6 @@
- {
- hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
- bctimer.function = bc_handler;
-+ bctimer.irqsafe = true;
- clockevents_register_device(&ce_broadcast_hrtimer);
- }
-diff -Nur linux-4.4.13.orig/kernel/time/tick-common.c linux-4.4.13/kernel/time/tick-common.c
---- linux-4.4.13.orig/kernel/time/tick-common.c 2016-06-08 03:14:51.000000000 +0200
-+++ linux-4.4.13/kernel/time/tick-common.c 2016-06-22 21:55:08.066523867 +0200
-@@ -79,13 +79,15 @@
- static void tick_periodic(int cpu)
- {
- if (tick_do_timer_cpu == cpu) {
-- write_seqlock(&jiffies_lock);
-+ raw_spin_lock(&jiffies_lock);
-+ write_seqcount_begin(&jiffies_seq);
- 
- /* Keep track of the next tick event */
- tick_next_period = ktime_add(tick_next_period, tick_period);
- 
- do_timer(1);
-- write_sequnlock(&jiffies_lock);
-+ write_seqcount_end(&jiffies_seq);
-+ raw_spin_unlock(&jiffies_lock);
- update_wall_time();
- }
- 
-@@ -157,9 +159,9 @@
- ktime_t next;
- 
- do {
-- seq = read_seqbegin(&jiffies_lock);
-+ seq = read_seqcount_begin(&jiffies_seq);
- next = tick_next_period;
-- } while (read_seqretry(&jiffies_lock, seq));
-+ } while (read_seqcount_retry(&jiffies_seq, seq));
- 
- clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
- 
-diff -Nur linux-4.4.13.orig/kernel/time/tick-sched.c linux-4.4.13/kernel/time/tick-sched.c
---- linux-4.4.13.orig/kernel/time/tick-sched.c 2016-06-08 03:14:51.000000000 +0200
-+++ linux-4.4.13/kernel/time/tick-sched.c 2016-06-22 21:55:08.066523867 +0200
-@@ -62,7 +62,8 @@
- return;
- 
- /* Reevaluate with jiffies_lock held */
-- write_seqlock(&jiffies_lock);
-+ raw_spin_lock(&jiffies_lock);
-+ write_seqcount_begin(&jiffies_seq);
- 
- delta = ktime_sub(now, last_jiffies_update);
- if (delta.tv64 >= tick_period.tv64) {
-@@ -85,10 +86,12 @@
- /* Keep the tick_next_period variable up to date */
- tick_next_period = 
ktime_add(last_jiffies_update, tick_period); - } else { -- write_sequnlock(&jiffies_lock); -+ write_seqcount_end(&jiffies_seq); -+ raw_spin_unlock(&jiffies_lock); - return; - } -- write_sequnlock(&jiffies_lock); -+ write_seqcount_end(&jiffies_seq); -+ raw_spin_unlock(&jiffies_lock); - update_wall_time(); - } - -@@ -99,12 +102,14 @@ - { - ktime_t period; - -- write_seqlock(&jiffies_lock); -+ raw_spin_lock(&jiffies_lock); -+ write_seqcount_begin(&jiffies_seq); - /* Did we start the jiffies update yet ? */ - if (last_jiffies_update.tv64 == 0) - last_jiffies_update = tick_next_period; - period = last_jiffies_update; -- write_sequnlock(&jiffies_lock); -+ write_seqcount_end(&jiffies_seq); -+ raw_spin_unlock(&jiffies_lock); - return period; - } - -@@ -176,6 +181,11 @@ - return false; - } - -+ if (!arch_irq_work_has_interrupt()) { -+ trace_tick_stop(0, "missing irq work interrupt\n"); -+ return false; -+ } -+ - /* sched_clock_tick() needs us? */ - #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK - /* -@@ -204,6 +214,7 @@ - - static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = { - .func = nohz_full_kick_work_func, -+ .flags = IRQ_WORK_HARD_IRQ, - }; - - /* -@@ -578,10 +589,10 @@ - - /* Read jiffies and the time when jiffies were updated last */ - do { -- seq = read_seqbegin(&jiffies_lock); -+ seq = read_seqcount_begin(&jiffies_seq); - basemono = last_jiffies_update.tv64; - basejiff = jiffies; -- } while (read_seqretry(&jiffies_lock, seq)); -+ } while (read_seqcount_retry(&jiffies_seq, seq)); - ts->last_jiffies = basejiff; - - if (rcu_needs_cpu(basemono, &next_rcu) || -@@ -753,14 +764,7 @@ - return false; - - if (unlikely(local_softirq_pending() && cpu_online(cpu))) { -- static int ratelimit; -- -- if (ratelimit < 10 && -- (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { -- pr_warn("NOHZ: local_softirq_pending %02x\n", -- (unsigned int) local_softirq_pending()); -- ratelimit++; -- } -+ softirq_check_pending_idle(); - return false; - } - -@@ -1100,6 +1104,7 @@ - * Emulate tick processing via per-CPU hrtimers: - */ - hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); -+ ts->sched_timer.irqsafe = 1; - ts->sched_timer.function = tick_sched_timer; - - /* Get the next period (per cpu) */ -diff -Nur linux-4.4.13.orig/kernel/time/timekeeping.c linux-4.4.13/kernel/time/timekeeping.c ---- linux-4.4.13.orig/kernel/time/timekeeping.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/time/timekeeping.c 2016-06-22 21:55:08.066523867 +0200 -@@ -2047,8 +2047,10 @@ - */ - void xtime_update(unsigned long ticks) - { -- write_seqlock(&jiffies_lock); -+ raw_spin_lock(&jiffies_lock); -+ write_seqcount_begin(&jiffies_seq); - do_timer(ticks); -- write_sequnlock(&jiffies_lock); -+ write_seqcount_end(&jiffies_seq); -+ raw_spin_unlock(&jiffies_lock); - update_wall_time(); - } -diff -Nur linux-4.4.13.orig/kernel/time/timekeeping.h linux-4.4.13/kernel/time/timekeeping.h ---- linux-4.4.13.orig/kernel/time/timekeeping.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/time/timekeeping.h 2016-06-22 21:55:08.066523867 +0200 -@@ -19,7 +19,8 @@ - extern void do_timer(unsigned long ticks); - extern void update_wall_time(void); - --extern seqlock_t jiffies_lock; -+extern raw_spinlock_t jiffies_lock; -+extern seqcount_t jiffies_seq; - - #define CS_NAME_LEN 32 - -diff -Nur linux-4.4.13.orig/kernel/time/timer.c linux-4.4.13/kernel/time/timer.c ---- linux-4.4.13.orig/kernel/time/timer.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/time/timer.c 2016-06-22 21:55:08.066523867 +0200 
-@@ -80,6 +80,9 @@
- struct tvec_base {
- spinlock_t lock;
- struct timer_list *running_timer;
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+ wait_queue_head_t wait_for_running_timer;
-+#endif
- unsigned long timer_jiffies;
- unsigned long next_timer;
- unsigned long active_timers;
-@@ -777,6 +780,39 @@
- cpu_relax();
- }
- }
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
-+ struct tvec_base *old,
-+ struct tvec_base *new)
-+{
-+ /*
-+ * We cannot do the below because we might be preempted and
-+ * then the preempter would see NULL and loop forever.
-+ */
-+ if (spin_trylock(&new->lock)) {
-+ WRITE_ONCE(timer->flags,
-+ (timer->flags & ~TIMER_BASEMASK) | new->cpu);
-+ spin_unlock(&old->lock);
-+ return new;
-+ }
-+ return old;
-+}
-+
-+#else
-+static inline struct tvec_base *switch_timer_base(struct timer_list *timer,
-+ struct tvec_base *old,
-+ struct tvec_base *new)
-+{
-+ /* See the comment in lock_timer_base() */
-+ timer->flags |= TIMER_MIGRATING;
-+
-+ spin_unlock(&old->lock);
-+ spin_lock(&new->lock);
-+ WRITE_ONCE(timer->flags,
-+ (timer->flags & ~TIMER_BASEMASK) | new->cpu);
-+ return new;
-+}
-+#endif
- 
- static inline int
- __mod_timer(struct timer_list *timer, unsigned long expires,
-@@ -807,16 +843,8 @@
- * handler yet has not finished. This also guarantees that
- * the timer is serialized wrt itself.
- */
-- if (likely(base->running_timer != timer)) {
-- /* See the comment in lock_timer_base() */
-- timer->flags |= TIMER_MIGRATING;
--
-- spin_unlock(&base->lock);
-- base = new_base;
-- spin_lock(&base->lock);
-- WRITE_ONCE(timer->flags,
-- (timer->flags & ~TIMER_BASEMASK) | base->cpu);
-- }
-+ if (likely(base->running_timer != timer))
-+ base = switch_timer_base(timer, base, new_base);
- }
- 
- timer->expires = expires;
-@@ -1006,6 +1034,33 @@
- }
- EXPORT_SYMBOL_GPL(add_timer_on);
- 
-+#ifdef CONFIG_PREEMPT_RT_FULL
-+/*
-+ * Wait for a running timer
-+ */
-+static void wait_for_running_timer(struct timer_list *timer)
-+{
-+ struct tvec_base *base;
-+ u32 tf = timer->flags;
-+
-+ if (tf & TIMER_MIGRATING)
-+ return;
-+
-+ base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
-+ wait_event(base->wait_for_running_timer,
-+ base->running_timer != timer);
-+}
-+
-+# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_running_timer)
-+#else
-+static inline void wait_for_running_timer(struct timer_list *timer)
-+{
-+ cpu_relax();
-+}
-+
-+# define wakeup_timer_waiters(b) do { } while (0)
-+#endif
-+
- /**
- * del_timer - deactivate a timer.
- * @timer: the timer to be deactivated
-@@ -1063,7 +1118,7 @@
- }
- EXPORT_SYMBOL(try_to_del_timer_sync);
- 
--#ifdef CONFIG_SMP
-+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
- /**
- * del_timer_sync - deactivate a timer and wait for the handler to finish. 
- * @timer: the timer to be deactivated -@@ -1123,7 +1178,7 @@ - int ret = try_to_del_timer_sync(timer); - if (ret >= 0) - return ret; -- cpu_relax(); -+ wait_for_running_timer(timer); - } - } - EXPORT_SYMBOL(del_timer_sync); -@@ -1248,15 +1303,17 @@ - if (irqsafe) { - spin_unlock(&base->lock); - call_timer_fn(timer, fn, data); -+ base->running_timer = NULL; - spin_lock(&base->lock); - } else { - spin_unlock_irq(&base->lock); - call_timer_fn(timer, fn, data); -+ base->running_timer = NULL; - spin_lock_irq(&base->lock); - } - } - } -- base->running_timer = NULL; -+ wakeup_timer_waiters(base); - spin_unlock_irq(&base->lock); - } - -@@ -1390,6 +1447,14 @@ - if (cpu_is_offline(smp_processor_id())) - return expires; - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ /* -+ * On PREEMPT_RT we cannot sleep here. As a result we can't take -+ * the base lock to check when the next timer is pending and so -+ * we assume the next jiffy. -+ */ -+ return basem + TICK_NSEC; -+#endif - spin_lock(&base->lock); - if (base->active_timers) { - if (time_before_eq(base->next_timer, base->timer_jiffies)) -@@ -1416,13 +1481,13 @@ - - /* Note: this timer irq context must be accounted for as well. */ - account_process_tick(p, user_tick); -+ scheduler_tick(); - run_local_timers(); - rcu_check_callbacks(user_tick); --#ifdef CONFIG_IRQ_WORK -+#if defined(CONFIG_IRQ_WORK) - if (in_irq()) - irq_work_tick(); - #endif -- scheduler_tick(); - run_posix_cpu_timers(p); - } - -@@ -1433,6 +1498,8 @@ - { - struct tvec_base *base = this_cpu_ptr(&tvec_bases); - -+ irq_work_tick_soft(); -+ - if (time_after_eq(jiffies, base->timer_jiffies)) - __run_timers(base); - } -@@ -1589,7 +1656,7 @@ - - BUG_ON(cpu_online(cpu)); - old_base = per_cpu_ptr(&tvec_bases, cpu); -- new_base = get_cpu_ptr(&tvec_bases); -+ new_base = get_local_ptr(&tvec_bases); - /* - * The caller is globally serialized and nobody else - * takes two locks at once, deadlock is not possible. -@@ -1613,7 +1680,7 @@ - - spin_unlock(&old_base->lock); - spin_unlock_irq(&new_base->lock); -- put_cpu_ptr(&tvec_bases); -+ put_local_ptr(&tvec_bases); - } - - static int timer_cpu_notify(struct notifier_block *self, -@@ -1645,6 +1712,9 @@ - - base->cpu = cpu; - spin_lock_init(&base->lock); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ init_waitqueue_head(&base->wait_for_running_timer); -+#endif - - base->timer_jiffies = jiffies; - base->next_timer = base->timer_jiffies; -diff -Nur linux-4.4.13.orig/kernel/trace/Kconfig linux-4.4.13/kernel/trace/Kconfig ---- linux-4.4.13.orig/kernel/trace/Kconfig 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/trace/Kconfig 2016-06-22 21:55:08.066523867 +0200 -@@ -187,6 +187,24 @@ - enabled. This option and the preempt-off timing option can be - used together or separately.) - -+config INTERRUPT_OFF_HIST -+ bool "Interrupts-off Latency Histogram" -+ depends on IRQSOFF_TRACER -+ help -+ This option generates continuously updated histograms (one per cpu) -+ of the duration of time periods with interrupts disabled. The -+ histograms are disabled by default. To enable them, write a non-zero -+ number to -+ -+ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff -+ -+ If PREEMPT_OFF_HIST is also selected, additional histograms (one -+ per cpu) are generated that accumulate the duration of time periods -+ when both interrupts and preemption are disabled. 
The histogram data -+ will be located in the debug file system at -+ -+ /sys/kernel/debug/tracing/latency_hist/irqsoff -+ - config PREEMPT_TRACER - bool "Preemption-off Latency Tracer" - default n -@@ -211,6 +229,24 @@ - enabled. This option and the irqs-off timing option can be - used together or separately.) - -+config PREEMPT_OFF_HIST -+ bool "Preemption-off Latency Histogram" -+ depends on PREEMPT_TRACER -+ help -+ This option generates continuously updated histograms (one per cpu) -+ of the duration of time periods with preemption disabled. The -+ histograms are disabled by default. To enable them, write a non-zero -+ number to -+ -+ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff -+ -+ If INTERRUPT_OFF_HIST is also selected, additional histograms (one -+ per cpu) are generated that accumulate the duration of time periods -+ when both interrupts and preemption are disabled. The histogram data -+ will be located in the debug file system at -+ -+ /sys/kernel/debug/tracing/latency_hist/preemptoff -+ - config SCHED_TRACER - bool "Scheduling Latency Tracer" - select GENERIC_TRACER -@@ -221,6 +257,74 @@ - This tracer tracks the latency of the highest priority task - to be scheduled in, starting from the point it has woken up. - -+config WAKEUP_LATENCY_HIST -+ bool "Scheduling Latency Histogram" -+ depends on SCHED_TRACER -+ help -+ This option generates continuously updated histograms (one per cpu) -+ of the scheduling latency of the highest priority task. -+ The histograms are disabled by default. To enable them, write a -+ non-zero number to -+ -+ /sys/kernel/debug/tracing/latency_hist/enable/wakeup -+ -+ Two different algorithms are used, one to determine the latency of -+ processes that exclusively use the highest priority of the system and -+ another one to determine the latency of processes that share the -+ highest system priority with other processes. The former is used to -+ improve hardware and system software, the latter to optimize the -+ priority design of a given system. The histogram data will be -+ located in the debug file system at -+ -+ /sys/kernel/debug/tracing/latency_hist/wakeup -+ -+ and -+ -+ /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio -+ -+ If both Scheduling Latency Histogram and Missed Timer Offsets -+ Histogram are selected, additional histogram data will be collected -+ that contain, in addition to the wakeup latency, the timer latency, in -+ case the wakeup was triggered by an expired timer. These histograms -+ are available in the -+ -+ /sys/kernel/debug/tracing/latency_hist/timerandwakeup -+ -+ directory. They reflect the apparent interrupt and scheduling latency -+ and are best suitable to determine the worst-case latency of a given -+ system. To enable these histograms, write a non-zero number to -+ -+ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup -+ -+config MISSED_TIMER_OFFSETS_HIST -+ depends on HIGH_RES_TIMERS -+ select GENERIC_TRACER -+ bool "Missed Timer Offsets Histogram" -+ help -+ Generate a histogram of missed timer offsets in microseconds. The -+ histograms are disabled by default. 
To enable them, write a non-zero -+ number to -+ -+ /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets -+ -+ The histogram data will be located in the debug file system at -+ -+ /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets -+ -+ If both Scheduling Latency Histogram and Missed Timer Offsets -+ Histogram are selected, additional histogram data will be collected -+ that contain, in addition to the wakeup latency, the timer latency, in -+ case the wakeup was triggered by an expired timer. These histograms -+ are available in the -+ -+ /sys/kernel/debug/tracing/latency_hist/timerandwakeup -+ -+ directory. They reflect the apparent interrupt and scheduling latency -+ and are best suitable to determine the worst-case latency of a given -+ system. To enable these histograms, write a non-zero number to -+ -+ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup -+ - config ENABLE_DEFAULT_TRACERS - bool "Trace process context switches and events" - depends on !GENERIC_TRACER -diff -Nur linux-4.4.13.orig/kernel/trace/latency_hist.c linux-4.4.13/kernel/trace/latency_hist.c ---- linux-4.4.13.orig/kernel/trace/latency_hist.c 1970-01-01 01:00:00.000000000 +0100 -+++ linux-4.4.13/kernel/trace/latency_hist.c 2016-06-22 21:55:08.066523867 +0200 -@@ -0,0 +1,1178 @@ -+/* -+ * kernel/trace/latency_hist.c -+ * -+ * Add support for histograms of preemption-off latency and -+ * interrupt-off latency and wakeup latency, it depends on -+ * Real-Time Preemption Support. -+ * -+ * Copyright (C) 2005 MontaVista Software, Inc. -+ * Yi Yang -+ * -+ * Converted to work with the new latency tracer. -+ * Copyright (C) 2008 Red Hat, Inc. -+ * Steven Rostedt -+ * -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "trace.h" -+#include -+ -+#define NSECS_PER_USECS 1000L -+ -+#define CREATE_TRACE_POINTS -+#include -+ -+enum { -+ IRQSOFF_LATENCY = 0, -+ PREEMPTOFF_LATENCY, -+ PREEMPTIRQSOFF_LATENCY, -+ WAKEUP_LATENCY, -+ WAKEUP_LATENCY_SHAREDPRIO, -+ MISSED_TIMER_OFFSETS, -+ TIMERANDWAKEUP_LATENCY, -+ MAX_LATENCY_TYPE, -+}; -+ -+#define MAX_ENTRY_NUM 10240 -+ -+struct hist_data { -+ atomic_t hist_mode; /* 0 log, 1 don't log */ -+ long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */ -+ long min_lat; -+ long max_lat; -+ unsigned long long below_hist_bound_samples; -+ unsigned long long above_hist_bound_samples; -+ long long accumulate_lat; -+ unsigned long long total_samples; -+ unsigned long long hist_array[MAX_ENTRY_NUM]; -+}; -+ -+struct enable_data { -+ int latency_type; -+ int enabled; -+}; -+ -+static char *latency_hist_dir_root = "latency_hist"; -+ -+#ifdef CONFIG_INTERRUPT_OFF_HIST -+static DEFINE_PER_CPU(struct hist_data, irqsoff_hist); -+static char *irqsoff_hist_dir = "irqsoff"; -+static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start); -+static DEFINE_PER_CPU(int, hist_irqsoff_counting); -+#endif -+ -+#ifdef CONFIG_PREEMPT_OFF_HIST -+static DEFINE_PER_CPU(struct hist_data, preemptoff_hist); -+static char *preemptoff_hist_dir = "preemptoff"; -+static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start); -+static DEFINE_PER_CPU(int, hist_preemptoff_counting); -+#endif -+ -+#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST) -+static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist); -+static char *preemptirqsoff_hist_dir = "preemptirqsoff"; -+static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start); -+static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting); 
-+#endif -+ -+#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST) -+static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start); -+static struct enable_data preemptirqsoff_enabled_data = { -+ .latency_type = PREEMPTIRQSOFF_LATENCY, -+ .enabled = 0, -+}; -+#endif -+ -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+struct maxlatproc_data { -+ char comm[FIELD_SIZEOF(struct task_struct, comm)]; -+ char current_comm[FIELD_SIZEOF(struct task_struct, comm)]; -+ int pid; -+ int current_pid; -+ int prio; -+ int current_prio; -+ long latency; -+ long timeroffset; -+ cycle_t timestamp; -+}; -+#endif -+ -+#ifdef CONFIG_WAKEUP_LATENCY_HIST -+static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist); -+static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio); -+static char *wakeup_latency_hist_dir = "wakeup"; -+static char *wakeup_latency_hist_dir_sharedprio = "sharedprio"; -+static notrace void probe_wakeup_latency_hist_start(void *v, -+ struct task_struct *p); -+static notrace void probe_wakeup_latency_hist_stop(void *v, -+ bool preempt, struct task_struct *prev, struct task_struct *next); -+static notrace void probe_sched_migrate_task(void *, -+ struct task_struct *task, int cpu); -+static struct enable_data wakeup_latency_enabled_data = { -+ .latency_type = WAKEUP_LATENCY, -+ .enabled = 0, -+}; -+static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc); -+static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio); -+static DEFINE_PER_CPU(struct task_struct *, wakeup_task); -+static DEFINE_PER_CPU(int, wakeup_sharedprio); -+static unsigned long wakeup_pid; -+#endif -+ -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets); -+static char *missed_timer_offsets_dir = "missed_timer_offsets"; -+static notrace void probe_hrtimer_interrupt(void *v, int cpu, -+ long long offset, struct task_struct *curr, struct task_struct *task); -+static struct enable_data missed_timer_offsets_enabled_data = { -+ .latency_type = MISSED_TIMER_OFFSETS, -+ .enabled = 0, -+}; -+static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc); -+static unsigned long missed_timer_offsets_pid; -+#endif -+ -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist); -+static char *timerandwakeup_latency_hist_dir = "timerandwakeup"; -+static struct enable_data timerandwakeup_enabled_data = { -+ .latency_type = TIMERANDWAKEUP_LATENCY, -+ .enabled = 0, -+}; -+static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc); -+#endif -+ -+void notrace latency_hist(int latency_type, int cpu, long latency, -+ long timeroffset, cycle_t stop, -+ struct task_struct *p) -+{ -+ struct hist_data *my_hist; -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+ struct maxlatproc_data *mp = NULL; -+#endif -+ -+ if (!cpu_possible(cpu) || latency_type < 0 || -+ latency_type >= MAX_LATENCY_TYPE) -+ return; -+ -+ switch (latency_type) { -+#ifdef CONFIG_INTERRUPT_OFF_HIST -+ case IRQSOFF_LATENCY: -+ my_hist = &per_cpu(irqsoff_hist, cpu); -+ break; -+#endif -+#ifdef CONFIG_PREEMPT_OFF_HIST -+ case PREEMPTOFF_LATENCY: -+ my_hist = &per_cpu(preemptoff_hist, cpu); -+ break; -+#endif -+#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST) -+ case PREEMPTIRQSOFF_LATENCY: -+ my_hist = 
&per_cpu(preemptirqsoff_hist, cpu); -+ break; -+#endif -+#ifdef CONFIG_WAKEUP_LATENCY_HIST -+ case WAKEUP_LATENCY: -+ my_hist = &per_cpu(wakeup_latency_hist, cpu); -+ mp = &per_cpu(wakeup_maxlatproc, cpu); -+ break; -+ case WAKEUP_LATENCY_SHAREDPRIO: -+ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu); -+ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu); -+ break; -+#endif -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ case MISSED_TIMER_OFFSETS: -+ my_hist = &per_cpu(missed_timer_offsets, cpu); -+ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu); -+ break; -+#endif -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+ case TIMERANDWAKEUP_LATENCY: -+ my_hist = &per_cpu(timerandwakeup_latency_hist, cpu); -+ mp = &per_cpu(timerandwakeup_maxlatproc, cpu); -+ break; -+#endif -+ -+ default: -+ return; -+ } -+ -+ latency += my_hist->offset; -+ -+ if (atomic_read(&my_hist->hist_mode) == 0) -+ return; -+ -+ if (latency < 0 || latency >= MAX_ENTRY_NUM) { -+ if (latency < 0) -+ my_hist->below_hist_bound_samples++; -+ else -+ my_hist->above_hist_bound_samples++; -+ } else -+ my_hist->hist_array[latency]++; -+ -+ if (unlikely(latency > my_hist->max_lat || -+ my_hist->min_lat == LONG_MAX)) { -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+ if (latency_type == WAKEUP_LATENCY || -+ latency_type == WAKEUP_LATENCY_SHAREDPRIO || -+ latency_type == MISSED_TIMER_OFFSETS || -+ latency_type == TIMERANDWAKEUP_LATENCY) { -+ strncpy(mp->comm, p->comm, sizeof(mp->comm)); -+ strncpy(mp->current_comm, current->comm, -+ sizeof(mp->current_comm)); -+ mp->pid = task_pid_nr(p); -+ mp->current_pid = task_pid_nr(current); -+ mp->prio = p->prio; -+ mp->current_prio = current->prio; -+ mp->latency = latency; -+ mp->timeroffset = timeroffset; -+ mp->timestamp = stop; -+ } -+#endif -+ my_hist->max_lat = latency; -+ } -+ if (unlikely(latency < my_hist->min_lat)) -+ my_hist->min_lat = latency; -+ my_hist->total_samples++; -+ my_hist->accumulate_lat += latency; -+} -+ -+static void *l_start(struct seq_file *m, loff_t *pos) -+{ -+ loff_t *index_ptr = NULL; -+ loff_t index = *pos; -+ struct hist_data *my_hist = m->private; -+ -+ if (index == 0) { -+ char minstr[32], avgstr[32], maxstr[32]; -+ -+ atomic_dec(&my_hist->hist_mode); -+ -+ if (likely(my_hist->total_samples)) { -+ long avg = (long) div64_s64(my_hist->accumulate_lat, -+ my_hist->total_samples); -+ snprintf(minstr, sizeof(minstr), "%ld", -+ my_hist->min_lat - my_hist->offset); -+ snprintf(avgstr, sizeof(avgstr), "%ld", -+ avg - my_hist->offset); -+ snprintf(maxstr, sizeof(maxstr), "%ld", -+ my_hist->max_lat - my_hist->offset); -+ } else { -+ strcpy(minstr, ""); -+ strcpy(avgstr, minstr); -+ strcpy(maxstr, minstr); -+ } -+ -+ seq_printf(m, "#Minimum latency: %s microseconds\n" -+ "#Average latency: %s microseconds\n" -+ "#Maximum latency: %s microseconds\n" -+ "#Total samples: %llu\n" -+ "#There are %llu samples lower than %ld" -+ " microseconds.\n" -+ "#There are %llu samples greater or equal" -+ " than %ld microseconds.\n" -+ "#usecs\t%16s\n", -+ minstr, avgstr, maxstr, -+ my_hist->total_samples, -+ my_hist->below_hist_bound_samples, -+ -my_hist->offset, -+ my_hist->above_hist_bound_samples, -+ MAX_ENTRY_NUM - my_hist->offset, -+ "samples"); -+ } -+ if (index < MAX_ENTRY_NUM) { -+ index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL); -+ if (index_ptr) -+ *index_ptr = index; -+ } -+ -+ return index_ptr; -+} -+ -+static void *l_next(struct seq_file *m, void *p, loff_t *pos) -+{ -+ loff_t 
*index_ptr = p; -+ struct hist_data *my_hist = m->private; -+ -+ if (++*pos >= MAX_ENTRY_NUM) { -+ atomic_inc(&my_hist->hist_mode); -+ return NULL; -+ } -+ *index_ptr = *pos; -+ return index_ptr; -+} -+ -+static void l_stop(struct seq_file *m, void *p) -+{ -+ kfree(p); -+} -+ -+static int l_show(struct seq_file *m, void *p) -+{ -+ int index = *(loff_t *) p; -+ struct hist_data *my_hist = m->private; -+ -+ seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset, -+ my_hist->hist_array[index]); -+ return 0; -+} -+ -+static const struct seq_operations latency_hist_seq_op = { -+ .start = l_start, -+ .next = l_next, -+ .stop = l_stop, -+ .show = l_show -+}; -+ -+static int latency_hist_open(struct inode *inode, struct file *file) -+{ -+ int ret; -+ -+ ret = seq_open(file, &latency_hist_seq_op); -+ if (!ret) { -+ struct seq_file *seq = file->private_data; -+ seq->private = inode->i_private; -+ } -+ return ret; -+} -+ -+static const struct file_operations latency_hist_fops = { -+ .open = latency_hist_open, -+ .read = seq_read, -+ .llseek = seq_lseek, -+ .release = seq_release, -+}; -+ -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+static void clear_maxlatprocdata(struct maxlatproc_data *mp) -+{ -+ mp->comm[0] = mp->current_comm[0] = '\0'; -+ mp->prio = mp->current_prio = mp->pid = mp->current_pid = -+ mp->latency = mp->timeroffset = -1; -+ mp->timestamp = 0; -+} -+#endif -+ -+static void hist_reset(struct hist_data *hist) -+{ -+ atomic_dec(&hist->hist_mode); -+ -+ memset(hist->hist_array, 0, sizeof(hist->hist_array)); -+ hist->below_hist_bound_samples = 0ULL; -+ hist->above_hist_bound_samples = 0ULL; -+ hist->min_lat = LONG_MAX; -+ hist->max_lat = LONG_MIN; -+ hist->total_samples = 0ULL; -+ hist->accumulate_lat = 0LL; -+ -+ atomic_inc(&hist->hist_mode); -+} -+ -+static ssize_t -+latency_hist_reset(struct file *file, const char __user *a, -+ size_t size, loff_t *off) -+{ -+ int cpu; -+ struct hist_data *hist = NULL; -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+ struct maxlatproc_data *mp = NULL; -+#endif -+ off_t latency_type = (off_t) file->private_data; -+ -+ for_each_online_cpu(cpu) { -+ -+ switch (latency_type) { -+#ifdef CONFIG_PREEMPT_OFF_HIST -+ case PREEMPTOFF_LATENCY: -+ hist = &per_cpu(preemptoff_hist, cpu); -+ break; -+#endif -+#ifdef CONFIG_INTERRUPT_OFF_HIST -+ case IRQSOFF_LATENCY: -+ hist = &per_cpu(irqsoff_hist, cpu); -+ break; -+#endif -+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) -+ case PREEMPTIRQSOFF_LATENCY: -+ hist = &per_cpu(preemptirqsoff_hist, cpu); -+ break; -+#endif -+#ifdef CONFIG_WAKEUP_LATENCY_HIST -+ case WAKEUP_LATENCY: -+ hist = &per_cpu(wakeup_latency_hist, cpu); -+ mp = &per_cpu(wakeup_maxlatproc, cpu); -+ break; -+ case WAKEUP_LATENCY_SHAREDPRIO: -+ hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu); -+ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu); -+ break; -+#endif -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ case MISSED_TIMER_OFFSETS: -+ hist = &per_cpu(missed_timer_offsets, cpu); -+ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu); -+ break; -+#endif -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+ case TIMERANDWAKEUP_LATENCY: -+ hist = &per_cpu(timerandwakeup_latency_hist, cpu); -+ mp = &per_cpu(timerandwakeup_maxlatproc, cpu); -+ break; -+#endif -+ } -+ -+ hist_reset(hist); -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+ if 
(latency_type == WAKEUP_LATENCY || -+ latency_type == WAKEUP_LATENCY_SHAREDPRIO || -+ latency_type == MISSED_TIMER_OFFSETS || -+ latency_type == TIMERANDWAKEUP_LATENCY) -+ clear_maxlatprocdata(mp); -+#endif -+ } -+ -+ return size; -+} -+ -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+static ssize_t -+show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) -+{ -+ char buf[64]; -+ int r; -+ unsigned long *this_pid = file->private_data; -+ -+ r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid); -+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); -+} -+ -+static ssize_t do_pid(struct file *file, const char __user *ubuf, -+ size_t cnt, loff_t *ppos) -+{ -+ char buf[64]; -+ unsigned long pid; -+ unsigned long *this_pid = file->private_data; -+ -+ if (cnt >= sizeof(buf)) -+ return -EINVAL; -+ -+ if (copy_from_user(&buf, ubuf, cnt)) -+ return -EFAULT; -+ -+ buf[cnt] = '\0'; -+ -+ if (kstrtoul(buf, 10, &pid)) -+ return -EINVAL; -+ -+ *this_pid = pid; -+ -+ return cnt; -+} -+#endif -+ -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+static ssize_t -+show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) -+{ -+ int r; -+ struct maxlatproc_data *mp = file->private_data; -+ int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8); -+ unsigned long long t; -+ unsigned long usecs, secs; -+ char *buf; -+ -+ if (mp->pid == -1 || mp->current_pid == -1) { -+ buf = "(none)\n"; -+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, -+ strlen(buf)); -+ } -+ -+ buf = kmalloc(strmaxlen, GFP_KERNEL); -+ if (buf == NULL) -+ return -ENOMEM; -+ -+ t = ns2usecs(mp->timestamp); -+ usecs = do_div(t, USEC_PER_SEC); -+ secs = (unsigned long) t; -+ r = snprintf(buf, strmaxlen, -+ "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid, -+ MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm, -+ mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm, -+ secs, usecs); -+ r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); -+ kfree(buf); -+ return r; -+} -+#endif -+ -+static ssize_t -+show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) -+{ -+ char buf[64]; -+ struct enable_data *ed = file->private_data; -+ int r; -+ -+ r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled); -+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); -+} -+ -+static ssize_t -+do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) -+{ -+ char buf[64]; -+ long enable; -+ struct enable_data *ed = file->private_data; -+ -+ if (cnt >= sizeof(buf)) -+ return -EINVAL; -+ -+ if (copy_from_user(&buf, ubuf, cnt)) -+ return -EFAULT; -+ -+ buf[cnt] = 0; -+ -+ if (kstrtoul(buf, 10, &enable)) -+ return -EINVAL; -+ -+ if ((enable && ed->enabled) || (!enable && !ed->enabled)) -+ return cnt; -+ -+ if (enable) { -+ int ret; -+ -+ switch (ed->latency_type) { -+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) -+ case PREEMPTIRQSOFF_LATENCY: -+ ret = register_trace_preemptirqsoff_hist( -+ probe_preemptirqsoff_hist, NULL); -+ if (ret) { -+ pr_info("wakeup trace: Couldn't assign " -+ "probe_preemptirqsoff_hist " -+ "to trace_preemptirqsoff_hist\n"); -+ return ret; -+ } -+ break; -+#endif -+#ifdef CONFIG_WAKEUP_LATENCY_HIST -+ case WAKEUP_LATENCY: -+ ret = register_trace_sched_wakeup( -+ probe_wakeup_latency_hist_start, NULL); -+ if (ret) { -+ pr_info("wakeup trace: Couldn't assign " -+ "probe_wakeup_latency_hist_start " -+ "to 
trace_sched_wakeup\n"); -+ return ret; -+ } -+ ret = register_trace_sched_wakeup_new( -+ probe_wakeup_latency_hist_start, NULL); -+ if (ret) { -+ pr_info("wakeup trace: Couldn't assign " -+ "probe_wakeup_latency_hist_start " -+ "to trace_sched_wakeup_new\n"); -+ unregister_trace_sched_wakeup( -+ probe_wakeup_latency_hist_start, NULL); -+ return ret; -+ } -+ ret = register_trace_sched_switch( -+ probe_wakeup_latency_hist_stop, NULL); -+ if (ret) { -+ pr_info("wakeup trace: Couldn't assign " -+ "probe_wakeup_latency_hist_stop " -+ "to trace_sched_switch\n"); -+ unregister_trace_sched_wakeup( -+ probe_wakeup_latency_hist_start, NULL); -+ unregister_trace_sched_wakeup_new( -+ probe_wakeup_latency_hist_start, NULL); -+ return ret; -+ } -+ ret = register_trace_sched_migrate_task( -+ probe_sched_migrate_task, NULL); -+ if (ret) { -+ pr_info("wakeup trace: Couldn't assign " -+ "probe_sched_migrate_task " -+ "to trace_sched_migrate_task\n"); -+ unregister_trace_sched_wakeup( -+ probe_wakeup_latency_hist_start, NULL); -+ unregister_trace_sched_wakeup_new( -+ probe_wakeup_latency_hist_start, NULL); -+ unregister_trace_sched_switch( -+ probe_wakeup_latency_hist_stop, NULL); -+ return ret; -+ } -+ break; -+#endif -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ case MISSED_TIMER_OFFSETS: -+ ret = register_trace_hrtimer_interrupt( -+ probe_hrtimer_interrupt, NULL); -+ if (ret) { -+ pr_info("wakeup trace: Couldn't assign " -+ "probe_hrtimer_interrupt " -+ "to trace_hrtimer_interrupt\n"); -+ return ret; -+ } -+ break; -+#endif -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+ case TIMERANDWAKEUP_LATENCY: -+ if (!wakeup_latency_enabled_data.enabled || -+ !missed_timer_offsets_enabled_data.enabled) -+ return -EINVAL; -+ break; -+#endif -+ default: -+ break; -+ } -+ } else { -+ switch (ed->latency_type) { -+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) -+ case PREEMPTIRQSOFF_LATENCY: -+ { -+ int cpu; -+ -+ unregister_trace_preemptirqsoff_hist( -+ probe_preemptirqsoff_hist, NULL); -+ for_each_online_cpu(cpu) { -+#ifdef CONFIG_INTERRUPT_OFF_HIST -+ per_cpu(hist_irqsoff_counting, -+ cpu) = 0; -+#endif -+#ifdef CONFIG_PREEMPT_OFF_HIST -+ per_cpu(hist_preemptoff_counting, -+ cpu) = 0; -+#endif -+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) -+ per_cpu(hist_preemptirqsoff_counting, -+ cpu) = 0; -+#endif -+ } -+ } -+ break; -+#endif -+#ifdef CONFIG_WAKEUP_LATENCY_HIST -+ case WAKEUP_LATENCY: -+ { -+ int cpu; -+ -+ unregister_trace_sched_wakeup( -+ probe_wakeup_latency_hist_start, NULL); -+ unregister_trace_sched_wakeup_new( -+ probe_wakeup_latency_hist_start, NULL); -+ unregister_trace_sched_switch( -+ probe_wakeup_latency_hist_stop, NULL); -+ unregister_trace_sched_migrate_task( -+ probe_sched_migrate_task, NULL); -+ -+ for_each_online_cpu(cpu) { -+ per_cpu(wakeup_task, cpu) = NULL; -+ per_cpu(wakeup_sharedprio, cpu) = 0; -+ } -+ } -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ timerandwakeup_enabled_data.enabled = 0; -+#endif -+ break; -+#endif -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ case MISSED_TIMER_OFFSETS: -+ unregister_trace_hrtimer_interrupt( -+ probe_hrtimer_interrupt, NULL); -+#ifdef CONFIG_WAKEUP_LATENCY_HIST -+ timerandwakeup_enabled_data.enabled = 0; -+#endif -+ break; -+#endif -+ default: -+ break; -+ } -+ } -+ ed->enabled = enable; -+ return cnt; -+} -+ -+static const struct file_operations latency_hist_reset_fops = { -+ .open = tracing_open_generic, -+ .write = latency_hist_reset, -+}; -+ -+static 
const struct file_operations enable_fops = { -+ .open = tracing_open_generic, -+ .read = show_enable, -+ .write = do_enable, -+}; -+ -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+static const struct file_operations pid_fops = { -+ .open = tracing_open_generic, -+ .read = show_pid, -+ .write = do_pid, -+}; -+ -+static const struct file_operations maxlatproc_fops = { -+ .open = tracing_open_generic, -+ .read = show_maxlatproc, -+}; -+#endif -+ -+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) -+static notrace void probe_preemptirqsoff_hist(void *v, int reason, -+ int starthist) -+{ -+ int cpu = raw_smp_processor_id(); -+ int time_set = 0; -+ -+ if (starthist) { -+ cycle_t uninitialized_var(start); -+ -+ if (!preempt_count() && !irqs_disabled()) -+ return; -+ -+#ifdef CONFIG_INTERRUPT_OFF_HIST -+ if ((reason == IRQS_OFF || reason == TRACE_START) && -+ !per_cpu(hist_irqsoff_counting, cpu)) { -+ per_cpu(hist_irqsoff_counting, cpu) = 1; -+ start = ftrace_now(cpu); -+ time_set++; -+ per_cpu(hist_irqsoff_start, cpu) = start; -+ } -+#endif -+ -+#ifdef CONFIG_PREEMPT_OFF_HIST -+ if ((reason == PREEMPT_OFF || reason == TRACE_START) && -+ !per_cpu(hist_preemptoff_counting, cpu)) { -+ per_cpu(hist_preemptoff_counting, cpu) = 1; -+ if (!(time_set++)) -+ start = ftrace_now(cpu); -+ per_cpu(hist_preemptoff_start, cpu) = start; -+ } -+#endif -+ -+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) -+ if (per_cpu(hist_irqsoff_counting, cpu) && -+ per_cpu(hist_preemptoff_counting, cpu) && -+ !per_cpu(hist_preemptirqsoff_counting, cpu)) { -+ per_cpu(hist_preemptirqsoff_counting, cpu) = 1; -+ if (!time_set) -+ start = ftrace_now(cpu); -+ per_cpu(hist_preemptirqsoff_start, cpu) = start; -+ } -+#endif -+ } else { -+ cycle_t uninitialized_var(stop); -+ -+#ifdef CONFIG_INTERRUPT_OFF_HIST -+ if ((reason == IRQS_ON || reason == TRACE_STOP) && -+ per_cpu(hist_irqsoff_counting, cpu)) { -+ cycle_t start = per_cpu(hist_irqsoff_start, cpu); -+ -+ stop = ftrace_now(cpu); -+ time_set++; -+ if (start) { -+ long latency = ((long) (stop - start)) / -+ NSECS_PER_USECS; -+ -+ latency_hist(IRQSOFF_LATENCY, cpu, latency, 0, -+ stop, NULL); -+ } -+ per_cpu(hist_irqsoff_counting, cpu) = 0; -+ } -+#endif -+ -+#ifdef CONFIG_PREEMPT_OFF_HIST -+ if ((reason == PREEMPT_ON || reason == TRACE_STOP) && -+ per_cpu(hist_preemptoff_counting, cpu)) { -+ cycle_t start = per_cpu(hist_preemptoff_start, cpu); -+ -+ if (!(time_set++)) -+ stop = ftrace_now(cpu); -+ if (start) { -+ long latency = ((long) (stop - start)) / -+ NSECS_PER_USECS; -+ -+ latency_hist(PREEMPTOFF_LATENCY, cpu, latency, -+ 0, stop, NULL); -+ } -+ per_cpu(hist_preemptoff_counting, cpu) = 0; -+ } -+#endif -+ -+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) -+ if ((!per_cpu(hist_irqsoff_counting, cpu) || -+ !per_cpu(hist_preemptoff_counting, cpu)) && -+ per_cpu(hist_preemptirqsoff_counting, cpu)) { -+ cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu); -+ -+ if (!time_set) -+ stop = ftrace_now(cpu); -+ if (start) { -+ long latency = ((long) (stop - start)) / -+ NSECS_PER_USECS; -+ -+ latency_hist(PREEMPTIRQSOFF_LATENCY, cpu, -+ latency, 0, stop, NULL); -+ } -+ per_cpu(hist_preemptirqsoff_counting, cpu) = 0; -+ } -+#endif -+ } -+} -+#endif -+ -+#ifdef CONFIG_WAKEUP_LATENCY_HIST -+static DEFINE_RAW_SPINLOCK(wakeup_lock); -+static notrace void probe_sched_migrate_task(void *v, struct task_struct *task, -+ int cpu) -+{ -+ int old_cpu = task_cpu(task); -+ 
-+ if (cpu != old_cpu) { -+ unsigned long flags; -+ struct task_struct *cpu_wakeup_task; -+ -+ raw_spin_lock_irqsave(&wakeup_lock, flags); -+ -+ cpu_wakeup_task = per_cpu(wakeup_task, old_cpu); -+ if (task == cpu_wakeup_task) { -+ put_task_struct(cpu_wakeup_task); -+ per_cpu(wakeup_task, old_cpu) = NULL; -+ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task; -+ get_task_struct(cpu_wakeup_task); -+ } -+ -+ raw_spin_unlock_irqrestore(&wakeup_lock, flags); -+ } -+} -+ -+static notrace void probe_wakeup_latency_hist_start(void *v, -+ struct task_struct *p) -+{ -+ unsigned long flags; -+ struct task_struct *curr = current; -+ int cpu = task_cpu(p); -+ struct task_struct *cpu_wakeup_task; -+ -+ raw_spin_lock_irqsave(&wakeup_lock, flags); -+ -+ cpu_wakeup_task = per_cpu(wakeup_task, cpu); -+ -+ if (wakeup_pid) { -+ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) || -+ p->prio == curr->prio) -+ per_cpu(wakeup_sharedprio, cpu) = 1; -+ if (likely(wakeup_pid != task_pid_nr(p))) -+ goto out; -+ } else { -+ if (likely(!rt_task(p)) || -+ (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) || -+ p->prio > curr->prio) -+ goto out; -+ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) || -+ p->prio == curr->prio) -+ per_cpu(wakeup_sharedprio, cpu) = 1; -+ } -+ -+ if (cpu_wakeup_task) -+ put_task_struct(cpu_wakeup_task); -+ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p; -+ get_task_struct(cpu_wakeup_task); -+ cpu_wakeup_task->preempt_timestamp_hist = -+ ftrace_now(raw_smp_processor_id()); -+out: -+ raw_spin_unlock_irqrestore(&wakeup_lock, flags); -+} -+ -+static notrace void probe_wakeup_latency_hist_stop(void *v, -+ bool preempt, struct task_struct *prev, struct task_struct *next) -+{ -+ unsigned long flags; -+ int cpu = task_cpu(next); -+ long latency; -+ cycle_t stop; -+ struct task_struct *cpu_wakeup_task; -+ -+ raw_spin_lock_irqsave(&wakeup_lock, flags); -+ -+ cpu_wakeup_task = per_cpu(wakeup_task, cpu); -+ -+ if (cpu_wakeup_task == NULL) -+ goto out; -+ -+ /* Already running? */ -+ if (unlikely(current == cpu_wakeup_task)) -+ goto out_reset; -+ -+ if (next != cpu_wakeup_task) { -+ if (next->prio < cpu_wakeup_task->prio) -+ goto out_reset; -+ -+ if (next->prio == cpu_wakeup_task->prio) -+ per_cpu(wakeup_sharedprio, cpu) = 1; -+ -+ goto out; -+ } -+ -+ if (current->prio == cpu_wakeup_task->prio) -+ per_cpu(wakeup_sharedprio, cpu) = 1; -+ -+ /* -+ * The task we are waiting for is about to be switched to. -+ * Calculate latency and store it in histogram. 
-+ */ -+ stop = ftrace_now(raw_smp_processor_id()); -+ -+ latency = ((long) (stop - next->preempt_timestamp_hist)) / -+ NSECS_PER_USECS; -+ -+ if (per_cpu(wakeup_sharedprio, cpu)) { -+ latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop, -+ next); -+ per_cpu(wakeup_sharedprio, cpu) = 0; -+ } else { -+ latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next); -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ if (timerandwakeup_enabled_data.enabled) { -+ latency_hist(TIMERANDWAKEUP_LATENCY, cpu, -+ next->timer_offset + latency, next->timer_offset, -+ stop, next); -+ } -+#endif -+ } -+ -+out_reset: -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ next->timer_offset = 0; -+#endif -+ put_task_struct(cpu_wakeup_task); -+ per_cpu(wakeup_task, cpu) = NULL; -+out: -+ raw_spin_unlock_irqrestore(&wakeup_lock, flags); -+} -+#endif -+ -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+static notrace void probe_hrtimer_interrupt(void *v, int cpu, -+ long long latency_ns, struct task_struct *curr, -+ struct task_struct *task) -+{ -+ if (latency_ns <= 0 && task != NULL && rt_task(task) && -+ (task->prio < curr->prio || -+ (task->prio == curr->prio && -+ !cpumask_test_cpu(cpu, &task->cpus_allowed)))) { -+ long latency; -+ cycle_t now; -+ -+ if (missed_timer_offsets_pid) { -+ if (likely(missed_timer_offsets_pid != -+ task_pid_nr(task))) -+ return; -+ } -+ -+ now = ftrace_now(cpu); -+ latency = (long) div_s64(-latency_ns, NSECS_PER_USECS); -+ latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now, -+ task); -+#ifdef CONFIG_WAKEUP_LATENCY_HIST -+ task->timer_offset = latency; -+#endif -+ } -+} -+#endif -+ -+static __init int latency_hist_init(void) -+{ -+ struct dentry *latency_hist_root = NULL; -+ struct dentry *dentry; -+#ifdef CONFIG_WAKEUP_LATENCY_HIST -+ struct dentry *dentry_sharedprio; -+#endif -+ struct dentry *entry; -+ struct dentry *enable_root; -+ int i = 0; -+ struct hist_data *my_hist; -+ char name[64]; -+ char *cpufmt = "CPU%d"; -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+ char *cpufmt_maxlatproc = "max_latency-CPU%d"; -+ struct maxlatproc_data *mp = NULL; -+#endif -+ -+ dentry = tracing_init_dentry(); -+ latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry); -+ enable_root = debugfs_create_dir("enable", latency_hist_root); -+ -+#ifdef CONFIG_INTERRUPT_OFF_HIST -+ dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root); -+ for_each_possible_cpu(i) { -+ sprintf(name, cpufmt, i); -+ entry = debugfs_create_file(name, 0444, dentry, -+ &per_cpu(irqsoff_hist, i), &latency_hist_fops); -+ my_hist = &per_cpu(irqsoff_hist, i); -+ atomic_set(&my_hist->hist_mode, 1); -+ my_hist->min_lat = LONG_MAX; -+ } -+ entry = debugfs_create_file("reset", 0644, dentry, -+ (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops); -+#endif -+ -+#ifdef CONFIG_PREEMPT_OFF_HIST -+ dentry = debugfs_create_dir(preemptoff_hist_dir, -+ latency_hist_root); -+ for_each_possible_cpu(i) { -+ sprintf(name, cpufmt, i); -+ entry = debugfs_create_file(name, 0444, dentry, -+ &per_cpu(preemptoff_hist, i), &latency_hist_fops); -+ my_hist = &per_cpu(preemptoff_hist, i); -+ atomic_set(&my_hist->hist_mode, 1); -+ my_hist->min_lat = LONG_MAX; -+ } -+ entry = debugfs_create_file("reset", 0644, dentry, -+ (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops); -+#endif -+ -+#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) -+ dentry = debugfs_create_dir(preemptirqsoff_hist_dir, -+ latency_hist_root); -+ for_each_possible_cpu(i) { -+ sprintf(name, 
cpufmt, i); -+ entry = debugfs_create_file(name, 0444, dentry, -+ &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops); -+ my_hist = &per_cpu(preemptirqsoff_hist, i); -+ atomic_set(&my_hist->hist_mode, 1); -+ my_hist->min_lat = LONG_MAX; -+ } -+ entry = debugfs_create_file("reset", 0644, dentry, -+ (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops); -+#endif -+ -+#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) -+ entry = debugfs_create_file("preemptirqsoff", 0644, -+ enable_root, (void *)&preemptirqsoff_enabled_data, -+ &enable_fops); -+#endif -+ -+#ifdef CONFIG_WAKEUP_LATENCY_HIST -+ dentry = debugfs_create_dir(wakeup_latency_hist_dir, -+ latency_hist_root); -+ dentry_sharedprio = debugfs_create_dir( -+ wakeup_latency_hist_dir_sharedprio, dentry); -+ for_each_possible_cpu(i) { -+ sprintf(name, cpufmt, i); -+ -+ entry = debugfs_create_file(name, 0444, dentry, -+ &per_cpu(wakeup_latency_hist, i), -+ &latency_hist_fops); -+ my_hist = &per_cpu(wakeup_latency_hist, i); -+ atomic_set(&my_hist->hist_mode, 1); -+ my_hist->min_lat = LONG_MAX; -+ -+ entry = debugfs_create_file(name, 0444, dentry_sharedprio, -+ &per_cpu(wakeup_latency_hist_sharedprio, i), -+ &latency_hist_fops); -+ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i); -+ atomic_set(&my_hist->hist_mode, 1); -+ my_hist->min_lat = LONG_MAX; -+ -+ sprintf(name, cpufmt_maxlatproc, i); -+ -+ mp = &per_cpu(wakeup_maxlatproc, i); -+ entry = debugfs_create_file(name, 0444, dentry, mp, -+ &maxlatproc_fops); -+ clear_maxlatprocdata(mp); -+ -+ mp = &per_cpu(wakeup_maxlatproc_sharedprio, i); -+ entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp, -+ &maxlatproc_fops); -+ clear_maxlatprocdata(mp); -+ } -+ entry = debugfs_create_file("pid", 0644, dentry, -+ (void *)&wakeup_pid, &pid_fops); -+ entry = debugfs_create_file("reset", 0644, dentry, -+ (void *)WAKEUP_LATENCY, &latency_hist_reset_fops); -+ entry = debugfs_create_file("reset", 0644, dentry_sharedprio, -+ (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops); -+ entry = debugfs_create_file("wakeup", 0644, -+ enable_root, (void *)&wakeup_latency_enabled_data, -+ &enable_fops); -+#endif -+ -+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST -+ dentry = debugfs_create_dir(missed_timer_offsets_dir, -+ latency_hist_root); -+ for_each_possible_cpu(i) { -+ sprintf(name, cpufmt, i); -+ entry = debugfs_create_file(name, 0444, dentry, -+ &per_cpu(missed_timer_offsets, i), &latency_hist_fops); -+ my_hist = &per_cpu(missed_timer_offsets, i); -+ atomic_set(&my_hist->hist_mode, 1); -+ my_hist->min_lat = LONG_MAX; -+ -+ sprintf(name, cpufmt_maxlatproc, i); -+ mp = &per_cpu(missed_timer_offsets_maxlatproc, i); -+ entry = debugfs_create_file(name, 0444, dentry, mp, -+ &maxlatproc_fops); -+ clear_maxlatprocdata(mp); -+ } -+ entry = debugfs_create_file("pid", 0644, dentry, -+ (void *)&missed_timer_offsets_pid, &pid_fops); -+ entry = debugfs_create_file("reset", 0644, dentry, -+ (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops); -+ entry = debugfs_create_file("missed_timer_offsets", 0644, -+ enable_root, (void *)&missed_timer_offsets_enabled_data, -+ &enable_fops); -+#endif -+ -+#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ -+ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) -+ dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir, -+ latency_hist_root); -+ for_each_possible_cpu(i) { -+ sprintf(name, cpufmt, i); -+ entry = debugfs_create_file(name, 0444, dentry, -+ &per_cpu(timerandwakeup_latency_hist, i), -+ &latency_hist_fops); -+ my_hist = 
&per_cpu(timerandwakeup_latency_hist, i); -+ atomic_set(&my_hist->hist_mode, 1); -+ my_hist->min_lat = LONG_MAX; -+ -+ sprintf(name, cpufmt_maxlatproc, i); -+ mp = &per_cpu(timerandwakeup_maxlatproc, i); -+ entry = debugfs_create_file(name, 0444, dentry, mp, -+ &maxlatproc_fops); -+ clear_maxlatprocdata(mp); -+ } -+ entry = debugfs_create_file("reset", 0644, dentry, -+ (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops); -+ entry = debugfs_create_file("timerandwakeup", 0644, -+ enable_root, (void *)&timerandwakeup_enabled_data, -+ &enable_fops); -+#endif -+ return 0; -+} -+ -+device_initcall(latency_hist_init); -diff -Nur linux-4.4.13.orig/kernel/trace/Makefile linux-4.4.13/kernel/trace/Makefile ---- linux-4.4.13.orig/kernel/trace/Makefile 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/trace/Makefile 2016-06-22 21:55:08.066523867 +0200 -@@ -36,6 +36,10 @@ - obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o - obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o - obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o -+obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o -+obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o -+obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o -+obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o - obj-$(CONFIG_NOP_TRACER) += trace_nop.o - obj-$(CONFIG_STACK_TRACER) += trace_stack.o - obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o -diff -Nur linux-4.4.13.orig/kernel/trace/trace.c linux-4.4.13/kernel/trace/trace.c ---- linux-4.4.13.orig/kernel/trace/trace.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/trace/trace.c 2016-06-22 21:55:08.066523867 +0200 -@@ -1652,6 +1652,7 @@ - struct task_struct *tsk = current; - - entry->preempt_count = pc & 0xff; -+ entry->preempt_lazy_count = preempt_lazy_count(); - entry->pid = (tsk) ? tsk->pid : 0; - entry->flags = - #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT -@@ -1661,8 +1662,11 @@ - #endif - ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | - ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | -- (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | -+ (tif_need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) | -+ (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) | - (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); -+ -+ entry->migrate_disable = (tsk) ? 
__migrate_disabled(tsk) & 0xFF : 0; - } - EXPORT_SYMBOL_GPL(tracing_generic_entry_update); - -@@ -2555,14 +2559,17 @@ - - static void print_lat_help_header(struct seq_file *m) - { -- seq_puts(m, "# _------=> CPU# \n" -- "# / _-----=> irqs-off \n" -- "# | / _----=> need-resched \n" -- "# || / _---=> hardirq/softirq \n" -- "# ||| / _--=> preempt-depth \n" -- "# |||| / delay \n" -- "# cmd pid ||||| time | caller \n" -- "# \\ / ||||| \\ | / \n"); -+ seq_puts(m, "# _--------=> CPU# \n" -+ "# / _-------=> irqs-off \n" -+ "# | / _------=> need-resched \n" -+ "# || / _-----=> need-resched_lazy \n" -+ "# ||| / _----=> hardirq/softirq \n" -+ "# |||| / _---=> preempt-depth \n" -+ "# ||||| / _--=> preempt-lazy-depth\n" -+ "# |||||| / _-=> migrate-disable \n" -+ "# ||||||| / delay \n" -+ "# cmd pid |||||||| time | caller \n" -+ "# \\ / |||||||| \\ | / \n"); - } - - static void print_event_info(struct trace_buffer *buf, struct seq_file *m) -@@ -2588,11 +2595,14 @@ - print_event_info(buf, m); - seq_puts(m, "# _-----=> irqs-off\n" - "# / _----=> need-resched\n" -- "# | / _---=> hardirq/softirq\n" -- "# || / _--=> preempt-depth\n" -- "# ||| / delay\n" -- "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n" -- "# | | | |||| | |\n"); -+ "# |/ _-----=> need-resched_lazy\n" -+ "# || / _---=> hardirq/softirq\n" -+ "# ||| / _--=> preempt-depth\n" -+ "# |||| /_--=> preempt-lazy-depth\n" -+ "# ||||| _-=> migrate-disable \n" -+ "# ||||| / delay\n" -+ "# TASK-PID CPU# |||||| TIMESTAMP FUNCTION\n" -+ "# | | | |||||| | |\n"); - } - - void -diff -Nur linux-4.4.13.orig/kernel/trace/trace_events.c linux-4.4.13/kernel/trace/trace_events.c ---- linux-4.4.13.orig/kernel/trace/trace_events.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/trace/trace_events.c 2016-06-22 21:55:08.070523867 +0200 -@@ -188,6 +188,8 @@ - __common_field(unsigned char, flags); - __common_field(unsigned char, preempt_count); - __common_field(int, pid); -+ __common_field(unsigned short, migrate_disable); -+ __common_field(unsigned short, padding); - - return ret; - } -diff -Nur linux-4.4.13.orig/kernel/trace/trace.h linux-4.4.13/kernel/trace/trace.h ---- linux-4.4.13.orig/kernel/trace/trace.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/trace/trace.h 2016-06-22 21:55:08.070523867 +0200 -@@ -117,6 +117,7 @@ - * NEED_RESCHED - reschedule is requested - * HARDIRQ - inside an interrupt handler - * SOFTIRQ - inside a softirq handler -+ * NEED_RESCHED_LAZY - lazy reschedule is requested - */ - enum trace_flag_type { - TRACE_FLAG_IRQS_OFF = 0x01, -@@ -125,6 +126,7 @@ - TRACE_FLAG_HARDIRQ = 0x08, - TRACE_FLAG_SOFTIRQ = 0x10, - TRACE_FLAG_PREEMPT_RESCHED = 0x20, -+ TRACE_FLAG_NEED_RESCHED_LAZY = 0x40, - }; - - #define TRACE_BUF_SIZE 1024 -diff -Nur linux-4.4.13.orig/kernel/trace/trace_irqsoff.c linux-4.4.13/kernel/trace/trace_irqsoff.c ---- linux-4.4.13.orig/kernel/trace/trace_irqsoff.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/trace/trace_irqsoff.c 2016-06-22 21:55:08.070523867 +0200 -@@ -13,6 +13,7 @@ - #include - #include - #include -+#include - - #include "trace.h" - -@@ -424,11 +425,13 @@ - { - if (preempt_trace() || irq_trace()) - start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); -+ trace_preemptirqsoff_hist_rcuidle(TRACE_START, 1); - } - EXPORT_SYMBOL_GPL(start_critical_timings); - - void stop_critical_timings(void) - { -+ trace_preemptirqsoff_hist_rcuidle(TRACE_STOP, 0); - if (preempt_trace() || irq_trace()) - stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); - } -@@ -438,6 +441,7 @@ - #ifdef 
CONFIG_PROVE_LOCKING - void time_hardirqs_on(unsigned long a0, unsigned long a1) - { -+ trace_preemptirqsoff_hist_rcuidle(IRQS_ON, 0); - if (!preempt_trace() && irq_trace()) - stop_critical_timing(a0, a1); - } -@@ -446,6 +450,7 @@ - { - if (!preempt_trace() && irq_trace()) - start_critical_timing(a0, a1); -+ trace_preemptirqsoff_hist_rcuidle(IRQS_OFF, 1); - } - - #else /* !CONFIG_PROVE_LOCKING */ -@@ -471,6 +476,7 @@ - */ - void trace_hardirqs_on(void) - { -+ trace_preemptirqsoff_hist(IRQS_ON, 0); - if (!preempt_trace() && irq_trace()) - stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); - } -@@ -480,11 +486,13 @@ - { - if (!preempt_trace() && irq_trace()) - start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); -+ trace_preemptirqsoff_hist(IRQS_OFF, 1); - } - EXPORT_SYMBOL(trace_hardirqs_off); - - __visible void trace_hardirqs_on_caller(unsigned long caller_addr) - { -+ trace_preemptirqsoff_hist(IRQS_ON, 0); - if (!preempt_trace() && irq_trace()) - stop_critical_timing(CALLER_ADDR0, caller_addr); - } -@@ -494,6 +502,7 @@ - { - if (!preempt_trace() && irq_trace()) - start_critical_timing(CALLER_ADDR0, caller_addr); -+ trace_preemptirqsoff_hist(IRQS_OFF, 1); - } - EXPORT_SYMBOL(trace_hardirqs_off_caller); - -@@ -503,12 +512,14 @@ - #ifdef CONFIG_PREEMPT_TRACER - void trace_preempt_on(unsigned long a0, unsigned long a1) - { -+ trace_preemptirqsoff_hist(PREEMPT_ON, 0); - if (preempt_trace() && !irq_trace()) - stop_critical_timing(a0, a1); - } - - void trace_preempt_off(unsigned long a0, unsigned long a1) - { -+ trace_preemptirqsoff_hist(PREEMPT_ON, 1); - if (preempt_trace() && !irq_trace()) - start_critical_timing(a0, a1); - } -diff -Nur linux-4.4.13.orig/kernel/trace/trace_output.c linux-4.4.13/kernel/trace/trace_output.c ---- linux-4.4.13.orig/kernel/trace/trace_output.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/trace/trace_output.c 2016-06-22 21:55:08.070523867 +0200 -@@ -386,6 +386,7 @@ - { - char hardsoft_irq; - char need_resched; -+ char need_resched_lazy; - char irqs_off; - int hardirq; - int softirq; -@@ -413,6 +414,8 @@ - need_resched = '.'; - break; - } -+ need_resched_lazy = -+ (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.'; - - hardsoft_irq = - (hardirq && softirq) ? 'H' : -@@ -420,14 +423,25 @@ - softirq ? 
's' : - '.'; - -- trace_seq_printf(s, "%c%c%c", -- irqs_off, need_resched, hardsoft_irq); -+ trace_seq_printf(s, "%c%c%c%c", -+ irqs_off, need_resched, need_resched_lazy, -+ hardsoft_irq); - - if (entry->preempt_count) - trace_seq_printf(s, "%x", entry->preempt_count); - else - trace_seq_putc(s, '.'); - -+ if (entry->preempt_lazy_count) -+ trace_seq_printf(s, "%x", entry->preempt_lazy_count); -+ else -+ trace_seq_putc(s, '.'); -+ -+ if (entry->migrate_disable) -+ trace_seq_printf(s, "%x", entry->migrate_disable); -+ else -+ trace_seq_putc(s, '.'); -+ - return !trace_seq_has_overflowed(s); - } - -diff -Nur linux-4.4.13.orig/kernel/user.c linux-4.4.13/kernel/user.c ---- linux-4.4.13.orig/kernel/user.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/user.c 2016-06-22 21:55:08.070523867 +0200 -@@ -161,11 +161,11 @@ - if (!up) - return; - -- local_irq_save(flags); -+ local_irq_save_nort(flags); - if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) - free_user(up, flags); - else -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } - - struct user_struct *alloc_uid(kuid_t uid) -diff -Nur linux-4.4.13.orig/kernel/watchdog.c linux-4.4.13/kernel/watchdog.c ---- linux-4.4.13.orig/kernel/watchdog.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/watchdog.c 2016-06-22 21:55:08.070523867 +0200 -@@ -299,6 +299,8 @@ - - #ifdef CONFIG_HARDLOCKUP_DETECTOR - -+static DEFINE_RAW_SPINLOCK(watchdog_output_lock); -+ - static struct perf_event_attr wd_hw_attr = { - .type = PERF_TYPE_HARDWARE, - .config = PERF_COUNT_HW_CPU_CYCLES, -@@ -333,6 +335,13 @@ - /* only print hardlockups once */ - if (__this_cpu_read(hard_watchdog_warn) == true) - return; -+ /* -+ * If early-printk is enabled then make sure we do not -+ * lock up in printk() and kill console logging: -+ */ -+ printk_kill(); -+ -+ raw_spin_lock(&watchdog_output_lock); - - pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu); - print_modules(); -@@ -350,8 +359,9 @@ - !test_and_set_bit(0, &hardlockup_allcpu_dumped)) - trigger_allbutself_cpu_backtrace(); - -+ raw_spin_unlock(&watchdog_output_lock); - if (hardlockup_panic) -- panic("Hard LOCKUP"); -+ nmi_panic(regs, "Hard LOCKUP"); - - __this_cpu_write(hard_watchdog_warn, true); - return; -@@ -497,6 +507,7 @@ - /* kick off the timer for the hardlockup detector */ - hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - hrtimer->function = watchdog_timer_fn; -+ hrtimer->irqsafe = 1; - - /* Enable the perf event */ - watchdog_nmi_enable(cpu); -diff -Nur linux-4.4.13.orig/kernel/workqueue.c linux-4.4.13/kernel/workqueue.c ---- linux-4.4.13.orig/kernel/workqueue.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/workqueue.c 2016-06-22 21:55:08.070523867 +0200 -@@ -48,6 +48,8 @@ - #include - #include - #include -+#include -+#include - - #include "workqueue_internal.h" - -@@ -121,11 +123,16 @@ - * cpu or grabbing pool->lock is enough for read access. If - * POOL_DISASSOCIATED is set, it's identical to L. - * -+ * On RT we need the extra protection via rt_lock_idle_list() for -+ * the list manipulations against read access from -+ * wq_worker_sleeping(). All other places are nicely serialized via -+ * pool->lock. -+ * - * A: pool->attach_mutex protected. - * - * PL: wq_pool_mutex protected. - * -- * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads. -+ * PR: wq_pool_mutex protected for writes. RCU protected for reads. - * - * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads. 
- * -@@ -134,7 +141,7 @@ - * - * WQ: wq->mutex protected. - * -- * WR: wq->mutex protected for writes. Sched-RCU protected for reads. -+ * WR: wq->mutex protected for writes. RCU protected for reads. - * - * MD: wq_mayday_lock protected. - */ -@@ -183,7 +190,7 @@ - atomic_t nr_running ____cacheline_aligned_in_smp; - - /* -- * Destruction of pool is sched-RCU protected to allow dereferences -+ * Destruction of pool is RCU protected to allow dereferences - * from get_work_pool(). - */ - struct rcu_head rcu; -@@ -212,7 +219,7 @@ - /* - * Release of unbound pwq is punted to system_wq. See put_pwq() - * and pwq_unbound_release_workfn() for details. pool_workqueue -- * itself is also sched-RCU protected so that the first pwq can be -+ * itself is also RCU protected so that the first pwq can be - * determined without grabbing wq->mutex. - */ - struct work_struct unbound_release_work; -@@ -331,6 +338,8 @@ - struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly; - EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq); - -+static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock); -+ - static int worker_thread(void *__worker); - static void workqueue_sysfs_unregister(struct workqueue_struct *wq); - -@@ -338,20 +347,20 @@ - #include - - #define assert_rcu_or_pool_mutex() \ -- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ -+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ - !lockdep_is_held(&wq_pool_mutex), \ -- "sched RCU or wq_pool_mutex should be held") -+ "RCU or wq_pool_mutex should be held") - - #define assert_rcu_or_wq_mutex(wq) \ -- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ -+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ - !lockdep_is_held(&wq->mutex), \ -- "sched RCU or wq->mutex should be held") -+ "RCU or wq->mutex should be held") - - #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \ -- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ -+ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ - !lockdep_is_held(&wq->mutex) && \ - !lockdep_is_held(&wq_pool_mutex), \ -- "sched RCU, wq->mutex or wq_pool_mutex should be held") -+ "RCU, wq->mutex or wq_pool_mutex should be held") - - #define for_each_cpu_worker_pool(pool, cpu) \ - for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \ -@@ -363,7 +372,7 @@ - * @pool: iteration cursor - * @pi: integer used for iteration - * -- * This must be called either with wq_pool_mutex held or sched RCU read -+ * This must be called either with wq_pool_mutex held or RCU read - * locked. If the pool needs to be used beyond the locking in effect, the - * caller is responsible for guaranteeing that the pool stays online. - * -@@ -395,7 +404,7 @@ - * @pwq: iteration cursor - * @wq: the target workqueue - * -- * This must be called either with wq->mutex held or sched RCU read locked. -+ * This must be called either with wq->mutex held or RCU read locked. - * If the pwq needs to be used beyond the locking in effect, the caller is - * responsible for guaranteeing that the pwq stays online. 
- * -@@ -407,6 +416,31 @@ - if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \ - else - -+#ifdef CONFIG_PREEMPT_RT_BASE -+static inline void rt_lock_idle_list(struct worker_pool *pool) -+{ -+ preempt_disable(); -+} -+static inline void rt_unlock_idle_list(struct worker_pool *pool) -+{ -+ preempt_enable(); -+} -+static inline void sched_lock_idle_list(struct worker_pool *pool) { } -+static inline void sched_unlock_idle_list(struct worker_pool *pool) { } -+#else -+static inline void rt_lock_idle_list(struct worker_pool *pool) { } -+static inline void rt_unlock_idle_list(struct worker_pool *pool) { } -+static inline void sched_lock_idle_list(struct worker_pool *pool) -+{ -+ spin_lock_irq(&pool->lock); -+} -+static inline void sched_unlock_idle_list(struct worker_pool *pool) -+{ -+ spin_unlock_irq(&pool->lock); -+} -+#endif -+ -+ - #ifdef CONFIG_DEBUG_OBJECTS_WORK - - static struct debug_obj_descr work_debug_descr; -@@ -557,7 +591,7 @@ - * @wq: the target workqueue - * @node: the node ID - * -- * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU -+ * This must be called with any of wq_pool_mutex, wq->mutex or RCU - * read locked. - * If the pwq needs to be used beyond the locking in effect, the caller is - * responsible for guaranteeing that the pwq stays online. -@@ -701,8 +735,8 @@ - * @work: the work item of interest - * - * Pools are created and destroyed under wq_pool_mutex, and allows read -- * access under sched-RCU read lock. As such, this function should be -- * called under wq_pool_mutex or with preemption disabled. -+ * access under RCU read lock. As such, this function should be -+ * called under wq_pool_mutex or inside of a rcu_read_lock() region. - * - * All fields of the returned pool are accessible as long as the above - * mentioned locking is in effect. If the returned pool needs to be used -@@ -839,51 +873,44 @@ - */ - static void wake_up_worker(struct worker_pool *pool) - { -- struct worker *worker = first_idle_worker(pool); -+ struct worker *worker; -+ -+ rt_lock_idle_list(pool); -+ -+ worker = first_idle_worker(pool); - - if (likely(worker)) - wake_up_process(worker->task); -+ -+ rt_unlock_idle_list(pool); - } - - /** -- * wq_worker_waking_up - a worker is waking up -- * @task: task waking up -- * @cpu: CPU @task is waking up to -+ * wq_worker_running - a worker is running again -+ * @task: task returning from sleep - * -- * This function is called during try_to_wake_up() when a worker is -- * being awoken. -- * -- * CONTEXT: -- * spin_lock_irq(rq->lock) -+ * This function is called when a worker returns from schedule() - */ --void wq_worker_waking_up(struct task_struct *task, int cpu) -+void wq_worker_running(struct task_struct *task) - { - struct worker *worker = kthread_data(task); - -- if (!(worker->flags & WORKER_NOT_RUNNING)) { -- WARN_ON_ONCE(worker->pool->cpu != cpu); -+ if (!worker->sleeping) -+ return; -+ if (!(worker->flags & WORKER_NOT_RUNNING)) - atomic_inc(&worker->pool->nr_running); -- } -+ worker->sleeping = 0; - } - - /** - * wq_worker_sleeping - a worker is going to sleep - * @task: task going to sleep -- * @cpu: CPU in question, must be the current CPU number -- * -- * This function is called during schedule() when a busy worker is -- * going to sleep. Worker on the same cpu can be woken up by -- * returning pointer to its task. -- * -- * CONTEXT: -- * spin_lock_irq(rq->lock) -- * -- * Return: -- * Worker task on @cpu to wake up, %NULL if none. -+ * This function is called from schedule() when a busy worker is -+ * going to sleep. 
- */ --struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu) -+void wq_worker_sleeping(struct task_struct *task) - { -- struct worker *worker = kthread_data(task), *to_wakeup = NULL; -+ struct worker *worker = kthread_data(task); - struct worker_pool *pool; - - /* -@@ -892,29 +919,26 @@ - * checking NOT_RUNNING. - */ - if (worker->flags & WORKER_NOT_RUNNING) -- return NULL; -+ return; - - pool = worker->pool; - -- /* this can only happen on the local cpu */ -- if (WARN_ON_ONCE(cpu != raw_smp_processor_id() || pool->cpu != cpu)) -- return NULL; -+ if (WARN_ON_ONCE(worker->sleeping)) -+ return; -+ -+ worker->sleeping = 1; - - /* - * The counterpart of the following dec_and_test, implied mb, - * worklist not empty test sequence is in insert_work(). - * Please read comment there. -- * -- * NOT_RUNNING is clear. This means that we're bound to and -- * running on the local cpu w/ rq lock held and preemption -- * disabled, which in turn means that none else could be -- * manipulating idle_list, so dereferencing idle_list without pool -- * lock is safe. - */ - if (atomic_dec_and_test(&pool->nr_running) && -- !list_empty(&pool->worklist)) -- to_wakeup = first_idle_worker(pool); -- return to_wakeup ? to_wakeup->task : NULL; -+ !list_empty(&pool->worklist)) { -+ sched_lock_idle_list(pool); -+ wake_up_worker(pool); -+ sched_unlock_idle_list(pool); -+ } - } - - /** -@@ -1108,12 +1132,12 @@ - { - if (pwq) { - /* -- * As both pwqs and pools are sched-RCU protected, the -+ * As both pwqs and pools are RCU protected, the - * following lock operations are safe. - */ -- spin_lock_irq(&pwq->pool->lock); -+ local_spin_lock_irq(pendingb_lock, &pwq->pool->lock); - put_pwq(pwq); -- spin_unlock_irq(&pwq->pool->lock); -+ local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock); - } - } - -@@ -1215,7 +1239,7 @@ - struct worker_pool *pool; - struct pool_workqueue *pwq; - -- local_irq_save(*flags); -+ local_lock_irqsave(pendingb_lock, *flags); - - /* try to steal the timer if it exists */ - if (is_dwork) { -@@ -1234,6 +1258,7 @@ - if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) - return 0; - -+ rcu_read_lock(); - /* - * The queueing is in progress, or it is already queued. Try to - * steal it from ->worklist without clearing WORK_STRUCT_PENDING. -@@ -1272,14 +1297,16 @@ - set_work_pool_and_keep_pending(work, pool->id); - - spin_unlock(&pool->lock); -+ rcu_read_unlock(); - return 1; - } - spin_unlock(&pool->lock); - fail: -- local_irq_restore(*flags); -+ rcu_read_unlock(); -+ local_unlock_irqrestore(pendingb_lock, *flags); - if (work_is_canceling(work)) - return -ENOENT; -- cpu_relax(); -+ cpu_chill(); - return -EAGAIN; - } - -@@ -1348,7 +1375,7 @@ - * queued or lose PENDING. Grabbing PENDING and queueing should - * happen with IRQ disabled. 
- */ -- WARN_ON_ONCE(!irqs_disabled()); -+ WARN_ON_ONCE_NONRT(!irqs_disabled()); - - debug_work_activate(work); - -@@ -1356,6 +1383,8 @@ - if (unlikely(wq->flags & __WQ_DRAINING) && - WARN_ON_ONCE(!is_chained_work(wq))) - return; -+ -+ rcu_read_lock(); - retry: - if (req_cpu == WORK_CPU_UNBOUND) - cpu = raw_smp_processor_id(); -@@ -1412,10 +1441,8 @@ - /* pwq determined, queue */ - trace_workqueue_queue_work(req_cpu, pwq, work); - -- if (WARN_ON(!list_empty(&work->entry))) { -- spin_unlock(&pwq->pool->lock); -- return; -- } -+ if (WARN_ON(!list_empty(&work->entry))) -+ goto out; - - pwq->nr_in_flight[pwq->work_color]++; - work_flags = work_color_to_flags(pwq->work_color); -@@ -1431,7 +1458,9 @@ - - insert_work(pwq, work, worklist, work_flags); - -+out: - spin_unlock(&pwq->pool->lock); -+ rcu_read_unlock(); - } - - /** -@@ -1451,14 +1480,14 @@ - bool ret = false; - unsigned long flags; - -- local_irq_save(flags); -+ local_lock_irqsave(pendingb_lock,flags); - - if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { - __queue_work(cpu, wq, work); - ret = true; - } - -- local_irq_restore(flags); -+ local_unlock_irqrestore(pendingb_lock, flags); - return ret; - } - EXPORT_SYMBOL(queue_work_on); -@@ -1525,14 +1554,14 @@ - unsigned long flags; - - /* read the comment in __queue_work() */ -- local_irq_save(flags); -+ local_lock_irqsave(pendingb_lock, flags); - - if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { - __queue_delayed_work(cpu, wq, dwork, delay); - ret = true; - } - -- local_irq_restore(flags); -+ local_unlock_irqrestore(pendingb_lock, flags); - return ret; - } - EXPORT_SYMBOL(queue_delayed_work_on); -@@ -1567,7 +1596,7 @@ - - if (likely(ret >= 0)) { - __queue_delayed_work(cpu, wq, dwork, delay); -- local_irq_restore(flags); -+ local_unlock_irqrestore(pendingb_lock, flags); - } - - /* -ENOENT from try_to_grab_pending() becomes %true */ -@@ -1600,7 +1629,9 @@ - worker->last_active = jiffies; - - /* idle_list is LIFO */ -+ rt_lock_idle_list(pool); - list_add(&worker->entry, &pool->idle_list); -+ rt_unlock_idle_list(pool); - - if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) - mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); -@@ -1633,7 +1664,9 @@ - return; - worker_clr_flags(worker, WORKER_IDLE); - pool->nr_idle--; -+ rt_lock_idle_list(pool); - list_del_init(&worker->entry); -+ rt_unlock_idle_list(pool); - } - - static struct worker *alloc_worker(int node) -@@ -1799,7 +1832,9 @@ - pool->nr_workers--; - pool->nr_idle--; - -+ rt_lock_idle_list(pool); - list_del_init(&worker->entry); -+ rt_unlock_idle_list(pool); - worker->flags |= WORKER_DIE; - wake_up_process(worker->task); - } -@@ -2716,14 +2751,14 @@ - - might_sleep(); - -- local_irq_disable(); -+ rcu_read_lock(); - pool = get_work_pool(work); - if (!pool) { -- local_irq_enable(); -+ rcu_read_unlock(); - return false; - } - -- spin_lock(&pool->lock); -+ spin_lock_irq(&pool->lock); - /* see the comment in try_to_grab_pending() with the same code */ - pwq = get_work_pwq(work); - if (pwq) { -@@ -2750,10 +2785,11 @@ - else - lock_map_acquire_read(&pwq->wq->lockdep_map); - lock_map_release(&pwq->wq->lockdep_map); -- -+ rcu_read_unlock(); - return true; - already_gone: - spin_unlock_irq(&pool->lock); -+ rcu_read_unlock(); - return false; - } - -@@ -2840,7 +2876,7 @@ - - /* tell other tasks trying to grab @work to back off */ - mark_work_canceling(work); -- local_irq_restore(flags); -+ local_unlock_irqrestore(pendingb_lock, flags); - - flush_work(work); - clear_work_data(work); 
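The workqueue conversions above all follow one PREEMPT_RT idiom: an open-coded local_irq_save()/local_irq_restore() pair that only guards per-CPU pending state becomes a named local lock (pendingb_lock), which still disables interrupts on a mainline build but turns into a per-CPU sleeping lock on RT, so the critical section stays preemptible. Below is a minimal sketch of that idiom, using only helpers this patch itself provides (DEFINE_LOCAL_IRQ_LOCK, local_lock_irqsave, local_unlock_irqrestore from the RT locallock header); the mark_pending() wrapper is a hypothetical name for illustration, and the fragment is not buildable outside an RT-patched tree:

	/*
	 * Sketch: serializing WORK_STRUCT_PENDING the RT-friendly way.
	 * On !PREEMPT_RT, local_lock_irqsave() behaves like
	 * local_irq_save(); on PREEMPT_RT it takes a per-CPU lock that
	 * may sleep, keeping the section preemptible.
	 */
	#include <linux/locallock.h>	/* header added by the RT patch */

	static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock);

	/* hypothetical helper mirroring the queue_work_on() hunk above */
	static bool mark_pending(struct work_struct *work)
	{
		unsigned long flags;
		bool ret = false;

		/* was: local_irq_save(flags); */
		local_lock_irqsave(pendingb_lock, flags);
		if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT,
				      work_data_bits(work)))
			ret = true;
		/* was: local_irq_restore(flags); */
		local_unlock_irqrestore(pendingb_lock, flags);
		return ret;
	}

The same substitution appears in try_to_grab_pending(), queue_delayed_work_on(), flush_delayed_work() and cancel_delayed_work() in the hunks around this point, so every path that flips PENDING contends on the same per-CPU lock instead of relying on hard interrupt disabling.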
-@@ -2895,10 +2931,10 @@ - */ - bool flush_delayed_work(struct delayed_work *dwork) - { -- local_irq_disable(); -+ local_lock_irq(pendingb_lock); - if (del_timer_sync(&dwork->timer)) - __queue_work(dwork->cpu, dwork->wq, &dwork->work); -- local_irq_enable(); -+ local_unlock_irq(pendingb_lock); - return flush_work(&dwork->work); - } - EXPORT_SYMBOL(flush_delayed_work); -@@ -2933,7 +2969,7 @@ - - set_work_pool_and_clear_pending(&dwork->work, - get_work_pool_id(&dwork->work)); -- local_irq_restore(flags); -+ local_unlock_irqrestore(pendingb_lock, flags); - return ret; - } - EXPORT_SYMBOL(cancel_delayed_work); -@@ -3161,7 +3197,7 @@ - * put_unbound_pool - put a worker_pool - * @pool: worker_pool to put - * -- * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU -+ * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU - * safe manner. get_unbound_pool() calls this function on its failure path - * and this function should be able to release pools which went through, - * successfully or not, init_worker_pool(). -@@ -3215,8 +3251,8 @@ - del_timer_sync(&pool->idle_timer); - del_timer_sync(&pool->mayday_timer); - -- /* sched-RCU protected to allow dereferences from get_work_pool() */ -- call_rcu_sched(&pool->rcu, rcu_free_pool); -+ /* RCU protected to allow dereferences from get_work_pool() */ -+ call_rcu(&pool->rcu, rcu_free_pool); - } - - /** -@@ -3323,14 +3359,14 @@ - put_unbound_pool(pool); - mutex_unlock(&wq_pool_mutex); - -- call_rcu_sched(&pwq->rcu, rcu_free_pwq); -+ call_rcu(&pwq->rcu, rcu_free_pwq); - - /* - * If we're the last pwq going away, @wq is already dead and no one - * is gonna access it anymore. Schedule RCU free. - */ - if (is_last) -- call_rcu_sched(&wq->rcu, rcu_free_wq); -+ call_rcu(&wq->rcu, rcu_free_wq); - } - - /** -@@ -3983,7 +4019,7 @@ - * The base ref is never dropped on per-cpu pwqs. Directly - * schedule RCU free. - */ -- call_rcu_sched(&wq->rcu, rcu_free_wq); -+ call_rcu(&wq->rcu, rcu_free_wq); - } else { - /* - * We're the sole accessor of @wq at this point. Directly -@@ -4076,7 +4112,8 @@ - struct pool_workqueue *pwq; - bool ret; - -- rcu_read_lock_sched(); -+ rcu_read_lock(); -+ preempt_disable(); - - if (cpu == WORK_CPU_UNBOUND) - cpu = smp_processor_id(); -@@ -4087,7 +4124,8 @@ - pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); - - ret = !list_empty(&pwq->delayed_works); -- rcu_read_unlock_sched(); -+ preempt_enable(); -+ rcu_read_unlock(); - - return ret; - } -@@ -4113,15 +4151,15 @@ - if (work_pending(work)) - ret |= WORK_BUSY_PENDING; - -- local_irq_save(flags); -+ rcu_read_lock(); - pool = get_work_pool(work); - if (pool) { -- spin_lock(&pool->lock); -+ spin_lock_irqsave(&pool->lock, flags); - if (find_worker_executing_work(pool, work)) - ret |= WORK_BUSY_RUNNING; -- spin_unlock(&pool->lock); -+ spin_unlock_irqrestore(&pool->lock, flags); - } -- local_irq_restore(flags); -+ rcu_read_unlock(); - - return ret; - } -@@ -4310,7 +4348,7 @@ - unsigned long flags; - int pi; - -- rcu_read_lock_sched(); -+ rcu_read_lock(); - - pr_info("Showing busy workqueues and worker pools:\n"); - -@@ -4361,7 +4399,7 @@ - spin_unlock_irqrestore(&pool->lock, flags); - } - -- rcu_read_unlock_sched(); -+ rcu_read_unlock(); - } - - /* -@@ -4722,16 +4760,16 @@ - * nr_active is monotonically decreasing. It's safe - * to peek without lock. 
- */ -- rcu_read_lock_sched(); -+ rcu_read_lock(); - for_each_pwq(pwq, wq) { - WARN_ON_ONCE(pwq->nr_active < 0); - if (pwq->nr_active) { - busy = true; -- rcu_read_unlock_sched(); -+ rcu_read_unlock(); - goto out_unlock; - } - } -- rcu_read_unlock_sched(); -+ rcu_read_unlock(); - } - out_unlock: - mutex_unlock(&wq_pool_mutex); -@@ -4921,7 +4959,8 @@ - const char *delim = ""; - int node, written = 0; - -- rcu_read_lock_sched(); -+ get_online_cpus(); -+ rcu_read_lock(); - for_each_node(node) { - written += scnprintf(buf + written, PAGE_SIZE - written, - "%s%d:%d", delim, node, -@@ -4929,7 +4968,8 @@ - delim = " "; - } - written += scnprintf(buf + written, PAGE_SIZE - written, "\n"); -- rcu_read_unlock_sched(); -+ rcu_read_unlock(); -+ put_online_cpus(); - - return written; - } -diff -Nur linux-4.4.13.orig/kernel/workqueue_internal.h linux-4.4.13/kernel/workqueue_internal.h ---- linux-4.4.13.orig/kernel/workqueue_internal.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/kernel/workqueue_internal.h 2016-06-22 21:55:08.070523867 +0200 -@@ -43,6 +43,7 @@ - unsigned long last_active; /* L: last active timestamp */ - unsigned int flags; /* X: flags */ - int id; /* I: worker id */ -+ int sleeping; /* None */ - - /* - * Opaque string set with work_set_desc(). Printed out with task -@@ -68,7 +69,7 @@ - * Scheduler hooks for concurrency managed workqueue. Only to be used from - * sched/core.c and workqueue.c. - */ --void wq_worker_waking_up(struct task_struct *task, int cpu); --struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu); -+void wq_worker_running(struct task_struct *task); -+void wq_worker_sleeping(struct task_struct *task); - - #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ -diff -Nur linux-4.4.13.orig/lib/debugobjects.c linux-4.4.13/lib/debugobjects.c ---- linux-4.4.13.orig/lib/debugobjects.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/lib/debugobjects.c 2016-06-22 21:55:08.070523867 +0200 -@@ -309,7 +309,10 @@ - struct debug_obj *obj; - unsigned long flags; - -- fill_pool(); -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (preempt_count() == 0 && !irqs_disabled()) -+#endif -+ fill_pool(); - - db = get_bucket((unsigned long) addr); - -diff -Nur linux-4.4.13.orig/lib/idr.c linux-4.4.13/lib/idr.c ---- linux-4.4.13.orig/lib/idr.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/lib/idr.c 2016-06-22 21:55:08.070523867 +0200 -@@ -30,6 +30,7 @@ - #include - #include - #include -+#include - - #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1) - #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT) -@@ -45,6 +46,37 @@ - static DEFINE_PER_CPU(int, idr_preload_cnt); - static DEFINE_SPINLOCK(simple_ida_lock); - -+#ifdef CONFIG_PREEMPT_RT_FULL -+static DEFINE_LOCAL_IRQ_LOCK(idr_lock); -+ -+static inline void idr_preload_lock(void) -+{ -+ local_lock(idr_lock); -+} -+ -+static inline void idr_preload_unlock(void) -+{ -+ local_unlock(idr_lock); -+} -+ -+void idr_preload_end(void) -+{ -+ idr_preload_unlock(); -+} -+EXPORT_SYMBOL(idr_preload_end); -+#else -+static inline void idr_preload_lock(void) -+{ -+ preempt_disable(); -+} -+ -+static inline void idr_preload_unlock(void) -+{ -+ preempt_enable(); -+} -+#endif -+ -+ - /* the maximum ID which can be allocated given idr->layers */ - static int idr_max(int layers) - { -@@ -115,14 +147,14 @@ - * context. See idr_preload() for details. 
- */ - if (!in_interrupt()) { -- preempt_disable(); -+ idr_preload_lock(); - new = __this_cpu_read(idr_preload_head); - if (new) { - __this_cpu_write(idr_preload_head, new->ary[0]); - __this_cpu_dec(idr_preload_cnt); - new->ary[0] = NULL; - } -- preempt_enable(); -+ idr_preload_unlock(); - if (new) - return new; - } -@@ -366,7 +398,6 @@ - idr_mark_full(pa, id); - } - -- - /** - * idr_preload - preload for idr_alloc() - * @gfp_mask: allocation mask to use for preloading -@@ -401,7 +432,7 @@ - WARN_ON_ONCE(in_interrupt()); - might_sleep_if(gfpflags_allow_blocking(gfp_mask)); - -- preempt_disable(); -+ idr_preload_lock(); - - /* - * idr_alloc() is likely to succeed w/o full idr_layer buffer and -@@ -413,9 +444,9 @@ - while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) { - struct idr_layer *new; - -- preempt_enable(); -+ idr_preload_unlock(); - new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); -- preempt_disable(); -+ idr_preload_lock(); - if (!new) - break; - -diff -Nur linux-4.4.13.orig/lib/Kconfig linux-4.4.13/lib/Kconfig ---- linux-4.4.13.orig/lib/Kconfig 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/lib/Kconfig 2016-06-22 21:55:08.070523867 +0200 -@@ -397,6 +397,7 @@ - - config CPUMASK_OFFSTACK - bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS -+ depends on !PREEMPT_RT_FULL - help - Use dynamic allocation for cpumask_var_t, instead of putting - them on the stack. This is a bit more expensive, but avoids -diff -Nur linux-4.4.13.orig/lib/locking-selftest.c linux-4.4.13/lib/locking-selftest.c ---- linux-4.4.13.orig/lib/locking-selftest.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/lib/locking-selftest.c 2016-06-22 21:55:08.070523867 +0200 -@@ -590,6 +590,8 @@ - #include "locking-selftest-spin-hardirq.h" - GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin) - -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - #include "locking-selftest-rlock-hardirq.h" - GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock) - -@@ -605,9 +607,12 @@ - #include "locking-selftest-wlock-softirq.h" - GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock) - -+#endif -+ - #undef E1 - #undef E2 - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* - * Enabling hardirqs with a softirq-safe lock held: - */ -@@ -640,6 +645,8 @@ - #undef E1 - #undef E2 - -+#endif -+ - /* - * Enabling irqs with an irq-safe lock held: - */ -@@ -663,6 +670,8 @@ - #include "locking-selftest-spin-hardirq.h" - GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin) - -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - #include "locking-selftest-rlock-hardirq.h" - GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock) - -@@ -678,6 +687,8 @@ - #include "locking-selftest-wlock-softirq.h" - GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock) - -+#endif -+ - #undef E1 - #undef E2 - -@@ -709,6 +720,8 @@ - #include "locking-selftest-spin-hardirq.h" - GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin) - -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - #include "locking-selftest-rlock-hardirq.h" - GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock) - -@@ -724,6 +737,8 @@ - #include "locking-selftest-wlock-softirq.h" - GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock) - -+#endif -+ - #undef E1 - #undef E2 - #undef E3 -@@ -757,6 +772,8 @@ - #include "locking-selftest-spin-hardirq.h" - GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin) - -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - #include "locking-selftest-rlock-hardirq.h" - GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock) - -@@ -772,10 +789,14 @@ - #include "locking-selftest-wlock-softirq.h" - 
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock) - -+#endif -+ - #undef E1 - #undef E2 - #undef E3 - -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - /* - * read-lock / write-lock irq inversion. - * -@@ -838,6 +859,10 @@ - #undef E2 - #undef E3 - -+#endif -+ -+#ifndef CONFIG_PREEMPT_RT_FULL -+ - /* - * read-lock / write-lock recursion that is actually safe. - */ -@@ -876,6 +901,8 @@ - #undef E2 - #undef E3 - -+#endif -+ - /* - * read-lock / write-lock recursion that is unsafe. - */ -@@ -1858,6 +1885,7 @@ - - printk(" --------------------------------------------------------------------------\n"); - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* - * irq-context testcases: - */ -@@ -1870,6 +1898,28 @@ - - DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion); - // DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2); -+#else -+ /* On -rt, we only do hardirq context test for raw spinlock */ -+ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12); -+ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21); -+ -+ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12); -+ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21); -+ -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321); -+ -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312); -+ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321); -+#endif - - ww_tests(); - -diff -Nur linux-4.4.13.orig/lib/percpu_ida.c linux-4.4.13/lib/percpu_ida.c ---- linux-4.4.13.orig/lib/percpu_ida.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/lib/percpu_ida.c 2016-06-22 21:55:08.074523867 +0200 -@@ -26,6 +26,9 @@ - #include - #include - #include -+#include -+ -+static DEFINE_LOCAL_IRQ_LOCK(irq_off_lock); - - struct percpu_ida_cpu { - /* -@@ -148,13 +151,13 @@ - unsigned long flags; - int tag; - -- local_irq_save(flags); -+ local_lock_irqsave(irq_off_lock, flags); - tags = this_cpu_ptr(pool->tag_cpu); - - /* Fastpath */ - tag = alloc_local_tag(tags); - if (likely(tag >= 0)) { -- local_irq_restore(flags); -+ local_unlock_irqrestore(irq_off_lock, flags); - return tag; - } - -@@ -173,6 +176,7 @@ - - if (!tags->nr_free) - alloc_global_tags(pool, tags); -+ - if (!tags->nr_free) - steal_tags(pool, tags); - -@@ -184,7 +188,7 @@ - } - - spin_unlock(&pool->lock); -- local_irq_restore(flags); -+ local_unlock_irqrestore(irq_off_lock, flags); - - if (tag >= 0 || state == TASK_RUNNING) - break; -@@ -196,7 +200,7 @@ - - schedule(); - -- local_irq_save(flags); -+ local_lock_irqsave(irq_off_lock, flags); - tags = this_cpu_ptr(pool->tag_cpu); - } - if (state != TASK_RUNNING) -@@ -221,7 +225,7 @@ - - BUG_ON(tag >= pool->nr_tags); - -- local_irq_save(flags); -+ local_lock_irqsave(irq_off_lock, flags); - tags = this_cpu_ptr(pool->tag_cpu); - - spin_lock(&tags->lock); -@@ -253,7 +257,7 @@ - spin_unlock(&pool->lock); - } - -- 
local_irq_restore(flags); -+ local_unlock_irqrestore(irq_off_lock, flags); - } - EXPORT_SYMBOL_GPL(percpu_ida_free); - -@@ -345,7 +349,7 @@ - struct percpu_ida_cpu *remote; - unsigned cpu, i, err = 0; - -- local_irq_save(flags); -+ local_lock_irqsave(irq_off_lock, flags); - for_each_possible_cpu(cpu) { - remote = per_cpu_ptr(pool->tag_cpu, cpu); - spin_lock(&remote->lock); -@@ -367,7 +371,7 @@ - } - spin_unlock(&pool->lock); - out: -- local_irq_restore(flags); -+ local_unlock_irqrestore(irq_off_lock, flags); - return err; - } - EXPORT_SYMBOL_GPL(percpu_ida_for_each_free); -diff -Nur linux-4.4.13.orig/lib/radix-tree.c linux-4.4.13/lib/radix-tree.c ---- linux-4.4.13.orig/lib/radix-tree.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/lib/radix-tree.c 2016-06-22 21:55:08.074523867 +0200 -@@ -196,13 +196,14 @@ - * succeed in getting a node here (and never reach - * kmem_cache_alloc) - */ -- rtp = this_cpu_ptr(&radix_tree_preloads); -+ rtp = &get_cpu_var(radix_tree_preloads); - if (rtp->nr) { - ret = rtp->nodes; - rtp->nodes = ret->private_data; - ret->private_data = NULL; - rtp->nr--; - } -+ put_cpu_var(radix_tree_preloads); - /* - * Update the allocation stack trace as this is more useful - * for debugging. -@@ -242,6 +243,7 @@ - call_rcu(&node->rcu_head, radix_tree_node_rcu_free); - } - -+#ifndef CONFIG_PREEMPT_RT_FULL - /* - * Load up this CPU's radix_tree_node buffer with sufficient objects to - * ensure that the addition of a single element in the tree cannot fail. On -@@ -310,6 +312,7 @@ - return 0; - } - EXPORT_SYMBOL(radix_tree_maybe_preload); -+#endif - - /* - * Return the maximum key which can be store into a -diff -Nur linux-4.4.13.orig/lib/rbtree.c linux-4.4.13/lib/rbtree.c ---- linux-4.4.13.orig/lib/rbtree.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/lib/rbtree.c 2016-06-22 21:55:08.074523867 +0200 -@@ -23,6 +23,7 @@ - - #include - #include -+#include - - /* - * red-black trees properties: http://en.wikipedia.org/wiki/Rbtree -@@ -590,3 +591,13 @@ - return rb_left_deepest_node(root->rb_node); - } - EXPORT_SYMBOL(rb_first_postorder); -+ -+void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent, -+ struct rb_node **rb_link) -+{ -+ node->__rb_parent_color = (unsigned long)parent; -+ node->rb_left = node->rb_right = NULL; -+ -+ rcu_assign_pointer(*rb_link, node); -+} -+EXPORT_SYMBOL(rb_link_node_rcu); -diff -Nur linux-4.4.13.orig/lib/scatterlist.c linux-4.4.13/lib/scatterlist.c ---- linux-4.4.13.orig/lib/scatterlist.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/lib/scatterlist.c 2016-06-22 21:55:08.074523867 +0200 -@@ -620,7 +620,7 @@ - flush_kernel_dcache_page(miter->page); - - if (miter->__flags & SG_MITER_ATOMIC) { -- WARN_ON_ONCE(preemptible()); -+ WARN_ON_ONCE(!pagefault_disabled()); - kunmap_atomic(miter->addr); - } else - kunmap(miter->page); -@@ -664,7 +664,7 @@ - if (!sg_miter_skip(&miter, skip)) - return false; - -- local_irq_save(flags); -+ local_irq_save_nort(flags); - - while (sg_miter_next(&miter) && offset < buflen) { - unsigned int len; -@@ -681,7 +681,7 @@ - - sg_miter_stop(&miter); - -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - return offset; - } - EXPORT_SYMBOL(sg_copy_buffer); -diff -Nur linux-4.4.13.orig/lib/smp_processor_id.c linux-4.4.13/lib/smp_processor_id.c ---- linux-4.4.13.orig/lib/smp_processor_id.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/lib/smp_processor_id.c 2016-06-22 21:55:08.074523867 +0200 -@@ -39,8 +39,9 @@ - if (!printk_ratelimit()) - goto out_enable; - -- printk(KERN_ERR 
"BUG: using %s%s() in preemptible [%08x] code: %s/%d\n", -- what1, what2, preempt_count() - 1, current->comm, current->pid); -+ printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x %08x] code: %s/%d\n", -+ what1, what2, preempt_count() - 1, __migrate_disabled(current), -+ current->comm, current->pid); - - print_symbol("caller is %s\n", (long)__builtin_return_address(0)); - dump_stack(); -diff -Nur linux-4.4.13.orig/Makefile linux-4.4.13/Makefile ---- linux-4.4.13.orig/Makefile 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/Makefile 2016-06-22 21:55:08.074523867 +0200 -@@ -768,6 +768,9 @@ - # Prohibit date/time macros, which would make the build non-deterministic - KBUILD_CFLAGS += $(call cc-option,-Werror=date-time) - -+# enforce correct pointer usage -+KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types) -+ - # use the deterministic mode of AR if available - KBUILD_ARFLAGS := $(call ar-option,D) - -diff -Nur linux-4.4.13.orig/mm/backing-dev.c linux-4.4.13/mm/backing-dev.c ---- linux-4.4.13.orig/mm/backing-dev.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/mm/backing-dev.c 2016-06-22 21:55:08.074523867 +0200 -@@ -457,9 +457,9 @@ - { - unsigned long flags; - -- local_irq_save(flags); -+ local_irq_save_nort(flags); - if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) { -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - return; - } - -diff -Nur linux-4.4.13.orig/mm/compaction.c linux-4.4.13/mm/compaction.c ---- linux-4.4.13.orig/mm/compaction.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/mm/compaction.c 2016-06-22 21:55:08.074523867 +0200 -@@ -1435,10 +1435,12 @@ - cc->migrate_pfn & ~((1UL << cc->order) - 1); - - if (cc->last_migrated_pfn < current_block_start) { -- cpu = get_cpu(); -+ cpu = get_cpu_light(); -+ local_lock_irq(swapvec_lock); - lru_add_drain_cpu(cpu); -+ local_unlock_irq(swapvec_lock); - drain_local_pages(zone); -- put_cpu(); -+ put_cpu_light(); - /* No more flushing until we migrate again */ - cc->last_migrated_pfn = 0; - } -diff -Nur linux-4.4.13.orig/mm/filemap.c linux-4.4.13/mm/filemap.c ---- linux-4.4.13.orig/mm/filemap.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/mm/filemap.c 2016-06-22 21:55:08.074523867 +0200 -@@ -168,7 +168,9 @@ - if (!workingset_node_pages(node) && - list_empty(&node->private_list)) { - node->private_data = mapping; -- list_lru_add(&workingset_shadow_nodes, &node->private_list); -+ local_lock(workingset_shadow_lock); -+ list_lru_add(&__workingset_shadow_nodes, &node->private_list); -+ local_unlock(workingset_shadow_lock); - } - } - -@@ -597,9 +599,12 @@ - * node->private_list is protected by - * mapping->tree_lock. - */ -- if (!list_empty(&node->private_list)) -- list_lru_del(&workingset_shadow_nodes, -+ if (!list_empty(&node->private_list)) { -+ local_lock(workingset_shadow_lock); -+ list_lru_del(&__workingset_shadow_nodes, - &node->private_list); -+ local_unlock(workingset_shadow_lock); -+ } - } - return 0; - } -diff -Nur linux-4.4.13.orig/mm/highmem.c linux-4.4.13/mm/highmem.c ---- linux-4.4.13.orig/mm/highmem.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/mm/highmem.c 2016-06-22 21:55:08.074523867 +0200 -@@ -29,10 +29,11 @@ - #include - #include - -- -+#ifndef CONFIG_PREEMPT_RT_FULL - #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) - DEFINE_PER_CPU(int, __kmap_atomic_idx); - #endif -+#endif - - /* - * Virtual_count is not a pure "count". 
-@@ -107,8 +108,9 @@ - unsigned long totalhigh_pages __read_mostly; - EXPORT_SYMBOL(totalhigh_pages); - -- -+#ifndef CONFIG_PREEMPT_RT_FULL - EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx); -+#endif - - unsigned int nr_free_highpages (void) - { -diff -Nur linux-4.4.13.orig/mm/Kconfig linux-4.4.13/mm/Kconfig ---- linux-4.4.13.orig/mm/Kconfig 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/mm/Kconfig 2016-06-22 21:55:08.074523867 +0200 -@@ -392,7 +392,7 @@ - - config TRANSPARENT_HUGEPAGE - bool "Transparent Hugepage Support" -- depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE -+ depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL - select COMPACTION - help - Transparent Hugepages allows the kernel to use huge pages and -diff -Nur linux-4.4.13.orig/mm/memcontrol.c linux-4.4.13/mm/memcontrol.c ---- linux-4.4.13.orig/mm/memcontrol.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/mm/memcontrol.c 2016-06-22 21:55:08.074523867 +0200 -@@ -67,6 +67,8 @@ - #include - #include - #include -+#include -+ - #include "slab.h" - - #include -@@ -87,6 +89,7 @@ - #define do_swap_account 0 - #endif - -+static DEFINE_LOCAL_IRQ_LOCK(event_lock); - static const char * const mem_cgroup_stat_names[] = { - "cache", - "rss", -@@ -1936,14 +1939,17 @@ - */ - static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) - { -- struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock); -+ struct memcg_stock_pcp *stock; -+ int cpu = get_cpu_light(); -+ -+ stock = &per_cpu(memcg_stock, cpu); - - if (stock->cached != memcg) { /* reset if necessary */ - drain_stock(stock); - stock->cached = memcg; - } - stock->nr_pages += nr_pages; -- put_cpu_var(memcg_stock); -+ put_cpu_light(); - } - - /* -@@ -1959,7 +1965,7 @@ - return; - /* Notify other cpus that system-wide "drain" is running */ - get_online_cpus(); -- curcpu = get_cpu(); -+ curcpu = get_cpu_light(); - for_each_online_cpu(cpu) { - struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); - struct mem_cgroup *memcg; -@@ -1976,7 +1982,7 @@ - schedule_work_on(cpu, &stock->work); - } - } -- put_cpu(); -+ put_cpu_light(); - put_online_cpus(); - mutex_unlock(&percpu_charge_mutex); - } -@@ -4618,12 +4624,12 @@ - - ret = 0; - -- local_irq_disable(); -+ local_lock_irq(event_lock); - mem_cgroup_charge_statistics(to, page, nr_pages); - memcg_check_events(to, page); - mem_cgroup_charge_statistics(from, page, -nr_pages); - memcg_check_events(from, page); -- local_irq_enable(); -+ local_unlock_irq(event_lock); - out_unlock: - unlock_page(page); - out: -@@ -5411,10 +5417,10 @@ - VM_BUG_ON_PAGE(!PageTransHuge(page), page); - } - -- local_irq_disable(); -+ local_lock_irq(event_lock); - mem_cgroup_charge_statistics(memcg, page, nr_pages); - memcg_check_events(memcg, page); -- local_irq_enable(); -+ local_unlock_irq(event_lock); - - if (do_swap_account && PageSwapCache(page)) { - swp_entry_t entry = { .val = page_private(page) }; -@@ -5470,14 +5476,14 @@ - memcg_oom_recover(memcg); - } - -- local_irq_save(flags); -+ local_lock_irqsave(event_lock, flags); - __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon); - __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file); - __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge); - __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout); - __this_cpu_add(memcg->stat->nr_page_events, nr_pages); - memcg_check_events(memcg, dummy_page); -- local_irq_restore(flags); -+ local_unlock_irqrestore(event_lock, flags); - - if (!mem_cgroup_is_root(memcg)) - 
css_put_many(&memcg->css, nr_pages); -@@ -5669,6 +5675,7 @@ - { - struct mem_cgroup *memcg; - unsigned short oldid; -+ unsigned long flags; - - VM_BUG_ON_PAGE(PageLRU(page), page); - VM_BUG_ON_PAGE(page_count(page), page); -@@ -5697,9 +5704,13 @@ - * important here to have the interrupts disabled because it is the - * only synchronisation we have for udpating the per-CPU variables. - */ -+ local_lock_irqsave(event_lock, flags); -+#ifndef CONFIG_PREEMPT_RT_BASE - VM_BUG_ON(!irqs_disabled()); -+#endif - mem_cgroup_charge_statistics(memcg, page, -1); - memcg_check_events(memcg, page); -+ local_unlock_irqrestore(event_lock, flags); - } - - /** -diff -Nur linux-4.4.13.orig/mm/mmu_context.c linux-4.4.13/mm/mmu_context.c ---- linux-4.4.13.orig/mm/mmu_context.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/mm/mmu_context.c 2016-06-22 21:55:08.074523867 +0200 -@@ -23,6 +23,7 @@ - struct task_struct *tsk = current; - - task_lock(tsk); -+ preempt_disable_rt(); - active_mm = tsk->active_mm; - if (active_mm != mm) { - atomic_inc(&mm->mm_count); -@@ -30,6 +31,7 @@ - } - tsk->mm = mm; - switch_mm(active_mm, mm, tsk); -+ preempt_enable_rt(); - task_unlock(tsk); - #ifdef finish_arch_post_lock_switch - finish_arch_post_lock_switch(); -diff -Nur linux-4.4.13.orig/mm/page_alloc.c linux-4.4.13/mm/page_alloc.c ---- linux-4.4.13.orig/mm/page_alloc.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/mm/page_alloc.c 2016-06-22 21:55:08.078523867 +0200 -@@ -60,6 +60,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -264,6 +265,18 @@ - EXPORT_SYMBOL(nr_online_nodes); - #endif - -+static DEFINE_LOCAL_IRQ_LOCK(pa_lock); -+ -+#ifdef CONFIG_PREEMPT_RT_BASE -+# define cpu_lock_irqsave(cpu, flags) \ -+ local_lock_irqsave_on(pa_lock, flags, cpu) -+# define cpu_unlock_irqrestore(cpu, flags) \ -+ local_unlock_irqrestore_on(pa_lock, flags, cpu) -+#else -+# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags) -+# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags) -+#endif -+ - int page_group_by_mobility_disabled __read_mostly; - - #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT -@@ -784,7 +797,7 @@ - } - - /* -- * Frees a number of pages from the PCP lists -+ * Frees a number of pages which have been collected from the pcp lists. - * Assumes all pages on list are in same zone, and of same order. - * count is the number of pages to free. - * -@@ -795,18 +808,53 @@ - * pinned" detection logic. 
- */ - static void free_pcppages_bulk(struct zone *zone, int count, -- struct per_cpu_pages *pcp) -+ struct list_head *list) - { -- int migratetype = 0; -- int batch_free = 0; - int to_free = count; - unsigned long nr_scanned; -+ unsigned long flags; -+ -+ spin_lock_irqsave(&zone->lock, flags); - -- spin_lock(&zone->lock); - nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED); - if (nr_scanned) - __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned); - -+ while (!list_empty(list)) { -+ struct page *page = list_first_entry(list, struct page, lru); -+ int mt; /* migratetype of the to-be-freed page */ -+ -+ /* must delete as __free_one_page list manipulates */ -+ list_del(&page->lru); -+ -+ mt = get_pcppage_migratetype(page); -+ /* MIGRATE_ISOLATE page should not go to pcplists */ -+ VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); -+ /* Pageblock could have been isolated meanwhile */ -+ if (unlikely(has_isolate_pageblock(zone))) -+ mt = get_pageblock_migratetype(page); -+ -+ __free_one_page(page, page_to_pfn(page), zone, 0, mt); -+ trace_mm_page_pcpu_drain(page, 0, mt); -+ to_free--; -+ } -+ WARN_ON(to_free != 0); -+ spin_unlock_irqrestore(&zone->lock, flags); -+} -+ -+/* -+ * Moves a number of pages from the PCP lists to free list which -+ * is freed outside of the locked region. -+ * -+ * Assumes all pages on list are in same zone, and of same order. -+ * count is the number of pages to free. -+ */ -+static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src, -+ struct list_head *dst) -+{ -+ int migratetype = 0; -+ int batch_free = 0; -+ - while (to_free) { - struct page *page; - struct list_head *list; -@@ -822,7 +870,7 @@ - batch_free++; - if (++migratetype == MIGRATE_PCPTYPES) - migratetype = 0; -- list = &pcp->lists[migratetype]; -+ list = &src->lists[migratetype]; - } while (list_empty(list)); - - /* This is the only non-empty list. Free them all. 
*/ -@@ -830,24 +878,12 @@ - batch_free = to_free; - - do { -- int mt; /* migratetype of the to-be-freed page */ -- -- page = list_entry(list->prev, struct page, lru); -- /* must delete as __free_one_page list manipulates */ -+ page = list_last_entry(list, struct page, lru); - list_del(&page->lru); - -- mt = get_pcppage_migratetype(page); -- /* MIGRATE_ISOLATE page should not go to pcplists */ -- VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); -- /* Pageblock could have been isolated meanwhile */ -- if (unlikely(has_isolate_pageblock(zone))) -- mt = get_pageblock_migratetype(page); -- -- __free_one_page(page, page_to_pfn(page), zone, 0, mt); -- trace_mm_page_pcpu_drain(page, 0, mt); -+ list_add(&page->lru, dst); - } while (--to_free && --batch_free && !list_empty(list)); - } -- spin_unlock(&zone->lock); - } - - static void free_one_page(struct zone *zone, -@@ -856,7 +892,9 @@ - int migratetype) - { - unsigned long nr_scanned; -- spin_lock(&zone->lock); -+ unsigned long flags; -+ -+ spin_lock_irqsave(&zone->lock, flags); - nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED); - if (nr_scanned) - __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned); -@@ -866,7 +904,7 @@ - migratetype = get_pfnblock_migratetype(page, pfn); - } - __free_one_page(page, pfn, zone, order, migratetype); -- spin_unlock(&zone->lock); -+ spin_unlock_irqrestore(&zone->lock, flags); - } - - static int free_tail_pages_check(struct page *head_page, struct page *page) -@@ -1017,10 +1055,10 @@ - return; - - migratetype = get_pfnblock_migratetype(page, pfn); -- local_irq_save(flags); -+ local_lock_irqsave(pa_lock, flags); - __count_vm_events(PGFREE, 1 << order); - free_one_page(page_zone(page), page, pfn, order, migratetype); -- local_irq_restore(flags); -+ local_unlock_irqrestore(pa_lock, flags); - } - - static void __init __free_pages_boot_core(struct page *page, -@@ -1877,16 +1915,18 @@ - void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) - { - unsigned long flags; -+ LIST_HEAD(dst); - int to_drain, batch; - -- local_irq_save(flags); -+ local_lock_irqsave(pa_lock, flags); - batch = READ_ONCE(pcp->batch); - to_drain = min(pcp->count, batch); - if (to_drain > 0) { -- free_pcppages_bulk(zone, to_drain, pcp); -+ isolate_pcp_pages(to_drain, pcp, &dst); - pcp->count -= to_drain; - } -- local_irq_restore(flags); -+ local_unlock_irqrestore(pa_lock, flags); -+ free_pcppages_bulk(zone, to_drain, &dst); - } - #endif - -@@ -1902,16 +1942,21 @@ - unsigned long flags; - struct per_cpu_pageset *pset; - struct per_cpu_pages *pcp; -+ LIST_HEAD(dst); -+ int count; - -- local_irq_save(flags); -+ cpu_lock_irqsave(cpu, flags); - pset = per_cpu_ptr(zone->pageset, cpu); - - pcp = &pset->pcp; -- if (pcp->count) { -- free_pcppages_bulk(zone, pcp->count, pcp); -+ count = pcp->count; -+ if (count) { -+ isolate_pcp_pages(count, pcp, &dst); - pcp->count = 0; - } -- local_irq_restore(flags); -+ cpu_unlock_irqrestore(cpu, flags); -+ if (count) -+ free_pcppages_bulk(zone, count, &dst); - } - - /* -@@ -1997,8 +2042,17 @@ - else - cpumask_clear_cpu(cpu, &cpus_with_pcps); - } -+#ifndef CONFIG_PREEMPT_RT_BASE - on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages, - zone, 1); -+#else -+ for_each_cpu(cpu, &cpus_with_pcps) { -+ if (zone) -+ drain_pages_zone(cpu, zone); -+ else -+ drain_pages(cpu); -+ } -+#endif - } - - #ifdef CONFIG_HIBERNATION -@@ -2054,7 +2108,7 @@ - - migratetype = get_pfnblock_migratetype(page, pfn); - set_pcppage_migratetype(page, migratetype); -- local_irq_save(flags); -+ local_lock_irqsave(pa_lock, 
flags); - __count_vm_event(PGFREE); - - /* -@@ -2080,12 +2134,17 @@ - pcp->count++; - if (pcp->count >= pcp->high) { - unsigned long batch = READ_ONCE(pcp->batch); -- free_pcppages_bulk(zone, batch, pcp); -+ LIST_HEAD(dst); -+ -+ isolate_pcp_pages(batch, pcp, &dst); - pcp->count -= batch; -+ local_unlock_irqrestore(pa_lock, flags); -+ free_pcppages_bulk(zone, batch, &dst); -+ return; - } - - out: -- local_irq_restore(flags); -+ local_unlock_irqrestore(pa_lock, flags); - } - - /* -@@ -2220,7 +2279,7 @@ - struct per_cpu_pages *pcp; - struct list_head *list; - -- local_irq_save(flags); -+ local_lock_irqsave(pa_lock, flags); - pcp = &this_cpu_ptr(zone->pageset)->pcp; - list = &pcp->lists[migratetype]; - if (list_empty(list)) { -@@ -2252,7 +2311,7 @@ - */ - WARN_ON_ONCE(order > 1); - } -- spin_lock_irqsave(&zone->lock, flags); -+ local_spin_lock_irqsave(pa_lock, &zone->lock, flags); - - page = NULL; - if (alloc_flags & ALLOC_HARDER) { -@@ -2262,11 +2321,13 @@ - } - if (!page) - page = __rmqueue(zone, order, migratetype, gfp_flags); -- spin_unlock(&zone->lock); -- if (!page) -+ if (!page) { -+ spin_unlock(&zone->lock); - goto failed; -+ } - __mod_zone_freepage_state(zone, -(1 << order), - get_pcppage_migratetype(page)); -+ spin_unlock(&zone->lock); - } - - __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); -@@ -2276,13 +2337,13 @@ - - __count_zone_vm_events(PGALLOC, zone, 1 << order); - zone_statistics(preferred_zone, zone, gfp_flags); -- local_irq_restore(flags); -+ local_unlock_irqrestore(pa_lock, flags); - - VM_BUG_ON_PAGE(bad_range(zone, page), page); - return page; - - failed: -- local_irq_restore(flags); -+ local_unlock_irqrestore(pa_lock, flags); - return NULL; - } - -@@ -5948,6 +6009,7 @@ - void __init page_alloc_init(void) - { - hotcpu_notifier(page_alloc_cpu_notify, 0); -+ local_irq_lock_init(pa_lock); - } - - /* -@@ -6842,7 +6904,7 @@ - struct per_cpu_pageset *pset; - - /* avoid races with drain_pages() */ -- local_irq_save(flags); -+ local_lock_irqsave(pa_lock, flags); - if (zone->pageset != &boot_pageset) { - for_each_online_cpu(cpu) { - pset = per_cpu_ptr(zone->pageset, cpu); -@@ -6851,7 +6913,7 @@ - free_percpu(zone->pageset); - zone->pageset = &boot_pageset; - } -- local_irq_restore(flags); -+ local_unlock_irqrestore(pa_lock, flags); - } - - #ifdef CONFIG_MEMORY_HOTREMOVE -diff -Nur linux-4.4.13.orig/mm/slab.h linux-4.4.13/mm/slab.h ---- linux-4.4.13.orig/mm/slab.h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/mm/slab.h 2016-06-22 21:55:08.078523867 +0200 -@@ -324,7 +324,11 @@ - * The slab lists for all objects. 
- */ - struct kmem_cache_node { -+#ifdef CONFIG_SLUB -+ raw_spinlock_t list_lock; -+#else - spinlock_t list_lock; -+#endif - - #ifdef CONFIG_SLAB - struct list_head slabs_partial; /* partial list first, better asm code */ -diff -Nur linux-4.4.13.orig/mm/slub.c linux-4.4.13/mm/slub.c ---- linux-4.4.13.orig/mm/slub.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/mm/slub.c 2016-06-22 21:55:08.078523867 +0200 -@@ -1075,7 +1075,7 @@ - void *object = head; - int cnt = 0; - -- spin_lock_irqsave(&n->list_lock, *flags); -+ raw_spin_lock_irqsave(&n->list_lock, *flags); - slab_lock(page); - - if (!check_slab(s, page)) -@@ -1136,7 +1136,7 @@ - - fail: - slab_unlock(page); -- spin_unlock_irqrestore(&n->list_lock, *flags); -+ raw_spin_unlock_irqrestore(&n->list_lock, *flags); - slab_fix(s, "Object at 0x%p not freed", object); - return NULL; - } -@@ -1263,6 +1263,12 @@ - - #endif /* CONFIG_SLUB_DEBUG */ - -+struct slub_free_list { -+ raw_spinlock_t lock; -+ struct list_head list; -+}; -+static DEFINE_PER_CPU(struct slub_free_list, slub_free_list); -+ - /* - * Hooks for other subsystems that check memory allocations. In a typical - * production configuration these hooks all should produce no code at all. -@@ -1399,10 +1405,17 @@ - gfp_t alloc_gfp; - void *start, *p; - int idx, order; -+ bool enableirqs = false; - - flags &= gfp_allowed_mask; - - if (gfpflags_allow_blocking(flags)) -+ enableirqs = true; -+#ifdef CONFIG_PREEMPT_RT_FULL -+ if (system_state == SYSTEM_RUNNING) -+ enableirqs = true; -+#endif -+ if (enableirqs) - local_irq_enable(); - - flags |= s->allocflags; -@@ -1473,7 +1486,7 @@ - page->frozen = 1; - - out: -- if (gfpflags_allow_blocking(flags)) -+ if (enableirqs) - local_irq_disable(); - if (!page) - return NULL; -@@ -1529,6 +1542,16 @@ - __free_kmem_pages(page, order); - } - -+static void free_delayed(struct list_head *h) -+{ -+ while(!list_empty(h)) { -+ struct page *page = list_first_entry(h, struct page, lru); -+ -+ list_del(&page->lru); -+ __free_slab(page->slab_cache, page); -+ } -+} -+ - #define need_reserve_slab_rcu \ - (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head)) - -@@ -1560,6 +1583,12 @@ - } - - call_rcu(head, rcu_free_slab); -+ } else if (irqs_disabled()) { -+ struct slub_free_list *f = this_cpu_ptr(&slub_free_list); -+ -+ raw_spin_lock(&f->lock); -+ list_add(&page->lru, &f->list); -+ raw_spin_unlock(&f->lock); - } else - __free_slab(s, page); - } -@@ -1673,7 +1702,7 @@ - if (!n || !n->nr_partial) - return NULL; - -- spin_lock(&n->list_lock); -+ raw_spin_lock(&n->list_lock); - list_for_each_entry_safe(page, page2, &n->partial, lru) { - void *t; - -@@ -1698,7 +1727,7 @@ - break; - - } -- spin_unlock(&n->list_lock); -+ raw_spin_unlock(&n->list_lock); - return object; - } - -@@ -1944,7 +1973,7 @@ - * that acquire_slab() will see a slab page that - * is frozen - */ -- spin_lock(&n->list_lock); -+ raw_spin_lock(&n->list_lock); - } - } else { - m = M_FULL; -@@ -1955,7 +1984,7 @@ - * slabs from diagnostic functions will not see - * any frozen slabs. 
- */ -- spin_lock(&n->list_lock); -+ raw_spin_lock(&n->list_lock); - } - } - -@@ -1990,7 +2019,7 @@ - goto redo; - - if (lock) -- spin_unlock(&n->list_lock); -+ raw_spin_unlock(&n->list_lock); - - if (m == M_FREE) { - stat(s, DEACTIVATE_EMPTY); -@@ -2022,10 +2051,10 @@ - n2 = get_node(s, page_to_nid(page)); - if (n != n2) { - if (n) -- spin_unlock(&n->list_lock); -+ raw_spin_unlock(&n->list_lock); - - n = n2; -- spin_lock(&n->list_lock); -+ raw_spin_lock(&n->list_lock); - } - - do { -@@ -2054,7 +2083,7 @@ - } - - if (n) -- spin_unlock(&n->list_lock); -+ raw_spin_unlock(&n->list_lock); - - while (discard_page) { - page = discard_page; -@@ -2093,14 +2122,21 @@ - pobjects = oldpage->pobjects; - pages = oldpage->pages; - if (drain && pobjects > s->cpu_partial) { -+ struct slub_free_list *f; - unsigned long flags; -+ LIST_HEAD(tofree); - /* - * partial array is full. Move the existing - * set to the per node partial list. - */ - local_irq_save(flags); - unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); -+ f = this_cpu_ptr(&slub_free_list); -+ raw_spin_lock(&f->lock); -+ list_splice_init(&f->list, &tofree); -+ raw_spin_unlock(&f->lock); - local_irq_restore(flags); -+ free_delayed(&tofree); - oldpage = NULL; - pobjects = 0; - pages = 0; -@@ -2172,7 +2208,22 @@ - - static void flush_all(struct kmem_cache *s) - { -+ LIST_HEAD(tofree); -+ int cpu; -+ - on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC); -+ for_each_online_cpu(cpu) { -+ struct slub_free_list *f; -+ -+ if (!has_cpu_slab(cpu, s)) -+ continue; -+ -+ f = &per_cpu(slub_free_list, cpu); -+ raw_spin_lock_irq(&f->lock); -+ list_splice_init(&f->list, &tofree); -+ raw_spin_unlock_irq(&f->lock); -+ free_delayed(&tofree); -+ } - } - - /* -@@ -2208,10 +2259,10 @@ - unsigned long x = 0; - struct page *page; - -- spin_lock_irqsave(&n->list_lock, flags); -+ raw_spin_lock_irqsave(&n->list_lock, flags); - list_for_each_entry(page, &n->partial, lru) - x += get_count(page); -- spin_unlock_irqrestore(&n->list_lock, flags); -+ raw_spin_unlock_irqrestore(&n->list_lock, flags); - return x; - } - #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */ -@@ -2349,8 +2400,10 @@ - * already disabled (which is the case for bulk allocation). 
- */ - static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, -- unsigned long addr, struct kmem_cache_cpu *c) -+ unsigned long addr, struct kmem_cache_cpu *c, -+ struct list_head *to_free) - { -+ struct slub_free_list *f; - void *freelist; - struct page *page; - -@@ -2410,6 +2463,13 @@ - VM_BUG_ON(!c->page->frozen); - c->freelist = get_freepointer(s, freelist); - c->tid = next_tid(c->tid); -+ -+out: -+ f = this_cpu_ptr(&slub_free_list); -+ raw_spin_lock(&f->lock); -+ list_splice_init(&f->list, to_free); -+ raw_spin_unlock(&f->lock); -+ - return freelist; - - new_slab: -@@ -2441,7 +2501,7 @@ - deactivate_slab(s, page, get_freepointer(s, freelist)); - c->page = NULL; - c->freelist = NULL; -- return freelist; -+ goto out; - } - - /* -@@ -2453,6 +2513,7 @@ - { - void *p; - unsigned long flags; -+ LIST_HEAD(tofree); - - local_irq_save(flags); - #ifdef CONFIG_PREEMPT -@@ -2464,8 +2525,9 @@ - c = this_cpu_ptr(s->cpu_slab); - #endif - -- p = ___slab_alloc(s, gfpflags, node, addr, c); -+ p = ___slab_alloc(s, gfpflags, node, addr, c, &tofree); - local_irq_restore(flags); -+ free_delayed(&tofree); - return p; - } - -@@ -2652,7 +2714,7 @@ - - do { - if (unlikely(n)) { -- spin_unlock_irqrestore(&n->list_lock, flags); -+ raw_spin_unlock_irqrestore(&n->list_lock, flags); - n = NULL; - } - prior = page->freelist; -@@ -2684,7 +2746,7 @@ - * Otherwise the list_lock will synchronize with - * other processors updating the list of slabs. - */ -- spin_lock_irqsave(&n->list_lock, flags); -+ raw_spin_lock_irqsave(&n->list_lock, flags); - - } - } -@@ -2726,7 +2788,7 @@ - add_partial(n, page, DEACTIVATE_TO_TAIL); - stat(s, FREE_ADD_PARTIAL); - } -- spin_unlock_irqrestore(&n->list_lock, flags); -+ raw_spin_unlock_irqrestore(&n->list_lock, flags); - return; - - slab_empty: -@@ -2741,7 +2803,7 @@ - remove_full(s, n, page); - } - -- spin_unlock_irqrestore(&n->list_lock, flags); -+ raw_spin_unlock_irqrestore(&n->list_lock, flags); - stat(s, FREE_SLAB); - discard_slab(s, page); - } -@@ -2913,6 +2975,7 @@ - void **p) - { - struct kmem_cache_cpu *c; -+ LIST_HEAD(to_free); - int i; - - /* memcg and kmem_cache debug support */ -@@ -2936,7 +2999,7 @@ - * of re-populating per CPU c->freelist - */ - p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, -- _RET_IP_, c); -+ _RET_IP_, c, &to_free); - if (unlikely(!p[i])) - goto error; - -@@ -2948,6 +3011,7 @@ - } - c->tid = next_tid(c->tid); - local_irq_enable(); -+ free_delayed(&to_free); - - /* Clear memory outside IRQ disabled fastpath loop */ - if (unlikely(flags & __GFP_ZERO)) { -@@ -3095,7 +3159,7 @@ - init_kmem_cache_node(struct kmem_cache_node *n) - { - n->nr_partial = 0; -- spin_lock_init(&n->list_lock); -+ raw_spin_lock_init(&n->list_lock); - INIT_LIST_HEAD(&n->partial); - #ifdef CONFIG_SLUB_DEBUG - atomic_long_set(&n->nr_slabs, 0); -@@ -3677,7 +3741,7 @@ - for (i = 0; i < SHRINK_PROMOTE_MAX; i++) - INIT_LIST_HEAD(promote + i); - -- spin_lock_irqsave(&n->list_lock, flags); -+ raw_spin_lock_irqsave(&n->list_lock, flags); - - /* - * Build lists of slabs to discard or promote. 
-@@ -3708,7 +3772,7 @@ - for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) - list_splice(promote + i, &n->partial); - -- spin_unlock_irqrestore(&n->list_lock, flags); -+ raw_spin_unlock_irqrestore(&n->list_lock, flags); - - /* Release empty slabs */ - list_for_each_entry_safe(page, t, &discard, lru) -@@ -3884,6 +3948,12 @@ - { - static __initdata struct kmem_cache boot_kmem_cache, - boot_kmem_cache_node; -+ int cpu; -+ -+ for_each_possible_cpu(cpu) { -+ raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock); -+ INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list); -+ } - - if (debug_guardpage_minorder()) - slub_max_order = 0; -@@ -4127,7 +4197,7 @@ - struct page *page; - unsigned long flags; - -- spin_lock_irqsave(&n->list_lock, flags); -+ raw_spin_lock_irqsave(&n->list_lock, flags); - - list_for_each_entry(page, &n->partial, lru) { - validate_slab_slab(s, page, map); -@@ -4149,7 +4219,7 @@ - s->name, count, atomic_long_read(&n->nr_slabs)); - - out: -- spin_unlock_irqrestore(&n->list_lock, flags); -+ raw_spin_unlock_irqrestore(&n->list_lock, flags); - return count; - } - -@@ -4337,12 +4407,12 @@ - if (!atomic_long_read(&n->nr_slabs)) - continue; - -- spin_lock_irqsave(&n->list_lock, flags); -+ raw_spin_lock_irqsave(&n->list_lock, flags); - list_for_each_entry(page, &n->partial, lru) - process_slab(&t, s, page, alloc, map); - list_for_each_entry(page, &n->full, lru) - process_slab(&t, s, page, alloc, map); -- spin_unlock_irqrestore(&n->list_lock, flags); -+ raw_spin_unlock_irqrestore(&n->list_lock, flags); - } - - for (i = 0; i < t.count; i++) { -diff -Nur linux-4.4.13.orig/mm/swap.c linux-4.4.13/mm/swap.c ---- linux-4.4.13.orig/mm/swap.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/mm/swap.c 2016-06-22 21:55:08.078523867 +0200 -@@ -31,6 +31,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -46,6 +47,9 @@ - static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs); - static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs); - -+static DEFINE_LOCAL_IRQ_LOCK(rotate_lock); -+DEFINE_LOCAL_IRQ_LOCK(swapvec_lock); -+ - /* - * This path almost never happens for VM activity - pages are normally - * freed via pagevecs. But it gets used by networking. 
-@@ -481,11 +485,11 @@ - unsigned long flags; - - page_cache_get(page); -- local_irq_save(flags); -+ local_lock_irqsave(rotate_lock, flags); - pvec = this_cpu_ptr(&lru_rotate_pvecs); - if (!pagevec_add(pvec, page)) - pagevec_move_tail(pvec); -- local_irq_restore(flags); -+ local_unlock_irqrestore(rotate_lock, flags); - } - } - -@@ -536,12 +540,13 @@ - void activate_page(struct page *page) - { - if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { -- struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); -+ struct pagevec *pvec = &get_locked_var(swapvec_lock, -+ activate_page_pvecs); - - page_cache_get(page); - if (!pagevec_add(pvec, page)) - pagevec_lru_move_fn(pvec, __activate_page, NULL); -- put_cpu_var(activate_page_pvecs); -+ put_locked_var(swapvec_lock, activate_page_pvecs); - } - } - -@@ -567,7 +572,7 @@ - - static void __lru_cache_activate_page(struct page *page) - { -- struct pagevec *pvec = &get_cpu_var(lru_add_pvec); -+ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); - int i; - - /* -@@ -589,7 +594,7 @@ - } - } - -- put_cpu_var(lru_add_pvec); -+ put_locked_var(swapvec_lock, lru_add_pvec); - } - - /* -@@ -630,13 +635,13 @@ - - static void __lru_cache_add(struct page *page) - { -- struct pagevec *pvec = &get_cpu_var(lru_add_pvec); -+ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); - - page_cache_get(page); - if (!pagevec_space(pvec)) - __pagevec_lru_add(pvec); - pagevec_add(pvec, page); -- put_cpu_var(lru_add_pvec); -+ put_locked_var(swapvec_lock, lru_add_pvec); - } - - /** -@@ -816,9 +821,9 @@ - unsigned long flags; - - /* No harm done if a racing interrupt already did this */ -- local_irq_save(flags); -+ local_lock_irqsave(rotate_lock, flags); - pagevec_move_tail(pvec); -- local_irq_restore(flags); -+ local_unlock_irqrestore(rotate_lock, flags); - } - - pvec = &per_cpu(lru_deactivate_file_pvecs, cpu); -@@ -846,18 +851,19 @@ - return; - - if (likely(get_page_unless_zero(page))) { -- struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs); -+ struct pagevec *pvec = &get_locked_var(swapvec_lock, -+ lru_deactivate_file_pvecs); - - if (!pagevec_add(pvec, page)) - pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL); -- put_cpu_var(lru_deactivate_file_pvecs); -+ put_locked_var(swapvec_lock, lru_deactivate_file_pvecs); - } - } - - void lru_add_drain(void) - { -- lru_add_drain_cpu(get_cpu()); -- put_cpu(); -+ lru_add_drain_cpu(local_lock_cpu(swapvec_lock)); -+ local_unlock_cpu(swapvec_lock); - } - - static void lru_add_drain_per_cpu(struct work_struct *dummy) -diff -Nur linux-4.4.13.orig/mm/truncate.c linux-4.4.13/mm/truncate.c ---- linux-4.4.13.orig/mm/truncate.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/mm/truncate.c 2016-06-22 21:55:08.078523867 +0200 -@@ -56,8 +56,11 @@ - * protected by mapping->tree_lock. 
- */ - if (!workingset_node_shadows(node) && -- !list_empty(&node->private_list)) -- list_lru_del(&workingset_shadow_nodes, &node->private_list); -+ !list_empty(&node->private_list)) { -+ local_lock(workingset_shadow_lock); -+ list_lru_del(&__workingset_shadow_nodes, &node->private_list); -+ local_unlock(workingset_shadow_lock); -+ } - __radix_tree_delete_node(&mapping->page_tree, node); - unlock: - spin_unlock_irq(&mapping->tree_lock); -diff -Nur linux-4.4.13.orig/mm/vmalloc.c linux-4.4.13/mm/vmalloc.c ---- linux-4.4.13.orig/mm/vmalloc.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/mm/vmalloc.c 2016-06-22 21:55:08.078523867 +0200 -@@ -821,7 +821,7 @@ - struct vmap_block *vb; - struct vmap_area *va; - unsigned long vb_idx; -- int node, err; -+ int node, err, cpu; - void *vaddr; - - node = numa_node_id(); -@@ -864,11 +864,12 @@ - BUG_ON(err); - radix_tree_preload_end(); - -- vbq = &get_cpu_var(vmap_block_queue); -+ cpu = get_cpu_light(); -+ vbq = this_cpu_ptr(&vmap_block_queue); - spin_lock(&vbq->lock); - list_add_tail_rcu(&vb->free_list, &vbq->free); - spin_unlock(&vbq->lock); -- put_cpu_var(vmap_block_queue); -+ put_cpu_light(); - - return vaddr; - } -@@ -937,6 +938,7 @@ - struct vmap_block *vb; - void *vaddr = NULL; - unsigned int order; -+ int cpu; - - BUG_ON(offset_in_page(size)); - BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); -@@ -951,7 +953,8 @@ - order = get_order(size); - - rcu_read_lock(); -- vbq = &get_cpu_var(vmap_block_queue); -+ cpu = get_cpu_light(); -+ vbq = this_cpu_ptr(&vmap_block_queue); - list_for_each_entry_rcu(vb, &vbq->free, free_list) { - unsigned long pages_off; - -@@ -974,7 +977,7 @@ - break; - } - -- put_cpu_var(vmap_block_queue); -+ put_cpu_light(); - rcu_read_unlock(); - - /* Allocate new block if nothing was found */ -diff -Nur linux-4.4.13.orig/mm/vmstat.c linux-4.4.13/mm/vmstat.c ---- linux-4.4.13.orig/mm/vmstat.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/mm/vmstat.c 2016-06-22 21:55:08.078523867 +0200 -@@ -226,6 +226,7 @@ - long x; - long t; - -+ preempt_disable_rt(); - x = delta + __this_cpu_read(*p); - - t = __this_cpu_read(pcp->stat_threshold); -@@ -235,6 +236,7 @@ - x = 0; - } - __this_cpu_write(*p, x); -+ preempt_enable_rt(); - } - EXPORT_SYMBOL(__mod_zone_page_state); - -@@ -267,6 +269,7 @@ - s8 __percpu *p = pcp->vm_stat_diff + item; - s8 v, t; - -+ preempt_disable_rt(); - v = __this_cpu_inc_return(*p); - t = __this_cpu_read(pcp->stat_threshold); - if (unlikely(v > t)) { -@@ -275,6 +278,7 @@ - zone_page_state_add(v + overstep, zone, item); - __this_cpu_write(*p, -overstep); - } -+ preempt_enable_rt(); - } - - void __inc_zone_page_state(struct page *page, enum zone_stat_item item) -@@ -289,6 +293,7 @@ - s8 __percpu *p = pcp->vm_stat_diff + item; - s8 v, t; - -+ preempt_disable_rt(); - v = __this_cpu_dec_return(*p); - t = __this_cpu_read(pcp->stat_threshold); - if (unlikely(v < - t)) { -@@ -297,6 +302,7 @@ - zone_page_state_add(v - overstep, zone, item); - __this_cpu_write(*p, overstep); - } -+ preempt_enable_rt(); - } - - void __dec_zone_page_state(struct page *page, enum zone_stat_item item) -diff -Nur linux-4.4.13.orig/mm/workingset.c linux-4.4.13/mm/workingset.c ---- linux-4.4.13.orig/mm/workingset.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/mm/workingset.c 2016-06-22 21:55:08.078523867 +0200 -@@ -264,7 +264,8 @@ - * point where they would still be useful. 
- */ - --struct list_lru workingset_shadow_nodes; -+struct list_lru __workingset_shadow_nodes; -+DEFINE_LOCAL_IRQ_LOCK(workingset_shadow_lock); - - static unsigned long count_shadow_nodes(struct shrinker *shrinker, - struct shrink_control *sc) -@@ -274,9 +275,9 @@ - unsigned long pages; - - /* list_lru lock nests inside IRQ-safe mapping->tree_lock */ -- local_irq_disable(); -- shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc); -- local_irq_enable(); -+ local_lock_irq(workingset_shadow_lock); -+ shadow_nodes = list_lru_shrink_count(&__workingset_shadow_nodes, sc); -+ local_unlock_irq(workingset_shadow_lock); - - pages = node_present_pages(sc->nid); - /* -@@ -363,9 +364,9 @@ - spin_unlock(&mapping->tree_lock); - ret = LRU_REMOVED_RETRY; - out: -- local_irq_enable(); -+ local_unlock_irq(workingset_shadow_lock); - cond_resched(); -- local_irq_disable(); -+ local_lock_irq(workingset_shadow_lock); - spin_lock(lru_lock); - return ret; - } -@@ -376,10 +377,10 @@ - unsigned long ret; - - /* list_lru lock nests inside IRQ-safe mapping->tree_lock */ -- local_irq_disable(); -- ret = list_lru_shrink_walk(&workingset_shadow_nodes, sc, -+ local_lock_irq(workingset_shadow_lock); -+ ret = list_lru_shrink_walk(&__workingset_shadow_nodes, sc, - shadow_lru_isolate, NULL); -- local_irq_enable(); -+ local_unlock_irq(workingset_shadow_lock); - return ret; - } - -@@ -400,7 +401,7 @@ - { - int ret; - -- ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key); -+ ret = list_lru_init_key(&__workingset_shadow_nodes, &shadow_nodes_key); - if (ret) - goto err; - ret = register_shrinker(&workingset_shadow_shrinker); -@@ -408,7 +409,7 @@ - goto err_list_lru; - return 0; - err_list_lru: -- list_lru_destroy(&workingset_shadow_nodes); -+ list_lru_destroy(&__workingset_shadow_nodes); - err: - return ret; - } -diff -Nur linux-4.4.13.orig/mm/zsmalloc.c linux-4.4.13/mm/zsmalloc.c ---- linux-4.4.13.orig/mm/zsmalloc.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/mm/zsmalloc.c 2016-06-22 21:55:08.078523867 +0200 -@@ -1289,7 +1289,7 @@ - class = pool->size_class[class_idx]; - off = obj_idx_to_offset(page, obj_idx, class->size); - -- area = &get_cpu_var(zs_map_area); -+ area = per_cpu_ptr(&zs_map_area, get_cpu_light()); - area->vm_mm = mm; - if (off + class->size <= PAGE_SIZE) { - /* this object is contained entirely within a page */ -@@ -1342,7 +1342,7 @@ - - __zs_unmap_object(area, pages, off, class->size); - } -- put_cpu_var(zs_map_area); -+ put_cpu_light(); - unpin_tag(handle); - } - EXPORT_SYMBOL_GPL(zs_unmap_object); -diff -Nur linux-4.4.13.orig/net/core/dev.c linux-4.4.13/net/core/dev.c ---- linux-4.4.13.orig/net/core/dev.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/net/core/dev.c 2016-06-22 21:55:08.082523867 +0200 -@@ -186,6 +186,7 @@ - static DEFINE_HASHTABLE(napi_hash, 8); - - static seqcount_t devnet_rename_seq; -+static DEFINE_MUTEX(devnet_rename_mutex); - - static inline void dev_base_seq_inc(struct net *net) - { -@@ -207,14 +208,14 @@ - static inline void rps_lock(struct softnet_data *sd) - { - #ifdef CONFIG_RPS -- spin_lock(&sd->input_pkt_queue.lock); -+ raw_spin_lock(&sd->input_pkt_queue.raw_lock); - #endif - } - - static inline void rps_unlock(struct softnet_data *sd) - { - #ifdef CONFIG_RPS -- spin_unlock(&sd->input_pkt_queue.lock); -+ raw_spin_unlock(&sd->input_pkt_queue.raw_lock); - #endif - } - -@@ -884,7 +885,8 @@ - strcpy(name, dev->name); - rcu_read_unlock(); - if (read_seqcount_retry(&devnet_rename_seq, seq)) { -- cond_resched(); -+ 
mutex_lock(&devnet_rename_mutex); -+ mutex_unlock(&devnet_rename_mutex); - goto retry; - } - -@@ -1153,20 +1155,17 @@ - if (dev->flags & IFF_UP) - return -EBUSY; - -- write_seqcount_begin(&devnet_rename_seq); -+ mutex_lock(&devnet_rename_mutex); -+ __raw_write_seqcount_begin(&devnet_rename_seq); - -- if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { -- write_seqcount_end(&devnet_rename_seq); -- return 0; -- } -+ if (strncmp(newname, dev->name, IFNAMSIZ) == 0) -+ goto outunlock; - - memcpy(oldname, dev->name, IFNAMSIZ); - - err = dev_get_valid_name(net, dev, newname); -- if (err < 0) { -- write_seqcount_end(&devnet_rename_seq); -- return err; -- } -+ if (err < 0) -+ goto outunlock; - - if (oldname[0] && !strchr(oldname, '%')) - netdev_info(dev, "renamed from %s\n", oldname); -@@ -1179,11 +1178,12 @@ - if (ret) { - memcpy(dev->name, oldname, IFNAMSIZ); - dev->name_assign_type = old_assign_type; -- write_seqcount_end(&devnet_rename_seq); -- return ret; -+ err = ret; -+ goto outunlock; - } - -- write_seqcount_end(&devnet_rename_seq); -+ __raw_write_seqcount_end(&devnet_rename_seq); -+ mutex_unlock(&devnet_rename_mutex); - - netdev_adjacent_rename_links(dev, oldname); - -@@ -1204,7 +1204,8 @@ - /* err >= 0 after dev_alloc_name() or stores the first errno */ - if (err >= 0) { - err = ret; -- write_seqcount_begin(&devnet_rename_seq); -+ mutex_lock(&devnet_rename_mutex); -+ __raw_write_seqcount_begin(&devnet_rename_seq); - memcpy(dev->name, oldname, IFNAMSIZ); - memcpy(oldname, newname, IFNAMSIZ); - dev->name_assign_type = old_assign_type; -@@ -1217,6 +1218,11 @@ - } - - return err; -+ -+outunlock: -+ __raw_write_seqcount_end(&devnet_rename_seq); -+ mutex_unlock(&devnet_rename_mutex); -+ return err; - } - - /** -@@ -2246,6 +2252,7 @@ - sd->output_queue_tailp = &q->next_sched; - raise_softirq_irqoff(NET_TX_SOFTIRQ); - local_irq_restore(flags); -+ preempt_check_resched_rt(); - } - - void __netif_schedule(struct Qdisc *q) -@@ -2327,6 +2334,7 @@ - __this_cpu_write(softnet_data.completion_queue, skb); - raise_softirq_irqoff(NET_TX_SOFTIRQ); - local_irq_restore(flags); -+ preempt_check_resched_rt(); - } - EXPORT_SYMBOL(__dev_kfree_skb_irq); - -@@ -2883,7 +2891,11 @@ - * This permits __QDISC___STATE_RUNNING owner to get the lock more - * often and dequeue packets faster. 
- */ -+#ifdef CONFIG_PREEMPT_RT_FULL -+ contended = true; -+#else - contended = qdisc_is_running(q); -+#endif - if (unlikely(contended)) - spin_lock(&q->busylock); - -@@ -2943,9 +2955,44 @@ - #define skb_update_prio(skb) - #endif - -+#ifdef CONFIG_PREEMPT_RT_FULL -+ -+static inline int xmit_rec_read(void) -+{ -+ return current->xmit_recursion; -+} -+ -+static inline void xmit_rec_inc(void) -+{ -+ current->xmit_recursion++; -+} -+ -+static inline void xmit_rec_dec(void) -+{ -+ current->xmit_recursion--; -+} -+ -+#else -+ - DEFINE_PER_CPU(int, xmit_recursion); - EXPORT_SYMBOL(xmit_recursion); - -+static inline int xmit_rec_read(void) -+{ -+ return __this_cpu_read(xmit_recursion); -+} -+ -+static inline void xmit_rec_inc(void) -+{ -+ __this_cpu_inc(xmit_recursion); -+} -+ -+static inline void xmit_rec_dec(void) -+{ -+ __this_cpu_dec(xmit_recursion); -+} -+#endif -+ - #define RECURSION_LIMIT 10 - - /** -@@ -3138,7 +3185,7 @@ - - if (txq->xmit_lock_owner != cpu) { - -- if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) -+ if (xmit_rec_read() > RECURSION_LIMIT) - goto recursion_alert; - - skb = validate_xmit_skb(skb, dev); -@@ -3148,9 +3195,9 @@ - HARD_TX_LOCK(dev, txq, cpu); - - if (!netif_xmit_stopped(txq)) { -- __this_cpu_inc(xmit_recursion); -+ xmit_rec_inc(); - skb = dev_hard_start_xmit(skb, dev, txq, &rc); -- __this_cpu_dec(xmit_recursion); -+ xmit_rec_dec(); - if (dev_xmit_complete(rc)) { - HARD_TX_UNLOCK(dev, txq); - goto out; -@@ -3524,6 +3571,7 @@ - rps_unlock(sd); - - local_irq_restore(flags); -+ preempt_check_resched_rt(); - - atomic_long_inc(&skb->dev->rx_dropped); - kfree_skb(skb); -@@ -3542,7 +3590,7 @@ - struct rps_dev_flow voidflow, *rflow = &voidflow; - int cpu; - -- preempt_disable(); -+ migrate_disable(); - rcu_read_lock(); - - cpu = get_rps_cpu(skb->dev, skb, &rflow); -@@ -3552,13 +3600,13 @@ - ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); - - rcu_read_unlock(); -- preempt_enable(); -+ migrate_enable(); - } else - #endif - { - unsigned int qtail; -- ret = enqueue_to_backlog(skb, get_cpu(), &qtail); -- put_cpu(); -+ ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail); -+ put_cpu_light(); - } - return ret; - } -@@ -3592,16 +3640,44 @@ - - trace_netif_rx_ni_entry(skb); - -- preempt_disable(); -+ local_bh_disable(); - err = netif_rx_internal(skb); -- if (local_softirq_pending()) -- do_softirq(); -- preempt_enable(); -+ local_bh_enable(); - - return err; - } - EXPORT_SYMBOL(netif_rx_ni); - -+#ifdef CONFIG_PREEMPT_RT_FULL -+/* -+ * RT runs ksoftirqd as a real time thread and the root_lock is a -+ * "sleeping spinlock". If the trylock fails then we can go into an -+ * infinite loop when ksoftirqd preempted the task which actually -+ * holds the lock, because we requeue q and raise NET_TX softirq -+ * causing ksoftirqd to loop forever. -+ * -+ * It's safe to use spin_lock on RT here as softirqs run in thread -+ * context and cannot deadlock against the thread which is holding -+ * root_lock. -+ * -+ * On !RT the trylock might fail, but there we bail out from the -+ * softirq loop after 10 attempts which we can't do on RT. 
And the -+ * task holding root_lock cannot be preempted, so the only downside of -+ * that trylock is that we need 10 loops to decide that we should have -+ * given up in the first one :) -+ */ -+static inline int take_root_lock(spinlock_t *lock) -+{ -+ spin_lock(lock); -+ return 1; -+} -+#else -+static inline int take_root_lock(spinlock_t *lock) -+{ -+ return spin_trylock(lock); -+} -+#endif -+ - static void net_tx_action(struct softirq_action *h) - { - struct softnet_data *sd = this_cpu_ptr(&softnet_data); -@@ -3643,7 +3719,7 @@ - head = head->next_sched; - - root_lock = qdisc_lock(q); -- if (spin_trylock(root_lock)) { -+ if (take_root_lock(root_lock)) { - smp_mb__before_atomic(); - clear_bit(__QDISC_STATE_SCHED, - &q->state); -@@ -4049,7 +4125,7 @@ - skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { - if (skb->dev == dev) { - __skb_unlink(skb, &sd->input_pkt_queue); -- kfree_skb(skb); -+ __skb_queue_tail(&sd->tofree_queue, skb); - input_queue_head_incr(sd); - } - } -@@ -4058,10 +4134,13 @@ - skb_queue_walk_safe(&sd->process_queue, skb, tmp) { - if (skb->dev == dev) { - __skb_unlink(skb, &sd->process_queue); -- kfree_skb(skb); -+ __skb_queue_tail(&sd->tofree_queue, skb); - input_queue_head_incr(sd); - } - } -+ -+ if (!skb_queue_empty(&sd->tofree_queue)) -+ raise_softirq_irqoff(NET_RX_SOFTIRQ); - } - - static int napi_gro_complete(struct sk_buff *skb) -@@ -4515,6 +4594,7 @@ - sd->rps_ipi_list = NULL; - - local_irq_enable(); -+ preempt_check_resched_rt(); - - /* Send pending IPI's to kick RPS processing on remote cpus. */ - while (remsd) { -@@ -4528,6 +4608,7 @@ - } else - #endif - local_irq_enable(); -+ preempt_check_resched_rt(); - } - - static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) -@@ -4609,6 +4690,7 @@ - local_irq_save(flags); - ____napi_schedule(this_cpu_ptr(&softnet_data), n); - local_irq_restore(flags); -+ preempt_check_resched_rt(); - } - EXPORT_SYMBOL(__napi_schedule); - -@@ -4885,7 +4967,7 @@ - list_splice_tail(&repoll, &list); - list_splice(&list, &sd->poll_list); - if (!list_empty(&sd->poll_list)) -- __raise_softirq_irqoff(NET_RX_SOFTIRQ); -+ __raise_softirq_irqoff_ksoft(NET_RX_SOFTIRQ); - - net_rps_action_and_irq_enable(sd); - } -@@ -7218,7 +7300,7 @@ - void synchronize_net(void) - { - might_sleep(); -- if (rtnl_is_locked()) -+ if (rtnl_is_locked() && !IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) - synchronize_rcu_expedited(); - else - synchronize_rcu(); -@@ -7459,16 +7541,20 @@ - - raise_softirq_irqoff(NET_TX_SOFTIRQ); - local_irq_enable(); -+ preempt_check_resched_rt(); - - /* Process offline CPU's input_pkt_queue */ - while ((skb = __skb_dequeue(&oldsd->process_queue))) { - netif_rx_ni(skb); - input_queue_head_incr(oldsd); - } -- while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { -+ while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { - netif_rx_ni(skb); - input_queue_head_incr(oldsd); - } -+ while ((skb = __skb_dequeue(&oldsd->tofree_queue))) { -+ kfree_skb(skb); -+ } - - return NOTIFY_OK; - } -@@ -7770,8 +7856,9 @@ - for_each_possible_cpu(i) { - struct softnet_data *sd = &per_cpu(softnet_data, i); - -- skb_queue_head_init(&sd->input_pkt_queue); -- skb_queue_head_init(&sd->process_queue); -+ skb_queue_head_init_raw(&sd->input_pkt_queue); -+ skb_queue_head_init_raw(&sd->process_queue); -+ skb_queue_head_init_raw(&sd->tofree_queue); - INIT_LIST_HEAD(&sd->poll_list); - sd->output_queue_tailp = &sd->output_queue; - #ifdef CONFIG_RPS -diff -Nur linux-4.4.13.orig/net/core/skbuff.c linux-4.4.13/net/core/skbuff.c ---- linux-4.4.13.orig/net/core/skbuff.c 
2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/net/core/skbuff.c 2016-06-22 21:55:08.082523867 +0200 -@@ -63,6 +63,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -351,6 +352,8 @@ - - static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache); - static DEFINE_PER_CPU(struct page_frag_cache, napi_alloc_cache); -+static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock); -+static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock); - - static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) - { -@@ -358,10 +361,10 @@ - unsigned long flags; - void *data; - -- local_irq_save(flags); -+ local_lock_irqsave(netdev_alloc_lock, flags); - nc = this_cpu_ptr(&netdev_alloc_cache); - data = __alloc_page_frag(nc, fragsz, gfp_mask); -- local_irq_restore(flags); -+ local_unlock_irqrestore(netdev_alloc_lock, flags); - return data; - } - -@@ -380,9 +383,13 @@ - - static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) - { -- struct page_frag_cache *nc = this_cpu_ptr(&napi_alloc_cache); -+ struct page_frag_cache *nc; -+ void *data; - -- return __alloc_page_frag(nc, fragsz, gfp_mask); -+ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache); -+ data = __alloc_page_frag(nc, fragsz, gfp_mask); -+ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache); -+ return data; - } - - void *napi_alloc_frag(unsigned int fragsz) -@@ -429,13 +436,13 @@ - if (sk_memalloc_socks()) - gfp_mask |= __GFP_MEMALLOC; - -- local_irq_save(flags); -+ local_lock_irqsave(netdev_alloc_lock, flags); - - nc = this_cpu_ptr(&netdev_alloc_cache); - data = __alloc_page_frag(nc, len, gfp_mask); - pfmemalloc = nc->pfmemalloc; - -- local_irq_restore(flags); -+ local_unlock_irqrestore(netdev_alloc_lock, flags); - - if (unlikely(!data)) - return NULL; -@@ -476,9 +483,10 @@ - struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, - gfp_t gfp_mask) - { -- struct page_frag_cache *nc = this_cpu_ptr(&napi_alloc_cache); -+ struct page_frag_cache *nc; - struct sk_buff *skb; - void *data; -+ bool pfmemalloc; - - len += NET_SKB_PAD + NET_IP_ALIGN; - -@@ -496,7 +504,11 @@ - if (sk_memalloc_socks()) - gfp_mask |= __GFP_MEMALLOC; - -+ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache); - data = __alloc_page_frag(nc, len, gfp_mask); -+ pfmemalloc = nc->pfmemalloc; -+ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache); -+ - if (unlikely(!data)) - return NULL; - -@@ -507,7 +519,7 @@ - } - - /* use OR instead of assignment to avoid clearing of bits in mask */ -- if (nc->pfmemalloc) -+ if (pfmemalloc) - skb->pfmemalloc = 1; - skb->head_frag = 1; - -diff -Nur linux-4.4.13.orig/net/core/sock.c linux-4.4.13/net/core/sock.c ---- linux-4.4.13.orig/net/core/sock.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/net/core/sock.c 2016-06-22 21:55:08.082523867 +0200 -@@ -2435,12 +2435,11 @@ - if (sk->sk_lock.owned) - __lock_sock(sk); - sk->sk_lock.owned = 1; -- spin_unlock(&sk->sk_lock.slock); -+ spin_unlock_bh(&sk->sk_lock.slock); - /* - * The sk_lock has mutex_lock() semantics here: - */ - mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); -- local_bh_enable(); - } - EXPORT_SYMBOL(lock_sock_nested); - -diff -Nur linux-4.4.13.orig/net/ipv4/icmp.c linux-4.4.13/net/ipv4/icmp.c ---- linux-4.4.13.orig/net/ipv4/icmp.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/net/ipv4/icmp.c 2016-06-22 21:55:08.082523867 +0200 -@@ -69,6 +69,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -891,6 +892,30 @@ - } - - /* -+ * 32bit and 64bit 
have different timestamp length, so we check for -+ * the cookie at offset 20 and verify it is repeated at offset 50 -+ */ -+#define CO_POS0 20 -+#define CO_POS1 50 -+#define CO_SIZE sizeof(int) -+#define ICMP_SYSRQ_SIZE 57 -+ -+/* -+ * We got a ICMP_SYSRQ_SIZE sized ping request. Check for the cookie -+ * pattern and if it matches send the next byte as a trigger to sysrq. -+ */ -+static void icmp_check_sysrq(struct net *net, struct sk_buff *skb) -+{ -+ int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq); -+ char *p = skb->data; -+ -+ if (!memcmp(&cookie, p + CO_POS0, CO_SIZE) && -+ !memcmp(&cookie, p + CO_POS1, CO_SIZE) && -+ p[CO_POS0 + CO_SIZE] == p[CO_POS1 + CO_SIZE]) -+ handle_sysrq(p[CO_POS0 + CO_SIZE]); -+} -+ -+/* - * Handle ICMP_ECHO ("ping") requests. - * - * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo -@@ -917,6 +942,11 @@ - icmp_param.data_len = skb->len; - icmp_param.head_len = sizeof(struct icmphdr); - icmp_reply(&icmp_param, skb); -+ -+ if (skb->len == ICMP_SYSRQ_SIZE && -+ net->ipv4.sysctl_icmp_echo_sysrq) { -+ icmp_check_sysrq(net, skb); -+ } - } - /* should there be an ICMP stat for ignored echos? */ - return true; -diff -Nur linux-4.4.13.orig/net/ipv4/sysctl_net_ipv4.c linux-4.4.13/net/ipv4/sysctl_net_ipv4.c ---- linux-4.4.13.orig/net/ipv4/sysctl_net_ipv4.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/net/ipv4/sysctl_net_ipv4.c 2016-06-22 21:55:08.082523867 +0200 -@@ -818,6 +818,13 @@ - .proc_handler = proc_dointvec - }, - { -+ .procname = "icmp_echo_sysrq", -+ .data = &init_net.ipv4.sysctl_icmp_echo_sysrq, -+ .maxlen = sizeof(int), -+ .mode = 0644, -+ .proc_handler = proc_dointvec -+ }, -+ { - .procname = "icmp_ignore_bogus_error_responses", - .data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses, - .maxlen = sizeof(int), -diff -Nur linux-4.4.13.orig/net/mac80211/rx.c linux-4.4.13/net/mac80211/rx.c ---- linux-4.4.13.orig/net/mac80211/rx.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/net/mac80211/rx.c 2016-06-22 21:55:08.082523867 +0200 -@@ -3574,7 +3574,7 @@ - struct ieee80211_supported_band *sband; - struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); - -- WARN_ON_ONCE(softirq_count() == 0); -+ WARN_ON_ONCE_NONRT(softirq_count() == 0); - - if (WARN_ON(status->band >= IEEE80211_NUM_BANDS)) - goto drop; -diff -Nur linux-4.4.13.orig/net/netfilter/core.c linux-4.4.13/net/netfilter/core.c ---- linux-4.4.13.orig/net/netfilter/core.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/net/netfilter/core.c 2016-06-22 21:55:08.082523867 +0200 -@@ -22,11 +22,17 @@ - #include - #include - #include -+#include - #include - #include - - #include "nf_internals.h" - -+#ifdef CONFIG_PREEMPT_RT_BASE -+DEFINE_LOCAL_IRQ_LOCK(xt_write_lock); -+EXPORT_PER_CPU_SYMBOL(xt_write_lock); -+#endif -+ - static DEFINE_MUTEX(afinfo_mutex); - - const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly; -diff -Nur linux-4.4.13.orig/net/packet/af_packet.c linux-4.4.13/net/packet/af_packet.c ---- linux-4.4.13.orig/net/packet/af_packet.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/net/packet/af_packet.c 2016-06-22 21:55:08.086523867 +0200 -@@ -63,6 +63,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -695,7 +696,7 @@ - if (BLOCK_NUM_PKTS(pbd)) { - while (atomic_read(&pkc->blk_fill_in_prog)) { - /* Waiting for skb_copy_bits to finish... 
*/ -- cpu_relax(); -+ cpu_chill(); - } - } - -@@ -957,7 +958,7 @@ - if (!(status & TP_STATUS_BLK_TMO)) { - while (atomic_read(&pkc->blk_fill_in_prog)) { - /* Waiting for skb_copy_bits to finish... */ -- cpu_relax(); -+ cpu_chill(); - } - } - prb_close_block(pkc, pbd, po, status); -diff -Nur linux-4.4.13.orig/net/rds/ib_rdma.c linux-4.4.13/net/rds/ib_rdma.c ---- linux-4.4.13.orig/net/rds/ib_rdma.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/net/rds/ib_rdma.c 2016-06-22 21:55:08.086523867 +0200 -@@ -34,6 +34,7 @@ - #include - #include - #include -+#include - - #include "rds.h" - #include "ib.h" -@@ -313,7 +314,7 @@ - for_each_online_cpu(cpu) { - flag = &per_cpu(clean_list_grace, cpu); - while (test_bit(CLEAN_LIST_BUSY_BIT, flag)) -- cpu_relax(); -+ cpu_chill(); - } - } - -diff -Nur linux-4.4.13.orig/net/sched/sch_generic.c linux-4.4.13/net/sched/sch_generic.c ---- linux-4.4.13.orig/net/sched/sch_generic.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/net/sched/sch_generic.c 2016-06-22 21:55:08.086523867 +0200 -@@ -893,7 +893,7 @@ - /* Wait for outstanding qdisc_run calls. */ - list_for_each_entry(dev, head, close_list) - while (some_qdisc_is_busy(dev)) -- yield(); -+ msleep(1); - } - - void dev_deactivate(struct net_device *dev) -diff -Nur linux-4.4.13.orig/net/sunrpc/svc_xprt.c linux-4.4.13/net/sunrpc/svc_xprt.c ---- linux-4.4.13.orig/net/sunrpc/svc_xprt.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/net/sunrpc/svc_xprt.c 2016-06-22 21:55:08.086523867 +0200 -@@ -340,7 +340,7 @@ - goto out; - } - -- cpu = get_cpu(); -+ cpu = get_cpu_light(); - pool = svc_pool_for_cpu(xprt->xpt_server, cpu); - - atomic_long_inc(&pool->sp_stats.packets); -@@ -376,7 +376,7 @@ - - atomic_long_inc(&pool->sp_stats.threads_woken); - wake_up_process(rqstp->rq_task); -- put_cpu(); -+ put_cpu_light(); - goto out; - } - rcu_read_unlock(); -@@ -397,7 +397,7 @@ - goto redo_search; - } - rqstp = NULL; -- put_cpu(); -+ put_cpu_light(); - out: - trace_svc_xprt_do_enqueue(xprt, rqstp); - } -diff -Nur linux-4.4.13.orig/scripts/mkcompile_h linux-4.4.13/scripts/mkcompile_h ---- linux-4.4.13.orig/scripts/mkcompile_h 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/scripts/mkcompile_h 2016-06-22 21:55:08.086523867 +0200 -@@ -4,7 +4,8 @@ - ARCH=$2 - SMP=$3 - PREEMPT=$4 --CC=$5 -+RT=$5 -+CC=$6 - - vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; } - -@@ -57,6 +58,7 @@ - CONFIG_FLAGS="" - if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi - if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi -+if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi - UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP" - - # Truncate to maximum length -diff -Nur linux-4.4.13.orig/sound/core/pcm_native.c linux-4.4.13/sound/core/pcm_native.c ---- linux-4.4.13.orig/sound/core/pcm_native.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/sound/core/pcm_native.c 2016-06-22 21:55:08.086523867 +0200 -@@ -135,7 +135,7 @@ - void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream) - { - if (!substream->pcm->nonatomic) -- local_irq_disable(); -+ local_irq_disable_nort(); - snd_pcm_stream_lock(substream); - } - EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq); -@@ -150,7 +150,7 @@ - { - snd_pcm_stream_unlock(substream); - if (!substream->pcm->nonatomic) -- local_irq_enable(); -+ local_irq_enable_nort(); - } - EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq); - -@@ -158,7 +158,7 @@ - { - unsigned long flags = 0; - if (!substream->pcm->nonatomic) -- local_irq_save(flags); -+ local_irq_save_nort(flags); - 
snd_pcm_stream_lock(substream); - return flags; - } -@@ -176,7 +176,7 @@ - { - snd_pcm_stream_unlock(substream); - if (!substream->pcm->nonatomic) -- local_irq_restore(flags); -+ local_irq_restore_nort(flags); - } - EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore); - -diff -Nur linux-4.4.13.orig/virt/kvm/async_pf.c linux-4.4.13/virt/kvm/async_pf.c ---- linux-4.4.13.orig/virt/kvm/async_pf.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/virt/kvm/async_pf.c 2016-06-22 21:55:08.086523867 +0200 -@@ -98,8 +98,8 @@ - * This memory barrier pairs with prepare_to_wait's set_current_state() - */ - smp_mb(); -- if (waitqueue_active(&vcpu->wq)) -- wake_up_interruptible(&vcpu->wq); -+ if (swait_active(&vcpu->wq)) -+ swake_up(&vcpu->wq); - - mmput(mm); - kvm_put_kvm(vcpu->kvm); -diff -Nur linux-4.4.13.orig/virt/kvm/kvm_main.c linux-4.4.13/virt/kvm/kvm_main.c ---- linux-4.4.13.orig/virt/kvm/kvm_main.c 2016-06-08 03:14:51.000000000 +0200 -+++ linux-4.4.13/virt/kvm/kvm_main.c 2016-06-22 21:55:08.086523867 +0200 -@@ -226,8 +226,7 @@ - vcpu->kvm = kvm; - vcpu->vcpu_id = id; - vcpu->pid = NULL; -- vcpu->halt_poll_ns = 0; -- init_waitqueue_head(&vcpu->wq); -+ init_swait_queue_head(&vcpu->wq); - kvm_async_pf_vcpu_init(vcpu); - - vcpu->pre_pcpu = -1; -@@ -2003,7 +2002,7 @@ - void kvm_vcpu_block(struct kvm_vcpu *vcpu) - { - ktime_t start, cur; -- DEFINE_WAIT(wait); -+ DECLARE_SWAITQUEUE(wait); - bool waited = false; - u64 block_ns; - -@@ -2028,7 +2027,7 @@ - kvm_arch_vcpu_blocking(vcpu); - - for (;;) { -- prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); -+ prepare_to_swait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); - - if (kvm_vcpu_check_block(vcpu) < 0) - break; -@@ -2037,7 +2036,7 @@ - schedule(); - } - -- finish_wait(&vcpu->wq, &wait); -+ finish_swait(&vcpu->wq, &wait); - cur = ktime_get(); - - kvm_arch_vcpu_unblocking(vcpu); -@@ -2069,11 +2068,11 @@ - { - int me; - int cpu = vcpu->cpu; -- wait_queue_head_t *wqp; -+ struct swait_queue_head *wqp; - - wqp = kvm_arch_vcpu_wq(vcpu); -- if (waitqueue_active(wqp)) { -- wake_up_interruptible(wqp); -+ if (swait_active(wqp)) { -+ swake_up(wqp); - ++vcpu->stat.halt_wakeup; - } - -@@ -2174,7 +2173,7 @@ - continue; - if (vcpu == me) - continue; -- if (waitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu)) -+ if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu)) - continue; - if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) - continue; diff --git a/target/linux/patches/4.4.17/revert-sparc.patch b/target/linux/patches/4.4.17/revert-sparc.patch deleted file mode 100644 index 296be0e4a..000000000 --- a/target/linux/patches/4.4.17/revert-sparc.patch +++ /dev/null @@ -1,319 +0,0 @@ -From fff1ade5bd7576b053b6bbc9c9b72c2572092c06 Mon Sep 17 00:00:00 2001 -From: Waldemar Brodkorb -Date: Wed, 6 Jul 2016 08:16:59 +0200 -Subject: [PATCH] Revert "sparc: Harden signal return frame checks." - -This reverts commit 1fda90c39d8ef6acbedfd3cd9bd710a5bcc490c3. 
- -Signed-off-by: Waldemar Brodkorb ---- - arch/sparc/kernel/signal32.c | 46 ++++++++++++++-------------------------- - arch/sparc/kernel/signal_32.c | 41 +++++++++++++---------------------- - arch/sparc/kernel/signal_64.c | 31 ++++++++++----------------- - arch/sparc/kernel/sigutil_32.c | 9 +------- - arch/sparc/kernel/sigutil_64.c | 10 ++------- - 5 files changed, 45 insertions(+), 92 deletions(-) - -diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c -index 77655f0..4eed773 100644 ---- a/arch/sparc/kernel/signal32.c -+++ b/arch/sparc/kernel/signal32.c -@@ -138,24 +138,12 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) - return 0; - } - --/* Checks if the fp is valid. We always build signal frames which are -- * 16-byte aligned, therefore we can always enforce that the restore -- * frame has that property as well. -- */ --static bool invalid_frame_pointer(void __user *fp, int fplen) --{ -- if ((((unsigned long) fp) & 15) || -- ((unsigned long)fp) > 0x100000000ULL - fplen) -- return true; -- return false; --} -- - void do_sigreturn32(struct pt_regs *regs) - { - struct signal_frame32 __user *sf; - compat_uptr_t fpu_save; - compat_uptr_t rwin_save; -- unsigned int psr, ufp; -+ unsigned int psr; - unsigned pc, npc; - sigset_t set; - compat_sigset_t seta; -@@ -170,16 +158,11 @@ void do_sigreturn32(struct pt_regs *regs) - sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP]; - - /* 1. Make sure we are not getting garbage from the user */ -- if (invalid_frame_pointer(sf, sizeof(*sf))) -- goto segv; -- -- if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) -- goto segv; -- -- if (ufp & 0x7) -+ if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) || -+ (((unsigned long) sf) & 3)) - goto segv; - -- if (__get_user(pc, &sf->info.si_regs.pc) || -+ if (get_user(pc, &sf->info.si_regs.pc) || - __get_user(npc, &sf->info.si_regs.npc)) - goto segv; - -@@ -244,7 +227,7 @@ segv: - asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) - { - struct rt_signal_frame32 __user *sf; -- unsigned int psr, pc, npc, ufp; -+ unsigned int psr, pc, npc; - compat_uptr_t fpu_save; - compat_uptr_t rwin_save; - sigset_t set; -@@ -259,16 +242,11 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs) - sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP]; - - /* 1. 
Make sure we are not getting garbage from the user */ -- if (invalid_frame_pointer(sf, sizeof(*sf))) -+ if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) || -+ (((unsigned long) sf) & 3)) - goto segv; - -- if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) -- goto segv; -- -- if (ufp & 0x7) -- goto segv; -- -- if (__get_user(pc, &sf->regs.pc) || -+ if (get_user(pc, &sf->regs.pc) || - __get_user(npc, &sf->regs.npc)) - goto segv; - -@@ -329,6 +307,14 @@ segv: - force_sig(SIGSEGV, current); - } - -+/* Checks if the fp is valid */ -+static int invalid_frame_pointer(void __user *fp, int fplen) -+{ -+ if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen) -+ return 1; -+ return 0; -+} -+ - static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) - { - unsigned long sp; -diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c -index c3c12ef..52aa5e4 100644 ---- a/arch/sparc/kernel/signal_32.c -+++ b/arch/sparc/kernel/signal_32.c -@@ -60,22 +60,10 @@ struct rt_signal_frame { - #define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7))) - #define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7))) - --/* Checks if the fp is valid. We always build signal frames which are -- * 16-byte aligned, therefore we can always enforce that the restore -- * frame has that property as well. -- */ --static inline bool invalid_frame_pointer(void __user *fp, int fplen) --{ -- if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen)) -- return true; -- -- return false; --} -- - asmlinkage void do_sigreturn(struct pt_regs *regs) - { -- unsigned long up_psr, pc, npc, ufp; - struct signal_frame __user *sf; -+ unsigned long up_psr, pc, npc; - sigset_t set; - __siginfo_fpu_t __user *fpu_save; - __siginfo_rwin_t __user *rwin_save; -@@ -89,13 +77,10 @@ asmlinkage void do_sigreturn(struct pt_regs *regs) - sf = (struct signal_frame __user *) regs->u_regs[UREG_FP]; - - /* 1. 
Make sure we are not getting garbage from the user */ -- if (!invalid_frame_pointer(sf, sizeof(*sf))) -- goto segv_and_exit; -- -- if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP])) -+ if (!access_ok(VERIFY_READ, sf, sizeof(*sf))) - goto segv_and_exit; - -- if (ufp & 0x7) -+ if (((unsigned long) sf) & 3) - goto segv_and_exit; - - err = __get_user(pc, &sf->info.si_regs.pc); -@@ -142,7 +127,7 @@ segv_and_exit: - asmlinkage void do_rt_sigreturn(struct pt_regs *regs) - { - struct rt_signal_frame __user *sf; -- unsigned int psr, pc, npc, ufp; -+ unsigned int psr, pc, npc; - __siginfo_fpu_t __user *fpu_save; - __siginfo_rwin_t __user *rwin_save; - sigset_t set; -@@ -150,13 +135,8 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs) - - synchronize_user_stack(); - sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP]; -- if (!invalid_frame_pointer(sf, sizeof(*sf))) -- goto segv; -- -- if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) -- goto segv; -- -- if (ufp & 0x7) -+ if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) || -+ (((unsigned long) sf) & 0x03)) - goto segv; - - err = __get_user(pc, &sf->regs.pc); -@@ -198,6 +178,15 @@ segv: - force_sig(SIGSEGV, current); - } - -+/* Checks if the fp is valid */ -+static inline int invalid_frame_pointer(void __user *fp, int fplen) -+{ -+ if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen)) -+ return 1; -+ -+ return 0; -+} -+ - static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) - { - unsigned long sp = regs->u_regs[UREG_FP]; -diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c -index 5ee930c..39aaec1 100644 ---- a/arch/sparc/kernel/signal_64.c -+++ b/arch/sparc/kernel/signal_64.c -@@ -234,17 +234,6 @@ do_sigsegv: - goto out; - } - --/* Checks if the fp is valid. We always build rt signal frames which -- * are 16-byte aligned, therefore we can always enforce that the -- * restore frame has that property as well. -- */ --static bool invalid_frame_pointer(void __user *fp) --{ -- if (((unsigned long) fp) & 15) -- return true; -- return false; --} -- - struct rt_signal_frame { - struct sparc_stackf ss; - siginfo_t info; -@@ -257,8 +246,8 @@ struct rt_signal_frame { - - void do_rt_sigreturn(struct pt_regs *regs) - { -- unsigned long tpc, tnpc, tstate, ufp; - struct rt_signal_frame __user *sf; -+ unsigned long tpc, tnpc, tstate; - __siginfo_fpu_t __user *fpu_save; - __siginfo_rwin_t __user *rwin_save; - sigset_t set; -@@ -272,16 +261,10 @@ void do_rt_sigreturn(struct pt_regs *regs) - (regs->u_regs [UREG_FP] + STACK_BIAS); - - /* 1. 
Make sure we are not getting garbage from the user */ -- if (invalid_frame_pointer(sf)) -- goto segv; -- -- if (get_user(ufp, &sf->regs.u_regs[UREG_FP])) -+ if (((unsigned long) sf) & 3) - goto segv; - -- if ((ufp + STACK_BIAS) & 0x7) -- goto segv; -- -- err = __get_user(tpc, &sf->regs.tpc); -+ err = get_user(tpc, &sf->regs.tpc); - err |= __get_user(tnpc, &sf->regs.tnpc); - if (test_thread_flag(TIF_32BIT)) { - tpc &= 0xffffffff; -@@ -325,6 +308,14 @@ segv: - force_sig(SIGSEGV, current); - } - -+/* Checks if the fp is valid */ -+static int invalid_frame_pointer(void __user *fp) -+{ -+ if (((unsigned long) fp) & 15) -+ return 1; -+ return 0; -+} -+ - static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize) - { - unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS; -diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c -index e5fe8ce..0f6eebe 100644 ---- a/arch/sparc/kernel/sigutil_32.c -+++ b/arch/sparc/kernel/sigutil_32.c -@@ -48,10 +48,6 @@ int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) - int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) - { - int err; -- -- if (((unsigned long) fpu) & 3) -- return -EFAULT; -- - #ifdef CONFIG_SMP - if (test_tsk_thread_flag(current, TIF_USEDFPU)) - regs->psr &= ~PSR_EF; -@@ -101,10 +97,7 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp) - struct thread_info *t = current_thread_info(); - int i, wsaved, err; - -- if (((unsigned long) rp) & 3) -- return -EFAULT; -- -- get_user(wsaved, &rp->wsaved); -+ __get_user(wsaved, &rp->wsaved); - if (wsaved > NSWINS) - return -EFAULT; - -diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c -index 36aadcb..387834a 100644 ---- a/arch/sparc/kernel/sigutil_64.c -+++ b/arch/sparc/kernel/sigutil_64.c -@@ -37,10 +37,7 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu) - unsigned long fprs; - int err; - -- if (((unsigned long) fpu) & 7) -- return -EFAULT; -- -- err = get_user(fprs, &fpu->si_fprs); -+ err = __get_user(fprs, &fpu->si_fprs); - fprs_write(0); - regs->tstate &= ~TSTATE_PEF; - if (fprs & FPRS_DL) -@@ -75,10 +72,7 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp) - struct thread_info *t = current_thread_info(); - int i, wsaved, err; - -- if (((unsigned long) rp) & 7) -- return -EFAULT; -- -- get_user(wsaved, &rp->wsaved); -+ __get_user(wsaved, &rp->wsaved); - if (wsaved > NSWINS) - return -EFAULT; - --- -1.7.10.4 - diff --git a/target/linux/patches/4.4.17/startup.patch b/target/linux/patches/4.4.17/startup.patch deleted file mode 100644 index e54ac19a6..000000000 --- a/target/linux/patches/4.4.17/startup.patch +++ /dev/null @@ -1,34 +0,0 @@ -diff -Nur linux-4.4.6.orig/init/initramfs.c linux-4.4.6/init/initramfs.c ---- linux-4.4.6.orig/init/initramfs.c 2016-03-16 16:43:17.000000000 +0100 -+++ linux-4.4.6/init/initramfs.c 2016-03-27 14:15:43.931044532 +0200 -@@ -653,6 +653,9 @@ - */ - load_default_modules(); - } -+#ifdef CONFIG_DEVTMPFS_MOUNT -+ devtmpfs_mount("dev"); -+#endif - return 0; - } - rootfs_initcall(populate_rootfs); -diff -Nur linux-4.4.6.orig/init/main.c linux-4.4.6/init/main.c ---- linux-4.4.6.orig/init/main.c 2016-03-16 16:43:17.000000000 +0100 -+++ linux-4.4.6/init/main.c 2016-03-27 14:15:43.935044783 +0200 -@@ -1011,6 +1011,8 @@ - if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0) - pr_err("Warning: unable to open an initial console.\n"); - -+ printk(KERN_WARNING "Starting Linux (built with OpenADK).\n"); -+ - 
(void) sys_dup(0); - (void) sys_dup(0); - /* -@@ -1019,7 +1021,7 @@ - */ - - if (!ramdisk_execute_command) -- ramdisk_execute_command = "/init"; -+ ramdisk_execute_command = "/sbin/init"; - - if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) { - ramdisk_execute_command = NULL; diff --git a/target/linux/patches/4.4.17/use-libgcc-for-sh.patch b/target/linux/patches/4.4.17/use-libgcc-for-sh.patch deleted file mode 100644 index 6420219b0..000000000 --- a/target/linux/patches/4.4.17/use-libgcc-for-sh.patch +++ /dev/null @@ -1,29 +0,0 @@ -diff -Nur linux-4.1.13.orig/arch/sh/Makefile linux-4.1.13/arch/sh/Makefile ---- linux-4.1.13.orig/arch/sh/Makefile 2015-11-09 23:34:10.000000000 +0100 -+++ linux-4.1.13/arch/sh/Makefile 2015-12-06 19:59:31.000000000 +0100 -@@ -200,7 +206,9 @@ - KBUILD_CFLAGS += -fasynchronous-unwind-tables - endif - --libs-$(CONFIG_SUPERH32) := arch/sh/lib/ $(libs-y) -+LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libgcc.a) -+ -+libs-$(CONFIG_SUPERH32) := arch/sh/lib/ $(libs-y) $(LIBGCC) - libs-$(CONFIG_SUPERH64) := arch/sh/lib64/ $(libs-y) - - BOOT_TARGETS = uImage uImage.bz2 uImage.gz uImage.lzma uImage.xz uImage.lzo \ -diff -Nur linux-4.1.13.orig/arch/sh/lib/Makefile linux-4.1.13/arch/sh/lib/Makefile ---- linux-4.1.13.orig/arch/sh/lib/Makefile 2015-11-09 23:34:10.000000000 +0100 -+++ linux-4.1.13/arch/sh/lib/Makefile 2015-12-06 19:59:14.000000000 +0100 -@@ -5,11 +5,6 @@ - lib-y = delay.o memmove.o memchr.o \ - checksum.o strlen.o div64.o div64-generic.o - --# Extracted from libgcc --obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \ -- ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \ -- udiv_qrnnd.o -- - udivsi3-y := udivsi3_i4i-Os.o - - ifneq ($(CONFIG_CC_OPTIMIZE_FOR_SIZE),y) diff --git a/target/linux/patches/4.4.19/coldfire-sighandler.patch b/target/linux/patches/4.4.19/coldfire-sighandler.patch new file mode 100644 index 000000000..c52a4e228 --- /dev/null +++ b/target/linux/patches/4.4.19/coldfire-sighandler.patch @@ -0,0 +1,100 @@ +From a95517992a37488c0bc8b629c47c570e580e407d Mon Sep 17 00:00:00 2001 +From: Greg Ungerer +Date: Mon, 15 Feb 2016 16:36:29 +1000 +Subject: m68k: Use conventional function parameters for do_sigreturn + +Create conventional stack parameters for the calls to do_sigreturn and +do_rt_sigreturn. The current C code for do_sigreturn and do_rt_sigreturn +dig into the stack to create local pointers to the saved switch stack +and the pt_regs structs. + +The motivation for this change is a problem with non-MMU targets that +have broken signal return paths on newer versions of gcc. It appears as +though gcc has determined that the pointers into the saved stack structs, +and the saved structs themselves, are function parameters and updates to +them will be lost on function return, so they are optimized away. This +results in large parts of restore_sigcontext() and mangle_kernel_stack() +functions being removed. Of course this results in non-functional code +causing kernel oops. This problem has been observed with gcc version +5.2 and 5.3, and probably exists in earlier versions as well. + +Using conventional stack parameter pointers passed to these functions has +the advantage of the code here not needing to know the exact details of +how the underlying entry handler layed these structs out on the stack. +So the rather ugly pointer setup casting and arg referencing can be +removed. + +The resulting code after this change is a few bytes larger (due to the +overhead of creating the stack args and their tear down). 
Not being hot +paths I don't think this is too much of a problem here. + +An alternative solution is to put a barrier() in the do_sigreturn() code, +but this doesn't feel quite as clean as this solution. + +This change has been compile tested on all defconfigs, and run tested on +Atari (through aranym), ColdFire with MMU (M5407EVB) and ColdFire with +no-MMU (QEMU and M5208EVB). + +Signed-off-by: Greg Ungerer +Acked-by: Andreas Schwab +Signed-off-by: Geert Uytterhoeven +--- + arch/m68k/kernel/entry.S | 6 ++++++ + arch/m68k/kernel/signal.c | 8 ++------ + 2 files changed, 8 insertions(+), 6 deletions(-) + +diff --git a/arch/m68k/kernel/entry.S b/arch/m68k/kernel/entry.S +index b54ac7a..97cd3ea 100644 +--- a/arch/m68k/kernel/entry.S ++++ b/arch/m68k/kernel/entry.S +@@ -71,13 +71,19 @@ ENTRY(__sys_vfork) + + ENTRY(sys_sigreturn) + SAVE_SWITCH_STACK ++ movel %sp,%sp@- | switch_stack pointer ++ pea %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer + jbsr do_sigreturn ++ addql #8,%sp + RESTORE_SWITCH_STACK + rts + + ENTRY(sys_rt_sigreturn) + SAVE_SWITCH_STACK ++ movel %sp,%sp@- | switch_stack pointer ++ pea %sp@(SWITCH_STACK_SIZE+4) | pt_regs pointer + jbsr do_rt_sigreturn ++ addql #8,%sp + RESTORE_SWITCH_STACK + rts + +diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c +index af1c4f3..2dcee3a 100644 +--- a/arch/m68k/kernel/signal.c ++++ b/arch/m68k/kernel/signal.c +@@ -737,10 +737,8 @@ badframe: + return 1; + } + +-asmlinkage int do_sigreturn(unsigned long __unused) ++asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw) + { +- struct switch_stack *sw = (struct switch_stack *) &__unused; +- struct pt_regs *regs = (struct pt_regs *) (sw + 1); + unsigned long usp = rdusp(); + struct sigframe __user *frame = (struct sigframe __user *)(usp - 4); + sigset_t set; +@@ -764,10 +762,8 @@ badframe: + return 0; + } + +-asmlinkage int do_rt_sigreturn(unsigned long __unused) ++asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw) + { +- struct switch_stack *sw = (struct switch_stack *) &__unused; +- struct pt_regs *regs = (struct pt_regs *) (sw + 1); + unsigned long usp = rdusp(); + struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4); + sigset_t set; +-- +cgit v0.12 + diff --git a/target/linux/patches/4.4.19/crisv32.patch b/target/linux/patches/4.4.19/crisv32.patch new file mode 100644 index 000000000..cb9b0d028 --- /dev/null +++ b/target/linux/patches/4.4.19/crisv32.patch @@ -0,0 +1,33 @@ +diff -Nur linux-4.4.13.orig/arch/cris/arch-v32/mm/intmem.c linux-4.4.13/arch/cris/arch-v32/mm/intmem.c +--- linux-4.4.13.orig/arch/cris/arch-v32/mm/intmem.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/cris/arch-v32/mm/intmem.c 2016-06-21 20:40:18.919361891 +0200 +@@ -113,14 +113,14 @@ + + allocation->status = STATUS_FREE; + /* Join with prev and/or next if also free */ +- if ((prev != &intmem_allocations) && ++ if ((&prev->entry != &intmem_allocations) && + (prev->status == STATUS_FREE)) { + prev->size += allocation->size; + list_del(&allocation->entry); + kfree(allocation); + allocation = prev; + } +- if ((next != &intmem_allocations) && ++ if ((&next->entry != &intmem_allocations) && + (next->status == STATUS_FREE)) { + allocation->size += next->size; + list_del(&next->entry); +@@ -145,5 +145,11 @@ + (unsigned long)intmem_virtual + MEM_INTMEM_START + + RESERVED_SIZE); + } +-device_initcall(crisv32_intmem_init); + ++static int __init crisv32_intmem_setup(void) ++{ ++ crisv32_intmem_init(); ++ ++ return 0; ++} 
++device_initcall(crisv32_intmem_setup); diff --git a/target/linux/patches/4.4.19/initramfs-nosizelimit.patch b/target/linux/patches/4.4.19/initramfs-nosizelimit.patch new file mode 100644 index 000000000..40d2f6bd8 --- /dev/null +++ b/target/linux/patches/4.4.19/initramfs-nosizelimit.patch @@ -0,0 +1,57 @@ +From 9a18df7a71bfa620b1278777d64783a359d7eb4e Mon Sep 17 00:00:00 2001 +From: Thorsten Glaser +Date: Sun, 4 May 2014 01:37:54 +0200 +Subject: [PATCH] mount tmpfs-as-rootfs (initramfs) with -o + nr_blocks=0,nr_inodes=0 + +I would have preferred to write this patch to be able to pass +rootflags=nr_blocks=0,nr_inodes=0 on the kernel command line, +and then hand these rootflags over to the initramfs (tmpfs) +mount in the same way the kernel hands them over to the block +device rootfs mount. But at least the Debian/m68k initrd also +parses $rootflags from the environment and adds it to the call +to the user-space mount for the eventual root device, which +would make the kernel command line rootflags option be used in +both places (tmpfs and e.g. ext4) which is guaranteed to error +out in at least one of them. + +This change is intended to aid people in a setup where the +initrd is the final root filesystem, i.e. not mounted over. +This is especially useful in automated tests running on qemu +for boards with constrained memory (e.g. 64 MiB on sh4). + +Considering that the initramfs is normally emptied out then +overmounted, this change is probably safe for setups where +initramfs just hosts early userspace, too, since the tmpfs +backing it is not accessible any more later on, AFAICT. + +Signed-off-by: Thorsten Glaser +--- + init/do_mounts.c | 4 ++++ + 1 file changed, 4 insertions(+) + +diff --git a/init/do_mounts.c b/init/do_mounts.c +index 82f2288..55a4cfe 100644 +--- a/init/do_mounts.c ++++ b/init/do_mounts.c +@@ -594,6 +594,7 @@ out: + } + + static bool is_tmpfs; ++static char tmpfs_rootflags[] = "nr_blocks=0,nr_inodes=0"; + static struct dentry *rootfs_mount(struct file_system_type *fs_type, + int flags, const char *dev_name, void *data) + { +@@ -606,6 +607,9 @@ static struct dentry *rootfs_mount(struct file_system_type *fs_type, + if (IS_ENABLED(CONFIG_TMPFS) && is_tmpfs) + fill = shmem_fill_super; + ++ if (is_tmpfs) ++ data = tmpfs_rootflags; ++ + return mount_nodev(fs_type, flags, data, fill); + } + +-- +2.0.0.rc0 + diff --git a/target/linux/patches/4.4.19/ld-or1k.patch b/target/linux/patches/4.4.19/ld-or1k.patch new file mode 100644 index 000000000..264f9166f --- /dev/null +++ b/target/linux/patches/4.4.19/ld-or1k.patch @@ -0,0 +1,12 @@ +diff -Nur linux-4.4.5.orig/arch/openrisc/kernel/vmlinux.lds.S linux-4.4.5/arch/openrisc/kernel/vmlinux.lds.S +--- linux-4.4.5.orig/arch/openrisc/kernel/vmlinux.lds.S 2016-03-10 00:35:58.000000000 +0100 ++++ linux-4.4.5/arch/openrisc/kernel/vmlinux.lds.S 2016-03-16 09:21:07.933535532 +0100 +@@ -30,7 +30,7 @@ + #include + #include + +-OUTPUT_FORMAT("elf32-or32", "elf32-or32", "elf32-or32") ++OUTPUT_FORMAT("elf32-or1k", "elf32-or1k", "elf32-or1k") + jiffies = jiffies_64 + 4; + + SECTIONS diff --git a/target/linux/patches/4.4.19/macsonic.patch b/target/linux/patches/4.4.19/macsonic.patch new file mode 100644 index 000000000..75a6fcad2 --- /dev/null +++ b/target/linux/patches/4.4.19/macsonic.patch @@ -0,0 +1,11 @@ +diff -Nur linux-4.4.6.orig/drivers/net/ethernet/natsemi/Kconfig linux-4.4.6/drivers/net/ethernet/natsemi/Kconfig +--- linux-4.4.6.orig/drivers/net/ethernet/natsemi/Kconfig 2016-03-16 16:43:17.000000000 +0100 ++++ 
linux-4.4.6/drivers/net/ethernet/natsemi/Kconfig 2016-03-18 10:43:34.000000000 +0100 +@@ -18,6 +18,7 @@ + config MACSONIC + tristate "Macintosh SONIC based ethernet (onboard, NuBus, LC, CS)" + depends on MAC ++ select BITREVERSE + ---help--- + Support for NatSemi SONIC based Ethernet devices. This includes + the onboard Ethernet in many Quadras as well as some LC-PDS, diff --git a/target/linux/patches/4.4.19/mips-xz.patch b/target/linux/patches/4.4.19/mips-xz.patch new file mode 100644 index 000000000..5cfac6254 --- /dev/null +++ b/target/linux/patches/4.4.19/mips-xz.patch @@ -0,0 +1,12 @@ +diff -Nur linux-4.4.5.orig/arch/mips/boot/compressed/Makefile linux-4.4.5/arch/mips/boot/compressed/Makefile +--- linux-4.4.5.orig/arch/mips/boot/compressed/Makefile 2016-03-10 00:35:58.000000000 +0100 ++++ linux-4.4.5/arch/mips/boot/compressed/Makefile 2016-03-15 11:35:31.951795457 +0100 +@@ -41,7 +41,7 @@ + endif + + ifdef CONFIG_KERNEL_XZ +-vmlinuzobjs-y += $(obj)/../../lib/ashldi3.o ++vmlinuzobjs-y += $(obj)/../../lib/ashldi3.o $(obj)/../../lib/bswapsi.o + endif + + targets += vmlinux.bin diff --git a/target/linux/patches/4.4.19/patch-realtime b/target/linux/patches/4.4.19/patch-realtime new file mode 100644 index 000000000..c677e4098 --- /dev/null +++ b/target/linux/patches/4.4.19/patch-realtime @@ -0,0 +1,30649 @@ +diff -Nur linux-4.4.13.orig/arch/arm/include/asm/switch_to.h linux-4.4.13/arch/arm/include/asm/switch_to.h +--- linux-4.4.13.orig/arch/arm/include/asm/switch_to.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/include/asm/switch_to.h 2016-06-22 21:55:07.966523867 +0200 +@@ -3,6 +3,13 @@ + + #include + ++#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM ++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p); ++#else ++static inline void ++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } ++#endif ++ + /* + * For v7 SMP cores running a preemptible kernel we may be pre-empted + * during a TLB maintenance operation, so execute an inner-shareable dsb +@@ -25,6 +32,7 @@ + #define switch_to(prev,next,last) \ + do { \ + __complete_pending_tlbi(); \ ++ switch_kmaps(prev, next); \ + last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \ + } while (0) + +diff -Nur linux-4.4.13.orig/arch/arm/include/asm/thread_info.h linux-4.4.13/arch/arm/include/asm/thread_info.h +--- linux-4.4.13.orig/arch/arm/include/asm/thread_info.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/include/asm/thread_info.h 2016-06-22 21:55:07.966523867 +0200 +@@ -49,6 +49,7 @@ + struct thread_info { + unsigned long flags; /* low level flags */ + int preempt_count; /* 0 => preemptable, <0 => bug */ ++ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */ + mm_segment_t addr_limit; /* address limit */ + struct task_struct *task; /* main task structure */ + __u32 cpu; /* cpu */ +@@ -142,7 +143,8 @@ + #define TIF_SYSCALL_TRACE 4 /* syscall trace active */ + #define TIF_SYSCALL_AUDIT 5 /* syscall auditing active */ + #define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ +-#define TIF_SECCOMP 7 /* seccomp syscall filtering active */ ++#define TIF_SECCOMP 8 /* seccomp syscall filtering active */ ++#define TIF_NEED_RESCHED_LAZY 7 + + #define TIF_NOHZ 12 /* in adaptive nohz mode */ + #define TIF_USING_IWMMXT 17 +@@ -152,6 +154,7 @@ + #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) + #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) + #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) ++#define 
_TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) + #define _TIF_UPROBE (1 << TIF_UPROBE) + #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) + #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) +@@ -167,7 +170,8 @@ + * Change these and you break ASM code in entry-common.S + */ + #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ +- _TIF_NOTIFY_RESUME | _TIF_UPROBE) ++ _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ ++ _TIF_NEED_RESCHED_LAZY) + + #endif /* __KERNEL__ */ + #endif /* __ASM_ARM_THREAD_INFO_H */ +diff -Nur linux-4.4.13.orig/arch/arm/Kconfig linux-4.4.13/arch/arm/Kconfig +--- linux-4.4.13.orig/arch/arm/Kconfig 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/Kconfig 2016-06-22 21:55:07.966523867 +0200 +@@ -33,7 +33,7 @@ + select HARDIRQS_SW_RESEND + select HAVE_ARCH_AUDITSYSCALL if (AEABI && !OABI_COMPAT) + select HAVE_ARCH_BITREVERSE if (CPU_32v7M || CPU_32v7) && !CPU_32v6 +- select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 ++ select HAVE_ARCH_JUMP_LABEL if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !PREEMPT_RT_BASE + select HAVE_ARCH_KGDB if !CPU_ENDIAN_BE32 + select HAVE_ARCH_SECCOMP_FILTER if (AEABI && !OABI_COMPAT) + select HAVE_ARCH_TRACEHOOK +@@ -68,6 +68,7 @@ + select HAVE_PERF_EVENTS + select HAVE_PERF_REGS + select HAVE_PERF_USER_STACK_DUMP ++ select HAVE_PREEMPT_LAZY + select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE) + select HAVE_REGS_AND_STACK_ACCESS_API + select HAVE_SYSCALL_TRACEPOINTS +diff -Nur linux-4.4.13.orig/arch/arm/kernel/asm-offsets.c linux-4.4.13/arch/arm/kernel/asm-offsets.c +--- linux-4.4.13.orig/arch/arm/kernel/asm-offsets.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/kernel/asm-offsets.c 2016-06-22 21:55:07.970523867 +0200 +@@ -65,6 +65,7 @@ + BLANK(); + DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); + DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); ++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count)); + DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); + DEFINE(TI_TASK, offsetof(struct thread_info, task)); + DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); +diff -Nur linux-4.4.13.orig/arch/arm/kernel/entry-armv.S linux-4.4.13/arch/arm/kernel/entry-armv.S +--- linux-4.4.13.orig/arch/arm/kernel/entry-armv.S 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/kernel/entry-armv.S 2016-06-22 21:55:07.970523867 +0200 +@@ -215,11 +215,18 @@ + #ifdef CONFIG_PREEMPT + get_thread_info tsk + ldr r8, [tsk, #TI_PREEMPT] @ get preempt count +- ldr r0, [tsk, #TI_FLAGS] @ get flags + teq r8, #0 @ if preempt count != 0 ++ bne 1f @ return from exeption ++ ldr r0, [tsk, #TI_FLAGS] @ get flags ++ tst r0, #_TIF_NEED_RESCHED @ if NEED_RESCHED is set ++ blne svc_preempt @ preempt! 
++ ++ ldr r8, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count ++ teq r8, #0 @ if preempt lazy count != 0 + movne r0, #0 @ force flags to 0 +- tst r0, #_TIF_NEED_RESCHED ++ tst r0, #_TIF_NEED_RESCHED_LAZY + blne svc_preempt ++1: + #endif + + svc_exit r5, irq = 1 @ return from exception +@@ -234,8 +241,14 @@ + 1: bl preempt_schedule_irq @ irq en/disable is done inside + ldr r0, [tsk, #TI_FLAGS] @ get new tasks TI_FLAGS + tst r0, #_TIF_NEED_RESCHED ++ bne 1b ++ tst r0, #_TIF_NEED_RESCHED_LAZY + reteq r8 @ go again +- b 1b ++ ldr r0, [tsk, #TI_PREEMPT_LAZY] @ get preempt lazy count ++ teq r0, #0 @ if preempt lazy count != 0 ++ beq 1b ++ ret r8 @ go again ++ + #endif + + __und_fault: +diff -Nur linux-4.4.13.orig/arch/arm/kernel/entry-common.S linux-4.4.13/arch/arm/kernel/entry-common.S +--- linux-4.4.13.orig/arch/arm/kernel/entry-common.S 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/kernel/entry-common.S 2016-06-22 21:55:07.970523867 +0200 +@@ -36,7 +36,9 @@ + UNWIND(.cantunwind ) + disable_irq_notrace @ disable interrupts + ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing +- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK ++ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP) ++ bne fast_work_pending ++ tst r1, #_TIF_SECCOMP + bne fast_work_pending + + /* perform architecture specific actions before user return */ +@@ -62,8 +64,11 @@ + str r0, [sp, #S_R0 + S_OFF]! @ save returned r0 + disable_irq_notrace @ disable interrupts + ldr r1, [tsk, #TI_FLAGS] @ re-check for syscall tracing +- tst r1, #_TIF_SYSCALL_WORK | _TIF_WORK_MASK ++ tst r1, #((_TIF_SYSCALL_WORK | _TIF_WORK_MASK) & ~_TIF_SECCOMP) ++ bne do_slower_path ++ tst r1, #_TIF_SECCOMP + beq no_work_pending ++do_slower_path: + UNWIND(.fnend ) + ENDPROC(ret_fast_syscall) + +diff -Nur linux-4.4.13.orig/arch/arm/kernel/process.c linux-4.4.13/arch/arm/kernel/process.c +--- linux-4.4.13.orig/arch/arm/kernel/process.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/kernel/process.c 2016-06-22 21:55:07.970523867 +0200 +@@ -319,6 +319,30 @@ + } + + #ifdef CONFIG_MMU ++/* ++ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock. If the lock is not ++ * initialized by pgtable_page_ctor() then a coredump of the vector page will ++ * fail. 
++ */ ++static int __init vectors_user_mapping_init_page(void) ++{ ++ struct page *page; ++ unsigned long addr = 0xffff0000; ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ ++ pgd = pgd_offset_k(addr); ++ pud = pud_offset(pgd, addr); ++ pmd = pmd_offset(pud, addr); ++ page = pmd_page(*(pmd)); ++ ++ pgtable_page_ctor(page); ++ ++ return 0; ++} ++late_initcall(vectors_user_mapping_init_page); ++ + #ifdef CONFIG_KUSER_HELPERS + /* + * The vectors page is always readable from user space for the +diff -Nur linux-4.4.13.orig/arch/arm/kernel/signal.c linux-4.4.13/arch/arm/kernel/signal.c +--- linux-4.4.13.orig/arch/arm/kernel/signal.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/kernel/signal.c 2016-06-22 21:55:07.970523867 +0200 +@@ -572,7 +572,8 @@ + */ + trace_hardirqs_off(); + do { +- if (likely(thread_flags & _TIF_NEED_RESCHED)) { ++ if (likely(thread_flags & (_TIF_NEED_RESCHED | ++ _TIF_NEED_RESCHED_LAZY))) { + schedule(); + } else { + if (unlikely(!user_mode(regs))) +diff -Nur linux-4.4.13.orig/arch/arm/kernel/smp.c linux-4.4.13/arch/arm/kernel/smp.c +--- linux-4.4.13.orig/arch/arm/kernel/smp.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/kernel/smp.c 2016-06-22 21:55:07.974523867 +0200 +@@ -230,8 +230,6 @@ + flush_cache_louis(); + local_flush_tlb_all(); + +- clear_tasks_mm_cpumask(cpu); +- + return 0; + } + +@@ -247,6 +245,9 @@ + pr_err("CPU%u: cpu didn't die\n", cpu); + return; + } ++ ++ clear_tasks_mm_cpumask(cpu); ++ + pr_notice("CPU%u: shutdown\n", cpu); + + /* +diff -Nur linux-4.4.13.orig/arch/arm/kernel/unwind.c linux-4.4.13/arch/arm/kernel/unwind.c +--- linux-4.4.13.orig/arch/arm/kernel/unwind.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/kernel/unwind.c 2016-06-22 21:55:07.974523867 +0200 +@@ -93,7 +93,7 @@ + static const struct unwind_idx *__origin_unwind_idx; + extern const struct unwind_idx __stop_unwind_idx[]; + +-static DEFINE_SPINLOCK(unwind_lock); ++static DEFINE_RAW_SPINLOCK(unwind_lock); + static LIST_HEAD(unwind_tables); + + /* Convert a prel31 symbol to an absolute address */ +@@ -201,7 +201,7 @@ + /* module unwind tables */ + struct unwind_table *table; + +- spin_lock_irqsave(&unwind_lock, flags); ++ raw_spin_lock_irqsave(&unwind_lock, flags); + list_for_each_entry(table, &unwind_tables, list) { + if (addr >= table->begin_addr && + addr < table->end_addr) { +@@ -213,7 +213,7 @@ + break; + } + } +- spin_unlock_irqrestore(&unwind_lock, flags); ++ raw_spin_unlock_irqrestore(&unwind_lock, flags); + } + + pr_debug("%s: idx = %p\n", __func__, idx); +@@ -529,9 +529,9 @@ + tab->begin_addr = text_addr; + tab->end_addr = text_addr + text_size; + +- spin_lock_irqsave(&unwind_lock, flags); ++ raw_spin_lock_irqsave(&unwind_lock, flags); + list_add_tail(&tab->list, &unwind_tables); +- spin_unlock_irqrestore(&unwind_lock, flags); ++ raw_spin_unlock_irqrestore(&unwind_lock, flags); + + return tab; + } +@@ -543,9 +543,9 @@ + if (!tab) + return; + +- spin_lock_irqsave(&unwind_lock, flags); ++ raw_spin_lock_irqsave(&unwind_lock, flags); + list_del(&tab->list); +- spin_unlock_irqrestore(&unwind_lock, flags); ++ raw_spin_unlock_irqrestore(&unwind_lock, flags); + + kfree(tab); + } +diff -Nur linux-4.4.13.orig/arch/arm/kvm/arm.c linux-4.4.13/arch/arm/kvm/arm.c +--- linux-4.4.13.orig/arch/arm/kvm/arm.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/kvm/arm.c 2016-06-22 21:55:07.974523867 +0200 +@@ -498,18 +498,18 @@ + struct kvm_vcpu *vcpu; + + kvm_for_each_vcpu(i, vcpu, kvm) { +- wait_queue_head_t *wq = 
kvm_arch_vcpu_wq(vcpu); ++ struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu); + + vcpu->arch.pause = false; +- wake_up_interruptible(wq); ++ swake_up(wq); + } + } + + static void vcpu_sleep(struct kvm_vcpu *vcpu) + { +- wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu); ++ struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu); + +- wait_event_interruptible(*wq, ((!vcpu->arch.power_off) && ++ swait_event_interruptible(*wq, ((!vcpu->arch.power_off) && + (!vcpu->arch.pause))); + } + +@@ -568,7 +568,7 @@ + * involves poking the GIC, which must be done in a + * non-preemptible context. + */ +- preempt_disable(); ++ migrate_disable(); + kvm_timer_flush_hwstate(vcpu); + kvm_vgic_flush_hwstate(vcpu); + +@@ -587,7 +587,7 @@ + local_irq_enable(); + kvm_timer_sync_hwstate(vcpu); + kvm_vgic_sync_hwstate(vcpu); +- preempt_enable(); ++ migrate_enable(); + continue; + } + +@@ -641,7 +641,7 @@ + + kvm_vgic_sync_hwstate(vcpu); + +- preempt_enable(); ++ migrate_enable(); + + ret = handle_exit(vcpu, run, ret); + } +diff -Nur linux-4.4.13.orig/arch/arm/kvm/psci.c linux-4.4.13/arch/arm/kvm/psci.c +--- linux-4.4.13.orig/arch/arm/kvm/psci.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/kvm/psci.c 2016-06-22 21:55:07.974523867 +0200 +@@ -70,7 +70,7 @@ + { + struct kvm *kvm = source_vcpu->kvm; + struct kvm_vcpu *vcpu = NULL; +- wait_queue_head_t *wq; ++ struct swait_queue_head *wq; + unsigned long cpu_id; + unsigned long context_id; + phys_addr_t target_pc; +@@ -119,7 +119,7 @@ + smp_mb(); /* Make sure the above is visible */ + + wq = kvm_arch_vcpu_wq(vcpu); +- wake_up_interruptible(wq); ++ swake_up(wq); + + return PSCI_RET_SUCCESS; + } +diff -Nur linux-4.4.13.orig/arch/arm/mach-at91/at91rm9200.c linux-4.4.13/arch/arm/mach-at91/at91rm9200.c +--- linux-4.4.13.orig/arch/arm/mach-at91/at91rm9200.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/mach-at91/at91rm9200.c 2016-06-22 21:55:07.974523867 +0200 +@@ -12,7 +12,6 @@ + #include + + #include +-#include + + #include "generic.h" + #include "soc.h" +@@ -33,7 +32,6 @@ + + of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev); + +- arm_pm_idle = at91rm9200_idle; + at91rm9200_pm_init(); + } + +diff -Nur linux-4.4.13.orig/arch/arm/mach-at91/at91sam9.c linux-4.4.13/arch/arm/mach-at91/at91sam9.c +--- linux-4.4.13.orig/arch/arm/mach-at91/at91sam9.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/mach-at91/at91sam9.c 2016-06-22 21:55:07.974523867 +0200 +@@ -62,8 +62,6 @@ + soc_dev = soc_device_to_device(soc); + + of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev); +- +- arm_pm_idle = at91sam9_idle; + } + + static void __init at91sam9_dt_device_init(void) +diff -Nur linux-4.4.13.orig/arch/arm/mach-at91/generic.h linux-4.4.13/arch/arm/mach-at91/generic.h +--- linux-4.4.13.orig/arch/arm/mach-at91/generic.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/mach-at91/generic.h 2016-06-22 21:55:07.974523867 +0200 +@@ -11,27 +11,18 @@ + #ifndef _AT91_GENERIC_H + #define _AT91_GENERIC_H + +-#include +-#include +- +- /* Map io */ +-extern void __init at91_map_io(void); +-extern void __init at91_alt_map_io(void); +- +-/* idle */ +-extern void at91rm9200_idle(void); +-extern void at91sam9_idle(void); +- + #ifdef CONFIG_PM + extern void __init at91rm9200_pm_init(void); + extern void __init at91sam9260_pm_init(void); + extern void __init at91sam9g45_pm_init(void); + extern void __init at91sam9x5_pm_init(void); ++extern void __init sama5_pm_init(void); + #else + static inline void __init 
at91rm9200_pm_init(void) { } + static inline void __init at91sam9260_pm_init(void) { } + static inline void __init at91sam9g45_pm_init(void) { } + static inline void __init at91sam9x5_pm_init(void) { } ++static inline void __init sama5_pm_init(void) { } + #endif + + #endif /* _AT91_GENERIC_H */ +diff -Nur linux-4.4.13.orig/arch/arm/mach-at91/Kconfig linux-4.4.13/arch/arm/mach-at91/Kconfig +--- linux-4.4.13.orig/arch/arm/mach-at91/Kconfig 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/mach-at91/Kconfig 2016-06-22 21:55:07.974523867 +0200 +@@ -99,6 +99,7 @@ + config COMMON_CLK_AT91 + bool + select COMMON_CLK ++ select MFD_SYSCON + + config HAVE_AT91_SMD + bool +diff -Nur linux-4.4.13.orig/arch/arm/mach-at91/pm.c linux-4.4.13/arch/arm/mach-at91/pm.c +--- linux-4.4.13.orig/arch/arm/mach-at91/pm.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/mach-at91/pm.c 2016-06-22 21:55:07.974523867 +0200 +@@ -31,10 +31,13 @@ + #include + #include + #include ++#include + + #include "generic.h" + #include "pm.h" + ++static void __iomem *pmc; ++ + /* + * FIXME: this is needed to communicate between the pinctrl driver and + * the PM implementation in the machine. Possibly part of the PM +@@ -87,7 +90,7 @@ + unsigned long scsr; + int i; + +- scsr = at91_pmc_read(AT91_PMC_SCSR); ++ scsr = readl(pmc + AT91_PMC_SCSR); + + /* USB must not be using PLLB */ + if ((scsr & at91_pm_data.uhp_udp_mask) != 0) { +@@ -101,8 +104,7 @@ + + if ((scsr & (AT91_PMC_PCK0 << i)) == 0) + continue; +- +- css = at91_pmc_read(AT91_PMC_PCKR(i)) & AT91_PMC_CSS; ++ css = readl(pmc + AT91_PMC_PCKR(i)) & AT91_PMC_CSS; + if (css != AT91_PMC_CSS_SLOW) { + pr_err("AT91: PM - Suspend-to-RAM with PCK%d src %d\n", i, css); + return 0; +@@ -145,8 +147,8 @@ + flush_cache_all(); + outer_disable(); + +- at91_suspend_sram_fn(at91_pmc_base, at91_ramc_base[0], +- at91_ramc_base[1], pm_data); ++ at91_suspend_sram_fn(pmc, at91_ramc_base[0], ++ at91_ramc_base[1], pm_data); + + outer_resume(); + } +@@ -353,6 +355,21 @@ + at91_pm_set_standby(standby); + } + ++void at91rm9200_idle(void) ++{ ++ /* ++ * Disable the processor clock. The processor will be automatically ++ * re-enabled by an interrupt or by a reset. 
++ */ ++ writel(AT91_PMC_PCK, pmc + AT91_PMC_SCDR); ++} ++ ++void at91sam9_idle(void) ++{ ++ writel(AT91_PMC_PCK, pmc + AT91_PMC_SCDR); ++ cpu_do_idle(); ++} ++ + static void __init at91_pm_sram_init(void) + { + struct gen_pool *sram_pool; +@@ -399,13 +416,36 @@ + &at91_pm_suspend_in_sram, at91_pm_suspend_in_sram_sz); + } + +-static void __init at91_pm_init(void) ++static const struct of_device_id atmel_pmc_ids[] __initconst = { ++ { .compatible = "atmel,at91rm9200-pmc" }, ++ { .compatible = "atmel,at91sam9260-pmc" }, ++ { .compatible = "atmel,at91sam9g45-pmc" }, ++ { .compatible = "atmel,at91sam9n12-pmc" }, ++ { .compatible = "atmel,at91sam9x5-pmc" }, ++ { .compatible = "atmel,sama5d3-pmc" }, ++ { .compatible = "atmel,sama5d2-pmc" }, ++ { /* sentinel */ }, ++}; ++ ++static void __init at91_pm_init(void (*pm_idle)(void)) + { +- at91_pm_sram_init(); ++ struct device_node *pmc_np; + + if (at91_cpuidle_device.dev.platform_data) + platform_device_register(&at91_cpuidle_device); + ++ pmc_np = of_find_matching_node(NULL, atmel_pmc_ids); ++ pmc = of_iomap(pmc_np, 0); ++ if (!pmc) { ++ pr_err("AT91: PM not supported, PMC not found\n"); ++ return; ++ } ++ ++ if (pm_idle) ++ arm_pm_idle = pm_idle; ++ ++ at91_pm_sram_init(); ++ + if (at91_suspend_sram_fn) + suspend_set_ops(&at91_pm_ops); + else +@@ -424,7 +464,7 @@ + at91_pm_data.uhp_udp_mask = AT91RM9200_PMC_UHP | AT91RM9200_PMC_UDP; + at91_pm_data.memctrl = AT91_MEMCTRL_MC; + +- at91_pm_init(); ++ at91_pm_init(at91rm9200_idle); + } + + void __init at91sam9260_pm_init(void) +@@ -432,7 +472,7 @@ + at91_dt_ramc(); + at91_pm_data.memctrl = AT91_MEMCTRL_SDRAMC; + at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP; +- return at91_pm_init(); ++ at91_pm_init(at91sam9_idle); + } + + void __init at91sam9g45_pm_init(void) +@@ -440,7 +480,7 @@ + at91_dt_ramc(); + at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP; + at91_pm_data.memctrl = AT91_MEMCTRL_DDRSDR; +- return at91_pm_init(); ++ at91_pm_init(at91sam9_idle); + } + + void __init at91sam9x5_pm_init(void) +@@ -448,5 +488,13 @@ + at91_dt_ramc(); + at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP; + at91_pm_data.memctrl = AT91_MEMCTRL_DDRSDR; +- return at91_pm_init(); ++ at91_pm_init(at91sam9_idle); ++} ++ ++void __init sama5_pm_init(void) ++{ ++ at91_dt_ramc(); ++ at91_pm_data.uhp_udp_mask = AT91SAM926x_PMC_UHP | AT91SAM926x_PMC_UDP; ++ at91_pm_data.memctrl = AT91_MEMCTRL_DDRSDR; ++ at91_pm_init(NULL); + } +diff -Nur linux-4.4.13.orig/arch/arm/mach-at91/sama5.c linux-4.4.13/arch/arm/mach-at91/sama5.c +--- linux-4.4.13.orig/arch/arm/mach-at91/sama5.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/mach-at91/sama5.c 2016-06-22 21:55:07.974523867 +0200 +@@ -51,7 +51,7 @@ + soc_dev = soc_device_to_device(soc); + + of_platform_populate(NULL, of_default_bus_match_table, NULL, soc_dev); +- at91sam9x5_pm_init(); ++ sama5_pm_init(); + } + + static const char *const sama5_dt_board_compat[] __initconst = { +diff -Nur linux-4.4.13.orig/arch/arm/mach-exynos/platsmp.c linux-4.4.13/arch/arm/mach-exynos/platsmp.c +--- linux-4.4.13.orig/arch/arm/mach-exynos/platsmp.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/mach-exynos/platsmp.c 2016-06-22 21:55:07.974523867 +0200 +@@ -230,7 +230,7 @@ + return (void __iomem *)(S5P_VA_SCU); + } + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + static void exynos_secondary_init(unsigned int cpu) + { +@@ -243,8 +243,8 @@ + /* + * Synchronise with the boot thread. 
+ */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr) +@@ -308,7 +308,7 @@ + * Set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -335,7 +335,7 @@ + + if (timeout == 0) { + printk(KERN_ERR "cpu1 power enable failed"); +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + return -ETIMEDOUT; + } + } +@@ -381,7 +381,7 @@ + * calibrations, then wait for it to finish + */ + fail: +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? ret : 0; + } +diff -Nur linux-4.4.13.orig/arch/arm/mach-hisi/platmcpm.c linux-4.4.13/arch/arm/mach-hisi/platmcpm.c +--- linux-4.4.13.orig/arch/arm/mach-hisi/platmcpm.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/mach-hisi/platmcpm.c 2016-06-22 21:55:07.974523867 +0200 +@@ -61,7 +61,7 @@ + + static void __iomem *sysctrl, *fabric; + static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER]; +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + static u32 fabric_phys_addr; + /* + * [0]: bootwrapper physical address +@@ -113,7 +113,7 @@ + if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER) + return -EINVAL; + +- spin_lock_irq(&boot_lock); ++ raw_spin_lock_irq(&boot_lock); + + if (hip04_cpu_table[cluster][cpu]) + goto out; +@@ -147,7 +147,7 @@ + + out: + hip04_cpu_table[cluster][cpu]++; +- spin_unlock_irq(&boot_lock); ++ raw_spin_unlock_irq(&boot_lock); + + return 0; + } +@@ -162,11 +162,11 @@ + cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0); + cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); + +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + hip04_cpu_table[cluster][cpu]--; + if (hip04_cpu_table[cluster][cpu] == 1) { + /* A power_up request went ahead of us. */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + return; + } else if (hip04_cpu_table[cluster][cpu] > 1) { + pr_err("Cluster %d CPU%d boots multiple times\n", cluster, cpu); +@@ -174,7 +174,7 @@ + } + + last_man = hip04_cluster_is_down(cluster); +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + if (last_man) { + /* Since it's Cortex A15, disable L2 prefetching. */ + asm volatile( +@@ -203,7 +203,7 @@ + cpu >= HIP04_MAX_CPUS_PER_CLUSTER); + + count = TIMEOUT_MSEC / POLL_MSEC; +- spin_lock_irq(&boot_lock); ++ raw_spin_lock_irq(&boot_lock); + for (tries = 0; tries < count; tries++) { + if (hip04_cpu_table[cluster][cpu]) + goto err; +@@ -211,10 +211,10 @@ + data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster)); + if (data & CORE_WFI_STATUS(cpu)) + break; +- spin_unlock_irq(&boot_lock); ++ raw_spin_unlock_irq(&boot_lock); + /* Wait for clean L2 when the whole cluster is down. 
*/ + msleep(POLL_MSEC); +- spin_lock_irq(&boot_lock); ++ raw_spin_lock_irq(&boot_lock); + } + if (tries >= count) + goto err; +@@ -231,10 +231,10 @@ + goto err; + if (hip04_cluster_is_down(cluster)) + hip04_set_snoop_filter(cluster, 0); +- spin_unlock_irq(&boot_lock); ++ raw_spin_unlock_irq(&boot_lock); + return 1; + err: +- spin_unlock_irq(&boot_lock); ++ raw_spin_unlock_irq(&boot_lock); + return 0; + } + #endif +diff -Nur linux-4.4.13.orig/arch/arm/mach-imx/Kconfig linux-4.4.13/arch/arm/mach-imx/Kconfig +--- linux-4.4.13.orig/arch/arm/mach-imx/Kconfig 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/mach-imx/Kconfig 2016-06-22 21:55:07.974523867 +0200 +@@ -524,7 +524,7 @@ + bool "i.MX6 Quad/DualLite support" + select ARM_ERRATA_764369 if SMP + select HAVE_ARM_SCU if SMP +- select HAVE_ARM_TWD if SMP ++ select HAVE_ARM_TWD + select PCI_DOMAINS if PCI + select PINCTRL_IMX6Q + select SOC_IMX6 +diff -Nur linux-4.4.13.orig/arch/arm/mach-omap2/omap-smp.c linux-4.4.13/arch/arm/mach-omap2/omap-smp.c +--- linux-4.4.13.orig/arch/arm/mach-omap2/omap-smp.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/mach-omap2/omap-smp.c 2016-06-22 21:55:07.974523867 +0200 +@@ -43,7 +43,7 @@ + /* SCU base address */ + static void __iomem *scu_base; + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + void __iomem *omap4_get_scu_base(void) + { +@@ -74,8 +74,8 @@ + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -89,7 +89,7 @@ + * Set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * Update the AuxCoreBoot0 with boot state for secondary core. +@@ -166,7 +166,7 @@ + * Now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return 0; + } +diff -Nur linux-4.4.13.orig/arch/arm/mach-prima2/platsmp.c linux-4.4.13/arch/arm/mach-prima2/platsmp.c +--- linux-4.4.13.orig/arch/arm/mach-prima2/platsmp.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/mach-prima2/platsmp.c 2016-06-22 21:55:07.974523867 +0200 +@@ -22,7 +22,7 @@ + + static void __iomem *clk_base; + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + static void sirfsoc_secondary_init(unsigned int cpu) + { +@@ -36,8 +36,8 @@ + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static const struct of_device_id clk_ids[] = { +@@ -75,7 +75,7 @@ + /* make sure write buffer is drained */ + mb(); + +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -107,7 +107,7 @@ + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? 
-ENOSYS : 0; + } +diff -Nur linux-4.4.13.orig/arch/arm/mach-qcom/platsmp.c linux-4.4.13/arch/arm/mach-qcom/platsmp.c +--- linux-4.4.13.orig/arch/arm/mach-qcom/platsmp.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/mach-qcom/platsmp.c 2016-06-22 21:55:07.974523867 +0200 +@@ -46,7 +46,7 @@ + + extern void secondary_startup_arm(void); + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + #ifdef CONFIG_HOTPLUG_CPU + static void qcom_cpu_die(unsigned int cpu) +@@ -60,8 +60,8 @@ + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static int scss_release_secondary(unsigned int cpu) +@@ -284,7 +284,7 @@ + * set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * Send the secondary CPU a soft interrupt, thereby causing +@@ -297,7 +297,7 @@ + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return ret; + } +diff -Nur linux-4.4.13.orig/arch/arm/mach-spear/platsmp.c linux-4.4.13/arch/arm/mach-spear/platsmp.c +--- linux-4.4.13.orig/arch/arm/mach-spear/platsmp.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/mach-spear/platsmp.c 2016-06-22 21:55:07.974523867 +0200 +@@ -32,7 +32,7 @@ + sync_cache_w(&pen_release); + } + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + static void __iomem *scu_base = IOMEM(VA_SCU_BASE); + +@@ -47,8 +47,8 @@ + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -59,7 +59,7 @@ + * set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -84,7 +84,7 @@ + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; + } +diff -Nur linux-4.4.13.orig/arch/arm/mach-sti/platsmp.c linux-4.4.13/arch/arm/mach-sti/platsmp.c +--- linux-4.4.13.orig/arch/arm/mach-sti/platsmp.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/mach-sti/platsmp.c 2016-06-22 21:55:07.974523867 +0200 +@@ -35,7 +35,7 @@ + sync_cache_w(&pen_release); + } + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + static void sti_secondary_init(unsigned int cpu) + { +@@ -48,8 +48,8 @@ + /* + * Synchronise with the boot thread. 
+ */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -60,7 +60,7 @@ + * set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * The secondary processor is waiting to be released from +@@ -91,7 +91,7 @@ + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? -ENOSYS : 0; + } +diff -Nur linux-4.4.13.orig/arch/arm/mm/fault.c linux-4.4.13/arch/arm/mm/fault.c +--- linux-4.4.13.orig/arch/arm/mm/fault.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/mm/fault.c 2016-06-22 21:55:07.974523867 +0200 +@@ -430,6 +430,9 @@ + if (addr < TASK_SIZE) + return do_page_fault(addr, fsr, regs); + ++ if (interrupts_enabled(regs)) ++ local_irq_enable(); ++ + if (user_mode(regs)) + goto bad_area; + +@@ -497,6 +500,9 @@ + static int + do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) + { ++ if (interrupts_enabled(regs)) ++ local_irq_enable(); ++ + do_bad_area(addr, fsr, regs); + return 0; + } +diff -Nur linux-4.4.13.orig/arch/arm/mm/highmem.c linux-4.4.13/arch/arm/mm/highmem.c +--- linux-4.4.13.orig/arch/arm/mm/highmem.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/mm/highmem.c 2016-06-22 21:55:07.974523867 +0200 +@@ -34,6 +34,11 @@ + return *ptep; + } + ++static unsigned int fixmap_idx(int type) ++{ ++ return FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); ++} ++ + void *kmap(struct page *page) + { + might_sleep(); +@@ -54,12 +59,13 @@ + + void *kmap_atomic(struct page *page) + { ++ pte_t pte = mk_pte(page, kmap_prot); + unsigned int idx; + unsigned long vaddr; + void *kmap; + int type; + +- preempt_disable(); ++ preempt_disable_nort(); + pagefault_disable(); + if (!PageHighMem(page)) + return page_address(page); +@@ -79,7 +85,7 @@ + + type = kmap_atomic_idx_push(); + +- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); ++ idx = fixmap_idx(type); + vaddr = __fix_to_virt(idx); + #ifdef CONFIG_DEBUG_HIGHMEM + /* +@@ -93,7 +99,10 @@ + * in place, so the contained TLB flush ensures the TLB is updated + * with the new mapping. 
+ */ +- set_fixmap_pte(idx, mk_pte(page, kmap_prot)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_fixmap_pte(idx, pte); + + return (void *)vaddr; + } +@@ -106,44 +115,75 @@ + + if (kvaddr >= (void *)FIXADDR_START) { + type = kmap_atomic_idx(); +- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); ++ idx = fixmap_idx(type); + + if (cache_is_vivt()) + __cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = __pte(0); ++#endif + #ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(vaddr != __fix_to_virt(idx)); +- set_fixmap_pte(idx, __pte(0)); + #else + (void) idx; /* to kill a warning */ + #endif ++ set_fixmap_pte(idx, __pte(0)); + kmap_atomic_idx_pop(); + } else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) { + /* this address was obtained through kmap_high_get() */ + kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)])); + } + pagefault_enable(); +- preempt_enable(); ++ preempt_enable_nort(); + } + EXPORT_SYMBOL(__kunmap_atomic); + + void *kmap_atomic_pfn(unsigned long pfn) + { ++ pte_t pte = pfn_pte(pfn, kmap_prot); + unsigned long vaddr; + int idx, type; + struct page *page = pfn_to_page(pfn); + +- preempt_disable(); ++ preempt_disable_nort(); + pagefault_disable(); + if (!PageHighMem(page)) + return page_address(page); + + type = kmap_atomic_idx_push(); +- idx = FIX_KMAP_BEGIN + type + KM_TYPE_NR * smp_processor_id(); ++ idx = fixmap_idx(type); + vaddr = __fix_to_virt(idx); + #ifdef CONFIG_DEBUG_HIGHMEM + BUG_ON(!pte_none(get_fixmap_pte(vaddr))); + #endif +- set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_fixmap_pte(idx, pte); + + return (void *)vaddr; + } ++#if defined CONFIG_PREEMPT_RT_FULL ++void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) ++{ ++ int i; ++ ++ /* ++ * Clear @prev's kmap_atomic mappings ++ */ ++ for (i = 0; i < prev_p->kmap_idx; i++) { ++ int idx = fixmap_idx(i); ++ ++ set_fixmap_pte(idx, __pte(0)); ++ } ++ /* ++ * Restore @next_p's kmap_atomic mappings ++ */ ++ for (i = 0; i < next_p->kmap_idx; i++) { ++ int idx = fixmap_idx(i); ++ ++ if (!pte_none(next_p->kmap_pte[i])) ++ set_fixmap_pte(idx, next_p->kmap_pte[i]); ++ } ++} ++#endif +diff -Nur linux-4.4.13.orig/arch/arm/plat-versatile/platsmp.c linux-4.4.13/arch/arm/plat-versatile/platsmp.c +--- linux-4.4.13.orig/arch/arm/plat-versatile/platsmp.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm/plat-versatile/platsmp.c 2016-06-22 21:55:07.974523867 +0200 +@@ -30,7 +30,7 @@ + sync_cache_w(&pen_release); + } + +-static DEFINE_SPINLOCK(boot_lock); ++static DEFINE_RAW_SPINLOCK(boot_lock); + + void versatile_secondary_init(unsigned int cpu) + { +@@ -43,8 +43,8 @@ + /* + * Synchronise with the boot thread. + */ +- spin_lock(&boot_lock); +- spin_unlock(&boot_lock); ++ raw_spin_lock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + } + + int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle) +@@ -55,7 +55,7 @@ + * Set synchronisation state between this boot processor + * and the secondary one + */ +- spin_lock(&boot_lock); ++ raw_spin_lock(&boot_lock); + + /* + * This is really belt and braces; we hold unintended secondary +@@ -85,7 +85,7 @@ + * now the secondary core is starting up let it run its + * calibrations, then wait for it to finish + */ +- spin_unlock(&boot_lock); ++ raw_spin_unlock(&boot_lock); + + return pen_release != -1 ? 
-ENOSYS : 0; + } +diff -Nur linux-4.4.13.orig/arch/arm64/include/asm/thread_info.h linux-4.4.13/arch/arm64/include/asm/thread_info.h +--- linux-4.4.13.orig/arch/arm64/include/asm/thread_info.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm64/include/asm/thread_info.h 2016-06-22 21:55:07.974523867 +0200 +@@ -49,6 +49,7 @@ + mm_segment_t addr_limit; /* address limit */ + struct task_struct *task; /* main task structure */ + int preempt_count; /* 0 => preemptable, <0 => bug */ ++ int preempt_lazy_count; /* 0 => preemptable, <0 => bug */ + int cpu; /* cpu */ + }; + +@@ -103,6 +104,7 @@ + #define TIF_NEED_RESCHED 1 + #define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ + #define TIF_FOREIGN_FPSTATE 3 /* CPU's FP state is not current's */ ++#define TIF_NEED_RESCHED_LAZY 4 + #define TIF_NOHZ 7 + #define TIF_SYSCALL_TRACE 8 + #define TIF_SYSCALL_AUDIT 9 +@@ -118,6 +120,7 @@ + #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) + #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) + #define _TIF_FOREIGN_FPSTATE (1 << TIF_FOREIGN_FPSTATE) ++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) + #define _TIF_NOHZ (1 << TIF_NOHZ) + #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) + #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) +@@ -126,7 +129,8 @@ + #define _TIF_32BIT (1 << TIF_32BIT) + + #define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | \ +- _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE) ++ _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \ ++ _TIF_NEED_RESCHED_LAZY) + + #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ + _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \ +diff -Nur linux-4.4.13.orig/arch/arm64/Kconfig linux-4.4.13/arch/arm64/Kconfig +--- linux-4.4.13.orig/arch/arm64/Kconfig 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm64/Kconfig 2016-06-22 21:55:07.974523867 +0200 +@@ -76,6 +76,7 @@ + select HAVE_PERF_REGS + select HAVE_PERF_USER_STACK_DUMP + select HAVE_RCU_TABLE_FREE ++ select HAVE_PREEMPT_LAZY + select HAVE_SYSCALL_TRACEPOINTS + select IOMMU_DMA if IOMMU_SUPPORT + select IRQ_DOMAIN +@@ -562,7 +563,7 @@ + + config XEN + bool "Xen guest support on ARM64" +- depends on ARM64 && OF ++ depends on ARM64 && OF && !PREEMPT_RT_FULL + select SWIOTLB_XEN + help + Say Y if you want to run Linux in a Virtual Machine on Xen on ARM64. 
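The boot_lock and wait-queue conversions in the hunks above all follow one pattern: with PREEMPT_RT_FULL, spinlock_t becomes a sleeping lock, so any lock taken in a context that must never sleep (secondary-CPU bring-up, wakeups from hard-IRQ context) is switched to raw_spinlock_t, which keeps the original spinning semantics. The following stand-alone sketch of that locking pattern is illustrative only and not part of the patch; demo_lock and demo_counter are invented names.

/*
 * Minimal sketch of the raw_spinlock_t pattern used for boot_lock above.
 * On PREEMPT_RT, spin_lock() may sleep; raw_spin_lock() never does, so it
 * stays safe in atomic contexts such as secondary-CPU bring-up.
 */
#include <linux/module.h>
#include <linux/spinlock.h>

static DEFINE_RAW_SPINLOCK(demo_lock);
static int demo_counter;

static int __init demo_init(void)
{
	unsigned long flags;

	/* Truly atomic critical section, valid even with PREEMPT_RT_FULL. */
	raw_spin_lock_irqsave(&demo_lock, flags);
	demo_counter++;
	raw_spin_unlock_irqrestore(&demo_lock, flags);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The cost of a raw lock is that its critical section is never preemptible, which is exactly why the patch applies it only to short, bounded sections like the bring-up handshake.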
+diff -Nur linux-4.4.13.orig/arch/arm64/kernel/asm-offsets.c linux-4.4.13/arch/arm64/kernel/asm-offsets.c +--- linux-4.4.13.orig/arch/arm64/kernel/asm-offsets.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm64/kernel/asm-offsets.c 2016-06-22 21:55:07.974523867 +0200 +@@ -35,6 +35,7 @@ + BLANK(); + DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); + DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); ++ DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count)); + DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit)); + DEFINE(TI_TASK, offsetof(struct thread_info, task)); + DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); +diff -Nur linux-4.4.13.orig/arch/arm64/kernel/entry.S linux-4.4.13/arch/arm64/kernel/entry.S +--- linux-4.4.13.orig/arch/arm64/kernel/entry.S 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/arm64/kernel/entry.S 2016-06-22 21:55:07.978523867 +0200 +@@ -363,11 +363,16 @@ + #ifdef CONFIG_PREEMPT + get_thread_info tsk + ldr w24, [tsk, #TI_PREEMPT] // get preempt count +- cbnz w24, 1f // preempt count != 0 ++ cbnz w24, 2f // preempt count != 0 + ldr x0, [tsk, #TI_FLAGS] // get flags +- tbz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? +- bl el1_preempt ++ tbnz x0, #TIF_NEED_RESCHED, 1f // needs rescheduling? ++ ++ ldr w24, [tsk, #TI_PREEMPT_LAZY] // get preempt lazy count ++ cbnz w24, 2f // preempt lazy count != 0 ++ tbz x0, #TIF_NEED_RESCHED_LAZY, 2f // needs rescheduling? + 1: ++ bl el1_preempt ++2: + #endif + #ifdef CONFIG_TRACE_IRQFLAGS + bl trace_hardirqs_on +@@ -381,6 +386,7 @@ + 1: bl preempt_schedule_irq // irq en/disable is done inside + ldr x0, [tsk, #TI_FLAGS] // get new tasks TI_FLAGS + tbnz x0, #TIF_NEED_RESCHED, 1b // needs rescheduling? ++ tbnz x0, #TIF_NEED_RESCHED_LAZY, 1b // needs rescheduling? 
+ ret x24 + #endif + +@@ -625,6 +631,7 @@ + */ + work_pending: + tbnz x1, #TIF_NEED_RESCHED, work_resched ++ tbnz x1, #TIF_NEED_RESCHED_LAZY, work_resched + /* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */ + ldr x2, [sp, #S_PSTATE] + mov x0, sp // 'regs' +diff -Nur linux-4.4.13.orig/arch/Kconfig linux-4.4.13/arch/Kconfig +--- linux-4.4.13.orig/arch/Kconfig 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/Kconfig 2016-06-22 21:55:07.978523867 +0200 +@@ -9,6 +9,7 @@ + tristate "OProfile system profiling" + depends on PROFILING + depends on HAVE_OPROFILE ++ depends on !PREEMPT_RT_FULL + select RING_BUFFER + select RING_BUFFER_ALLOW_SWAP + help +@@ -52,6 +53,7 @@ + config JUMP_LABEL + bool "Optimize very unlikely/likely branches" + depends on HAVE_ARCH_JUMP_LABEL ++ depends on (!INTERRUPT_OFF_HIST && !PREEMPT_OFF_HIST && !WAKEUP_LATENCY_HIST && !MISSED_TIMER_OFFSETS_HIST) + help + This option enables a transparent branch optimization that + makes certain almost-always-true or almost-always-false branch +diff -Nur linux-4.4.13.orig/arch/mips/Kconfig linux-4.4.13/arch/mips/Kconfig +--- linux-4.4.13.orig/arch/mips/Kconfig 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/mips/Kconfig 2016-06-22 21:55:07.978523867 +0200 +@@ -2410,7 +2410,7 @@ + # + config HIGHMEM + bool "High Memory Support" +- depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA ++ depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL + + config CPU_SUPPORTS_HIGHMEM + bool +diff -Nur linux-4.4.13.orig/arch/mips/kvm/mips.c linux-4.4.13/arch/mips/kvm/mips.c +--- linux-4.4.13.orig/arch/mips/kvm/mips.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/mips/kvm/mips.c 2016-06-22 21:55:07.978523867 +0200 +@@ -445,8 +445,8 @@ + + dvcpu->arch.wait = 0; + +- if (waitqueue_active(&dvcpu->wq)) +- wake_up_interruptible(&dvcpu->wq); ++ if (swait_active(&dvcpu->wq)) ++ swake_up(&dvcpu->wq); + + return 0; + } +@@ -1174,8 +1174,8 @@ + kvm_mips_callbacks->queue_timer_int(vcpu); + + vcpu->arch.wait = 0; +- if (waitqueue_active(&vcpu->wq)) +- wake_up_interruptible(&vcpu->wq); ++ if (swait_active(&vcpu->wq)) ++ swake_up(&vcpu->wq); + } + + /* low level hrtimer wake routine */ +diff -Nur linux-4.4.13.orig/arch/powerpc/include/asm/kvm_host.h linux-4.4.13/arch/powerpc/include/asm/kvm_host.h +--- linux-4.4.13.orig/arch/powerpc/include/asm/kvm_host.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/powerpc/include/asm/kvm_host.h 2016-06-22 21:55:07.978523867 +0200 +@@ -286,7 +286,7 @@ + struct list_head runnable_threads; + struct list_head preempt_list; + spinlock_t lock; +- wait_queue_head_t wq; ++ struct swait_queue_head wq; + spinlock_t stoltb_lock; /* protects stolen_tb and preempt_tb */ + u64 stolen_tb; + u64 preempt_tb; +@@ -626,7 +626,7 @@ + u8 prodded; + u32 last_inst; + +- wait_queue_head_t *wqp; ++ struct swait_queue_head *wqp; + struct kvmppc_vcore *vcore; + int ret; + int trap; +diff -Nur linux-4.4.13.orig/arch/powerpc/include/asm/thread_info.h linux-4.4.13/arch/powerpc/include/asm/thread_info.h +--- linux-4.4.13.orig/arch/powerpc/include/asm/thread_info.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/powerpc/include/asm/thread_info.h 2016-06-22 21:55:07.978523867 +0200 +@@ -42,6 +42,8 @@ + int cpu; /* cpu we're on */ + int preempt_count; /* 0 => preemptable, + <0 => BUG */ ++ int preempt_lazy_count; /* 0 => preemptable, ++ <0 => BUG */ + unsigned long local_flags; /* private flags for 
thread */ + + /* low level flags - has atomic operations done on it */ +@@ -82,8 +84,7 @@ + #define TIF_SYSCALL_TRACE 0 /* syscall trace active */ + #define TIF_SIGPENDING 1 /* signal pending */ + #define TIF_NEED_RESCHED 2 /* rescheduling necessary */ +-#define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling +- TIF_NEED_RESCHED */ ++#define TIF_NEED_RESCHED_LAZY 3 /* lazy rescheduling necessary */ + #define TIF_32BIT 4 /* 32 bit binary */ + #define TIF_RESTORE_TM 5 /* need to restore TM FP/VEC/VSX */ + #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ +@@ -101,6 +102,8 @@ + #if defined(CONFIG_PPC64) + #define TIF_ELF2ABI 18 /* function descriptors must die! */ + #endif ++#define TIF_POLLING_NRFLAG 19 /* true if poll_idle() is polling ++ TIF_NEED_RESCHED */ + + /* as above, but as bit values */ + #define _TIF_SYSCALL_TRACE (1<flags) + set_bits(irqtp->flags, &curtp->flags); + } ++#endif + + irq_hw_number_t virq_to_hw(unsigned int virq) + { +diff -Nur linux-4.4.13.orig/arch/powerpc/kernel/misc_32.S linux-4.4.13/arch/powerpc/kernel/misc_32.S +--- linux-4.4.13.orig/arch/powerpc/kernel/misc_32.S 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/powerpc/kernel/misc_32.S 2016-06-22 21:55:07.978523867 +0200 +@@ -40,6 +40,7 @@ + * We store the saved ksp_limit in the unused part + * of the STACK_FRAME_OVERHEAD + */ ++#ifndef CONFIG_PREEMPT_RT_FULL + _GLOBAL(call_do_softirq) + mflr r0 + stw r0,4(r1) +@@ -56,6 +57,7 @@ + stw r10,THREAD+KSP_LIMIT(r2) + mtlr r0 + blr ++#endif + + /* + * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp); +diff -Nur linux-4.4.13.orig/arch/powerpc/kernel/misc_64.S linux-4.4.13/arch/powerpc/kernel/misc_64.S +--- linux-4.4.13.orig/arch/powerpc/kernel/misc_64.S 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/powerpc/kernel/misc_64.S 2016-06-22 21:55:07.978523867 +0200 +@@ -30,6 +30,7 @@ + + .text + ++#ifndef CONFIG_PREEMPT_RT_FULL + _GLOBAL(call_do_softirq) + mflr r0 + std r0,16(r1) +@@ -40,6 +41,7 @@ + ld r0,16(r1) + mtlr r0 + blr ++#endif + + _GLOBAL(call_do_irq) + mflr r0 +diff -Nur linux-4.4.13.orig/arch/powerpc/kvm/book3s_hv.c linux-4.4.13/arch/powerpc/kvm/book3s_hv.c +--- linux-4.4.13.orig/arch/powerpc/kvm/book3s_hv.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/powerpc/kvm/book3s_hv.c 2016-06-22 21:55:07.978523867 +0200 +@@ -114,11 +114,11 @@ + static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu) + { + int cpu; +- wait_queue_head_t *wqp; ++ struct swait_queue_head *wqp; + + wqp = kvm_arch_vcpu_wq(vcpu); +- if (waitqueue_active(wqp)) { +- wake_up_interruptible(wqp); ++ if (swait_active(wqp)) { ++ swake_up(wqp); + ++vcpu->stat.halt_wakeup; + } + +@@ -707,8 +707,8 @@ + tvcpu->arch.prodded = 1; + smp_mb(); + if (vcpu->arch.ceded) { +- if (waitqueue_active(&vcpu->wq)) { +- wake_up_interruptible(&vcpu->wq); ++ if (swait_active(&vcpu->wq)) { ++ swake_up(&vcpu->wq); + vcpu->stat.halt_wakeup++; + } + } +@@ -1447,7 +1447,7 @@ + INIT_LIST_HEAD(&vcore->runnable_threads); + spin_lock_init(&vcore->lock); + spin_lock_init(&vcore->stoltb_lock); +- init_waitqueue_head(&vcore->wq); ++ init_swait_queue_head(&vcore->wq); + vcore->preempt_tb = TB_NIL; + vcore->lpcr = kvm->arch.lpcr; + vcore->first_vcpuid = core * threads_per_subcore; +@@ -2519,10 +2519,9 @@ + { + struct kvm_vcpu *vcpu; + int do_sleep = 1; ++ DECLARE_SWAITQUEUE(wait); + +- DEFINE_WAIT(wait); +- +- prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE); ++ prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE); + + /* + * Check one last time for pending 
exceptions and ceded state after +@@ -2536,7 +2535,7 @@ + } + + if (!do_sleep) { +- finish_wait(&vc->wq, &wait); ++ finish_swait(&vc->wq, &wait); + return; + } + +@@ -2544,7 +2543,7 @@ + trace_kvmppc_vcore_blocked(vc, 0); + spin_unlock(&vc->lock); + schedule(); +- finish_wait(&vc->wq, &wait); ++ finish_swait(&vc->wq, &wait); + spin_lock(&vc->lock); + vc->vcore_state = VCORE_INACTIVE; + trace_kvmppc_vcore_blocked(vc, 1); +@@ -2600,7 +2599,7 @@ + kvmppc_start_thread(vcpu, vc); + trace_kvm_guest_enter(vcpu); + } else if (vc->vcore_state == VCORE_SLEEPING) { +- wake_up(&vc->wq); ++ swake_up(&vc->wq); + } + + } +diff -Nur linux-4.4.13.orig/arch/powerpc/kvm/Kconfig linux-4.4.13/arch/powerpc/kvm/Kconfig +--- linux-4.4.13.orig/arch/powerpc/kvm/Kconfig 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/powerpc/kvm/Kconfig 2016-06-22 21:55:07.978523867 +0200 +@@ -172,6 +172,7 @@ + config KVM_MPIC + bool "KVM in-kernel MPIC emulation" + depends on KVM && E500 ++ depends on !PREEMPT_RT_FULL + select HAVE_KVM_IRQCHIP + select HAVE_KVM_IRQFD + select HAVE_KVM_IRQ_ROUTING +diff -Nur linux-4.4.13.orig/arch/powerpc/platforms/ps3/device-init.c linux-4.4.13/arch/powerpc/platforms/ps3/device-init.c +--- linux-4.4.13.orig/arch/powerpc/platforms/ps3/device-init.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/powerpc/platforms/ps3/device-init.c 2016-06-22 21:55:07.982523867 +0200 +@@ -752,7 +752,7 @@ + } + pr_debug("%s:%u: notification %s issued\n", __func__, __LINE__, op); + +- res = wait_event_interruptible(dev->done.wait, ++ res = swait_event_interruptible(dev->done.wait, + dev->done.done || kthread_should_stop()); + if (kthread_should_stop()) + res = -EINTR; +diff -Nur linux-4.4.13.orig/arch/s390/include/asm/kvm_host.h linux-4.4.13/arch/s390/include/asm/kvm_host.h +--- linux-4.4.13.orig/arch/s390/include/asm/kvm_host.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/s390/include/asm/kvm_host.h 2016-06-22 21:55:07.982523867 +0200 +@@ -427,7 +427,7 @@ + struct kvm_s390_local_interrupt { + spinlock_t lock; + struct kvm_s390_float_interrupt *float_int; +- wait_queue_head_t *wq; ++ struct swait_queue_head *wq; + atomic_t *cpuflags; + DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS); + struct kvm_s390_irq_payload irq; +diff -Nur linux-4.4.13.orig/arch/s390/kvm/interrupt.c linux-4.4.13/arch/s390/kvm/interrupt.c +--- linux-4.4.13.orig/arch/s390/kvm/interrupt.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/s390/kvm/interrupt.c 2016-06-22 21:55:07.982523867 +0200 +@@ -868,13 +868,13 @@ + + void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu) + { +- if (waitqueue_active(&vcpu->wq)) { ++ if (swait_active(&vcpu->wq)) { + /* + * The vcpu gave up the cpu voluntarily, mark it as a good + * yield-candidate. 
+ */ + vcpu->preempted = true; +- wake_up_interruptible(&vcpu->wq); ++ swake_up(&vcpu->wq); + vcpu->stat.halt_wakeup++; + } + } +diff -Nur linux-4.4.13.orig/arch/sh/kernel/irq.c linux-4.4.13/arch/sh/kernel/irq.c +--- linux-4.4.13.orig/arch/sh/kernel/irq.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/sh/kernel/irq.c 2016-06-22 21:55:07.982523867 +0200 +@@ -147,6 +147,7 @@ + hardirq_ctx[cpu] = NULL; + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + void do_softirq_own_stack(void) + { + struct thread_info *curctx; +@@ -174,6 +175,7 @@ + "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr" + ); + } ++#endif + #else + static inline void handle_one_irq(unsigned int irq) + { +diff -Nur linux-4.4.13.orig/arch/sparc/Kconfig linux-4.4.13/arch/sparc/Kconfig +--- linux-4.4.13.orig/arch/sparc/Kconfig 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/sparc/Kconfig 2016-06-22 21:55:07.982523867 +0200 +@@ -189,12 +189,10 @@ + source kernel/Kconfig.hz + + config RWSEM_GENERIC_SPINLOCK +- bool +- default y if SPARC32 ++ def_bool PREEMPT_RT_FULL + + config RWSEM_XCHGADD_ALGORITHM +- bool +- default y if SPARC64 ++ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL + + config GENERIC_HWEIGHT + bool +diff -Nur linux-4.4.13.orig/arch/sparc/kernel/irq_64.c linux-4.4.13/arch/sparc/kernel/irq_64.c +--- linux-4.4.13.orig/arch/sparc/kernel/irq_64.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/sparc/kernel/irq_64.c 2016-06-22 21:55:07.982523867 +0200 +@@ -854,6 +854,7 @@ + set_irq_regs(old_regs); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + void do_softirq_own_stack(void) + { + void *orig_sp, *sp = softirq_stack[smp_processor_id()]; +@@ -868,6 +869,7 @@ + __asm__ __volatile__("mov %0, %%sp" + : : "r" (orig_sp)); + } ++#endif + + #ifdef CONFIG_HOTPLUG_CPU + void fixup_irqs(void) +diff -Nur linux-4.4.13.orig/arch/x86/crypto/aesni-intel_glue.c linux-4.4.13/arch/x86/crypto/aesni-intel_glue.c +--- linux-4.4.13.orig/arch/x86/crypto/aesni-intel_glue.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/crypto/aesni-intel_glue.c 2016-06-22 21:55:07.982523867 +0200 +@@ -383,14 +383,14 @@ + err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, +- nbytes & AES_BLOCK_MASK); ++ nbytes & AES_BLOCK_MASK); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -407,14 +407,14 @@ + err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -431,14 +431,14 @@ + err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, walk.iv); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -455,14 +455,14 @@ + err = blkcipher_walk_virt(desc, &walk); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- 
kernel_fpu_begin(); + while ((nbytes = walk.nbytes)) { ++ kernel_fpu_begin(); + aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, walk.iv); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } +- kernel_fpu_end(); + + return err; + } +@@ -514,18 +514,20 @@ + err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE); + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + +- kernel_fpu_begin(); + while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) { ++ kernel_fpu_begin(); + aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr, + nbytes & AES_BLOCK_MASK, walk.iv); ++ kernel_fpu_end(); + nbytes &= AES_BLOCK_SIZE - 1; + err = blkcipher_walk_done(desc, &walk, nbytes); + } + if (walk.nbytes) { ++ kernel_fpu_begin(); + ctr_crypt_final(ctx, &walk); ++ kernel_fpu_end(); + err = blkcipher_walk_done(desc, &walk, 0); + } +- kernel_fpu_end(); + + return err; + } +diff -Nur linux-4.4.13.orig/arch/x86/crypto/cast5_avx_glue.c linux-4.4.13/arch/x86/crypto/cast5_avx_glue.c +--- linux-4.4.13.orig/arch/x86/crypto/cast5_avx_glue.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/crypto/cast5_avx_glue.c 2016-06-22 21:55:07.982523867 +0200 +@@ -59,7 +59,7 @@ + static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk, + bool enc) + { +- bool fpu_enabled = false; ++ bool fpu_enabled; + struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm); + const unsigned int bsize = CAST5_BLOCK_SIZE; + unsigned int nbytes; +@@ -75,7 +75,7 @@ + u8 *wsrc = walk->src.virt.addr; + u8 *wdst = walk->dst.virt.addr; + +- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); ++ fpu_enabled = cast5_fpu_begin(false, nbytes); + + /* Process multi-block batch */ + if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) { +@@ -103,10 +103,9 @@ + } while (nbytes >= bsize); + + done: ++ cast5_fpu_end(fpu_enabled); + err = blkcipher_walk_done(desc, walk, nbytes); + } +- +- cast5_fpu_end(fpu_enabled); + return err; + } + +@@ -227,7 +226,7 @@ + static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) + { +- bool fpu_enabled = false; ++ bool fpu_enabled; + struct blkcipher_walk walk; + int err; + +@@ -236,12 +235,11 @@ + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + while ((nbytes = walk.nbytes)) { +- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); ++ fpu_enabled = cast5_fpu_begin(false, nbytes); + nbytes = __cbc_decrypt(desc, &walk); ++ cast5_fpu_end(fpu_enabled); + err = blkcipher_walk_done(desc, &walk, nbytes); + } +- +- cast5_fpu_end(fpu_enabled); + return err; + } + +@@ -311,7 +309,7 @@ + static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst, + struct scatterlist *src, unsigned int nbytes) + { +- bool fpu_enabled = false; ++ bool fpu_enabled; + struct blkcipher_walk walk; + int err; + +@@ -320,13 +318,12 @@ + desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP; + + while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) { +- fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes); ++ fpu_enabled = cast5_fpu_begin(false, nbytes); + nbytes = __ctr_crypt(desc, &walk); ++ cast5_fpu_end(fpu_enabled); + err = blkcipher_walk_done(desc, &walk, nbytes); + } + +- cast5_fpu_end(fpu_enabled); +- + if (walk.nbytes) { + ctr_crypt_final(desc, &walk); + err = blkcipher_walk_done(desc, &walk, 0); +diff -Nur linux-4.4.13.orig/arch/x86/crypto/glue_helper.c linux-4.4.13/arch/x86/crypto/glue_helper.c +--- linux-4.4.13.orig/arch/x86/crypto/glue_helper.c 2016-06-08 03:14:51.000000000 
+0200 ++++ linux-4.4.13/arch/x86/crypto/glue_helper.c 2016-06-22 21:55:07.982523867 +0200 +@@ -39,7 +39,7 @@ + void *ctx = crypto_blkcipher_ctx(desc->tfm); + const unsigned int bsize = 128 / 8; + unsigned int nbytes, i, func_bytes; +- bool fpu_enabled = false; ++ bool fpu_enabled; + int err; + + err = blkcipher_walk_virt(desc, walk); +@@ -49,7 +49,7 @@ + u8 *wdst = walk->dst.virt.addr; + + fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, +- desc, fpu_enabled, nbytes); ++ desc, false, nbytes); + + for (i = 0; i < gctx->num_funcs; i++) { + func_bytes = bsize * gctx->funcs[i].num_blocks; +@@ -71,10 +71,10 @@ + } + + done: ++ glue_fpu_end(fpu_enabled); + err = blkcipher_walk_done(desc, walk, nbytes); + } + +- glue_fpu_end(fpu_enabled); + return err; + } + +@@ -194,7 +194,7 @@ + struct scatterlist *src, unsigned int nbytes) + { + const unsigned int bsize = 128 / 8; +- bool fpu_enabled = false; ++ bool fpu_enabled; + struct blkcipher_walk walk; + int err; + +@@ -203,12 +203,12 @@ + + while ((nbytes = walk.nbytes)) { + fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, +- desc, fpu_enabled, nbytes); ++ desc, false, nbytes); + nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk); ++ glue_fpu_end(fpu_enabled); + err = blkcipher_walk_done(desc, &walk, nbytes); + } + +- glue_fpu_end(fpu_enabled); + return err; + } + EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit); +@@ -277,7 +277,7 @@ + struct scatterlist *src, unsigned int nbytes) + { + const unsigned int bsize = 128 / 8; +- bool fpu_enabled = false; ++ bool fpu_enabled; + struct blkcipher_walk walk; + int err; + +@@ -286,13 +286,12 @@ + + while ((nbytes = walk.nbytes) >= bsize) { + fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, +- desc, fpu_enabled, nbytes); ++ desc, false, nbytes); + nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk); ++ glue_fpu_end(fpu_enabled); + err = blkcipher_walk_done(desc, &walk, nbytes); + } + +- glue_fpu_end(fpu_enabled); +- + if (walk.nbytes) { + glue_ctr_crypt_final_128bit( + gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk); +@@ -347,7 +346,7 @@ + void *tweak_ctx, void *crypt_ctx) + { + const unsigned int bsize = 128 / 8; +- bool fpu_enabled = false; ++ bool fpu_enabled; + struct blkcipher_walk walk; + int err; + +@@ -360,21 +359,21 @@ + + /* set minimum length to bsize, for tweak_fn */ + fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, +- desc, fpu_enabled, ++ desc, false, + nbytes < bsize ? 
bsize : nbytes); +- + /* calculate first value of T */ + tweak_fn(tweak_ctx, walk.iv, walk.iv); ++ glue_fpu_end(fpu_enabled); + + while (nbytes) { ++ fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit, ++ desc, false, nbytes); + nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk); + ++ glue_fpu_end(fpu_enabled); + err = blkcipher_walk_done(desc, &walk, nbytes); + nbytes = walk.nbytes; + } +- +- glue_fpu_end(fpu_enabled); +- + return err; + } + EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit); +diff -Nur linux-4.4.13.orig/arch/x86/entry/common.c linux-4.4.13/arch/x86/entry/common.c +--- linux-4.4.13.orig/arch/x86/entry/common.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/entry/common.c 2016-06-22 21:55:07.982523867 +0200 +@@ -220,7 +220,7 @@ + + #define EXIT_TO_USERMODE_LOOP_FLAGS \ + (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_UPROBE | \ +- _TIF_NEED_RESCHED | _TIF_USER_RETURN_NOTIFY) ++ _TIF_NEED_RESCHED_MASK | _TIF_USER_RETURN_NOTIFY) + + static void exit_to_usermode_loop(struct pt_regs *regs, u32 cached_flags) + { +@@ -236,9 +236,16 @@ + /* We have work to do. */ + local_irq_enable(); + +- if (cached_flags & _TIF_NEED_RESCHED) ++ if (cached_flags & _TIF_NEED_RESCHED_MASK) + schedule(); + ++#ifdef ARCH_RT_DELAYS_SIGNAL_SEND ++ if (unlikely(current->forced_info.si_signo)) { ++ struct task_struct *t = current; ++ force_sig_info(t->forced_info.si_signo, &t->forced_info, t); ++ t->forced_info.si_signo = 0; ++ } ++#endif + if (cached_flags & _TIF_UPROBE) + uprobe_notify_resume(regs); + +diff -Nur linux-4.4.13.orig/arch/x86/entry/entry_32.S linux-4.4.13/arch/x86/entry/entry_32.S +--- linux-4.4.13.orig/arch/x86/entry/entry_32.S 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/entry/entry_32.S 2016-06-22 21:55:07.982523867 +0200 +@@ -278,8 +278,24 @@ + ENTRY(resume_kernel) + DISABLE_INTERRUPTS(CLBR_ANY) + need_resched: ++ # preempt count == 0 + NEED_RS set? + cmpl $0, PER_CPU_VAR(__preempt_count) ++#ifndef CONFIG_PREEMPT_LAZY + jnz restore_all ++#else ++ jz test_int_off ++ ++ # atleast preempt count == 0 ? ++ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count) ++ jne restore_all ++ ++ cmpl $0,TI_preempt_lazy_count(%ebp) # non-zero preempt_lazy_count ? ++ jnz restore_all ++ ++ testl $_TIF_NEED_RESCHED_LAZY, TI_flags(%ebp) ++ jz restore_all ++test_int_off: ++#endif + testl $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ? + jz restore_all + call preempt_schedule_irq +diff -Nur linux-4.4.13.orig/arch/x86/entry/entry_64.S linux-4.4.13/arch/x86/entry/entry_64.S +--- linux-4.4.13.orig/arch/x86/entry/entry_64.S 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/entry/entry_64.S 2016-06-22 21:55:07.982523867 +0200 +@@ -579,7 +579,23 @@ + bt $9, EFLAGS(%rsp) /* were interrupts off? */ + jnc 1f + 0: cmpl $0, PER_CPU_VAR(__preempt_count) ++#ifndef CONFIG_PREEMPT_LAZY + jnz 1f ++#else ++ jz do_preempt_schedule_irq ++ ++ # atleast preempt count == 0 ? ++ cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count) ++ jnz 1f ++ ++ GET_THREAD_INFO(%rcx) ++ cmpl $0, TI_preempt_lazy_count(%rcx) ++ jnz 1f ++ ++ bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx) ++ jnc 1f ++do_preempt_schedule_irq: ++#endif + call preempt_schedule_irq + jmp 0b + 1: +@@ -867,6 +883,7 @@ + jmp 2b + .previous + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* Call softirq on interrupt stack. Interrupts are off. 
*/ + ENTRY(do_softirq_own_stack) + pushq %rbp +@@ -879,6 +896,7 @@ + decl PER_CPU_VAR(irq_count) + ret + END(do_softirq_own_stack) ++#endif + + #ifdef CONFIG_XEN + idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0 +diff -Nur linux-4.4.13.orig/arch/x86/include/asm/preempt.h linux-4.4.13/arch/x86/include/asm/preempt.h +--- linux-4.4.13.orig/arch/x86/include/asm/preempt.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/include/asm/preempt.h 2016-06-22 21:55:07.982523867 +0200 +@@ -79,17 +79,33 @@ + * a decrement which hits zero means we have no preempt_count and should + * reschedule. + */ +-static __always_inline bool __preempt_count_dec_and_test(void) ++static __always_inline bool ____preempt_count_dec_and_test(void) + { + GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e"); + } + ++static __always_inline bool __preempt_count_dec_and_test(void) ++{ ++ if (____preempt_count_dec_and_test()) ++ return true; ++#ifdef CONFIG_PREEMPT_LAZY ++ return test_thread_flag(TIF_NEED_RESCHED_LAZY); ++#else ++ return false; ++#endif ++} ++ + /* + * Returns true when we need to resched and can (barring IRQ state). + */ + static __always_inline bool should_resched(int preempt_offset) + { ++#ifdef CONFIG_PREEMPT_LAZY ++ return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset || ++ test_thread_flag(TIF_NEED_RESCHED_LAZY)); ++#else + return unlikely(raw_cpu_read_4(__preempt_count) == preempt_offset); ++#endif + } + + #ifdef CONFIG_PREEMPT +diff -Nur linux-4.4.13.orig/arch/x86/include/asm/signal.h linux-4.4.13/arch/x86/include/asm/signal.h +--- linux-4.4.13.orig/arch/x86/include/asm/signal.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/include/asm/signal.h 2016-06-22 21:55:07.982523867 +0200 +@@ -23,6 +23,19 @@ + unsigned long sig[_NSIG_WORDS]; + } sigset_t; + ++/* ++ * Because some traps use the IST stack, we must keep preemption ++ * disabled while calling do_trap(), but do_trap() may call ++ * force_sig_info() which will grab the signal spin_locks for the ++ * task, which in PREEMPT_RT_FULL are mutexes. By defining ++ * ARCH_RT_DELAYS_SIGNAL_SEND the force_sig_info() will set ++ * TIF_NOTIFY_RESUME and set up the signal to be sent on exit of the ++ * trap. ++ */ ++#if defined(CONFIG_PREEMPT_RT_FULL) ++#define ARCH_RT_DELAYS_SIGNAL_SEND ++#endif ++ + #ifndef CONFIG_COMPAT + typedef sigset_t compat_sigset_t; + #endif +diff -Nur linux-4.4.13.orig/arch/x86/include/asm/stackprotector.h linux-4.4.13/arch/x86/include/asm/stackprotector.h +--- linux-4.4.13.orig/arch/x86/include/asm/stackprotector.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/include/asm/stackprotector.h 2016-06-22 21:55:07.982523867 +0200 +@@ -59,7 +59,7 @@ + */ + static __always_inline void boot_init_stack_canary(void) + { +- u64 canary; ++ u64 uninitialized_var(canary); + u64 tsc; + + #ifdef CONFIG_X86_64 +@@ -70,8 +70,15 @@ + * of randomness. The TSC only matters for very early init, + * there it already has some randomness on most systems. Later + * on during the bootup the random pool has true entropy too. ++ * ++ * For preempt-rt we need to weaken the randomness a bit, as ++ * we can't call into the random generator from atomic context ++ * due to locking constraints. We just leave canary ++ * uninitialized and use the TSC based randomness on top of it. 
+ */ ++#ifndef CONFIG_PREEMPT_RT_FULL + get_random_bytes(&canary, sizeof(canary)); ++#endif + tsc = rdtsc(); + canary += tsc + (tsc << 32UL); + +diff -Nur linux-4.4.13.orig/arch/x86/include/asm/thread_info.h linux-4.4.13/arch/x86/include/asm/thread_info.h +--- linux-4.4.13.orig/arch/x86/include/asm/thread_info.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/include/asm/thread_info.h 2016-06-22 21:55:07.982523867 +0200 +@@ -58,6 +58,8 @@ + __u32 status; /* thread synchronous flags */ + __u32 cpu; /* current CPU */ + mm_segment_t addr_limit; ++ int preempt_lazy_count; /* 0 => lazy preemptable ++ <0 => BUG */ + unsigned int sig_on_uaccess_error:1; + unsigned int uaccess_err:1; /* uaccess failed */ + }; +@@ -95,6 +97,7 @@ + #define TIF_SYSCALL_EMU 6 /* syscall emulation active */ + #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */ + #define TIF_SECCOMP 8 /* secure computing */ ++#define TIF_NEED_RESCHED_LAZY 9 /* lazy rescheduling necessary */ + #define TIF_USER_RETURN_NOTIFY 11 /* notify kernel of userspace return */ + #define TIF_UPROBE 12 /* breakpointed or singlestepping */ + #define TIF_NOTSC 16 /* TSC is not accessible in userland */ +@@ -119,6 +122,7 @@ + #define _TIF_SYSCALL_EMU (1 << TIF_SYSCALL_EMU) + #define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) + #define _TIF_SECCOMP (1 << TIF_SECCOMP) ++#define _TIF_NEED_RESCHED_LAZY (1 << TIF_NEED_RESCHED_LAZY) + #define _TIF_USER_RETURN_NOTIFY (1 << TIF_USER_RETURN_NOTIFY) + #define _TIF_UPROBE (1 << TIF_UPROBE) + #define _TIF_NOTSC (1 << TIF_NOTSC) +@@ -152,6 +156,8 @@ + #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY) + #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW) + ++#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY) ++ + #define STACK_WARN (THREAD_SIZE/8) + + /* +diff -Nur linux-4.4.13.orig/arch/x86/include/asm/uv/uv_bau.h linux-4.4.13/arch/x86/include/asm/uv/uv_bau.h +--- linux-4.4.13.orig/arch/x86/include/asm/uv/uv_bau.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/include/asm/uv/uv_bau.h 2016-06-22 21:55:07.982523867 +0200 +@@ -615,9 +615,9 @@ + cycles_t send_message; + cycles_t period_end; + cycles_t period_time; +- spinlock_t uvhub_lock; +- spinlock_t queue_lock; +- spinlock_t disable_lock; ++ raw_spinlock_t uvhub_lock; ++ raw_spinlock_t queue_lock; ++ raw_spinlock_t disable_lock; + /* tunables */ + int max_concurr; + int max_concurr_const; +@@ -776,15 +776,15 @@ + * to be lowered below the current 'v'. atomic_add_unless can only stop + * on equal. 
+ */ +-static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u) ++static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u) + { +- spin_lock(lock); ++ raw_spin_lock(lock); + if (atomic_read(v) >= u) { +- spin_unlock(lock); ++ raw_spin_unlock(lock); + return 0; + } + atomic_inc(v); +- spin_unlock(lock); ++ raw_spin_unlock(lock); + return 1; + } + +diff -Nur linux-4.4.13.orig/arch/x86/include/asm/uv/uv_hub.h linux-4.4.13/arch/x86/include/asm/uv/uv_hub.h +--- linux-4.4.13.orig/arch/x86/include/asm/uv/uv_hub.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/include/asm/uv/uv_hub.h 2016-06-22 21:55:07.982523867 +0200 +@@ -492,7 +492,7 @@ + unsigned short nr_online_cpus; + unsigned short pnode; + short memory_nid; +- spinlock_t nmi_lock; /* obsolete, see uv_hub_nmi */ ++ raw_spinlock_t nmi_lock; /* obsolete, see uv_hub_nmi */ + unsigned long nmi_count; /* obsolete, see uv_hub_nmi */ + }; + extern struct uv_blade_info *uv_blade_info; +diff -Nur linux-4.4.13.orig/arch/x86/Kconfig linux-4.4.13/arch/x86/Kconfig +--- linux-4.4.13.orig/arch/x86/Kconfig 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/Kconfig 2016-06-22 21:55:07.986523867 +0200 +@@ -17,6 +17,7 @@ + ### Arch settings + config X86 + def_bool y ++ select HAVE_PREEMPT_LAZY + select ACPI_LEGACY_TABLES_LOOKUP if ACPI + select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI + select ANON_INODES +@@ -212,8 +213,11 @@ + def_bool y + depends on ISA_DMA_API + ++config RWSEM_GENERIC_SPINLOCK ++ def_bool PREEMPT_RT_FULL ++ + config RWSEM_XCHGADD_ALGORITHM +- def_bool y ++ def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL + + config GENERIC_CALIBRATE_DELAY + def_bool y +@@ -848,7 +852,7 @@ + config MAXSMP + bool "Enable Maximum number of SMP Processors and NUMA Nodes" + depends on X86_64 && SMP && DEBUG_KERNEL +- select CPUMASK_OFFSTACK ++ select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL + ---help--- + Enable maximum number of CPUS and NUMA Nodes for this architecture. + If unsure, say N. 
+diff -Nur linux-4.4.13.orig/arch/x86/kernel/apic/io_apic.c linux-4.4.13/arch/x86/kernel/apic/io_apic.c +--- linux-4.4.13.orig/arch/x86/kernel/apic/io_apic.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/kernel/apic/io_apic.c 2016-06-22 21:55:07.986523867 +0200 +@@ -1711,7 +1711,8 @@ + static inline bool ioapic_irqd_mask(struct irq_data *data) + { + /* If we are moving the irq we need to mask it */ +- if (unlikely(irqd_is_setaffinity_pending(data))) { ++ if (unlikely(irqd_is_setaffinity_pending(data) && ++ !irqd_irq_inprogress(data))) { + mask_ioapic_irq(data); + return true; + } +diff -Nur linux-4.4.13.orig/arch/x86/kernel/apic/x2apic_uv_x.c linux-4.4.13/arch/x86/kernel/apic/x2apic_uv_x.c +--- linux-4.4.13.orig/arch/x86/kernel/apic/x2apic_uv_x.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/kernel/apic/x2apic_uv_x.c 2016-06-22 21:55:07.986523867 +0200 +@@ -947,7 +947,7 @@ + uv_blade_info[blade].pnode = pnode; + uv_blade_info[blade].nr_possible_cpus = 0; + uv_blade_info[blade].nr_online_cpus = 0; +- spin_lock_init(&uv_blade_info[blade].nmi_lock); ++ raw_spin_lock_init(&uv_blade_info[blade].nmi_lock); + min_pnode = min(pnode, min_pnode); + max_pnode = max(pnode, max_pnode); + blade++; +diff -Nur linux-4.4.13.orig/arch/x86/kernel/asm-offsets.c linux-4.4.13/arch/x86/kernel/asm-offsets.c +--- linux-4.4.13.orig/arch/x86/kernel/asm-offsets.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/kernel/asm-offsets.c 2016-06-22 21:55:07.986523867 +0200 +@@ -32,6 +32,7 @@ + OFFSET(TI_flags, thread_info, flags); + OFFSET(TI_status, thread_info, status); + OFFSET(TI_addr_limit, thread_info, addr_limit); ++ OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count); + + BLANK(); + OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx); +@@ -89,4 +90,5 @@ + + BLANK(); + DEFINE(PTREGS_SIZE, sizeof(struct pt_regs)); ++ DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED); + } +diff -Nur linux-4.4.13.orig/arch/x86/kernel/cpu/mcheck/mce.c linux-4.4.13/arch/x86/kernel/cpu/mcheck/mce.c +--- linux-4.4.13.orig/arch/x86/kernel/cpu/mcheck/mce.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/kernel/cpu/mcheck/mce.c 2016-06-22 21:55:07.986523867 +0200 +@@ -41,6 +41,8 @@ + #include + #include + #include ++#include ++#include + + #include + #include +@@ -1236,7 +1238,7 @@ + static unsigned long check_interval = INITIAL_CHECK_INTERVAL; + + static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */ +-static DEFINE_PER_CPU(struct timer_list, mce_timer); ++static DEFINE_PER_CPU(struct hrtimer, mce_timer); + + static unsigned long mce_adjust_timer_default(unsigned long interval) + { +@@ -1245,32 +1247,18 @@ + + static unsigned long (*mce_adjust_timer)(unsigned long interval) = mce_adjust_timer_default; + +-static void __restart_timer(struct timer_list *t, unsigned long interval) ++static enum hrtimer_restart __restart_timer(struct hrtimer *timer, unsigned long interval) + { +- unsigned long when = jiffies + interval; +- unsigned long flags; +- +- local_irq_save(flags); +- +- if (timer_pending(t)) { +- if (time_before(when, t->expires)) +- mod_timer_pinned(t, when); +- } else { +- t->expires = round_jiffies(when); +- add_timer_on(t, smp_processor_id()); +- } +- +- local_irq_restore(flags); ++ if (!interval) ++ return HRTIMER_NORESTART; ++ hrtimer_forward_now(timer, ns_to_ktime(jiffies_to_nsecs(interval))); ++ return HRTIMER_RESTART; + } + +-static void mce_timer_fn(unsigned long data) ++static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer) + { +- 
struct timer_list *t = this_cpu_ptr(&mce_timer); +- int cpu = smp_processor_id(); + unsigned long iv; + +- WARN_ON(cpu != data); +- + iv = __this_cpu_read(mce_next_interval); + + if (mce_available(this_cpu_ptr(&cpu_info))) { +@@ -1293,7 +1281,7 @@ + + done: + __this_cpu_write(mce_next_interval, iv); +- __restart_timer(t, iv); ++ return __restart_timer(timer, iv); + } + + /* +@@ -1301,7 +1289,7 @@ + */ + void mce_timer_kick(unsigned long interval) + { +- struct timer_list *t = this_cpu_ptr(&mce_timer); ++ struct hrtimer *t = this_cpu_ptr(&mce_timer); + unsigned long iv = __this_cpu_read(mce_next_interval); + + __restart_timer(t, interval); +@@ -1316,7 +1304,7 @@ + int cpu; + + for_each_online_cpu(cpu) +- del_timer_sync(&per_cpu(mce_timer, cpu)); ++ hrtimer_cancel(&per_cpu(mce_timer, cpu)); + } + + static void mce_do_trigger(struct work_struct *work) +@@ -1326,6 +1314,56 @@ + + static DECLARE_WORK(mce_trigger_work, mce_do_trigger); + ++static void __mce_notify_work(struct swork_event *event) ++{ ++ /* Not more than two messages every minute */ ++ static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); ++ ++ /* wake processes polling /dev/mcelog */ ++ wake_up_interruptible(&mce_chrdev_wait); ++ ++ /* ++ * There is no risk of missing notifications because ++ * work_pending is always cleared before the function is ++ * executed. ++ */ ++ if (mce_helper[0] && !work_pending(&mce_trigger_work)) ++ schedule_work(&mce_trigger_work); ++ ++ if (__ratelimit(&ratelimit)) ++ pr_info(HW_ERR "Machine check events logged\n"); ++} ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++static bool notify_work_ready __read_mostly; ++static struct swork_event notify_work; ++ ++static int mce_notify_work_init(void) ++{ ++ int err; ++ ++ err = swork_get(); ++ if (err) ++ return err; ++ ++ INIT_SWORK(¬ify_work, __mce_notify_work); ++ notify_work_ready = true; ++ return 0; ++} ++ ++static void mce_notify_work(void) ++{ ++ if (notify_work_ready) ++ swork_queue(¬ify_work); ++} ++#else ++static void mce_notify_work(void) ++{ ++ __mce_notify_work(NULL); ++} ++static inline int mce_notify_work_init(void) { return 0; } ++#endif ++ + /* + * Notify the user(s) about new machine check events. 
+ * Can be called from interrupt context, but not from machine check/NMI +@@ -1333,19 +1371,8 @@ + */ + int mce_notify_irq(void) + { +- /* Not more than two messages every minute */ +- static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2); +- + if (test_and_clear_bit(0, &mce_need_notify)) { +- /* wake processes polling /dev/mcelog */ +- wake_up_interruptible(&mce_chrdev_wait); +- +- if (mce_helper[0]) +- schedule_work(&mce_trigger_work); +- +- if (__ratelimit(&ratelimit)) +- pr_info(HW_ERR "Machine check events logged\n"); +- ++ mce_notify_work(); + return 1; + } + return 0; +@@ -1639,7 +1666,7 @@ + } + } + +-static void mce_start_timer(unsigned int cpu, struct timer_list *t) ++static void mce_start_timer(unsigned int cpu, struct hrtimer *t) + { + unsigned long iv = check_interval * HZ; + +@@ -1648,16 +1675,17 @@ + + per_cpu(mce_next_interval, cpu) = iv; + +- t->expires = round_jiffies(jiffies + iv); +- add_timer_on(t, cpu); ++ hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL), ++ 0, HRTIMER_MODE_REL_PINNED); + } + + static void __mcheck_cpu_init_timer(void) + { +- struct timer_list *t = this_cpu_ptr(&mce_timer); ++ struct hrtimer *t = this_cpu_ptr(&mce_timer); + unsigned int cpu = smp_processor_id(); + +- setup_timer(t, mce_timer_fn, cpu); ++ hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ t->function = mce_timer_fn; + mce_start_timer(cpu, t); + } + +@@ -2376,6 +2404,8 @@ + if (!mce_available(raw_cpu_ptr(&cpu_info))) + return; + ++ hrtimer_cancel(this_cpu_ptr(&mce_timer)); ++ + if (!(action & CPU_TASKS_FROZEN)) + cmci_clear(); + +@@ -2398,6 +2428,7 @@ + if (b->init) + wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl); + } ++ __mcheck_cpu_init_timer(); + } + + /* Get notified when a cpu comes on/off. Be hotplug friendly. */ +@@ -2405,7 +2436,6 @@ + mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) + { + unsigned int cpu = (unsigned long)hcpu; +- struct timer_list *t = &per_cpu(mce_timer, cpu); + + switch (action & ~CPU_TASKS_FROZEN) { + case CPU_ONLINE: +@@ -2425,11 +2455,9 @@ + break; + case CPU_DOWN_PREPARE: + smp_call_function_single(cpu, mce_disable_cpu, &action, 1); +- del_timer_sync(t); + break; + case CPU_DOWN_FAILED: + smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); +- mce_start_timer(cpu, t); + break; + } + +@@ -2468,6 +2496,10 @@ + goto err_out; + } + ++ err = mce_notify_work_init(); ++ if (err) ++ goto err_out; ++ + if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) { + err = -ENOMEM; + goto err_out; +diff -Nur linux-4.4.13.orig/arch/x86/kernel/cpu/perf_event_intel_rapl.c linux-4.4.13/arch/x86/kernel/cpu/perf_event_intel_rapl.c +--- linux-4.4.13.orig/arch/x86/kernel/cpu/perf_event_intel_rapl.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/kernel/cpu/perf_event_intel_rapl.c 2016-06-22 21:55:07.986523867 +0200 +@@ -117,7 +117,7 @@ + }; + + struct rapl_pmu { +- spinlock_t lock; ++ raw_spinlock_t lock; + int n_active; /* number of active events */ + struct list_head active_list; + struct pmu *pmu; /* pointer to rapl_pmu_class */ +@@ -220,13 +220,13 @@ + if (!pmu->n_active) + return HRTIMER_NORESTART; + +- spin_lock_irqsave(&pmu->lock, flags); ++ raw_spin_lock_irqsave(&pmu->lock, flags); + + list_for_each_entry(event, &pmu->active_list, active_entry) { + rapl_event_update(event); + } + +- spin_unlock_irqrestore(&pmu->lock, flags); ++ raw_spin_unlock_irqrestore(&pmu->lock, flags); + + hrtimer_forward_now(hrtimer, pmu->timer_interval); + +@@ -263,9 +263,9 @@ + struct rapl_pmu *pmu = 
__this_cpu_read(rapl_pmu); + unsigned long flags; + +- spin_lock_irqsave(&pmu->lock, flags); ++ raw_spin_lock_irqsave(&pmu->lock, flags); + __rapl_pmu_event_start(pmu, event); +- spin_unlock_irqrestore(&pmu->lock, flags); ++ raw_spin_unlock_irqrestore(&pmu->lock, flags); + } + + static void rapl_pmu_event_stop(struct perf_event *event, int mode) +@@ -274,7 +274,7 @@ + struct hw_perf_event *hwc = &event->hw; + unsigned long flags; + +- spin_lock_irqsave(&pmu->lock, flags); ++ raw_spin_lock_irqsave(&pmu->lock, flags); + + /* mark event as deactivated and stopped */ + if (!(hwc->state & PERF_HES_STOPPED)) { +@@ -299,7 +299,7 @@ + hwc->state |= PERF_HES_UPTODATE; + } + +- spin_unlock_irqrestore(&pmu->lock, flags); ++ raw_spin_unlock_irqrestore(&pmu->lock, flags); + } + + static int rapl_pmu_event_add(struct perf_event *event, int mode) +@@ -308,14 +308,14 @@ + struct hw_perf_event *hwc = &event->hw; + unsigned long flags; + +- spin_lock_irqsave(&pmu->lock, flags); ++ raw_spin_lock_irqsave(&pmu->lock, flags); + + hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + if (mode & PERF_EF_START) + __rapl_pmu_event_start(pmu, event); + +- spin_unlock_irqrestore(&pmu->lock, flags); ++ raw_spin_unlock_irqrestore(&pmu->lock, flags); + + return 0; + } +@@ -603,7 +603,7 @@ + pmu = kzalloc_node(sizeof(*pmu), GFP_KERNEL, cpu_to_node(cpu)); + if (!pmu) + return -1; +- spin_lock_init(&pmu->lock); ++ raw_spin_lock_init(&pmu->lock); + + INIT_LIST_HEAD(&pmu->active_list); + +diff -Nur linux-4.4.13.orig/arch/x86/kernel/dumpstack_32.c linux-4.4.13/arch/x86/kernel/dumpstack_32.c +--- linux-4.4.13.orig/arch/x86/kernel/dumpstack_32.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/kernel/dumpstack_32.c 2016-06-22 21:55:07.986523867 +0200 +@@ -42,7 +42,7 @@ + unsigned long *stack, unsigned long bp, + const struct stacktrace_ops *ops, void *data) + { +- const unsigned cpu = get_cpu(); ++ const unsigned cpu = get_cpu_light(); + int graph = 0; + u32 *prev_esp; + +@@ -86,7 +86,7 @@ + break; + touch_nmi_watchdog(); + } +- put_cpu(); ++ put_cpu_light(); + } + EXPORT_SYMBOL(dump_trace); + +diff -Nur linux-4.4.13.orig/arch/x86/kernel/dumpstack_64.c linux-4.4.13/arch/x86/kernel/dumpstack_64.c +--- linux-4.4.13.orig/arch/x86/kernel/dumpstack_64.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/kernel/dumpstack_64.c 2016-06-22 21:55:07.986523867 +0200 +@@ -152,7 +152,7 @@ + unsigned long *stack, unsigned long bp, + const struct stacktrace_ops *ops, void *data) + { +- const unsigned cpu = get_cpu(); ++ const unsigned cpu = get_cpu_light(); + struct thread_info *tinfo; + unsigned long *irq_stack = (unsigned long *)per_cpu(irq_stack_ptr, cpu); + unsigned long dummy; +@@ -241,7 +241,7 @@ + * This handles the process stack: + */ + bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph); +- put_cpu(); ++ put_cpu_light(); + } + EXPORT_SYMBOL(dump_trace); + +@@ -255,7 +255,7 @@ + int cpu; + int i; + +- preempt_disable(); ++ migrate_disable(); + cpu = smp_processor_id(); + + irq_stack_end = (unsigned long *)(per_cpu(irq_stack_ptr, cpu)); +@@ -291,7 +291,7 @@ + pr_cont(" %016lx", *stack++); + touch_nmi_watchdog(); + } +- preempt_enable(); ++ migrate_enable(); + + pr_cont("\n"); + show_trace_log_lvl(task, regs, sp, bp, log_lvl); +diff -Nur linux-4.4.13.orig/arch/x86/kernel/irq_32.c linux-4.4.13/arch/x86/kernel/irq_32.c +--- linux-4.4.13.orig/arch/x86/kernel/irq_32.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/kernel/irq_32.c 2016-06-22 21:55:07.986523867 +0200 +@@ -128,6 +128,7 @@ 
+ cpu, per_cpu(hardirq_stack, cpu), per_cpu(softirq_stack, cpu)); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + void do_softirq_own_stack(void) + { + struct thread_info *curstk; +@@ -146,6 +147,7 @@ + + call_on_stack(__do_softirq, isp); + } ++#endif + + bool handle_irq(struct irq_desc *desc, struct pt_regs *regs) + { +diff -Nur linux-4.4.13.orig/arch/x86/kernel/kvm.c linux-4.4.13/arch/x86/kernel/kvm.c +--- linux-4.4.13.orig/arch/x86/kernel/kvm.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/kernel/kvm.c 2016-06-22 21:55:07.986523867 +0200 +@@ -36,6 +36,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -91,14 +92,14 @@ + + struct kvm_task_sleep_node { + struct hlist_node link; +- wait_queue_head_t wq; ++ struct swait_queue_head wq; + u32 token; + int cpu; + bool halted; + }; + + static struct kvm_task_sleep_head { +- spinlock_t lock; ++ raw_spinlock_t lock; + struct hlist_head list; + } async_pf_sleepers[KVM_TASK_SLEEP_HASHSIZE]; + +@@ -122,17 +123,17 @@ + u32 key = hash_32(token, KVM_TASK_SLEEP_HASHBITS); + struct kvm_task_sleep_head *b = &async_pf_sleepers[key]; + struct kvm_task_sleep_node n, *e; +- DEFINE_WAIT(wait); ++ DECLARE_SWAITQUEUE(wait); + + rcu_irq_enter(); + +- spin_lock(&b->lock); ++ raw_spin_lock(&b->lock); + e = _find_apf_task(b, token); + if (e) { + /* dummy entry exist -> wake up was delivered ahead of PF */ + hlist_del(&e->link); + kfree(e); +- spin_unlock(&b->lock); ++ raw_spin_unlock(&b->lock); + + rcu_irq_exit(); + return; +@@ -141,13 +142,13 @@ + n.token = token; + n.cpu = smp_processor_id(); + n.halted = is_idle_task(current) || preempt_count() > 1; +- init_waitqueue_head(&n.wq); ++ init_swait_queue_head(&n.wq); + hlist_add_head(&n.link, &b->list); +- spin_unlock(&b->lock); ++ raw_spin_unlock(&b->lock); + + for (;;) { + if (!n.halted) +- prepare_to_wait(&n.wq, &wait, TASK_UNINTERRUPTIBLE); ++ prepare_to_swait(&n.wq, &wait, TASK_UNINTERRUPTIBLE); + if (hlist_unhashed(&n.link)) + break; + +@@ -166,7 +167,7 @@ + } + } + if (!n.halted) +- finish_wait(&n.wq, &wait); ++ finish_swait(&n.wq, &wait); + + rcu_irq_exit(); + return; +@@ -178,8 +179,8 @@ + hlist_del_init(&n->link); + if (n->halted) + smp_send_reschedule(n->cpu); +- else if (waitqueue_active(&n->wq)) +- wake_up(&n->wq); ++ else if (swait_active(&n->wq)) ++ swake_up(&n->wq); + } + + static void apf_task_wake_all(void) +@@ -189,14 +190,14 @@ + for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) { + struct hlist_node *p, *next; + struct kvm_task_sleep_head *b = &async_pf_sleepers[i]; +- spin_lock(&b->lock); ++ raw_spin_lock(&b->lock); + hlist_for_each_safe(p, next, &b->list) { + struct kvm_task_sleep_node *n = + hlist_entry(p, typeof(*n), link); + if (n->cpu == smp_processor_id()) + apf_task_wake_one(n); + } +- spin_unlock(&b->lock); ++ raw_spin_unlock(&b->lock); + } + } + +@@ -212,7 +213,7 @@ + } + + again: +- spin_lock(&b->lock); ++ raw_spin_lock(&b->lock); + n = _find_apf_task(b, token); + if (!n) { + /* +@@ -225,17 +226,17 @@ + * Allocation failed! Busy wait while other cpu + * handles async PF. 
+ */ +- spin_unlock(&b->lock); ++ raw_spin_unlock(&b->lock); + cpu_relax(); + goto again; + } + n->token = token; + n->cpu = smp_processor_id(); +- init_waitqueue_head(&n->wq); ++ init_swait_queue_head(&n->wq); + hlist_add_head(&n->link, &b->list); + } else + apf_task_wake_one(n); +- spin_unlock(&b->lock); ++ raw_spin_unlock(&b->lock); + return; + } + EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake); +@@ -486,7 +487,7 @@ + paravirt_ops_setup(); + register_reboot_notifier(&kvm_pv_reboot_nb); + for (i = 0; i < KVM_TASK_SLEEP_HASHSIZE; i++) +- spin_lock_init(&async_pf_sleepers[i].lock); ++ raw_spin_lock_init(&async_pf_sleepers[i].lock); + if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF)) + x86_init.irqs.trap_init = kvm_apf_trap_init; + +diff -Nur linux-4.4.13.orig/arch/x86/kernel/nmi.c linux-4.4.13/arch/x86/kernel/nmi.c +--- linux-4.4.13.orig/arch/x86/kernel/nmi.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/kernel/nmi.c 2016-06-22 21:55:07.986523867 +0200 +@@ -231,7 +231,7 @@ + #endif + + if (panic_on_unrecovered_nmi) +- panic("NMI: Not continuing"); ++ nmi_panic(regs, "NMI: Not continuing"); + + pr_emerg("Dazed and confused, but trying to continue\n"); + +@@ -255,8 +255,16 @@ + reason, smp_processor_id()); + show_regs(regs); + +- if (panic_on_io_nmi) +- panic("NMI IOCK error: Not continuing"); ++ if (panic_on_io_nmi) { ++ nmi_panic(regs, "NMI IOCK error: Not continuing"); ++ ++ /* ++ * If we end up here, it means we have received an NMI while ++ * processing panic(). Simply return without delaying and ++ * re-enabling NMIs. ++ */ ++ return; ++ } + + /* Re-enable the IOCK line, wait for a few seconds */ + reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK; +@@ -297,7 +305,7 @@ + + pr_emerg("Do you have a strange power saving mode enabled?\n"); + if (unknown_nmi_panic || panic_on_unrecovered_nmi) +- panic("NMI: Not continuing"); ++ nmi_panic(regs, "NMI: Not continuing"); + + pr_emerg("Dazed and confused, but trying to continue\n"); + } +diff -Nur linux-4.4.13.orig/arch/x86/kernel/process_32.c linux-4.4.13/arch/x86/kernel/process_32.c +--- linux-4.4.13.orig/arch/x86/kernel/process_32.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/kernel/process_32.c 2016-06-22 21:55:07.986523867 +0200 +@@ -35,6 +35,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -210,6 +211,35 @@ + } + EXPORT_SYMBOL_GPL(start_thread); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) ++{ ++ int i; ++ ++ /* ++ * Clear @prev's kmap_atomic mappings ++ */ ++ for (i = 0; i < prev_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); ++ pte_t *ptep = kmap_pte - idx; ++ ++ kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx)); ++ } ++ /* ++ * Restore @next_p's kmap_atomic mappings ++ */ ++ for (i = 0; i < next_p->kmap_idx; i++) { ++ int idx = i + KM_TYPE_NR * smp_processor_id(); ++ ++ if (!pte_none(next_p->kmap_pte[i])) ++ set_pte(kmap_pte - idx, next_p->kmap_pte[i]); ++ } ++} ++#else ++static inline void ++switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { } ++#endif ++ + + /* + * switch_to(x,y) should switch tasks from x to y. +@@ -286,6 +316,8 @@ + task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) + __switch_to_xtra(prev_p, next_p, tss); + ++ switch_kmaps(prev_p, next_p); ++ + /* + * Leave lazy mode, flushing any hypercalls made here. 
+ * This must be done before restoring TLS segments so +diff -Nur linux-4.4.13.orig/arch/x86/kernel/reboot.c linux-4.4.13/arch/x86/kernel/reboot.c +--- linux-4.4.13.orig/arch/x86/kernel/reboot.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/kernel/reboot.c 2016-06-22 21:55:07.986523867 +0200 +@@ -726,6 +726,7 @@ + static nmi_shootdown_cb shootdown_callback; + + static atomic_t waiting_for_crash_ipi; ++static int crash_ipi_issued; + + static int crash_nmi_callback(unsigned int val, struct pt_regs *regs) + { +@@ -788,6 +789,9 @@ + + smp_send_nmi_allbutself(); + ++ /* Kick CPUs looping in NMI context. */ ++ WRITE_ONCE(crash_ipi_issued, 1); ++ + msecs = 1000; /* Wait at most a second for the other cpus to stop */ + while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) { + mdelay(1); +@@ -796,6 +800,22 @@ + + /* Leave the nmi callback set */ + } ++ ++/* Override the weak function in kernel/panic.c */ ++void nmi_panic_self_stop(struct pt_regs *regs) ++{ ++ while (1) { ++ /* ++ * Wait for the crash dumping IPI to be issued, and then ++ * call its callback directly. ++ */ ++ if (READ_ONCE(crash_ipi_issued)) ++ crash_nmi_callback(0, regs); /* Don't return */ ++ ++ cpu_relax(); ++ } ++} ++ + #else /* !CONFIG_SMP */ + void nmi_shootdown_cpus(nmi_shootdown_cb callback) + { +diff -Nur linux-4.4.13.orig/arch/x86/kvm/lapic.c linux-4.4.13/arch/x86/kvm/lapic.c +--- linux-4.4.13.orig/arch/x86/kvm/lapic.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/kvm/lapic.c 2016-06-22 21:55:07.986523867 +0200 +@@ -1195,7 +1195,7 @@ + static void apic_timer_expired(struct kvm_lapic *apic) + { + struct kvm_vcpu *vcpu = apic->vcpu; +- wait_queue_head_t *q = &vcpu->wq; ++ struct swait_queue_head *q = &vcpu->wq; + struct kvm_timer *ktimer = &apic->lapic_timer; + + if (atomic_read(&apic->lapic_timer.pending)) +@@ -1204,8 +1204,8 @@ + atomic_inc(&apic->lapic_timer.pending); + kvm_set_pending_timer(vcpu); + +- if (waitqueue_active(q)) +- wake_up_interruptible(q); ++ if (swait_active(q)) ++ swake_up(q); + + if (apic_lvtt_tscdeadline(apic)) + ktimer->expired_tscdeadline = ktimer->tscdeadline; +@@ -1801,6 +1801,7 @@ + hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC, + HRTIMER_MODE_ABS); + apic->lapic_timer.timer.function = apic_timer_fn; ++ apic->lapic_timer.timer.irqsafe = 1; + + /* + * APIC is created enabled. 
This will prevent kvm_lapic_set_base from +diff -Nur linux-4.4.13.orig/arch/x86/kvm/x86.c linux-4.4.13/arch/x86/kvm/x86.c +--- linux-4.4.13.orig/arch/x86/kvm/x86.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/kvm/x86.c 2016-06-22 21:55:07.990523867 +0200 +@@ -5788,6 +5788,13 @@ + goto out; + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) { ++ printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n"); ++ return -EOPNOTSUPP; ++ } ++#endif ++ + r = kvm_mmu_module_init(); + if (r) + goto out_free_percpu; +diff -Nur linux-4.4.13.orig/arch/x86/mm/highmem_32.c linux-4.4.13/arch/x86/mm/highmem_32.c +--- linux-4.4.13.orig/arch/x86/mm/highmem_32.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/mm/highmem_32.c 2016-06-22 21:55:07.990523867 +0200 +@@ -32,10 +32,11 @@ + */ + void *kmap_atomic_prot(struct page *page, pgprot_t prot) + { ++ pte_t pte = mk_pte(page, prot); + unsigned long vaddr; + int idx, type; + +- preempt_disable(); ++ preempt_disable_nort(); + pagefault_disable(); + + if (!PageHighMem(page)) +@@ -45,7 +46,10 @@ + idx = type + KM_TYPE_NR*smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + BUG_ON(!pte_none(*(kmap_pte-idx))); +- set_pte(kmap_pte-idx, mk_pte(page, prot)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_pte(kmap_pte-idx, pte); + arch_flush_lazy_mmu_mode(); + + return (void *)vaddr; +@@ -88,6 +92,9 @@ + * is a bad idea also, in case the page changes cacheability + * attributes or becomes a protected page in a hypervisor. + */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = __pte(0); ++#endif + kpte_clear_flush(kmap_pte-idx, vaddr); + kmap_atomic_idx_pop(); + arch_flush_lazy_mmu_mode(); +@@ -100,7 +107,7 @@ + #endif + + pagefault_enable(); +- preempt_enable(); ++ preempt_enable_nort(); + } + EXPORT_SYMBOL(__kunmap_atomic); + +diff -Nur linux-4.4.13.orig/arch/x86/mm/iomap_32.c linux-4.4.13/arch/x86/mm/iomap_32.c +--- linux-4.4.13.orig/arch/x86/mm/iomap_32.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/mm/iomap_32.c 2016-06-22 21:55:07.990523867 +0200 +@@ -56,6 +56,7 @@ + + void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot) + { ++ pte_t pte = pfn_pte(pfn, prot); + unsigned long vaddr; + int idx, type; + +@@ -65,7 +66,12 @@ + type = kmap_atomic_idx_push(); + idx = type + KM_TYPE_NR * smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); +- set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); ++ WARN_ON(!pte_none(*(kmap_pte - idx))); ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = pte; ++#endif ++ set_pte(kmap_pte - idx, pte); + arch_flush_lazy_mmu_mode(); + + return (void *)vaddr; +@@ -113,6 +119,9 @@ + * is a bad idea also, in case the page changes cacheability + * attributes or becomes a protected page in a hypervisor. 
+ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ current->kmap_pte[type] = __pte(0); ++#endif + kpte_clear_flush(kmap_pte-idx, vaddr); + kmap_atomic_idx_pop(); + } +diff -Nur linux-4.4.13.orig/arch/x86/platform/uv/tlb_uv.c linux-4.4.13/arch/x86/platform/uv/tlb_uv.c +--- linux-4.4.13.orig/arch/x86/platform/uv/tlb_uv.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/platform/uv/tlb_uv.c 2016-06-22 21:55:07.990523867 +0200 +@@ -714,9 +714,9 @@ + + quiesce_local_uvhub(hmaster); + +- spin_lock(&hmaster->queue_lock); ++ raw_spin_lock(&hmaster->queue_lock); + reset_with_ipi(&bau_desc->distribution, bcp); +- spin_unlock(&hmaster->queue_lock); ++ raw_spin_unlock(&hmaster->queue_lock); + + end_uvhub_quiesce(hmaster); + +@@ -736,9 +736,9 @@ + + quiesce_local_uvhub(hmaster); + +- spin_lock(&hmaster->queue_lock); ++ raw_spin_lock(&hmaster->queue_lock); + reset_with_ipi(&bau_desc->distribution, bcp); +- spin_unlock(&hmaster->queue_lock); ++ raw_spin_unlock(&hmaster->queue_lock); + + end_uvhub_quiesce(hmaster); + +@@ -759,7 +759,7 @@ + cycles_t tm1; + + hmaster = bcp->uvhub_master; +- spin_lock(&hmaster->disable_lock); ++ raw_spin_lock(&hmaster->disable_lock); + if (!bcp->baudisabled) { + stat->s_bau_disabled++; + tm1 = get_cycles(); +@@ -772,7 +772,7 @@ + } + } + } +- spin_unlock(&hmaster->disable_lock); ++ raw_spin_unlock(&hmaster->disable_lock); + } + + static void count_max_concurr(int stat, struct bau_control *bcp, +@@ -835,7 +835,7 @@ + */ + static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat) + { +- spinlock_t *lock = &hmaster->uvhub_lock; ++ raw_spinlock_t *lock = &hmaster->uvhub_lock; + atomic_t *v; + + v = &hmaster->active_descriptor_count; +@@ -968,7 +968,7 @@ + struct bau_control *hmaster; + + hmaster = bcp->uvhub_master; +- spin_lock(&hmaster->disable_lock); ++ raw_spin_lock(&hmaster->disable_lock); + if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) { + stat->s_bau_reenabled++; + for_each_present_cpu(tcpu) { +@@ -980,10 +980,10 @@ + tbcp->period_giveups = 0; + } + } +- spin_unlock(&hmaster->disable_lock); ++ raw_spin_unlock(&hmaster->disable_lock); + return 0; + } +- spin_unlock(&hmaster->disable_lock); ++ raw_spin_unlock(&hmaster->disable_lock); + return -1; + } + +@@ -1901,9 +1901,9 @@ + bcp->cong_reps = congested_reps; + bcp->disabled_period = sec_2_cycles(disabled_period); + bcp->giveup_limit = giveup_limit; +- spin_lock_init(&bcp->queue_lock); +- spin_lock_init(&bcp->uvhub_lock); +- spin_lock_init(&bcp->disable_lock); ++ raw_spin_lock_init(&bcp->queue_lock); ++ raw_spin_lock_init(&bcp->uvhub_lock); ++ raw_spin_lock_init(&bcp->disable_lock); + } + } + +diff -Nur linux-4.4.13.orig/arch/x86/platform/uv/uv_time.c linux-4.4.13/arch/x86/platform/uv/uv_time.c +--- linux-4.4.13.orig/arch/x86/platform/uv/uv_time.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/arch/x86/platform/uv/uv_time.c 2016-06-22 21:55:07.990523867 +0200 +@@ -57,7 +57,7 @@ + + /* There is one of these allocated per node */ + struct uv_rtc_timer_head { +- spinlock_t lock; ++ raw_spinlock_t lock; + /* next cpu waiting for timer, local node relative: */ + int next_cpu; + /* number of cpus on this node: */ +@@ -177,7 +177,7 @@ + uv_rtc_deallocate_timers(); + return -ENOMEM; + } +- spin_lock_init(&head->lock); ++ raw_spin_lock_init(&head->lock); + head->ncpus = uv_blade_nr_possible_cpus(bid); + head->next_cpu = -1; + blade_info[bid] = head; +@@ -231,7 +231,7 @@ + unsigned long flags; + int next_cpu; + +- spin_lock_irqsave(&head->lock, flags); ++ 
raw_spin_lock_irqsave(&head->lock, flags); + + next_cpu = head->next_cpu; + *t = expires; +@@ -243,12 +243,12 @@ + if (uv_setup_intr(cpu, expires)) { + *t = ULLONG_MAX; + uv_rtc_find_next_timer(head, pnode); +- spin_unlock_irqrestore(&head->lock, flags); ++ raw_spin_unlock_irqrestore(&head->lock, flags); + return -ETIME; + } + } + +- spin_unlock_irqrestore(&head->lock, flags); ++ raw_spin_unlock_irqrestore(&head->lock, flags); + return 0; + } + +@@ -267,7 +267,7 @@ + unsigned long flags; + int rc = 0; + +- spin_lock_irqsave(&head->lock, flags); ++ raw_spin_lock_irqsave(&head->lock, flags); + + if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force) + rc = 1; +@@ -279,7 +279,7 @@ + uv_rtc_find_next_timer(head, pnode); + } + +- spin_unlock_irqrestore(&head->lock, flags); ++ raw_spin_unlock_irqrestore(&head->lock, flags); + + return rc; + } +@@ -299,13 +299,18 @@ + static cycle_t uv_read_rtc(struct clocksource *cs) + { + unsigned long offset; ++ cycle_t cycles; + ++ preempt_disable(); + if (uv_get_min_hub_revision_id() == 1) + offset = 0; + else + offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE; + +- return (cycle_t)uv_read_local_mmr(UVH_RTC | offset); ++ cycles = (cycle_t)uv_read_local_mmr(UVH_RTC | offset); ++ preempt_enable(); ++ ++ return cycles; + } + + /* +diff -Nur linux-4.4.13.orig/block/blk-core.c linux-4.4.13/block/blk-core.c +--- linux-4.4.13.orig/block/blk-core.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/block/blk-core.c 2016-06-22 21:55:07.990523867 +0200 +@@ -125,6 +125,9 @@ + + INIT_LIST_HEAD(&rq->queuelist); + INIT_LIST_HEAD(&rq->timeout_list); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work); ++#endif + rq->cpu = -1; + rq->q = q; + rq->__sector = (sector_t) -1; +@@ -233,7 +236,7 @@ + **/ + void blk_start_queue(struct request_queue *q) + { +- WARN_ON(!irqs_disabled()); ++ WARN_ON_NONRT(!irqs_disabled()); + + queue_flag_clear(QUEUE_FLAG_STOPPED, q); + __blk_run_queue(q); +@@ -657,7 +660,7 @@ + if (!gfpflags_allow_blocking(gfp)) + return -EBUSY; + +- ret = wait_event_interruptible(q->mq_freeze_wq, ++ ret = swait_event_interruptible(q->mq_freeze_wq, + !atomic_read(&q->mq_freeze_depth) || + blk_queue_dying(q)); + if (blk_queue_dying(q)) +@@ -677,7 +680,7 @@ + struct request_queue *q = + container_of(ref, struct request_queue, q_usage_counter); + +- wake_up_all(&q->mq_freeze_wq); ++ swake_up_all(&q->mq_freeze_wq); + } + + struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) +@@ -739,7 +742,7 @@ + q->bypass_depth = 1; + __set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags); + +- init_waitqueue_head(&q->mq_freeze_wq); ++ init_swait_queue_head(&q->mq_freeze_wq); + + /* + * Init percpu_ref in atomic mode so that it's faster to shutdown. +@@ -3198,7 +3201,7 @@ + blk_run_queue_async(q); + else + __blk_run_queue(q); +- spin_unlock(q->queue_lock); ++ spin_unlock_irq(q->queue_lock); + } + + static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule) +@@ -3246,7 +3249,6 @@ + void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule) + { + struct request_queue *q; +- unsigned long flags; + struct request *rq; + LIST_HEAD(list); + unsigned int depth; +@@ -3266,11 +3268,6 @@ + q = NULL; + depth = 0; + +- /* +- * Save and disable interrupts here, to avoid doing it for every +- * queue lock we have to take. 
+- */ +- local_irq_save(flags); + while (!list_empty(&list)) { + rq = list_entry_rq(list.next); + list_del_init(&rq->queuelist); +@@ -3283,7 +3280,7 @@ + queue_unplugged(q, depth, from_schedule); + q = rq->q; + depth = 0; +- spin_lock(q->queue_lock); ++ spin_lock_irq(q->queue_lock); + } + + /* +@@ -3310,8 +3307,6 @@ + */ + if (q) + queue_unplugged(q, depth, from_schedule); +- +- local_irq_restore(flags); + } + + void blk_finish_plug(struct blk_plug *plug) +diff -Nur linux-4.4.13.orig/block/blk-ioc.c linux-4.4.13/block/blk-ioc.c +--- linux-4.4.13.orig/block/blk-ioc.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/block/blk-ioc.c 2016-06-22 21:55:07.990523867 +0200 +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + + #include "blk.h" + +@@ -109,7 +110,7 @@ + spin_unlock(q->queue_lock); + } else { + spin_unlock_irqrestore(&ioc->lock, flags); +- cpu_relax(); ++ cpu_chill(); + spin_lock_irqsave_nested(&ioc->lock, flags, 1); + } + } +@@ -187,7 +188,7 @@ + spin_unlock(icq->q->queue_lock); + } else { + spin_unlock_irqrestore(&ioc->lock, flags); +- cpu_relax(); ++ cpu_chill(); + goto retry; + } + } +diff -Nur linux-4.4.13.orig/block/blk-iopoll.c linux-4.4.13/block/blk-iopoll.c +--- linux-4.4.13.orig/block/blk-iopoll.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/block/blk-iopoll.c 2016-06-22 21:55:07.990523867 +0200 +@@ -35,6 +35,7 @@ + list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll)); + __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + EXPORT_SYMBOL(blk_iopoll_sched); + +@@ -132,6 +133,7 @@ + __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); + + local_irq_enable(); ++ preempt_check_resched_rt(); + } + + /** +@@ -201,6 +203,7 @@ + this_cpu_ptr(&blk_cpu_iopoll)); + __raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ); + local_irq_enable(); ++ preempt_check_resched_rt(); + } + + return NOTIFY_OK; +diff -Nur linux-4.4.13.orig/block/blk-mq.c linux-4.4.13/block/blk-mq.c +--- linux-4.4.13.orig/block/blk-mq.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/block/blk-mq.c 2016-06-22 21:55:07.990523867 +0200 +@@ -92,7 +92,7 @@ + + static void blk_mq_freeze_queue_wait(struct request_queue *q) + { +- wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); ++ swait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->q_usage_counter)); + } + + /* +@@ -130,7 +130,7 @@ + WARN_ON_ONCE(freeze_depth < 0); + if (!freeze_depth) { + percpu_ref_reinit(&q->q_usage_counter); +- wake_up_all(&q->mq_freeze_wq); ++ swake_up_all(&q->mq_freeze_wq); + } + } + EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); +@@ -149,7 +149,7 @@ + * dying, we need to ensure that processes currently waiting on + * the queue are notified as well. 
+ */ +- wake_up_all(&q->mq_freeze_wq); ++ swake_up_all(&q->mq_freeze_wq); + } + + bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) +@@ -196,6 +196,9 @@ + rq->resid_len = 0; + rq->sense = NULL; + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work); ++#endif + INIT_LIST_HEAD(&rq->timeout_list); + rq->timeout = 0; + +@@ -325,6 +328,17 @@ + } + EXPORT_SYMBOL(blk_mq_end_request); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ ++void __blk_mq_complete_request_remote_work(struct work_struct *work) ++{ ++ struct request *rq = container_of(work, struct request, work); ++ ++ rq->q->softirq_done_fn(rq); ++} ++ ++#else ++ + static void __blk_mq_complete_request_remote(void *data) + { + struct request *rq = data; +@@ -332,6 +346,8 @@ + rq->q->softirq_done_fn(rq); + } + ++#endif ++ + static void blk_mq_ipi_complete_request(struct request *rq) + { + struct blk_mq_ctx *ctx = rq->mq_ctx; +@@ -343,19 +359,23 @@ + return; + } + +- cpu = get_cpu(); ++ cpu = get_cpu_light(); + if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags)) + shared = cpus_share_cache(cpu, ctx->cpu); + + if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ schedule_work_on(ctx->cpu, &rq->work); ++#else + rq->csd.func = __blk_mq_complete_request_remote; + rq->csd.info = rq; + rq->csd.flags = 0; + smp_call_function_single_async(ctx->cpu, &rq->csd); ++#endif + } else { + rq->q->softirq_done_fn(rq); + } +- put_cpu(); ++ put_cpu_light(); + } + + static void __blk_mq_complete_request(struct request *rq) +@@ -862,14 +882,14 @@ + return; + + if (!async) { +- int cpu = get_cpu(); ++ int cpu = get_cpu_light(); + if (cpumask_test_cpu(cpu, hctx->cpumask)) { + __blk_mq_run_hw_queue(hctx); +- put_cpu(); ++ put_cpu_light(); + return; + } + +- put_cpu(); ++ put_cpu_light(); + } + + kblockd_schedule_delayed_work_on(blk_mq_hctx_next_cpu(hctx), +@@ -1617,7 +1637,7 @@ + { + struct blk_mq_hw_ctx *hctx = data; + +- if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) ++ if (action == CPU_POST_DEAD) + return blk_mq_hctx_cpu_offline(hctx, cpu); + + /* +diff -Nur linux-4.4.13.orig/block/blk-mq-cpu.c linux-4.4.13/block/blk-mq-cpu.c +--- linux-4.4.13.orig/block/blk-mq-cpu.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/block/blk-mq-cpu.c 2016-06-22 21:55:07.990523867 +0200 +@@ -16,7 +16,7 @@ + #include "blk-mq.h" + + static LIST_HEAD(blk_mq_cpu_notify_list); +-static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock); ++static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock); + + static int blk_mq_main_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +@@ -25,7 +25,10 @@ + struct blk_mq_cpu_notifier *notify; + int ret = NOTIFY_OK; + +- raw_spin_lock(&blk_mq_cpu_notify_lock); ++ if (action != CPU_POST_DEAD) ++ return NOTIFY_OK; ++ ++ spin_lock(&blk_mq_cpu_notify_lock); + + list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) { + ret = notify->notify(notify->data, action, cpu); +@@ -33,7 +36,7 @@ + break; + } + +- raw_spin_unlock(&blk_mq_cpu_notify_lock); ++ spin_unlock(&blk_mq_cpu_notify_lock); + return ret; + } + +@@ -41,16 +44,16 @@ + { + BUG_ON(!notifier->notify); + +- raw_spin_lock(&blk_mq_cpu_notify_lock); ++ spin_lock(&blk_mq_cpu_notify_lock); + list_add_tail(¬ifier->list, &blk_mq_cpu_notify_list); +- raw_spin_unlock(&blk_mq_cpu_notify_lock); ++ spin_unlock(&blk_mq_cpu_notify_lock); + } + + void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier) + { +- raw_spin_lock(&blk_mq_cpu_notify_lock); ++ spin_lock(&blk_mq_cpu_notify_lock); + 
list_del(¬ifier->list); +- raw_spin_unlock(&blk_mq_cpu_notify_lock); ++ spin_unlock(&blk_mq_cpu_notify_lock); + } + + void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier, +diff -Nur linux-4.4.13.orig/block/blk-mq.h linux-4.4.13/block/blk-mq.h +--- linux-4.4.13.orig/block/blk-mq.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/block/blk-mq.h 2016-06-22 21:55:07.990523867 +0200 +@@ -74,7 +74,10 @@ + static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, + unsigned int cpu) + { +- return per_cpu_ptr(q->queue_ctx, cpu); ++ struct blk_mq_ctx *ctx; ++ ++ ctx = per_cpu_ptr(q->queue_ctx, cpu); ++ return ctx; + } + + /* +@@ -85,12 +88,12 @@ + */ + static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q) + { +- return __blk_mq_get_ctx(q, get_cpu()); ++ return __blk_mq_get_ctx(q, get_cpu_light()); + } + + static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx) + { +- put_cpu(); ++ put_cpu_light(); + } + + struct blk_mq_alloc_data { +diff -Nur linux-4.4.13.orig/block/blk-softirq.c linux-4.4.13/block/blk-softirq.c +--- linux-4.4.13.orig/block/blk-softirq.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/block/blk-softirq.c 2016-06-22 21:55:07.990523867 +0200 +@@ -51,6 +51,7 @@ + raise_softirq_irqoff(BLOCK_SOFTIRQ); + + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + + /* +@@ -93,6 +94,7 @@ + this_cpu_ptr(&blk_cpu_done)); + raise_softirq_irqoff(BLOCK_SOFTIRQ); + local_irq_enable(); ++ preempt_check_resched_rt(); + } + + return NOTIFY_OK; +@@ -150,6 +152,7 @@ + goto do_local; + + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + + /** +diff -Nur linux-4.4.13.orig/block/bounce.c linux-4.4.13/block/bounce.c +--- linux-4.4.13.orig/block/bounce.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/block/bounce.c 2016-06-22 21:55:07.990523867 +0200 +@@ -55,11 +55,11 @@ + unsigned long flags; + unsigned char *vto; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + vto = kmap_atomic(to->bv_page); + memcpy(vto + to->bv_offset, vfrom, to->bv_len); + kunmap_atomic(vto); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + + #else /* CONFIG_HIGHMEM */ +diff -Nur linux-4.4.13.orig/crypto/algapi.c linux-4.4.13/crypto/algapi.c +--- linux-4.4.13.orig/crypto/algapi.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/crypto/algapi.c 2016-06-22 21:55:07.990523867 +0200 +@@ -719,13 +719,13 @@ + + int crypto_register_notifier(struct notifier_block *nb) + { +- return blocking_notifier_chain_register(&crypto_chain, nb); ++ return srcu_notifier_chain_register(&crypto_chain, nb); + } + EXPORT_SYMBOL_GPL(crypto_register_notifier); + + int crypto_unregister_notifier(struct notifier_block *nb) + { +- return blocking_notifier_chain_unregister(&crypto_chain, nb); ++ return srcu_notifier_chain_unregister(&crypto_chain, nb); + } + EXPORT_SYMBOL_GPL(crypto_unregister_notifier); + +diff -Nur linux-4.4.13.orig/crypto/api.c linux-4.4.13/crypto/api.c +--- linux-4.4.13.orig/crypto/api.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/crypto/api.c 2016-06-22 21:55:07.990523867 +0200 +@@ -31,7 +31,7 @@ + DECLARE_RWSEM(crypto_alg_sem); + EXPORT_SYMBOL_GPL(crypto_alg_sem); + +-BLOCKING_NOTIFIER_HEAD(crypto_chain); ++SRCU_NOTIFIER_HEAD(crypto_chain); + EXPORT_SYMBOL_GPL(crypto_chain); + + static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg); +@@ -236,10 +236,10 @@ + { + int ok; + +- ok = blocking_notifier_call_chain(&crypto_chain, val, v); ++ ok = srcu_notifier_call_chain(&crypto_chain, val, v); + if 
(ok == NOTIFY_DONE) {
+		request_module("cryptomgr");
+-		ok = blocking_notifier_call_chain(&crypto_chain, val, v);
++		ok = srcu_notifier_call_chain(&crypto_chain, val, v);
+	}
+
+	return ok;
+diff -Nur linux-4.4.13.orig/crypto/internal.h linux-4.4.13/crypto/internal.h
+--- linux-4.4.13.orig/crypto/internal.h	2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/crypto/internal.h	2016-06-22 21:55:07.990523867 +0200
+@@ -47,7 +47,7 @@
+ 
+ extern struct list_head crypto_alg_list;
+ extern struct rw_semaphore crypto_alg_sem;
+-extern struct blocking_notifier_head crypto_chain;
++extern struct srcu_notifier_head crypto_chain;
+ 
+ #ifdef CONFIG_PROC_FS
+ void __init crypto_init_proc(void);
+@@ -143,7 +143,7 @@
+ 
+ static inline void crypto_notify(unsigned long val, void *v)
+ {
+-	blocking_notifier_call_chain(&crypto_chain, val, v);
++	srcu_notifier_call_chain(&crypto_chain, val, v);
+ }
+ 
+ #endif	/* _CRYPTO_INTERNAL_H */
+diff -Nur linux-4.4.13.orig/Documentation/hwlat_detector.txt linux-4.4.13/Documentation/hwlat_detector.txt
+--- linux-4.4.13.orig/Documentation/hwlat_detector.txt	1970-01-01 01:00:00.000000000 +0100
++++ linux-4.4.13/Documentation/hwlat_detector.txt	2016-06-22 21:55:07.994523867 +0200
+@@ -0,0 +1,64 @@
++Introduction:
++-------------
++
++The module hwlat_detector is a special purpose kernel module that is used to
++detect large system latencies induced by the behavior of certain underlying
++hardware or firmware, independent of Linux itself. The code was developed
++originally to detect SMIs (System Management Interrupts) on x86 systems,
++however there is nothing x86 specific about this patchset. It was
++originally written for use by the "RT" patch since the Real Time
++kernel is highly latency sensitive.
++
++SMIs are usually not serviced by the Linux kernel, which typically does not
++even know that they are occurring. SMIs are instead set up by BIOS code
++and are serviced by BIOS code, usually for "critical" events such as
++management of thermal sensors and fans. Sometimes though, SMIs are used for
++other tasks and those tasks can spend an inordinate amount of time in the
++handler (sometimes measured in milliseconds). Obviously this is a problem if
++you are trying to keep event service latencies down in the microsecond range.
++
++The hardware latency detector works by hogging all of the cpus for configurable
++amounts of time (by calling stop_machine()), polling the CPU Time Stamp Counter
++for some period, then looking for gaps in the TSC data. Any gap indicates a
++time when the polling was interrupted and since the machine is stopped and
++interrupts turned off the only thing that could do that would be an SMI.
++
++Note that the SMI detector should *NEVER* be used in a production environment.
++It is intended to be run manually to determine if the hardware platform has a
++problem with long system firmware service routines.
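The gap-detection idea just described can be tried from user space. The sketch below is not the hwlat_detector: it cannot call stop_machine() or disable interrupts, so ordinary preemption and timer ticks will also show up as gaps, and it only illustrates the principle. Threshold and iteration count are arbitrary choices for the example; x86-only (uses rdtsc).

    /* Spin on the TSC and report any gap above a threshold. */
    #include <stdio.h>
    #include <stdint.h>
    #include <x86intrin.h>

    int main(void)
    {
            const uint64_t threshold = 200000;  /* cycles, arbitrary */
            uint64_t prev = __rdtsc(), max_gap = 0;
            long i;

            for (i = 0; i < 200000000L; i++) {
                    uint64_t now = __rdtsc();
                    uint64_t gap = now - prev;

                    if (gap > threshold) {
                            printf("gap of %llu cycles\n",
                                   (unsigned long long)gap);
                            if (gap > max_gap)
                                    max_gap = gap;
                    }
                    prev = now;
            }
            printf("largest gap: %llu cycles\n",
                   (unsigned long long)max_gap);
            return 0;
    }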
++
++Usage:
++------
++
++Loading the module hwlat_detector passing the parameter "enabled=1" (or by
++setting the "enable" entry in "hwlat_detector" debugfs toggled on) is the only
++step required to start the hwlat_detector. It is possible to redefine the
++threshold in microseconds (us) above which latency spikes will be taken
++into account (parameter "threshold=").
++
++Example:
++
++	# modprobe hwlat_detector enabled=1 threshold=100
++
++After the module is loaded, it creates a directory named "hwlat_detector" under
++the debugfs mountpoint, "/debug/hwlat_detector" for this text. It is necessary
++to have debugfs mounted, which might be on /sys/debug on your system.
++
++The /debug/hwlat_detector interface contains the following files:
++
++count			- number of latency spikes observed since last reset
++enable			- a global enable/disable toggle (0/1), resets count
++max			- maximum hardware latency actually observed (usecs)
++sample			- a pipe from which to read current raw sample data
++			  in the format <timestamp> <latency in usecs>
++			  (can be opened O_NONBLOCK for a single sample)
++threshold		- minimum latency value to be considered (usecs)
++width			- time period to sample with CPUs held (usecs)
++			  must be less than the total window size (enforced)
++window			- total period of sampling, width being inside (usecs)
++
++By default we will set width to 500,000 and window to 1,000,000, meaning that
++we will sample every 1,000,000 usecs (1s) for 500,000 usecs (0.5s). If we
++observe any latencies that exceed the threshold (initially 100 usecs),
++then we write to a global sample ring buffer of 8K samples, which is
++consumed by reading from the "sample" (pipe) debugfs file interface.
+diff -Nur linux-4.4.13.orig/Documentation/kernel-parameters.txt linux-4.4.13/Documentation/kernel-parameters.txt
+--- linux-4.4.13.orig/Documentation/kernel-parameters.txt	2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/Documentation/kernel-parameters.txt	2016-06-22 21:55:07.994523867 +0200
+@@ -1629,6 +1629,15 @@
+ 	ip=		[IP_PNP]
+ 			See Documentation/filesystems/nfs/nfsroot.txt.
+ 
++	irqaffinity=	[SMP] Set the default irq affinity mask
++			Format:
++			<cpu number>,...,<cpu number>
++			or
++			<cpu number>-<cpu number>
++			(must be a positive range in ascending order)
++			or a mixture
++			<cpu number>,...,<cpu number>-<cpu number>
++
+ 	irqfixup	[HW]
+ 			When an interrupt is not handled search all handlers
+ 			for it. Intended to get systems with badly broken
+diff -Nur linux-4.4.13.orig/Documentation/sysrq.txt linux-4.4.13/Documentation/sysrq.txt
+--- linux-4.4.13.orig/Documentation/sysrq.txt	2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/Documentation/sysrq.txt	2016-06-22 21:55:07.994523867 +0200
+@@ -59,10 +59,17 @@
+ On other - If you know of the key combos for other architectures, please
+            let me know so I can add them to this section.
+ 
+-On all - write a character to /proc/sysrq-trigger. e.g.:
+-
++On all - write a character to /proc/sysrq-trigger, e.g.:
+ 		echo t > /proc/sysrq-trigger
+ 
++On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.
++		echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
++	 Send an ICMP echo request with this pattern plus the particular
++	 SysRq command key. Example:
++		# ping -c1 -s57 -p0102030468
++	 will trigger the SysRq-H (help) command.
++
++
+ *  What are the 'command' keys?
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ 'b'     - Will immediately reboot the system without syncing or unmounting
+diff -Nur linux-4.4.13.orig/Documentation/trace/histograms.txt linux-4.4.13/Documentation/trace/histograms.txt
+--- linux-4.4.13.orig/Documentation/trace/histograms.txt	1970-01-01 01:00:00.000000000 +0100
++++ linux-4.4.13/Documentation/trace/histograms.txt	2016-06-22 21:55:07.994523867 +0200
+@@ -0,0 +1,186 @@
++		Using the Linux Kernel Latency Histograms
++
++
++This document gives a short explanation of how to enable, configure and use
++latency histograms. Latency histograms are primarily relevant in the
++context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT)
++and are used in the quality management of the Linux real-time
++capabilities.
++
++
++* Purpose of latency histograms
++
++A latency histogram continuously accumulates the frequencies of latency
++data.
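In essence, each observed latency value, in microseconds, indexes a frequency counter. A toy C model of that accumulation step follows; names and sizes are invented for the illustration, with the 10,240 us bound taken from the data-format section later in this document.

    /* One counter per microsecond of observed latency; anything past
     * the last bucket is counted as overflow, which the documentation
     * says invalidates the data until the histogram is reset. */
    #define MAX_LAT_US 10240

    static unsigned long long hist[MAX_LAT_US];
    static unsigned long long hist_overflow;

    static void hist_account(unsigned long latency_us)
    {
            if (latency_us < MAX_LAT_US)
                    hist[latency_us]++;     /* frequency of this value */
            else
                    hist_overflow++;        /* data invalid until reset */
    }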
++There are two types of histograms
++- potential sources of latencies
++- effective latencies
++
++
++* Potential sources of latencies
++
++Potential sources of latencies are code segments where interrupts,
++preemption or both are disabled (aka critical sections). To create
++histograms of potential sources of latency, the kernel stores the time
++stamp at the start of a critical section, determines the time elapsed
++when the end of the section is reached, and increments the frequency
++counter of that latency value - irrespective of whether any concurrently
++running process is affected by latency or not.
++- Configuration items (in the Kernel hacking/Tracers submenu)
++  CONFIG_INTERRUPT_OFF_LATENCY
++  CONFIG_PREEMPT_OFF_LATENCY
++
++
++* Effective latencies
++
++Effective latencies are actually occurring during wakeup of a process. To
++determine effective latencies, the kernel stores the time stamp when a
++process is scheduled to be woken up, and determines the duration of the
++wakeup time shortly before control is passed over to this process. Note
++that the apparent latency in user space may be somewhat longer, since the
++process may be interrupted after control is passed over to it but before
++the execution in user space takes place. Simply measuring the interval
++between enqueuing and wakeup may also not be appropriate in cases when a
++process is scheduled as a result of a timer expiration. The timer may have
++missed its deadline, e.g. due to disabled interrupts, but this latency
++would not be registered. Therefore, the offsets of missed timers are
++recorded in a separate histogram. If both wakeup latency and missed timer
++offsets are configured and enabled, a third histogram may be enabled that
++records the overall latency as a sum of the timer latency, if any, and the
++wakeup latency. This histogram is called "timerandwakeup".
++- Configuration items (in the Kernel hacking/Tracers submenu)
++  CONFIG_WAKEUP_LATENCY
++  CONFIG_MISSED_TIMER_OFFSETS
++
++
++* Usage
++
++The interface to the administration of the latency histograms is located
++in the debugfs file system. To mount it, either enter
++
++mount -t sysfs nodev /sys
++mount -t debugfs nodev /sys/kernel/debug
++
++from shell command line level, or add
++
++nodev	/sys			sysfs	defaults	0 0
++nodev	/sys/kernel/debug	debugfs	defaults	0 0
++
++to the file /etc/fstab. All latency histogram related files are then
++available in the directory /sys/kernel/debug/tracing/latency_hist. A
++particular histogram type is enabled by writing non-zero to the related
++variable in the /sys/kernel/debug/tracing/latency_hist/enable directory.
++Select "preemptirqsoff" for the histograms of potential sources of
++latencies and "wakeup" for histograms of effective latencies etc. The
++histogram data - one per CPU - are available in the files
++
++/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx
++/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx
++/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx
++/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx
++/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx
++
++The histograms are reset by writing non-zero to the file "reset" in a
++particular latency directory.
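As an illustration of the debugfs interface just described, a small user-space helper along the following lines could enable the wakeup histograms and reset one directory. Paths follow the documentation above; adjust them if debugfs is mounted elsewhere. This is a hypothetical example with minimal error handling, not part of the patch.

    #include <stdio.h>

    #define HIST "/sys/kernel/debug/tracing/latency_hist"

    static int write_one(const char *path, const char *val)
    {
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror(path);
                    return -1;
            }
            fputs(val, f);
            return fclose(f);
    }

    int main(void)
    {
            /* Enable collection of wakeup latency histograms... */
            if (write_one(HIST "/enable/wakeup", "1"))
                    return 1;
            /* ...and start from a clean slate in that directory. */
            return write_one(HIST "/wakeup/reset", "1") ? 1 : 0;
    }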
++To reset all latency data, use
++
++#!/bin/sh
++
++TRACINGDIR=/sys/kernel/debug/tracing
++HISTDIR=$TRACINGDIR/latency_hist
++
++if test -d $HISTDIR
++then
++	cd $HISTDIR
++	for i in `find . | grep /reset$`
++	do
++		echo 1 >$i
++	done
++fi
++
++
++* Data format
++
++Latency data are stored with a resolution of one microsecond. The
++maximum latency is 10,240 microseconds. The data are only valid if the
++overflow register is empty. Every output line contains the latency in
++microseconds in the first column and the number of samples in the second
++column. To display only lines with a positive latency count, use, for
++example,
++
++grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0
++
++#Minimum latency: 0 microseconds.
++#Average latency: 0 microseconds.
++#Maximum latency: 25 microseconds.
++#Total samples: 3104770694
++#There are 0 samples greater or equal than 10240 microseconds
++#usecs	         samples
++    0	      2984486876
++    1	        49843506
++    2	        58219047
++    3	         5348126
++    4	         2187960
++    5	         3388262
++    6	          959289
++    7	          208294
++    8	           40420
++    9	            4485
++   10	           14918
++   11	           18340
++   12	           25052
++   13	           19455
++   14	            5602
++   15	             969
++   16	              47
++   17	              18
++   18	              14
++   19	               1
++   20	               3
++   21	               2
++   22	               5
++   23	               2
++   25	               1
++
++
++* Wakeup latency of a selected process
++
++To only collect wakeup latency data of a particular process, write the
++PID of the requested process to
++
++/sys/kernel/debug/tracing/latency_hist/wakeup/pid
++
++PIDs are not considered if this variable is set to 0.
++
++
++* Details of the process with the highest wakeup latency so far
++
++Selected data of the process that suffered from the highest wakeup
++latency that occurred in a particular CPU are available in the file
++
++/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx.
++
++In addition, other relevant system data at the time when the
++latency occurred are given.
++
++The format of the data is (all in one line):
++<PID> <Priority> <Latency> (<Timeroffset>) <Command> \
++<- <PID> <Priority> <Command> <Timestamp>
++
++The value of <Timeroffset> is only relevant in the combined timer
++and wakeup latency recording. In the wakeup recording, it is
++always 0, in the missed_timer_offsets recording, it is the same
++as <Latency>.
++
++When retrospectively searching for the origin of a latency and
++tracing was not enabled, it may be helpful to know the name and
++some basic data of the task that (finally) was switching to the
++late real-time task. In addition to the victim's data, also the
++data of the possible culprit are therefore displayed after the
++"<-" symbol.
++
++Finally, the timestamp of the time when the latency occurred
++in <seconds>.<microseconds> after the most recent system boot
++is provided.
++
++These data are also reset when the wakeup histogram is reset.
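The per-CPU files are easy to post-process. Below is a hedged sketch of a reader for the format documented above: header lines start with '#', data lines are "<usecs> <samples>", and the program recomputes the sample total and a weighted average latency. The file path is just an example; any of the CPUx files will do.

    #include <stdio.h>

    int main(void)
    {
            const char *path =
                    "/sys/kernel/debug/tracing/latency_hist/wakeup/CPU0";
            FILE *f = fopen(path, "r");
            char line[128];
            unsigned long long usecs, count, total = 0, weighted = 0;

            if (!f) {
                    perror(path);
                    return 1;
            }
            while (fgets(line, sizeof(line), f)) {
                    if (line[0] == '#')
                            continue;       /* skip the summary header */
                    if (sscanf(line, "%llu %llu", &usecs, &count) == 2) {
                            total += count;
                            weighted += usecs * count;
                    }
            }
            fclose(f);
            if (total)
                    printf("%llu samples, average %llu usecs\n",
                           total, weighted / total);
            return 0;
    }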
+diff -Nur linux-4.4.13.orig/drivers/acpi/acpica/acglobal.h linux-4.4.13/drivers/acpi/acpica/acglobal.h +--- linux-4.4.13.orig/drivers/acpi/acpica/acglobal.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/acpi/acpica/acglobal.h 2016-06-22 21:55:07.994523867 +0200 +@@ -116,7 +116,7 @@ + * interrupt level + */ + ACPI_GLOBAL(acpi_spinlock, acpi_gbl_gpe_lock); /* For GPE data structs and registers */ +-ACPI_GLOBAL(acpi_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */ ++ACPI_GLOBAL(acpi_raw_spinlock, acpi_gbl_hardware_lock); /* For ACPI H/W except GPE registers */ + ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock); + + /* Mutex for _OSI support */ +diff -Nur linux-4.4.13.orig/drivers/acpi/acpica/hwregs.c linux-4.4.13/drivers/acpi/acpica/hwregs.c +--- linux-4.4.13.orig/drivers/acpi/acpica/hwregs.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/acpi/acpica/hwregs.c 2016-06-22 21:55:07.994523867 +0200 +@@ -269,14 +269,14 @@ + ACPI_BITMASK_ALL_FIXED_STATUS, + ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address))); + +- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); ++ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); + + /* Clear the fixed events in PM1 A/B */ + + status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS, + ACPI_BITMASK_ALL_FIXED_STATUS); + +- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); ++ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); + + if (ACPI_FAILURE(status)) { + goto exit; +diff -Nur linux-4.4.13.orig/drivers/acpi/acpica/hwxface.c linux-4.4.13/drivers/acpi/acpica/hwxface.c +--- linux-4.4.13.orig/drivers/acpi/acpica/hwxface.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/acpi/acpica/hwxface.c 2016-06-22 21:55:07.994523867 +0200 +@@ -374,7 +374,7 @@ + return_ACPI_STATUS(AE_BAD_PARAMETER); + } + +- lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock); ++ raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags); + + /* + * At this point, we know that the parent register is one of the +@@ -435,7 +435,7 @@ + + unlock_and_exit: + +- acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags); ++ raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags); + return_ACPI_STATUS(status); + } + +diff -Nur linux-4.4.13.orig/drivers/acpi/acpica/utmutex.c linux-4.4.13/drivers/acpi/acpica/utmutex.c +--- linux-4.4.13.orig/drivers/acpi/acpica/utmutex.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/acpi/acpica/utmutex.c 2016-06-22 21:55:07.994523867 +0200 +@@ -88,7 +88,7 @@ + return_ACPI_STATUS (status); + } + +- status = acpi_os_create_lock (&acpi_gbl_hardware_lock); ++ status = acpi_os_create_raw_lock (&acpi_gbl_hardware_lock); + if (ACPI_FAILURE (status)) { + return_ACPI_STATUS (status); + } +@@ -156,7 +156,7 @@ + /* Delete the spinlocks */ + + acpi_os_delete_lock(acpi_gbl_gpe_lock); +- acpi_os_delete_lock(acpi_gbl_hardware_lock); ++ acpi_os_delete_raw_lock(acpi_gbl_hardware_lock); + acpi_os_delete_lock(acpi_gbl_reference_count_lock); + + /* Delete the reader/writer lock */ +diff -Nur linux-4.4.13.orig/drivers/ata/libata-sff.c linux-4.4.13/drivers/ata/libata-sff.c +--- linux-4.4.13.orig/drivers/ata/libata-sff.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/ata/libata-sff.c 2016-06-22 21:55:07.994523867 +0200 +@@ -678,9 +678,9 @@ + unsigned long flags; + unsigned int consumed; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + consumed = ata_sff_data_xfer32(dev, buf, buflen, rw); +- local_irq_restore(flags); ++ 
local_irq_restore_nort(flags); + + return consumed; + } +@@ -719,7 +719,7 @@ + unsigned long flags; + + /* FIXME: use a bounce buffer */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + buf = kmap_atomic(page); + + /* do the actual data transfer */ +@@ -727,7 +727,7 @@ + do_write); + + kunmap_atomic(buf); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } else { + buf = page_address(page); + ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size, +@@ -864,7 +864,7 @@ + unsigned long flags; + + /* FIXME: use bounce buffer */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + buf = kmap_atomic(page); + + /* do the actual data transfer */ +@@ -872,7 +872,7 @@ + count, rw); + + kunmap_atomic(buf); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } else { + buf = page_address(page); + consumed = ap->ops->sff_data_xfer(dev, buf + offset, +diff -Nur linux-4.4.13.orig/drivers/block/zram/zram_drv.c linux-4.4.13/drivers/block/zram/zram_drv.c +--- linux-4.4.13.orig/drivers/block/zram/zram_drv.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/block/zram/zram_drv.c 2016-06-22 21:55:07.994523867 +0200 +@@ -520,6 +520,8 @@ + goto out_error; + } + ++ zram_meta_init_table_locks(meta, disksize); ++ + return meta; + + out_error: +@@ -568,12 +570,12 @@ + unsigned long handle; + size_t size; + +- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_lock_table(&meta->table[index]); + handle = meta->table[index].handle; + size = zram_get_obj_size(meta, index); + + if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) { +- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_unlock_table(&meta->table[index]); + clear_page(mem); + return 0; + } +@@ -584,7 +586,7 @@ + else + ret = zcomp_decompress(zram->comp, cmem, size, mem); + zs_unmap_object(meta->mem_pool, handle); +- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_unlock_table(&meta->table[index]); + + /* Should NEVER happen. Return bio error if it does. */ + if (unlikely(ret)) { +@@ -604,14 +606,14 @@ + struct zram_meta *meta = zram->meta; + page = bvec->bv_page; + +- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_lock_table(&meta->table[index]); + if (unlikely(!meta->table[index].handle) || + zram_test_flag(meta, index, ZRAM_ZERO)) { +- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_unlock_table(&meta->table[index]); + handle_zero_page(bvec); + return 0; + } +- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_unlock_table(&meta->table[index]); + + if (is_partial_io(bvec)) + /* Use a temporary buffer to decompress the page */ +@@ -689,10 +691,10 @@ + if (user_mem) + kunmap_atomic(user_mem); + /* Free memory associated with this sector now. */ +- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_lock_table(&meta->table[index]); + zram_free_page(zram, index); + zram_set_flag(meta, index, ZRAM_ZERO); +- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_unlock_table(&meta->table[index]); + + atomic64_inc(&zram->stats.zero_pages); + ret = 0; +@@ -752,12 +754,12 @@ + * Free memory associated with this sector + * before overwriting unused sectors. 
+ */ +- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_lock_table(&meta->table[index]); + zram_free_page(zram, index); + + meta->table[index].handle = handle; + zram_set_obj_size(meta, index, clen); +- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_unlock_table(&meta->table[index]); + + /* Update stats */ + atomic64_add(clen, &zram->stats.compr_data_size); +@@ -800,9 +802,9 @@ + } + + while (n >= PAGE_SIZE) { +- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_lock_table(&meta->table[index]); + zram_free_page(zram, index); +- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_unlock_table(&meta->table[index]); + atomic64_inc(&zram->stats.notify_free); + index++; + n -= PAGE_SIZE; +@@ -928,9 +930,9 @@ + zram = bdev->bd_disk->private_data; + meta = zram->meta; + +- bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_lock_table(&meta->table[index]); + zram_free_page(zram, index); +- bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value); ++ zram_unlock_table(&meta->table[index]); + atomic64_inc(&zram->stats.notify_free); + } + +diff -Nur linux-4.4.13.orig/drivers/block/zram/zram_drv.h linux-4.4.13/drivers/block/zram/zram_drv.h +--- linux-4.4.13.orig/drivers/block/zram/zram_drv.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/block/zram/zram_drv.h 2016-06-22 21:55:07.994523867 +0200 +@@ -72,6 +72,9 @@ + struct zram_table_entry { + unsigned long handle; + unsigned long value; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ spinlock_t lock; ++#endif + }; + + struct zram_stats { +@@ -119,4 +122,42 @@ + */ + bool claim; /* Protected by bdev->bd_mutex */ + }; ++ ++#ifndef CONFIG_PREEMPT_RT_BASE ++static inline void zram_lock_table(struct zram_table_entry *table) ++{ ++ bit_spin_lock(ZRAM_ACCESS, &table->value); ++} ++ ++static inline void zram_unlock_table(struct zram_table_entry *table) ++{ ++ bit_spin_unlock(ZRAM_ACCESS, &table->value); ++} ++ ++static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize) { } ++#else /* CONFIG_PREEMPT_RT_BASE */ ++static inline void zram_lock_table(struct zram_table_entry *table) ++{ ++ spin_lock(&table->lock); ++ __set_bit(ZRAM_ACCESS, &table->value); ++} ++ ++static inline void zram_unlock_table(struct zram_table_entry *table) ++{ ++ __clear_bit(ZRAM_ACCESS, &table->value); ++ spin_unlock(&table->lock); ++} ++ ++static inline void zram_meta_init_table_locks(struct zram_meta *meta, u64 disksize) ++{ ++ size_t num_pages = disksize >> PAGE_SHIFT; ++ size_t index; ++ ++ for (index = 0; index < num_pages; index++) { ++ spinlock_t *lock = &meta->table[index].lock; ++ spin_lock_init(lock); ++ } ++} ++#endif /* CONFIG_PREEMPT_RT_BASE */ ++ + #endif +diff -Nur linux-4.4.13.orig/drivers/char/random.c linux-4.4.13/drivers/char/random.c +--- linux-4.4.13.orig/drivers/char/random.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/char/random.c 2016-06-22 21:55:07.994523867 +0200 +@@ -796,8 +796,6 @@ + } sample; + long delta, delta2, delta3; + +- preempt_disable(); +- + sample.jiffies = jiffies; + sample.cycles = random_get_entropy(); + sample.num = num; +@@ -838,7 +836,6 @@ + */ + credit_entropy_bits(r, min_t(int, fls(delta>>1), 11)); + } +- preempt_enable(); + } + + void add_input_randomness(unsigned int type, unsigned int code, +@@ -891,28 +888,27 @@ + return *(ptr + f->reg_idx++); + } + +-void add_interrupt_randomness(int irq, int irq_flags) ++void add_interrupt_randomness(int irq, int irq_flags, __u64 ip) + { + struct entropy_store *r; + struct fast_pool 
*fast_pool = this_cpu_ptr(&irq_randomness); +- struct pt_regs *regs = get_irq_regs(); + unsigned long now = jiffies; + cycles_t cycles = random_get_entropy(); + __u32 c_high, j_high; +- __u64 ip; + unsigned long seed; + int credit = 0; + + if (cycles == 0) +- cycles = get_reg(fast_pool, regs); ++ cycles = get_reg(fast_pool, NULL); + c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0; + j_high = (sizeof(now) > 4) ? now >> 32 : 0; + fast_pool->pool[0] ^= cycles ^ j_high ^ irq; + fast_pool->pool[1] ^= now ^ c_high; +- ip = regs ? instruction_pointer(regs) : _RET_IP_; ++ if (!ip) ++ ip = _RET_IP_; + fast_pool->pool[2] ^= ip; + fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 : +- get_reg(fast_pool, regs); ++ get_reg(fast_pool, NULL); + + fast_mix(fast_pool); + add_interrupt_bench(cycles); +diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-generated.c linux-4.4.13/drivers/clk/at91/clk-generated.c +--- linux-4.4.13.orig/drivers/clk/at91/clk-generated.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/clk/at91/clk-generated.c 2016-06-22 21:55:07.994523867 +0200 +@@ -15,8 +15,8 @@ + #include + #include + #include +-#include +-#include ++#include ++#include + + #include "pmc.h" + +@@ -28,8 +28,9 @@ + + struct clk_generated { + struct clk_hw hw; +- struct at91_pmc *pmc; ++ struct regmap *regmap; + struct clk_range range; ++ spinlock_t *lock; + u32 id; + u32 gckdiv; + u8 parent_id; +@@ -41,49 +42,52 @@ + static int clk_generated_enable(struct clk_hw *hw) + { + struct clk_generated *gck = to_clk_generated(hw); +- struct at91_pmc *pmc = gck->pmc; +- u32 tmp; ++ unsigned long flags; + + pr_debug("GCLK: %s, gckdiv = %d, parent id = %d\n", + __func__, gck->gckdiv, gck->parent_id); + +- pmc_lock(pmc); +- pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK)); +- tmp = pmc_read(pmc, AT91_PMC_PCR) & +- ~(AT91_PMC_PCR_GCKDIV_MASK | AT91_PMC_PCR_GCKCSS_MASK); +- pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_GCKCSS(gck->parent_id) +- | AT91_PMC_PCR_CMD +- | AT91_PMC_PCR_GCKDIV(gck->gckdiv) +- | AT91_PMC_PCR_GCKEN); +- pmc_unlock(pmc); ++ spin_lock_irqsave(gck->lock, flags); ++ regmap_write(gck->regmap, AT91_PMC_PCR, ++ (gck->id & AT91_PMC_PCR_PID_MASK)); ++ regmap_update_bits(gck->regmap, AT91_PMC_PCR, ++ AT91_PMC_PCR_GCKDIV_MASK | AT91_PMC_PCR_GCKCSS_MASK | ++ AT91_PMC_PCR_CMD | AT91_PMC_PCR_GCKEN, ++ AT91_PMC_PCR_GCKCSS(gck->parent_id) | ++ AT91_PMC_PCR_CMD | ++ AT91_PMC_PCR_GCKDIV(gck->gckdiv) | ++ AT91_PMC_PCR_GCKEN); ++ spin_unlock_irqrestore(gck->lock, flags); + return 0; + } + + static void clk_generated_disable(struct clk_hw *hw) + { + struct clk_generated *gck = to_clk_generated(hw); +- struct at91_pmc *pmc = gck->pmc; +- u32 tmp; ++ unsigned long flags; + +- pmc_lock(pmc); +- pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK)); +- tmp = pmc_read(pmc, AT91_PMC_PCR) & ~AT91_PMC_PCR_GCKEN; +- pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_CMD); +- pmc_unlock(pmc); ++ spin_lock_irqsave(gck->lock, flags); ++ regmap_write(gck->regmap, AT91_PMC_PCR, ++ (gck->id & AT91_PMC_PCR_PID_MASK)); ++ regmap_update_bits(gck->regmap, AT91_PMC_PCR, ++ AT91_PMC_PCR_CMD | AT91_PMC_PCR_GCKEN, ++ AT91_PMC_PCR_CMD); ++ spin_unlock_irqrestore(gck->lock, flags); + } + + static int clk_generated_is_enabled(struct clk_hw *hw) + { + struct clk_generated *gck = to_clk_generated(hw); +- struct at91_pmc *pmc = gck->pmc; +- int ret; ++ unsigned long flags; ++ unsigned int status; + +- pmc_lock(pmc); +- pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK)); +- ret = !!(pmc_read(pmc, 
AT91_PMC_PCR) & AT91_PMC_PCR_GCKEN); +- pmc_unlock(pmc); ++ spin_lock_irqsave(gck->lock, flags); ++ regmap_write(gck->regmap, AT91_PMC_PCR, ++ (gck->id & AT91_PMC_PCR_PID_MASK)); ++ regmap_read(gck->regmap, AT91_PMC_PCR, &status); ++ spin_unlock_irqrestore(gck->lock, flags); + +- return ret; ++ return status & AT91_PMC_PCR_GCKEN ? 1 : 0; + } + + static unsigned long +@@ -214,13 +218,14 @@ + */ + static void clk_generated_startup(struct clk_generated *gck) + { +- struct at91_pmc *pmc = gck->pmc; + u32 tmp; ++ unsigned long flags; + +- pmc_lock(pmc); +- pmc_write(pmc, AT91_PMC_PCR, (gck->id & AT91_PMC_PCR_PID_MASK)); +- tmp = pmc_read(pmc, AT91_PMC_PCR); +- pmc_unlock(pmc); ++ spin_lock_irqsave(gck->lock, flags); ++ regmap_write(gck->regmap, AT91_PMC_PCR, ++ (gck->id & AT91_PMC_PCR_PID_MASK)); ++ regmap_read(gck->regmap, AT91_PMC_PCR, &tmp); ++ spin_unlock_irqrestore(gck->lock, flags); + + gck->parent_id = (tmp & AT91_PMC_PCR_GCKCSS_MASK) + >> AT91_PMC_PCR_GCKCSS_OFFSET; +@@ -229,8 +234,8 @@ + } + + static struct clk * __init +-at91_clk_register_generated(struct at91_pmc *pmc, const char *name, +- const char **parent_names, u8 num_parents, ++at91_clk_register_generated(struct regmap *regmap, spinlock_t *lock, const char ++ *name, const char **parent_names, u8 num_parents, + u8 id, const struct clk_range *range) + { + struct clk_generated *gck; +@@ -249,7 +254,8 @@ + + gck->id = id; + gck->hw.init = &init; +- gck->pmc = pmc; ++ gck->regmap = regmap; ++ gck->lock = lock; + gck->range = *range; + + clk = clk_register(NULL, &gck->hw); +@@ -261,8 +267,7 @@ + return clk; + } + +-void __init of_sama5d2_clk_generated_setup(struct device_node *np, +- struct at91_pmc *pmc) ++void __init of_sama5d2_clk_generated_setup(struct device_node *np) + { + int num; + u32 id; +@@ -272,6 +277,7 @@ + const char *parent_names[GENERATED_SOURCE_MAX]; + struct device_node *gcknp; + struct clk_range range = CLK_RANGE(0, 0); ++ struct regmap *regmap; + + num_parents = of_clk_get_parent_count(np); + if (num_parents <= 0 || num_parents > GENERATED_SOURCE_MAX) +@@ -283,6 +289,10 @@ + if (!num || num > PERIPHERAL_MAX) + return; + ++ regmap = syscon_node_to_regmap(of_get_parent(np)); ++ if (IS_ERR(regmap)) ++ return; ++ + for_each_child_of_node(np, gcknp) { + if (of_property_read_u32(gcknp, "reg", &id)) + continue; +@@ -296,11 +306,14 @@ + of_at91_get_clk_range(gcknp, "atmel,clk-output-range", + &range); + +- clk = at91_clk_register_generated(pmc, name, parent_names, +- num_parents, id, &range); ++ clk = at91_clk_register_generated(regmap, &pmc_pcr_lock, name, ++ parent_names, num_parents, ++ id, &range); + if (IS_ERR(clk)) + continue; + + of_clk_add_provider(gcknp, of_clk_src_simple_get, clk); + } + } ++CLK_OF_DECLARE(of_sama5d2_clk_generated_setup, "atmel,sama5d2-clk-generated", ++ of_sama5d2_clk_generated_setup); +diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-h32mx.c linux-4.4.13/drivers/clk/at91/clk-h32mx.c +--- linux-4.4.13.orig/drivers/clk/at91/clk-h32mx.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/clk/at91/clk-h32mx.c 2016-06-22 21:55:07.994523867 +0200 +@@ -15,15 +15,9 @@ + #include + #include + #include +-#include + #include +-#include +-#include +-#include +-#include +-#include +-#include +-#include ++#include ++#include + + #include "pmc.h" + +@@ -31,7 +25,7 @@ + + struct clk_sama5d4_h32mx { + struct clk_hw hw; +- struct at91_pmc *pmc; ++ struct regmap *regmap; + }; + + #define to_clk_sama5d4_h32mx(hw) container_of(hw, struct clk_sama5d4_h32mx, hw) +@@ -40,8 +34,10 @@ + unsigned long 
parent_rate) + { + struct clk_sama5d4_h32mx *h32mxclk = to_clk_sama5d4_h32mx(hw); ++ unsigned int mckr; + +- if (pmc_read(h32mxclk->pmc, AT91_PMC_MCKR) & AT91_PMC_H32MXDIV) ++ regmap_read(h32mxclk->regmap, AT91_PMC_MCKR, &mckr); ++ if (mckr & AT91_PMC_H32MXDIV) + return parent_rate / 2; + + if (parent_rate > H32MX_MAX_FREQ) +@@ -70,18 +66,16 @@ + unsigned long parent_rate) + { + struct clk_sama5d4_h32mx *h32mxclk = to_clk_sama5d4_h32mx(hw); +- struct at91_pmc *pmc = h32mxclk->pmc; +- u32 tmp; ++ u32 mckr = 0; + + if (parent_rate != rate && (parent_rate / 2) != rate) + return -EINVAL; + +- pmc_lock(pmc); +- tmp = pmc_read(pmc, AT91_PMC_MCKR) & ~AT91_PMC_H32MXDIV; + if ((parent_rate / 2) == rate) +- tmp |= AT91_PMC_H32MXDIV; +- pmc_write(pmc, AT91_PMC_MCKR, tmp); +- pmc_unlock(pmc); ++ mckr = AT91_PMC_H32MXDIV; ++ ++ regmap_update_bits(h32mxclk->regmap, AT91_PMC_MCKR, ++ AT91_PMC_H32MXDIV, mckr); + + return 0; + } +@@ -92,14 +86,18 @@ + .set_rate = clk_sama5d4_h32mx_set_rate, + }; + +-void __init of_sama5d4_clk_h32mx_setup(struct device_node *np, +- struct at91_pmc *pmc) ++static void __init of_sama5d4_clk_h32mx_setup(struct device_node *np) + { + struct clk_sama5d4_h32mx *h32mxclk; + struct clk_init_data init; + const char *parent_name; ++ struct regmap *regmap; + struct clk *clk; + ++ regmap = syscon_node_to_regmap(of_get_parent(np)); ++ if (IS_ERR(regmap)) ++ return; ++ + h32mxclk = kzalloc(sizeof(*h32mxclk), GFP_KERNEL); + if (!h32mxclk) + return; +@@ -113,7 +111,7 @@ + init.flags = CLK_SET_RATE_GATE; + + h32mxclk->hw.init = &init; +- h32mxclk->pmc = pmc; ++ h32mxclk->regmap = regmap; + + clk = clk_register(NULL, &h32mxclk->hw); + if (IS_ERR(clk)) { +@@ -123,3 +121,5 @@ + + of_clk_add_provider(np, of_clk_src_simple_get, clk); + } ++CLK_OF_DECLARE(of_sama5d4_clk_h32mx_setup, "atmel,sama5d4-clk-h32mx", ++ of_sama5d4_clk_h32mx_setup); +diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-main.c linux-4.4.13/drivers/clk/at91/clk-main.c +--- linux-4.4.13.orig/drivers/clk/at91/clk-main.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/clk/at91/clk-main.c 2016-06-22 21:55:07.994523867 +0200 +@@ -13,13 +13,8 @@ + #include + #include + #include +-#include +-#include +-#include +-#include +-#include +-#include +-#include ++#include ++#include + + #include "pmc.h" + +@@ -34,18 +29,14 @@ + + struct clk_main_osc { + struct clk_hw hw; +- struct at91_pmc *pmc; +- unsigned int irq; +- wait_queue_head_t wait; ++ struct regmap *regmap; + }; + + #define to_clk_main_osc(hw) container_of(hw, struct clk_main_osc, hw) + + struct clk_main_rc_osc { + struct clk_hw hw; +- struct at91_pmc *pmc; +- unsigned int irq; +- wait_queue_head_t wait; ++ struct regmap *regmap; + unsigned long frequency; + unsigned long accuracy; + }; +@@ -54,51 +45,47 @@ + + struct clk_rm9200_main { + struct clk_hw hw; +- struct at91_pmc *pmc; ++ struct regmap *regmap; + }; + + #define to_clk_rm9200_main(hw) container_of(hw, struct clk_rm9200_main, hw) + + struct clk_sam9x5_main { + struct clk_hw hw; +- struct at91_pmc *pmc; +- unsigned int irq; +- wait_queue_head_t wait; ++ struct regmap *regmap; + u8 parent; + }; + + #define to_clk_sam9x5_main(hw) container_of(hw, struct clk_sam9x5_main, hw) + +-static irqreturn_t clk_main_osc_irq_handler(int irq, void *dev_id) ++static inline bool clk_main_osc_ready(struct regmap *regmap) + { +- struct clk_main_osc *osc = dev_id; ++ unsigned int status; + +- wake_up(&osc->wait); +- disable_irq_nosync(osc->irq); ++ regmap_read(regmap, AT91_PMC_SR, &status); + +- return IRQ_HANDLED; ++ return 
status & AT91_PMC_MOSCS; + } + + static int clk_main_osc_prepare(struct clk_hw *hw) + { + struct clk_main_osc *osc = to_clk_main_osc(hw); +- struct at91_pmc *pmc = osc->pmc; ++ struct regmap *regmap = osc->regmap; + u32 tmp; + +- tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK; ++ regmap_read(regmap, AT91_CKGR_MOR, &tmp); ++ tmp &= ~MOR_KEY_MASK; ++ + if (tmp & AT91_PMC_OSCBYPASS) + return 0; + + if (!(tmp & AT91_PMC_MOSCEN)) { + tmp |= AT91_PMC_MOSCEN | AT91_PMC_KEY; +- pmc_write(pmc, AT91_CKGR_MOR, tmp); ++ regmap_write(regmap, AT91_CKGR_MOR, tmp); + } + +- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS)) { +- enable_irq(osc->irq); +- wait_event(osc->wait, +- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS); +- } ++ while (!clk_main_osc_ready(regmap)) ++ cpu_relax(); + + return 0; + } +@@ -106,9 +93,10 @@ + static void clk_main_osc_unprepare(struct clk_hw *hw) + { + struct clk_main_osc *osc = to_clk_main_osc(hw); +- struct at91_pmc *pmc = osc->pmc; +- u32 tmp = pmc_read(pmc, AT91_CKGR_MOR); ++ struct regmap *regmap = osc->regmap; ++ u32 tmp; + ++ regmap_read(regmap, AT91_CKGR_MOR, &tmp); + if (tmp & AT91_PMC_OSCBYPASS) + return; + +@@ -116,20 +104,22 @@ + return; + + tmp &= ~(AT91_PMC_KEY | AT91_PMC_MOSCEN); +- pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_KEY); ++ regmap_write(regmap, AT91_CKGR_MOR, tmp | AT91_PMC_KEY); + } + + static int clk_main_osc_is_prepared(struct clk_hw *hw) + { + struct clk_main_osc *osc = to_clk_main_osc(hw); +- struct at91_pmc *pmc = osc->pmc; +- u32 tmp = pmc_read(pmc, AT91_CKGR_MOR); ++ struct regmap *regmap = osc->regmap; ++ u32 tmp, status; + ++ regmap_read(regmap, AT91_CKGR_MOR, &tmp); + if (tmp & AT91_PMC_OSCBYPASS) + return 1; + +- return !!((pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCS) && +- (pmc_read(pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCEN)); ++ regmap_read(regmap, AT91_PMC_SR, &status); ++ ++ return (status & AT91_PMC_MOSCS) && (tmp & AT91_PMC_MOSCEN); + } + + static const struct clk_ops main_osc_ops = { +@@ -139,18 +129,16 @@ + }; + + static struct clk * __init +-at91_clk_register_main_osc(struct at91_pmc *pmc, +- unsigned int irq, ++at91_clk_register_main_osc(struct regmap *regmap, + const char *name, + const char *parent_name, + bool bypass) + { +- int ret; + struct clk_main_osc *osc; + struct clk *clk = NULL; + struct clk_init_data init; + +- if (!pmc || !irq || !name || !parent_name) ++ if (!name || !parent_name) + return ERR_PTR(-EINVAL); + + osc = kzalloc(sizeof(*osc), GFP_KERNEL); +@@ -164,85 +152,70 @@ + init.flags = CLK_IGNORE_UNUSED; + + osc->hw.init = &init; +- osc->pmc = pmc; +- osc->irq = irq; +- +- init_waitqueue_head(&osc->wait); +- irq_set_status_flags(osc->irq, IRQ_NOAUTOEN); +- ret = request_irq(osc->irq, clk_main_osc_irq_handler, +- IRQF_TRIGGER_HIGH, name, osc); +- if (ret) { +- kfree(osc); +- return ERR_PTR(ret); +- } ++ osc->regmap = regmap; + + if (bypass) +- pmc_write(pmc, AT91_CKGR_MOR, +- (pmc_read(pmc, AT91_CKGR_MOR) & +- ~(MOR_KEY_MASK | AT91_PMC_MOSCEN)) | +- AT91_PMC_OSCBYPASS | AT91_PMC_KEY); ++ regmap_update_bits(regmap, ++ AT91_CKGR_MOR, MOR_KEY_MASK | ++ AT91_PMC_MOSCEN, ++ AT91_PMC_OSCBYPASS | AT91_PMC_KEY); + + clk = clk_register(NULL, &osc->hw); +- if (IS_ERR(clk)) { +- free_irq(irq, osc); ++ if (IS_ERR(clk)) + kfree(osc); +- } + + return clk; + } + +-void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np, +- struct at91_pmc *pmc) ++static void __init of_at91rm9200_clk_main_osc_setup(struct device_node *np) + { + struct clk *clk; +- unsigned int irq; + const char *name = np->name; + const char 
*parent_name; ++ struct regmap *regmap; + bool bypass; + + of_property_read_string(np, "clock-output-names", &name); + bypass = of_property_read_bool(np, "atmel,osc-bypass"); + parent_name = of_clk_get_parent_name(np, 0); + +- irq = irq_of_parse_and_map(np, 0); +- if (!irq) ++ regmap = syscon_node_to_regmap(of_get_parent(np)); ++ if (IS_ERR(regmap)) + return; + +- clk = at91_clk_register_main_osc(pmc, irq, name, parent_name, bypass); ++ clk = at91_clk_register_main_osc(regmap, name, parent_name, bypass); + if (IS_ERR(clk)) + return; + + of_clk_add_provider(np, of_clk_src_simple_get, clk); + } ++CLK_OF_DECLARE(at91rm9200_clk_main_osc, "atmel,at91rm9200-clk-main-osc", ++ of_at91rm9200_clk_main_osc_setup); + +-static irqreturn_t clk_main_rc_osc_irq_handler(int irq, void *dev_id) ++static bool clk_main_rc_osc_ready(struct regmap *regmap) + { +- struct clk_main_rc_osc *osc = dev_id; ++ unsigned int status; + +- wake_up(&osc->wait); +- disable_irq_nosync(osc->irq); ++ regmap_read(regmap, AT91_PMC_SR, &status); + +- return IRQ_HANDLED; ++ return status & AT91_PMC_MOSCRCS; + } + + static int clk_main_rc_osc_prepare(struct clk_hw *hw) + { + struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw); +- struct at91_pmc *pmc = osc->pmc; +- u32 tmp; ++ struct regmap *regmap = osc->regmap; ++ unsigned int mor; + +- tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK; ++ regmap_read(regmap, AT91_CKGR_MOR, &mor); + +- if (!(tmp & AT91_PMC_MOSCRCEN)) { +- tmp |= AT91_PMC_MOSCRCEN | AT91_PMC_KEY; +- pmc_write(pmc, AT91_CKGR_MOR, tmp); +- } ++ if (!(mor & AT91_PMC_MOSCRCEN)) ++ regmap_update_bits(regmap, AT91_CKGR_MOR, ++ MOR_KEY_MASK | AT91_PMC_MOSCRCEN, ++ AT91_PMC_MOSCRCEN | AT91_PMC_KEY); + +- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS)) { +- enable_irq(osc->irq); +- wait_event(osc->wait, +- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS); +- } ++ while (!clk_main_rc_osc_ready(regmap)) ++ cpu_relax(); + + return 0; + } +@@ -250,23 +223,28 @@ + static void clk_main_rc_osc_unprepare(struct clk_hw *hw) + { + struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw); +- struct at91_pmc *pmc = osc->pmc; +- u32 tmp = pmc_read(pmc, AT91_CKGR_MOR); ++ struct regmap *regmap = osc->regmap; ++ unsigned int mor; + +- if (!(tmp & AT91_PMC_MOSCRCEN)) ++ regmap_read(regmap, AT91_CKGR_MOR, &mor); ++ ++ if (!(mor & AT91_PMC_MOSCRCEN)) + return; + +- tmp &= ~(MOR_KEY_MASK | AT91_PMC_MOSCRCEN); +- pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_KEY); ++ regmap_update_bits(regmap, AT91_CKGR_MOR, ++ MOR_KEY_MASK | AT91_PMC_MOSCRCEN, AT91_PMC_KEY); + } + + static int clk_main_rc_osc_is_prepared(struct clk_hw *hw) + { + struct clk_main_rc_osc *osc = to_clk_main_rc_osc(hw); +- struct at91_pmc *pmc = osc->pmc; ++ struct regmap *regmap = osc->regmap; ++ unsigned int mor, status; ++ ++ regmap_read(regmap, AT91_CKGR_MOR, &mor); ++ regmap_read(regmap, AT91_PMC_SR, &status); + +- return !!((pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCRCS) && +- (pmc_read(pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCRCEN)); ++ return (mor & AT91_PMC_MOSCRCEN) && (status & AT91_PMC_MOSCRCS); + } + + static unsigned long clk_main_rc_osc_recalc_rate(struct clk_hw *hw, +@@ -294,17 +272,15 @@ + }; + + static struct clk * __init +-at91_clk_register_main_rc_osc(struct at91_pmc *pmc, +- unsigned int irq, ++at91_clk_register_main_rc_osc(struct regmap *regmap, + const char *name, + u32 frequency, u32 accuracy) + { +- int ret; + struct clk_main_rc_osc *osc; + struct clk *clk = NULL; + struct clk_init_data init; + +- if (!pmc || !irq || !name || !frequency) ++ if (!name || 
!frequency) + return ERR_PTR(-EINVAL); + + osc = kzalloc(sizeof(*osc), GFP_KERNEL); +@@ -318,63 +294,53 @@ + init.flags = CLK_IS_ROOT | CLK_IGNORE_UNUSED; + + osc->hw.init = &init; +- osc->pmc = pmc; +- osc->irq = irq; ++ osc->regmap = regmap; + osc->frequency = frequency; + osc->accuracy = accuracy; + +- init_waitqueue_head(&osc->wait); +- irq_set_status_flags(osc->irq, IRQ_NOAUTOEN); +- ret = request_irq(osc->irq, clk_main_rc_osc_irq_handler, +- IRQF_TRIGGER_HIGH, name, osc); +- if (ret) +- return ERR_PTR(ret); +- + clk = clk_register(NULL, &osc->hw); +- if (IS_ERR(clk)) { +- free_irq(irq, osc); ++ if (IS_ERR(clk)) + kfree(osc); +- } + + return clk; + } + +-void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np, +- struct at91_pmc *pmc) ++static void __init of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np) + { + struct clk *clk; +- unsigned int irq; + u32 frequency = 0; + u32 accuracy = 0; + const char *name = np->name; ++ struct regmap *regmap; + + of_property_read_string(np, "clock-output-names", &name); + of_property_read_u32(np, "clock-frequency", &frequency); + of_property_read_u32(np, "clock-accuracy", &accuracy); + +- irq = irq_of_parse_and_map(np, 0); +- if (!irq) ++ regmap = syscon_node_to_regmap(of_get_parent(np)); ++ if (IS_ERR(regmap)) + return; + +- clk = at91_clk_register_main_rc_osc(pmc, irq, name, frequency, +- accuracy); ++ clk = at91_clk_register_main_rc_osc(regmap, name, frequency, accuracy); + if (IS_ERR(clk)) + return; + + of_clk_add_provider(np, of_clk_src_simple_get, clk); + } ++CLK_OF_DECLARE(at91sam9x5_clk_main_rc_osc, "atmel,at91sam9x5-clk-main-rc-osc", ++ of_at91sam9x5_clk_main_rc_osc_setup); + + +-static int clk_main_probe_frequency(struct at91_pmc *pmc) ++static int clk_main_probe_frequency(struct regmap *regmap) + { + unsigned long prep_time, timeout; +- u32 tmp; ++ unsigned int mcfr; + + timeout = jiffies + usecs_to_jiffies(MAINFRDY_TIMEOUT); + do { + prep_time = jiffies; +- tmp = pmc_read(pmc, AT91_CKGR_MCFR); +- if (tmp & AT91_PMC_MAINRDY) ++ regmap_read(regmap, AT91_CKGR_MCFR, &mcfr); ++ if (mcfr & AT91_PMC_MAINRDY) + return 0; + usleep_range(MAINF_LOOP_MIN_WAIT, MAINF_LOOP_MAX_WAIT); + } while (time_before(prep_time, timeout)); +@@ -382,34 +348,37 @@ + return -ETIMEDOUT; + } + +-static unsigned long clk_main_recalc_rate(struct at91_pmc *pmc, ++static unsigned long clk_main_recalc_rate(struct regmap *regmap, + unsigned long parent_rate) + { +- u32 tmp; ++ unsigned int mcfr; + + if (parent_rate) + return parent_rate; + + pr_warn("Main crystal frequency not set, using approximate value\n"); +- tmp = pmc_read(pmc, AT91_CKGR_MCFR); +- if (!(tmp & AT91_PMC_MAINRDY)) ++ regmap_read(regmap, AT91_CKGR_MCFR, &mcfr); ++ if (!(mcfr & AT91_PMC_MAINRDY)) + return 0; + +- return ((tmp & AT91_PMC_MAINF) * SLOW_CLOCK_FREQ) / MAINF_DIV; ++ return ((mcfr & AT91_PMC_MAINF) * SLOW_CLOCK_FREQ) / MAINF_DIV; + } + + static int clk_rm9200_main_prepare(struct clk_hw *hw) + { + struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw); + +- return clk_main_probe_frequency(clkmain->pmc); ++ return clk_main_probe_frequency(clkmain->regmap); + } + + static int clk_rm9200_main_is_prepared(struct clk_hw *hw) + { + struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw); ++ unsigned int status; ++ ++ regmap_read(clkmain->regmap, AT91_CKGR_MCFR, &status); + +- return !!(pmc_read(clkmain->pmc, AT91_CKGR_MCFR) & AT91_PMC_MAINRDY); ++ return status & AT91_PMC_MAINRDY ? 
1 : 0; + } + + static unsigned long clk_rm9200_main_recalc_rate(struct clk_hw *hw, +@@ -417,7 +386,7 @@ + { + struct clk_rm9200_main *clkmain = to_clk_rm9200_main(hw); + +- return clk_main_recalc_rate(clkmain->pmc, parent_rate); ++ return clk_main_recalc_rate(clkmain->regmap, parent_rate); + } + + static const struct clk_ops rm9200_main_ops = { +@@ -427,7 +396,7 @@ + }; + + static struct clk * __init +-at91_clk_register_rm9200_main(struct at91_pmc *pmc, ++at91_clk_register_rm9200_main(struct regmap *regmap, + const char *name, + const char *parent_name) + { +@@ -435,7 +404,7 @@ + struct clk *clk = NULL; + struct clk_init_data init; + +- if (!pmc || !name) ++ if (!name) + return ERR_PTR(-EINVAL); + + if (!parent_name) +@@ -452,7 +421,7 @@ + init.flags = 0; + + clkmain->hw.init = &init; +- clkmain->pmc = pmc; ++ clkmain->regmap = regmap; + + clk = clk_register(NULL, &clkmain->hw); + if (IS_ERR(clk)) +@@ -461,52 +430,54 @@ + return clk; + } + +-void __init of_at91rm9200_clk_main_setup(struct device_node *np, +- struct at91_pmc *pmc) ++static void __init of_at91rm9200_clk_main_setup(struct device_node *np) + { + struct clk *clk; + const char *parent_name; + const char *name = np->name; ++ struct regmap *regmap; + + parent_name = of_clk_get_parent_name(np, 0); + of_property_read_string(np, "clock-output-names", &name); + +- clk = at91_clk_register_rm9200_main(pmc, name, parent_name); ++ regmap = syscon_node_to_regmap(of_get_parent(np)); ++ if (IS_ERR(regmap)) ++ return; ++ ++ clk = at91_clk_register_rm9200_main(regmap, name, parent_name); + if (IS_ERR(clk)) + return; + + of_clk_add_provider(np, of_clk_src_simple_get, clk); + } ++CLK_OF_DECLARE(at91rm9200_clk_main, "atmel,at91rm9200-clk-main", ++ of_at91rm9200_clk_main_setup); + +-static irqreturn_t clk_sam9x5_main_irq_handler(int irq, void *dev_id) ++static inline bool clk_sam9x5_main_ready(struct regmap *regmap) + { +- struct clk_sam9x5_main *clkmain = dev_id; ++ unsigned int status; + +- wake_up(&clkmain->wait); +- disable_irq_nosync(clkmain->irq); ++ regmap_read(regmap, AT91_PMC_SR, &status); + +- return IRQ_HANDLED; ++ return status & AT91_PMC_MOSCSELS ? 
1 : 0; + } + + static int clk_sam9x5_main_prepare(struct clk_hw *hw) + { + struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw); +- struct at91_pmc *pmc = clkmain->pmc; ++ struct regmap *regmap = clkmain->regmap; + +- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS)) { +- enable_irq(clkmain->irq); +- wait_event(clkmain->wait, +- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS); +- } ++ while (!clk_sam9x5_main_ready(regmap)) ++ cpu_relax(); + +- return clk_main_probe_frequency(pmc); ++ return clk_main_probe_frequency(regmap); + } + + static int clk_sam9x5_main_is_prepared(struct clk_hw *hw) + { + struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw); + +- return !!(pmc_read(clkmain->pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS); ++ return clk_sam9x5_main_ready(clkmain->regmap); + } + + static unsigned long clk_sam9x5_main_recalc_rate(struct clk_hw *hw, +@@ -514,30 +485,28 @@ + { + struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw); + +- return clk_main_recalc_rate(clkmain->pmc, parent_rate); ++ return clk_main_recalc_rate(clkmain->regmap, parent_rate); + } + + static int clk_sam9x5_main_set_parent(struct clk_hw *hw, u8 index) + { + struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw); +- struct at91_pmc *pmc = clkmain->pmc; +- u32 tmp; ++ struct regmap *regmap = clkmain->regmap; ++ unsigned int tmp; + + if (index > 1) + return -EINVAL; + +- tmp = pmc_read(pmc, AT91_CKGR_MOR) & ~MOR_KEY_MASK; ++ regmap_read(regmap, AT91_CKGR_MOR, &tmp); ++ tmp &= ~MOR_KEY_MASK; + + if (index && !(tmp & AT91_PMC_MOSCSEL)) +- pmc_write(pmc, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL); ++ regmap_write(regmap, AT91_CKGR_MOR, tmp | AT91_PMC_MOSCSEL); + else if (!index && (tmp & AT91_PMC_MOSCSEL)) +- pmc_write(pmc, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL); ++ regmap_write(regmap, AT91_CKGR_MOR, tmp & ~AT91_PMC_MOSCSEL); + +- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS)) { +- enable_irq(clkmain->irq); +- wait_event(clkmain->wait, +- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MOSCSELS); +- } ++ while (!clk_sam9x5_main_ready(regmap)) ++ cpu_relax(); + + return 0; + } +@@ -545,8 +514,11 @@ + static u8 clk_sam9x5_main_get_parent(struct clk_hw *hw) + { + struct clk_sam9x5_main *clkmain = to_clk_sam9x5_main(hw); ++ unsigned int status; ++ ++ regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status); + +- return !!(pmc_read(clkmain->pmc, AT91_CKGR_MOR) & AT91_PMC_MOSCEN); ++ return status & AT91_PMC_MOSCEN ? 
1 : 0; + } + + static const struct clk_ops sam9x5_main_ops = { +@@ -558,18 +530,17 @@ + }; + + static struct clk * __init +-at91_clk_register_sam9x5_main(struct at91_pmc *pmc, +- unsigned int irq, ++at91_clk_register_sam9x5_main(struct regmap *regmap, + const char *name, + const char **parent_names, + int num_parents) + { +- int ret; + struct clk_sam9x5_main *clkmain; + struct clk *clk = NULL; + struct clk_init_data init; ++ unsigned int status; + +- if (!pmc || !irq || !name) ++ if (!name) + return ERR_PTR(-EINVAL); + + if (!parent_names || !num_parents) +@@ -586,51 +557,42 @@ + init.flags = CLK_SET_PARENT_GATE; + + clkmain->hw.init = &init; +- clkmain->pmc = pmc; +- clkmain->irq = irq; +- clkmain->parent = !!(pmc_read(clkmain->pmc, AT91_CKGR_MOR) & +- AT91_PMC_MOSCEN); +- init_waitqueue_head(&clkmain->wait); +- irq_set_status_flags(clkmain->irq, IRQ_NOAUTOEN); +- ret = request_irq(clkmain->irq, clk_sam9x5_main_irq_handler, +- IRQF_TRIGGER_HIGH, name, clkmain); +- if (ret) +- return ERR_PTR(ret); ++ clkmain->regmap = regmap; ++ regmap_read(clkmain->regmap, AT91_CKGR_MOR, &status); ++ clkmain->parent = status & AT91_PMC_MOSCEN ? 1 : 0; + + clk = clk_register(NULL, &clkmain->hw); +- if (IS_ERR(clk)) { +- free_irq(clkmain->irq, clkmain); ++ if (IS_ERR(clk)) + kfree(clkmain); +- } + + return clk; + } + +-void __init of_at91sam9x5_clk_main_setup(struct device_node *np, +- struct at91_pmc *pmc) ++static void __init of_at91sam9x5_clk_main_setup(struct device_node *np) + { + struct clk *clk; + const char *parent_names[2]; + int num_parents; +- unsigned int irq; + const char *name = np->name; ++ struct regmap *regmap; + + num_parents = of_clk_get_parent_count(np); + if (num_parents <= 0 || num_parents > 2) + return; + + of_clk_parent_fill(np, parent_names, num_parents); ++ regmap = syscon_node_to_regmap(of_get_parent(np)); ++ if (IS_ERR(regmap)) ++ return; + + of_property_read_string(np, "clock-output-names", &name); + +- irq = irq_of_parse_and_map(np, 0); +- if (!irq) +- return; +- +- clk = at91_clk_register_sam9x5_main(pmc, irq, name, parent_names, ++ clk = at91_clk_register_sam9x5_main(regmap, name, parent_names, + num_parents); + if (IS_ERR(clk)) + return; + + of_clk_add_provider(np, of_clk_src_simple_get, clk); + } ++CLK_OF_DECLARE(at91sam9x5_clk_main, "atmel,at91sam9x5-clk-main", ++ of_at91sam9x5_clk_main_setup); +diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-master.c linux-4.4.13/drivers/clk/at91/clk-master.c +--- linux-4.4.13.orig/drivers/clk/at91/clk-master.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/clk/at91/clk-master.c 2016-06-22 21:55:07.998523867 +0200 +@@ -12,13 +12,8 @@ + #include + #include + #include +-#include +-#include +-#include +-#include +-#include +-#include +-#include ++#include ++#include + + #include "pmc.h" + +@@ -44,32 +39,26 @@ + + struct clk_master { + struct clk_hw hw; +- struct at91_pmc *pmc; +- unsigned int irq; +- wait_queue_head_t wait; ++ struct regmap *regmap; + const struct clk_master_layout *layout; + const struct clk_master_characteristics *characteristics; + }; + +-static irqreturn_t clk_master_irq_handler(int irq, void *dev_id) ++static inline bool clk_master_ready(struct regmap *regmap) + { +- struct clk_master *master = (struct clk_master *)dev_id; ++ unsigned int status; + +- wake_up(&master->wait); +- disable_irq_nosync(master->irq); ++ regmap_read(regmap, AT91_PMC_SR, &status); + +- return IRQ_HANDLED; ++ return status & AT91_PMC_MCKRDY ? 
1 : 0; + } ++ + static int clk_master_prepare(struct clk_hw *hw) + { + struct clk_master *master = to_clk_master(hw); +- struct at91_pmc *pmc = master->pmc; + +- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY)) { +- enable_irq(master->irq); +- wait_event(master->wait, +- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY); +- } ++ while (!clk_master_ready(master->regmap)) ++ cpu_relax(); + + return 0; + } +@@ -78,7 +67,7 @@ + { + struct clk_master *master = to_clk_master(hw); + +- return !!(pmc_read(master->pmc, AT91_PMC_SR) & AT91_PMC_MCKRDY); ++ return clk_master_ready(master->regmap); + } + + static unsigned long clk_master_recalc_rate(struct clk_hw *hw, +@@ -88,18 +77,16 @@ + u8 div; + unsigned long rate = parent_rate; + struct clk_master *master = to_clk_master(hw); +- struct at91_pmc *pmc = master->pmc; + const struct clk_master_layout *layout = master->layout; + const struct clk_master_characteristics *characteristics = + master->characteristics; +- u32 tmp; ++ unsigned int mckr; + +- pmc_lock(pmc); +- tmp = pmc_read(pmc, AT91_PMC_MCKR) & layout->mask; +- pmc_unlock(pmc); ++ regmap_read(master->regmap, AT91_PMC_MCKR, &mckr); ++ mckr &= layout->mask; + +- pres = (tmp >> layout->pres_shift) & MASTER_PRES_MASK; +- div = (tmp >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK; ++ pres = (mckr >> layout->pres_shift) & MASTER_PRES_MASK; ++ div = (mckr >> MASTER_DIV_SHIFT) & MASTER_DIV_MASK; + + if (characteristics->have_div3_pres && pres == MASTER_PRES_MAX) + rate /= 3; +@@ -119,9 +106,11 @@ + static u8 clk_master_get_parent(struct clk_hw *hw) + { + struct clk_master *master = to_clk_master(hw); +- struct at91_pmc *pmc = master->pmc; ++ unsigned int mckr; + +- return pmc_read(pmc, AT91_PMC_MCKR) & AT91_PMC_CSS; ++ regmap_read(master->regmap, AT91_PMC_MCKR, &mckr); ++ ++ return mckr & AT91_PMC_CSS; + } + + static const struct clk_ops master_ops = { +@@ -132,18 +121,17 @@ + }; + + static struct clk * __init +-at91_clk_register_master(struct at91_pmc *pmc, unsigned int irq, ++at91_clk_register_master(struct regmap *regmap, + const char *name, int num_parents, + const char **parent_names, + const struct clk_master_layout *layout, + const struct clk_master_characteristics *characteristics) + { +- int ret; + struct clk_master *master; + struct clk *clk = NULL; + struct clk_init_data init; + +- if (!pmc || !irq || !name || !num_parents || !parent_names) ++ if (!name || !num_parents || !parent_names) + return ERR_PTR(-EINVAL); + + master = kzalloc(sizeof(*master), GFP_KERNEL); +@@ -159,20 +147,10 @@ + master->hw.init = &init; + master->layout = layout; + master->characteristics = characteristics; +- master->pmc = pmc; +- master->irq = irq; +- init_waitqueue_head(&master->wait); +- irq_set_status_flags(master->irq, IRQ_NOAUTOEN); +- ret = request_irq(master->irq, clk_master_irq_handler, +- IRQF_TRIGGER_HIGH, "clk-master", master); +- if (ret) { +- kfree(master); +- return ERR_PTR(ret); +- } ++ master->regmap = regmap; + + clk = clk_register(NULL, &master->hw); + if (IS_ERR(clk)) { +- free_irq(master->irq, master); + kfree(master); + } + +@@ -217,15 +195,15 @@ + } + + static void __init +-of_at91_clk_master_setup(struct device_node *np, struct at91_pmc *pmc, ++of_at91_clk_master_setup(struct device_node *np, + const struct clk_master_layout *layout) + { + struct clk *clk; + int num_parents; +- unsigned int irq; + const char *parent_names[MASTER_SOURCE_MAX]; + const char *name = np->name; + struct clk_master_characteristics *characteristics; ++ struct regmap *regmap; + + num_parents = 
of_clk_get_parent_count(np); + if (num_parents <= 0 || num_parents > MASTER_SOURCE_MAX) +@@ -239,11 +217,11 @@ + if (!characteristics) + return; + +- irq = irq_of_parse_and_map(np, 0); +- if (!irq) +- goto out_free_characteristics; ++ regmap = syscon_node_to_regmap(of_get_parent(np)); ++ if (IS_ERR(regmap)) ++ return; + +- clk = at91_clk_register_master(pmc, irq, name, num_parents, ++ clk = at91_clk_register_master(regmap, name, num_parents, + parent_names, layout, + characteristics); + if (IS_ERR(clk)) +@@ -256,14 +234,16 @@ + kfree(characteristics); + } + +-void __init of_at91rm9200_clk_master_setup(struct device_node *np, +- struct at91_pmc *pmc) ++static void __init of_at91rm9200_clk_master_setup(struct device_node *np) + { +- of_at91_clk_master_setup(np, pmc, &at91rm9200_master_layout); ++ of_at91_clk_master_setup(np, &at91rm9200_master_layout); + } ++CLK_OF_DECLARE(at91rm9200_clk_master, "atmel,at91rm9200-clk-master", ++ of_at91rm9200_clk_master_setup); + +-void __init of_at91sam9x5_clk_master_setup(struct device_node *np, +- struct at91_pmc *pmc) ++static void __init of_at91sam9x5_clk_master_setup(struct device_node *np) + { +- of_at91_clk_master_setup(np, pmc, &at91sam9x5_master_layout); ++ of_at91_clk_master_setup(np, &at91sam9x5_master_layout); + } ++CLK_OF_DECLARE(at91sam9x5_clk_master, "atmel,at91sam9x5-clk-master", ++ of_at91sam9x5_clk_master_setup); +diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-peripheral.c linux-4.4.13/drivers/clk/at91/clk-peripheral.c +--- linux-4.4.13.orig/drivers/clk/at91/clk-peripheral.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/clk/at91/clk-peripheral.c 2016-06-22 21:55:07.998523867 +0200 +@@ -12,11 +12,13 @@ + #include + #include + #include +-#include +-#include ++#include ++#include + + #include "pmc.h" + ++DEFINE_SPINLOCK(pmc_pcr_lock); ++ + #define PERIPHERAL_MAX 64 + + #define PERIPHERAL_AT91RM9200 0 +@@ -33,7 +35,7 @@ + + struct clk_peripheral { + struct clk_hw hw; +- struct at91_pmc *pmc; ++ struct regmap *regmap; + u32 id; + }; + +@@ -41,8 +43,9 @@ + + struct clk_sam9x5_peripheral { + struct clk_hw hw; +- struct at91_pmc *pmc; ++ struct regmap *regmap; + struct clk_range range; ++ spinlock_t *lock; + u32 id; + u32 div; + bool auto_div; +@@ -54,7 +57,6 @@ + static int clk_peripheral_enable(struct clk_hw *hw) + { + struct clk_peripheral *periph = to_clk_peripheral(hw); +- struct at91_pmc *pmc = periph->pmc; + int offset = AT91_PMC_PCER; + u32 id = periph->id; + +@@ -62,14 +64,14 @@ + return 0; + if (id > PERIPHERAL_ID_MAX) + offset = AT91_PMC_PCER1; +- pmc_write(pmc, offset, PERIPHERAL_MASK(id)); ++ regmap_write(periph->regmap, offset, PERIPHERAL_MASK(id)); ++ + return 0; + } + + static void clk_peripheral_disable(struct clk_hw *hw) + { + struct clk_peripheral *periph = to_clk_peripheral(hw); +- struct at91_pmc *pmc = periph->pmc; + int offset = AT91_PMC_PCDR; + u32 id = periph->id; + +@@ -77,21 +79,23 @@ + return; + if (id > PERIPHERAL_ID_MAX) + offset = AT91_PMC_PCDR1; +- pmc_write(pmc, offset, PERIPHERAL_MASK(id)); ++ regmap_write(periph->regmap, offset, PERIPHERAL_MASK(id)); + } + + static int clk_peripheral_is_enabled(struct clk_hw *hw) + { + struct clk_peripheral *periph = to_clk_peripheral(hw); +- struct at91_pmc *pmc = periph->pmc; + int offset = AT91_PMC_PCSR; ++ unsigned int status; + u32 id = periph->id; + + if (id < PERIPHERAL_ID_MIN) + return 1; + if (id > PERIPHERAL_ID_MAX) + offset = AT91_PMC_PCSR1; +- return !!(pmc_read(pmc, offset) & PERIPHERAL_MASK(id)); ++ regmap_read(periph->regmap, offset, &status); ++ 
++ return status & PERIPHERAL_MASK(id) ? 1 : 0; + } + + static const struct clk_ops peripheral_ops = { +@@ -101,14 +105,14 @@ + }; + + static struct clk * __init +-at91_clk_register_peripheral(struct at91_pmc *pmc, const char *name, ++at91_clk_register_peripheral(struct regmap *regmap, const char *name, + const char *parent_name, u32 id) + { + struct clk_peripheral *periph; + struct clk *clk = NULL; + struct clk_init_data init; + +- if (!pmc || !name || !parent_name || id > PERIPHERAL_ID_MAX) ++ if (!name || !parent_name || id > PERIPHERAL_ID_MAX) + return ERR_PTR(-EINVAL); + + periph = kzalloc(sizeof(*periph), GFP_KERNEL); +@@ -123,7 +127,7 @@ + + periph->id = id; + periph->hw.init = &init; +- periph->pmc = pmc; ++ periph->regmap = regmap; + + clk = clk_register(NULL, &periph->hw); + if (IS_ERR(clk)) +@@ -160,53 +164,58 @@ + static int clk_sam9x5_peripheral_enable(struct clk_hw *hw) + { + struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw); +- struct at91_pmc *pmc = periph->pmc; +- u32 tmp; ++ unsigned long flags; + + if (periph->id < PERIPHERAL_ID_MIN) + return 0; + +- pmc_lock(pmc); +- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK)); +- tmp = pmc_read(pmc, AT91_PMC_PCR) & ~AT91_PMC_PCR_DIV_MASK; +- pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_DIV(periph->div) +- | AT91_PMC_PCR_CMD +- | AT91_PMC_PCR_EN); +- pmc_unlock(pmc); ++ spin_lock_irqsave(periph->lock, flags); ++ regmap_write(periph->regmap, AT91_PMC_PCR, ++ (periph->id & AT91_PMC_PCR_PID_MASK)); ++ regmap_update_bits(periph->regmap, AT91_PMC_PCR, ++ AT91_PMC_PCR_DIV_MASK | AT91_PMC_PCR_CMD | ++ AT91_PMC_PCR_EN, ++ AT91_PMC_PCR_DIV(periph->div) | ++ AT91_PMC_PCR_CMD | ++ AT91_PMC_PCR_EN); ++ spin_unlock_irqrestore(periph->lock, flags); ++ + return 0; + } + + static void clk_sam9x5_peripheral_disable(struct clk_hw *hw) + { + struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw); +- struct at91_pmc *pmc = periph->pmc; +- u32 tmp; ++ unsigned long flags; + + if (periph->id < PERIPHERAL_ID_MIN) + return; + +- pmc_lock(pmc); +- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK)); +- tmp = pmc_read(pmc, AT91_PMC_PCR) & ~AT91_PMC_PCR_EN; +- pmc_write(pmc, AT91_PMC_PCR, tmp | AT91_PMC_PCR_CMD); +- pmc_unlock(pmc); ++ spin_lock_irqsave(periph->lock, flags); ++ regmap_write(periph->regmap, AT91_PMC_PCR, ++ (periph->id & AT91_PMC_PCR_PID_MASK)); ++ regmap_update_bits(periph->regmap, AT91_PMC_PCR, ++ AT91_PMC_PCR_EN | AT91_PMC_PCR_CMD, ++ AT91_PMC_PCR_CMD); ++ spin_unlock_irqrestore(periph->lock, flags); + } + + static int clk_sam9x5_peripheral_is_enabled(struct clk_hw *hw) + { + struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw); +- struct at91_pmc *pmc = periph->pmc; +- int ret; ++ unsigned long flags; ++ unsigned int status; + + if (periph->id < PERIPHERAL_ID_MIN) + return 1; + +- pmc_lock(pmc); +- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK)); +- ret = !!(pmc_read(pmc, AT91_PMC_PCR) & AT91_PMC_PCR_EN); +- pmc_unlock(pmc); ++ spin_lock_irqsave(periph->lock, flags); ++ regmap_write(periph->regmap, AT91_PMC_PCR, ++ (periph->id & AT91_PMC_PCR_PID_MASK)); ++ regmap_read(periph->regmap, AT91_PMC_PCR, &status); ++ spin_unlock_irqrestore(periph->lock, flags); + +- return ret; ++ return status & AT91_PMC_PCR_EN ? 
1 : 0; + } + + static unsigned long +@@ -214,19 +223,20 @@ + unsigned long parent_rate) + { + struct clk_sam9x5_peripheral *periph = to_clk_sam9x5_peripheral(hw); +- struct at91_pmc *pmc = periph->pmc; +- u32 tmp; ++ unsigned long flags; ++ unsigned int status; + + if (periph->id < PERIPHERAL_ID_MIN) + return parent_rate; + +- pmc_lock(pmc); +- pmc_write(pmc, AT91_PMC_PCR, (periph->id & AT91_PMC_PCR_PID_MASK)); +- tmp = pmc_read(pmc, AT91_PMC_PCR); +- pmc_unlock(pmc); ++ spin_lock_irqsave(periph->lock, flags); ++ regmap_write(periph->regmap, AT91_PMC_PCR, ++ (periph->id & AT91_PMC_PCR_PID_MASK)); ++ regmap_read(periph->regmap, AT91_PMC_PCR, &status); ++ spin_unlock_irqrestore(periph->lock, flags); + +- if (tmp & AT91_PMC_PCR_EN) { +- periph->div = PERIPHERAL_RSHIFT(tmp); ++ if (status & AT91_PMC_PCR_EN) { ++ periph->div = PERIPHERAL_RSHIFT(status); + periph->auto_div = false; + } else { + clk_sam9x5_peripheral_autodiv(periph); +@@ -318,15 +328,15 @@ + }; + + static struct clk * __init +-at91_clk_register_sam9x5_peripheral(struct at91_pmc *pmc, const char *name, +- const char *parent_name, u32 id, +- const struct clk_range *range) ++at91_clk_register_sam9x5_peripheral(struct regmap *regmap, spinlock_t *lock, ++ const char *name, const char *parent_name, ++ u32 id, const struct clk_range *range) + { + struct clk_sam9x5_peripheral *periph; + struct clk *clk = NULL; + struct clk_init_data init; + +- if (!pmc || !name || !parent_name) ++ if (!name || !parent_name) + return ERR_PTR(-EINVAL); + + periph = kzalloc(sizeof(*periph), GFP_KERNEL); +@@ -342,7 +352,8 @@ + periph->id = id; + periph->hw.init = &init; + periph->div = 0; +- periph->pmc = pmc; ++ periph->regmap = regmap; ++ periph->lock = lock; + periph->auto_div = true; + periph->range = *range; + +@@ -356,7 +367,7 @@ + } + + static void __init +-of_at91_clk_periph_setup(struct device_node *np, struct at91_pmc *pmc, u8 type) ++of_at91_clk_periph_setup(struct device_node *np, u8 type) + { + int num; + u32 id; +@@ -364,6 +375,7 @@ + const char *parent_name; + const char *name; + struct device_node *periphclknp; ++ struct regmap *regmap; + + parent_name = of_clk_get_parent_name(np, 0); + if (!parent_name) +@@ -373,6 +385,10 @@ + if (!num || num > PERIPHERAL_MAX) + return; + ++ regmap = syscon_node_to_regmap(of_get_parent(np)); ++ if (IS_ERR(regmap)) ++ return; ++ + for_each_child_of_node(np, periphclknp) { + if (of_property_read_u32(periphclknp, "reg", &id)) + continue; +@@ -384,7 +400,7 @@ + name = periphclknp->name; + + if (type == PERIPHERAL_AT91RM9200) { +- clk = at91_clk_register_peripheral(pmc, name, ++ clk = at91_clk_register_peripheral(regmap, name, + parent_name, id); + } else { + struct clk_range range = CLK_RANGE(0, 0); +@@ -393,7 +409,9 @@ + "atmel,clk-output-range", + &range); + +- clk = at91_clk_register_sam9x5_peripheral(pmc, name, ++ clk = at91_clk_register_sam9x5_peripheral(regmap, ++ &pmc_pcr_lock, ++ name, + parent_name, + id, &range); + } +@@ -405,14 +423,16 @@ + } + } + +-void __init of_at91rm9200_clk_periph_setup(struct device_node *np, +- struct at91_pmc *pmc) ++static void __init of_at91rm9200_clk_periph_setup(struct device_node *np) + { +- of_at91_clk_periph_setup(np, pmc, PERIPHERAL_AT91RM9200); ++ of_at91_clk_periph_setup(np, PERIPHERAL_AT91RM9200); + } ++CLK_OF_DECLARE(at91rm9200_clk_periph, "atmel,at91rm9200-clk-peripheral", ++ of_at91rm9200_clk_periph_setup); + +-void __init of_at91sam9x5_clk_periph_setup(struct device_node *np, +- struct at91_pmc *pmc) ++static void __init of_at91sam9x5_clk_periph_setup(struct 
device_node *np) + { +- of_at91_clk_periph_setup(np, pmc, PERIPHERAL_AT91SAM9X5); ++ of_at91_clk_periph_setup(np, PERIPHERAL_AT91SAM9X5); + } ++CLK_OF_DECLARE(at91sam9x5_clk_periph, "atmel,at91sam9x5-clk-peripheral", ++ of_at91sam9x5_clk_periph_setup); +diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-pll.c linux-4.4.13/drivers/clk/at91/clk-pll.c +--- linux-4.4.13.orig/drivers/clk/at91/clk-pll.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/clk/at91/clk-pll.c 2016-06-22 21:55:07.998523867 +0200 +@@ -12,14 +12,8 @@ + #include + #include + #include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include ++#include ++#include + + #include "pmc.h" + +@@ -58,9 +52,7 @@ + + struct clk_pll { + struct clk_hw hw; +- struct at91_pmc *pmc; +- unsigned int irq; +- wait_queue_head_t wait; ++ struct regmap *regmap; + u8 id; + u8 div; + u8 range; +@@ -69,20 +61,19 @@ + const struct clk_pll_characteristics *characteristics; + }; + +-static irqreturn_t clk_pll_irq_handler(int irq, void *dev_id) ++static inline bool clk_pll_ready(struct regmap *regmap, int id) + { +- struct clk_pll *pll = (struct clk_pll *)dev_id; ++ unsigned int status; + +- wake_up(&pll->wait); +- disable_irq_nosync(pll->irq); ++ regmap_read(regmap, AT91_PMC_SR, &status); + +- return IRQ_HANDLED; ++ return status & PLL_STATUS_MASK(id) ? 1 : 0; + } + + static int clk_pll_prepare(struct clk_hw *hw) + { + struct clk_pll *pll = to_clk_pll(hw); +- struct at91_pmc *pmc = pll->pmc; ++ struct regmap *regmap = pll->regmap; + const struct clk_pll_layout *layout = pll->layout; + const struct clk_pll_characteristics *characteristics = + pll->characteristics; +@@ -90,39 +81,34 @@ + u32 mask = PLL_STATUS_MASK(id); + int offset = PLL_REG(id); + u8 out = 0; +- u32 pllr, icpr; ++ unsigned int pllr; ++ unsigned int status; + u8 div; + u16 mul; + +- pllr = pmc_read(pmc, offset); ++ regmap_read(regmap, offset, &pllr); + div = PLL_DIV(pllr); + mul = PLL_MUL(pllr, layout); + +- if ((pmc_read(pmc, AT91_PMC_SR) & mask) && ++ regmap_read(regmap, AT91_PMC_SR, &status); ++ if ((status & mask) && + (div == pll->div && mul == pll->mul)) + return 0; + + if (characteristics->out) + out = characteristics->out[pll->range]; +- if (characteristics->icpll) { +- icpr = pmc_read(pmc, AT91_PMC_PLLICPR) & ~PLL_ICPR_MASK(id); +- icpr |= (characteristics->icpll[pll->range] << +- PLL_ICPR_SHIFT(id)); +- pmc_write(pmc, AT91_PMC_PLLICPR, icpr); +- } + +- pllr &= ~layout->pllr_mask; +- pllr |= layout->pllr_mask & +- (pll->div | (PLL_MAX_COUNT << PLL_COUNT_SHIFT) | +- (out << PLL_OUT_SHIFT) | +- ((pll->mul & layout->mul_mask) << layout->mul_shift)); +- pmc_write(pmc, offset, pllr); +- +- while (!(pmc_read(pmc, AT91_PMC_SR) & mask)) { +- enable_irq(pll->irq); +- wait_event(pll->wait, +- pmc_read(pmc, AT91_PMC_SR) & mask); +- } ++ if (characteristics->icpll) ++ regmap_update_bits(regmap, AT91_PMC_PLLICPR, PLL_ICPR_MASK(id), ++ characteristics->icpll[pll->range] << PLL_ICPR_SHIFT(id)); ++ ++ regmap_update_bits(regmap, offset, layout->pllr_mask, ++ pll->div | (PLL_MAX_COUNT << PLL_COUNT_SHIFT) | ++ (out << PLL_OUT_SHIFT) | ++ ((pll->mul & layout->mul_mask) << layout->mul_shift)); ++ ++ while (!clk_pll_ready(regmap, pll->id)) ++ cpu_relax(); + + return 0; + } +@@ -130,32 +116,35 @@ + static int clk_pll_is_prepared(struct clk_hw *hw) + { + struct clk_pll *pll = to_clk_pll(hw); +- struct at91_pmc *pmc = pll->pmc; + +- return !!(pmc_read(pmc, AT91_PMC_SR) & +- PLL_STATUS_MASK(pll->id)); ++ return clk_pll_ready(pll->regmap, pll->id); + } + + 
static void clk_pll_unprepare(struct clk_hw *hw) + { + struct clk_pll *pll = to_clk_pll(hw); +- struct at91_pmc *pmc = pll->pmc; +- const struct clk_pll_layout *layout = pll->layout; +- int offset = PLL_REG(pll->id); +- u32 tmp = pmc_read(pmc, offset) & ~(layout->pllr_mask); ++ unsigned int mask = pll->layout->pllr_mask; + +- pmc_write(pmc, offset, tmp); ++ regmap_update_bits(pll->regmap, PLL_REG(pll->id), mask, ~mask); + } + + static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) + { + struct clk_pll *pll = to_clk_pll(hw); ++ unsigned int pllr; ++ u16 mul; ++ u8 div; ++ ++ regmap_read(pll->regmap, PLL_REG(pll->id), &pllr); ++ ++ div = PLL_DIV(pllr); ++ mul = PLL_MUL(pllr, pll->layout); + +- if (!pll->div || !pll->mul) ++ if (!div || !mul) + return 0; + +- return (parent_rate / pll->div) * (pll->mul + 1); ++ return (parent_rate / div) * (mul + 1); + } + + static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate, +@@ -308,7 +297,7 @@ + }; + + static struct clk * __init +-at91_clk_register_pll(struct at91_pmc *pmc, unsigned int irq, const char *name, ++at91_clk_register_pll(struct regmap *regmap, const char *name, + const char *parent_name, u8 id, + const struct clk_pll_layout *layout, + const struct clk_pll_characteristics *characteristics) +@@ -316,9 +305,8 @@ + struct clk_pll *pll; + struct clk *clk = NULL; + struct clk_init_data init; +- int ret; + int offset = PLL_REG(id); +- u32 tmp; ++ unsigned int pllr; + + if (id > PLL_MAX_ID) + return ERR_PTR(-EINVAL); +@@ -337,23 +325,13 @@ + pll->hw.init = &init; + pll->layout = layout; + pll->characteristics = characteristics; +- pll->pmc = pmc; +- pll->irq = irq; +- tmp = pmc_read(pmc, offset) & layout->pllr_mask; +- pll->div = PLL_DIV(tmp); +- pll->mul = PLL_MUL(tmp, layout); +- init_waitqueue_head(&pll->wait); +- irq_set_status_flags(pll->irq, IRQ_NOAUTOEN); +- ret = request_irq(pll->irq, clk_pll_irq_handler, IRQF_TRIGGER_HIGH, +- id ? 
"clk-pllb" : "clk-plla", pll); +- if (ret) { +- kfree(pll); +- return ERR_PTR(ret); +- } ++ pll->regmap = regmap; ++ regmap_read(regmap, offset, &pllr); ++ pll->div = PLL_DIV(pllr); ++ pll->mul = PLL_MUL(pllr, layout); + + clk = clk_register(NULL, &pll->hw); + if (IS_ERR(clk)) { +- free_irq(pll->irq, pll); + kfree(pll); + } + +@@ -483,12 +461,12 @@ + } + + static void __init +-of_at91_clk_pll_setup(struct device_node *np, struct at91_pmc *pmc, ++of_at91_clk_pll_setup(struct device_node *np, + const struct clk_pll_layout *layout) + { + u32 id; +- unsigned int irq; + struct clk *clk; ++ struct regmap *regmap; + const char *parent_name; + const char *name = np->name; + struct clk_pll_characteristics *characteristics; +@@ -500,15 +478,15 @@ + + of_property_read_string(np, "clock-output-names", &name); + +- characteristics = of_at91_clk_pll_get_characteristics(np); +- if (!characteristics) ++ regmap = syscon_node_to_regmap(of_get_parent(np)); ++ if (IS_ERR(regmap)) + return; + +- irq = irq_of_parse_and_map(np, 0); +- if (!irq) ++ characteristics = of_at91_clk_pll_get_characteristics(np); ++ if (!characteristics) + return; + +- clk = at91_clk_register_pll(pmc, irq, name, parent_name, id, layout, ++ clk = at91_clk_register_pll(regmap, name, parent_name, id, layout, + characteristics); + if (IS_ERR(clk)) + goto out_free_characteristics; +@@ -520,26 +498,30 @@ + kfree(characteristics); + } + +-void __init of_at91rm9200_clk_pll_setup(struct device_node *np, +- struct at91_pmc *pmc) ++static void __init of_at91rm9200_clk_pll_setup(struct device_node *np) + { +- of_at91_clk_pll_setup(np, pmc, &at91rm9200_pll_layout); ++ of_at91_clk_pll_setup(np, &at91rm9200_pll_layout); + } ++CLK_OF_DECLARE(at91rm9200_clk_pll, "atmel,at91rm9200-clk-pll", ++ of_at91rm9200_clk_pll_setup); + +-void __init of_at91sam9g45_clk_pll_setup(struct device_node *np, +- struct at91_pmc *pmc) ++static void __init of_at91sam9g45_clk_pll_setup(struct device_node *np) + { +- of_at91_clk_pll_setup(np, pmc, &at91sam9g45_pll_layout); ++ of_at91_clk_pll_setup(np, &at91sam9g45_pll_layout); + } ++CLK_OF_DECLARE(at91sam9g45_clk_pll, "atmel,at91sam9g45-clk-pll", ++ of_at91sam9g45_clk_pll_setup); + +-void __init of_at91sam9g20_clk_pllb_setup(struct device_node *np, +- struct at91_pmc *pmc) ++static void __init of_at91sam9g20_clk_pllb_setup(struct device_node *np) + { +- of_at91_clk_pll_setup(np, pmc, &at91sam9g20_pllb_layout); ++ of_at91_clk_pll_setup(np, &at91sam9g20_pllb_layout); + } ++CLK_OF_DECLARE(at91sam9g20_clk_pllb, "atmel,at91sam9g20-clk-pllb", ++ of_at91sam9g20_clk_pllb_setup); + +-void __init of_sama5d3_clk_pll_setup(struct device_node *np, +- struct at91_pmc *pmc) ++static void __init of_sama5d3_clk_pll_setup(struct device_node *np) + { +- of_at91_clk_pll_setup(np, pmc, &sama5d3_pll_layout); ++ of_at91_clk_pll_setup(np, &sama5d3_pll_layout); + } ++CLK_OF_DECLARE(sama5d3_clk_pll, "atmel,sama5d3-clk-pll", ++ of_sama5d3_clk_pll_setup); +diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-plldiv.c linux-4.4.13/drivers/clk/at91/clk-plldiv.c +--- linux-4.4.13.orig/drivers/clk/at91/clk-plldiv.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/clk/at91/clk-plldiv.c 2016-06-22 21:55:07.998523867 +0200 +@@ -12,8 +12,8 @@ + #include + #include + #include +-#include +-#include ++#include ++#include + + #include "pmc.h" + +@@ -21,16 +21,18 @@ + + struct clk_plldiv { + struct clk_hw hw; +- struct at91_pmc *pmc; ++ struct regmap *regmap; + }; + + static unsigned long clk_plldiv_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) 
+ { + struct clk_plldiv *plldiv = to_clk_plldiv(hw); +- struct at91_pmc *pmc = plldiv->pmc; ++ unsigned int mckr; + +- if (pmc_read(pmc, AT91_PMC_MCKR) & AT91_PMC_PLLADIV2) ++ regmap_read(plldiv->regmap, AT91_PMC_MCKR, &mckr); ++ ++ if (mckr & AT91_PMC_PLLADIV2) + return parent_rate / 2; + + return parent_rate; +@@ -57,18 +59,12 @@ + unsigned long parent_rate) + { + struct clk_plldiv *plldiv = to_clk_plldiv(hw); +- struct at91_pmc *pmc = plldiv->pmc; +- u32 tmp; + +- if (parent_rate != rate && (parent_rate / 2) != rate) ++ if ((parent_rate != rate) && (parent_rate / 2 != rate)) + return -EINVAL; + +- pmc_lock(pmc); +- tmp = pmc_read(pmc, AT91_PMC_MCKR) & ~AT91_PMC_PLLADIV2; +- if ((parent_rate / 2) == rate) +- tmp |= AT91_PMC_PLLADIV2; +- pmc_write(pmc, AT91_PMC_MCKR, tmp); +- pmc_unlock(pmc); ++ regmap_update_bits(plldiv->regmap, AT91_PMC_MCKR, AT91_PMC_PLLADIV2, ++ parent_rate != rate ? AT91_PMC_PLLADIV2 : 0); + + return 0; + } +@@ -80,7 +76,7 @@ + }; + + static struct clk * __init +-at91_clk_register_plldiv(struct at91_pmc *pmc, const char *name, ++at91_clk_register_plldiv(struct regmap *regmap, const char *name, + const char *parent_name) + { + struct clk_plldiv *plldiv; +@@ -98,7 +94,7 @@ + init.flags = CLK_SET_RATE_GATE; + + plldiv->hw.init = &init; +- plldiv->pmc = pmc; ++ plldiv->regmap = regmap; + + clk = clk_register(NULL, &plldiv->hw); + +@@ -109,27 +105,27 @@ + } + + static void __init +-of_at91_clk_plldiv_setup(struct device_node *np, struct at91_pmc *pmc) ++of_at91sam9x5_clk_plldiv_setup(struct device_node *np) + { + struct clk *clk; + const char *parent_name; + const char *name = np->name; ++ struct regmap *regmap; + + parent_name = of_clk_get_parent_name(np, 0); + + of_property_read_string(np, "clock-output-names", &name); + +- clk = at91_clk_register_plldiv(pmc, name, parent_name); ++ regmap = syscon_node_to_regmap(of_get_parent(np)); ++ if (IS_ERR(regmap)) ++ return; + ++ clk = at91_clk_register_plldiv(regmap, name, parent_name); + if (IS_ERR(clk)) + return; + + of_clk_add_provider(np, of_clk_src_simple_get, clk); + return; + } +- +-void __init of_at91sam9x5_clk_plldiv_setup(struct device_node *np, +- struct at91_pmc *pmc) +-{ +- of_at91_clk_plldiv_setup(np, pmc); +-} ++CLK_OF_DECLARE(at91sam9x5_clk_plldiv, "atmel,at91sam9x5-clk-plldiv", ++ of_at91sam9x5_clk_plldiv_setup); +diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-programmable.c linux-4.4.13/drivers/clk/at91/clk-programmable.c +--- linux-4.4.13.orig/drivers/clk/at91/clk-programmable.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/clk/at91/clk-programmable.c 2016-06-22 21:55:07.998523867 +0200 +@@ -12,10 +12,8 @@ + #include + #include + #include +-#include +-#include +-#include +-#include ++#include ++#include + + #include "pmc.h" + +@@ -24,6 +22,7 @@ + + #define PROG_STATUS_MASK(id) (1 << ((id) + 8)) + #define PROG_PRES_MASK 0x7 ++#define PROG_PRES(layout, pckr) ((pckr >> layout->pres_shift) & PROG_PRES_MASK) + #define PROG_MAX_RM9200_CSS 3 + + struct clk_programmable_layout { +@@ -34,7 +33,7 @@ + + struct clk_programmable { + struct clk_hw hw; +- struct at91_pmc *pmc; ++ struct regmap *regmap; + u8 id; + const struct clk_programmable_layout *layout; + }; +@@ -44,14 +43,12 @@ + static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) + { +- u32 pres; + struct clk_programmable *prog = to_clk_programmable(hw); +- struct at91_pmc *pmc = prog->pmc; +- const struct clk_programmable_layout *layout = prog->layout; ++ unsigned int pckr; ++ ++ 
regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr);
+
+- pres = (pmc_read(pmc, AT91_PMC_PCKR(prog->id)) >> layout->pres_shift) &
+- PROG_PRES_MASK;
+- return parent_rate >> pres;
++ return parent_rate >> PROG_PRES(prog->layout, pckr);
+ }
+
+ static int clk_programmable_determine_rate(struct clk_hw *hw,
+@@ -101,36 +98,36 @@
+ {
+ struct clk_programmable *prog = to_clk_programmable(hw);
+ const struct clk_programmable_layout *layout = prog->layout;
+- struct at91_pmc *pmc = prog->pmc;
+- u32 tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)) & ~layout->css_mask;
++ unsigned int mask = layout->css_mask;
++ unsigned int pckr = 0;
+
+ if (layout->have_slck_mck)
+- tmp &= AT91_PMC_CSSMCK_MCK;
++ mask |= AT91_PMC_CSSMCK_MCK;
+
+ if (index > layout->css_mask) {
+- if (index > PROG_MAX_RM9200_CSS && layout->have_slck_mck) {
+- tmp |= AT91_PMC_CSSMCK_MCK;
+- return 0;
+- } else {
++ if (index > PROG_MAX_RM9200_CSS && !layout->have_slck_mck)
+ return -EINVAL;
+- }
++
++ pckr |= AT91_PMC_CSSMCK_MCK;
+ }
+
+- pmc_write(pmc, AT91_PMC_PCKR(prog->id), tmp | index);
++ regmap_update_bits(prog->regmap, AT91_PMC_PCKR(prog->id), mask, pckr);
++
+ return 0;
+ }
+
+ static u8 clk_programmable_get_parent(struct clk_hw *hw)
+ {
+- u32 tmp;
+- u8 ret;
+ struct clk_programmable *prog = to_clk_programmable(hw);
+- struct at91_pmc *pmc = prog->pmc;
+ const struct clk_programmable_layout *layout = prog->layout;
++ unsigned int pckr;
++ u8 ret;
++
++ regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr);
++
++ ret = pckr & layout->css_mask;
+
+- tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id));
+- ret = tmp & layout->css_mask;
+- if (layout->have_slck_mck && (tmp & AT91_PMC_CSSMCK_MCK) && !ret)
++ if (layout->have_slck_mck && (pckr & AT91_PMC_CSSMCK_MCK) && !ret)
+ ret = PROG_MAX_RM9200_CSS + 1;
+
+ return ret;
+@@ -140,26 +137,27 @@
+ unsigned long parent_rate)
+ {
+ struct clk_programmable *prog = to_clk_programmable(hw);
+- struct at91_pmc *pmc = prog->pmc;
+ const struct clk_programmable_layout *layout = prog->layout;
+ unsigned long div = parent_rate / rate;
++ unsigned int pckr;
+ int shift = 0;
+- u32 tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)) &
+- ~(PROG_PRES_MASK << layout->pres_shift);
++
++ regmap_read(prog->regmap, AT91_PMC_PCKR(prog->id), &pckr);
+
+ if (!div)
+ return -EINVAL;
+
+ shift = fls(div) - 1;
+
+- if (div != (1<<shift))
++ if (div != (1 << shift))
+ return -EINVAL;
+
+ if (shift >= PROG_PRES_MASK)
+ return -EINVAL;
+
+- pmc_write(pmc, AT91_PMC_PCKR(prog->id),
+- tmp | (shift << layout->pres_shift));
++ regmap_update_bits(prog->regmap, AT91_PMC_PCKR(prog->id),
++ PROG_PRES_MASK << layout->pres_shift,
++ shift << layout->pres_shift);
+
+ return 0;
+ }
+@@ -173,7 +171,7 @@
+ };
+
+ static struct clk * __init
+-at91_clk_register_programmable(struct at91_pmc *pmc,
++at91_clk_register_programmable(struct regmap *regmap,
+ const char *name, const char **parent_names,
+ u8 num_parents, u8 id,
+ const struct clk_programmable_layout *layout)
+@@ -198,7 +196,7 @@
+ prog->id = id;
+ prog->layout = layout;
+ prog->hw.init = &init;
+- prog->pmc = pmc;
++ prog->regmap = regmap;
+
+ clk = clk_register(NULL, &prog->hw);
+ if (IS_ERR(clk))
+@@ -226,7 +224,7 @@
+ };
+
+ static void __init
+-of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc,
++of_at91_clk_prog_setup(struct device_node *np,
+ const struct clk_programmable_layout *layout)
+ {
+ int num;
+@@ -236,6 +234,7 @@
+ const char *parent_names[PROG_SOURCE_MAX];
+ const char *name;
+ struct device_node *progclknp;
++ struct regmap *regmap;
+
+ num_parents = of_clk_get_parent_count(np);
+ if (num_parents <=
0 || num_parents > PROG_SOURCE_MAX) +@@ -247,6 +246,10 @@ + if (!num || num > (PROG_ID_MAX + 1)) + return; + ++ regmap = syscon_node_to_regmap(of_get_parent(np)); ++ if (IS_ERR(regmap)) ++ return; ++ + for_each_child_of_node(np, progclknp) { + if (of_property_read_u32(progclknp, "reg", &id)) + continue; +@@ -254,7 +257,7 @@ + if (of_property_read_string(np, "clock-output-names", &name)) + name = progclknp->name; + +- clk = at91_clk_register_programmable(pmc, name, ++ clk = at91_clk_register_programmable(regmap, name, + parent_names, num_parents, + id, layout); + if (IS_ERR(clk)) +@@ -265,20 +268,23 @@ + } + + +-void __init of_at91rm9200_clk_prog_setup(struct device_node *np, +- struct at91_pmc *pmc) ++static void __init of_at91rm9200_clk_prog_setup(struct device_node *np) + { +- of_at91_clk_prog_setup(np, pmc, &at91rm9200_programmable_layout); ++ of_at91_clk_prog_setup(np, &at91rm9200_programmable_layout); + } ++CLK_OF_DECLARE(at91rm9200_clk_prog, "atmel,at91rm9200-clk-programmable", ++ of_at91rm9200_clk_prog_setup); + +-void __init of_at91sam9g45_clk_prog_setup(struct device_node *np, +- struct at91_pmc *pmc) ++static void __init of_at91sam9g45_clk_prog_setup(struct device_node *np) + { +- of_at91_clk_prog_setup(np, pmc, &at91sam9g45_programmable_layout); ++ of_at91_clk_prog_setup(np, &at91sam9g45_programmable_layout); + } ++CLK_OF_DECLARE(at91sam9g45_clk_prog, "atmel,at91sam9g45-clk-programmable", ++ of_at91sam9g45_clk_prog_setup); + +-void __init of_at91sam9x5_clk_prog_setup(struct device_node *np, +- struct at91_pmc *pmc) ++static void __init of_at91sam9x5_clk_prog_setup(struct device_node *np) + { +- of_at91_clk_prog_setup(np, pmc, &at91sam9x5_programmable_layout); ++ of_at91_clk_prog_setup(np, &at91sam9x5_programmable_layout); + } ++CLK_OF_DECLARE(at91sam9x5_clk_prog, "atmel,at91sam9x5-clk-programmable", ++ of_at91sam9x5_clk_prog_setup); +diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-slow.c linux-4.4.13/drivers/clk/at91/clk-slow.c +--- linux-4.4.13.orig/drivers/clk/at91/clk-slow.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/clk/at91/clk-slow.c 2016-06-22 21:55:07.998523867 +0200 +@@ -13,17 +13,11 @@ + #include + #include + #include +-#include + #include + #include + #include +-#include +-#include +-#include +-#include +-#include +-#include +-#include ++#include ++#include + + #include "pmc.h" + #include "sckc.h" +@@ -59,7 +53,7 @@ + + struct clk_sam9260_slow { + struct clk_hw hw; +- struct at91_pmc *pmc; ++ struct regmap *regmap; + }; + + #define to_clk_sam9260_slow(hw) container_of(hw, struct clk_sam9260_slow, hw) +@@ -393,8 +387,11 @@ + static u8 clk_sam9260_slow_get_parent(struct clk_hw *hw) + { + struct clk_sam9260_slow *slowck = to_clk_sam9260_slow(hw); ++ unsigned int status; + +- return !!(pmc_read(slowck->pmc, AT91_PMC_SR) & AT91_PMC_OSCSEL); ++ regmap_read(slowck->regmap, AT91_PMC_SR, &status); ++ ++ return status & AT91_PMC_OSCSEL ? 
1 : 0; + } + + static const struct clk_ops sam9260_slow_ops = { +@@ -402,7 +399,7 @@ + }; + + static struct clk * __init +-at91_clk_register_sam9260_slow(struct at91_pmc *pmc, ++at91_clk_register_sam9260_slow(struct regmap *regmap, + const char *name, + const char **parent_names, + int num_parents) +@@ -411,7 +408,7 @@ + struct clk *clk = NULL; + struct clk_init_data init; + +- if (!pmc || !name) ++ if (!name) + return ERR_PTR(-EINVAL); + + if (!parent_names || !num_parents) +@@ -428,7 +425,7 @@ + init.flags = 0; + + slowck->hw.init = &init; +- slowck->pmc = pmc; ++ slowck->regmap = regmap; + + clk = clk_register(NULL, &slowck->hw); + if (IS_ERR(clk)) +@@ -439,29 +436,34 @@ + return clk; + } + +-void __init of_at91sam9260_clk_slow_setup(struct device_node *np, +- struct at91_pmc *pmc) ++static void __init of_at91sam9260_clk_slow_setup(struct device_node *np) + { + struct clk *clk; + const char *parent_names[2]; + int num_parents; + const char *name = np->name; ++ struct regmap *regmap; + + num_parents = of_clk_get_parent_count(np); + if (num_parents != 2) + return; + + of_clk_parent_fill(np, parent_names, num_parents); ++ regmap = syscon_node_to_regmap(of_get_parent(np)); ++ if (IS_ERR(regmap)) ++ return; + + of_property_read_string(np, "clock-output-names", &name); + +- clk = at91_clk_register_sam9260_slow(pmc, name, parent_names, ++ clk = at91_clk_register_sam9260_slow(regmap, name, parent_names, + num_parents); + if (IS_ERR(clk)) + return; + + of_clk_add_provider(np, of_clk_src_simple_get, clk); + } ++CLK_OF_DECLARE(at91sam9260_clk_slow, "atmel,at91sam9260-clk-slow", ++ of_at91sam9260_clk_slow_setup); + + /* + * FIXME: All slow clk users are not properly claiming it (get + prepare + +diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-smd.c linux-4.4.13/drivers/clk/at91/clk-smd.c +--- linux-4.4.13.orig/drivers/clk/at91/clk-smd.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/clk/at91/clk-smd.c 2016-06-22 21:55:07.998523867 +0200 +@@ -12,8 +12,8 @@ + #include + #include + #include +-#include +-#include ++#include ++#include + + #include "pmc.h" + +@@ -24,7 +24,7 @@ + + struct at91sam9x5_clk_smd { + struct clk_hw hw; +- struct at91_pmc *pmc; ++ struct regmap *regmap; + }; + + #define to_at91sam9x5_clk_smd(hw) \ +@@ -33,13 +33,13 @@ + static unsigned long at91sam9x5_clk_smd_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) + { +- u32 tmp; +- u8 smddiv; + struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw); +- struct at91_pmc *pmc = smd->pmc; ++ unsigned int smdr; ++ u8 smddiv; ++ ++ regmap_read(smd->regmap, AT91_PMC_SMD, &smdr); ++ smddiv = (smdr & AT91_PMC_SMD_DIV) >> SMD_DIV_SHIFT; + +- tmp = pmc_read(pmc, AT91_PMC_SMD); +- smddiv = (tmp & AT91_PMC_SMD_DIV) >> SMD_DIV_SHIFT; + return parent_rate / (smddiv + 1); + } + +@@ -67,40 +67,38 @@ + + static int at91sam9x5_clk_smd_set_parent(struct clk_hw *hw, u8 index) + { +- u32 tmp; + struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw); +- struct at91_pmc *pmc = smd->pmc; + + if (index > 1) + return -EINVAL; +- tmp = pmc_read(pmc, AT91_PMC_SMD) & ~AT91_PMC_SMDS; +- if (index) +- tmp |= AT91_PMC_SMDS; +- pmc_write(pmc, AT91_PMC_SMD, tmp); ++ ++ regmap_update_bits(smd->regmap, AT91_PMC_SMD, AT91_PMC_SMDS, ++ index ? 
AT91_PMC_SMDS : 0); ++ + return 0; + } + + static u8 at91sam9x5_clk_smd_get_parent(struct clk_hw *hw) + { + struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw); +- struct at91_pmc *pmc = smd->pmc; ++ unsigned int smdr; + +- return pmc_read(pmc, AT91_PMC_SMD) & AT91_PMC_SMDS; ++ regmap_read(smd->regmap, AT91_PMC_SMD, &smdr); ++ ++ return smdr & AT91_PMC_SMDS; + } + + static int at91sam9x5_clk_smd_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) + { +- u32 tmp; + struct at91sam9x5_clk_smd *smd = to_at91sam9x5_clk_smd(hw); +- struct at91_pmc *pmc = smd->pmc; + unsigned long div = parent_rate / rate; + + if (parent_rate % rate || div < 1 || div > (SMD_MAX_DIV + 1)) + return -EINVAL; +- tmp = pmc_read(pmc, AT91_PMC_SMD) & ~AT91_PMC_SMD_DIV; +- tmp |= (div - 1) << SMD_DIV_SHIFT; +- pmc_write(pmc, AT91_PMC_SMD, tmp); ++ ++ regmap_update_bits(smd->regmap, AT91_PMC_SMD, AT91_PMC_SMD_DIV, ++ (div - 1) << SMD_DIV_SHIFT); + + return 0; + } +@@ -114,7 +112,7 @@ + }; + + static struct clk * __init +-at91sam9x5_clk_register_smd(struct at91_pmc *pmc, const char *name, ++at91sam9x5_clk_register_smd(struct regmap *regmap, const char *name, + const char **parent_names, u8 num_parents) + { + struct at91sam9x5_clk_smd *smd; +@@ -132,7 +130,7 @@ + init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE; + + smd->hw.init = &init; +- smd->pmc = pmc; ++ smd->regmap = regmap; + + clk = clk_register(NULL, &smd->hw); + if (IS_ERR(clk)) +@@ -141,13 +139,13 @@ + return clk; + } + +-void __init of_at91sam9x5_clk_smd_setup(struct device_node *np, +- struct at91_pmc *pmc) ++static void __init of_at91sam9x5_clk_smd_setup(struct device_node *np) + { + struct clk *clk; + int num_parents; + const char *parent_names[SMD_SOURCE_MAX]; + const char *name = np->name; ++ struct regmap *regmap; + + num_parents = of_clk_get_parent_count(np); + if (num_parents <= 0 || num_parents > SMD_SOURCE_MAX) +@@ -157,10 +155,16 @@ + + of_property_read_string(np, "clock-output-names", &name); + +- clk = at91sam9x5_clk_register_smd(pmc, name, parent_names, ++ regmap = syscon_node_to_regmap(of_get_parent(np)); ++ if (IS_ERR(regmap)) ++ return; ++ ++ clk = at91sam9x5_clk_register_smd(regmap, name, parent_names, + num_parents); + if (IS_ERR(clk)) + return; + + of_clk_add_provider(np, of_clk_src_simple_get, clk); + } ++CLK_OF_DECLARE(at91sam9x5_clk_smd, "atmel,at91sam9x5-clk-smd", ++ of_at91sam9x5_clk_smd_setup); +diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-system.c linux-4.4.13/drivers/clk/at91/clk-system.c +--- linux-4.4.13.orig/drivers/clk/at91/clk-system.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/clk/at91/clk-system.c 2016-06-22 21:55:07.998523867 +0200 +@@ -12,13 +12,8 @@ + #include + #include + #include +-#include +-#include +-#include +-#include +-#include +-#include +-#include ++#include ++#include + + #include "pmc.h" + +@@ -29,9 +24,7 @@ + #define to_clk_system(hw) container_of(hw, struct clk_system, hw) + struct clk_system { + struct clk_hw hw; +- struct at91_pmc *pmc; +- unsigned int irq; +- wait_queue_head_t wait; ++ struct regmap *regmap; + u8 id; + }; + +@@ -39,58 +32,54 @@ + { + return (id >= 8) && (id <= 15); + } +-static irqreturn_t clk_system_irq_handler(int irq, void *dev_id) ++ ++static inline bool clk_system_ready(struct regmap *regmap, int id) + { +- struct clk_system *sys = (struct clk_system *)dev_id; ++ unsigned int status; + +- wake_up(&sys->wait); +- disable_irq_nosync(sys->irq); ++ regmap_read(regmap, AT91_PMC_SR, &status); + +- return IRQ_HANDLED; ++ return status 
& (1 << id) ? 1 : 0; + } + + static int clk_system_prepare(struct clk_hw *hw) + { + struct clk_system *sys = to_clk_system(hw); +- struct at91_pmc *pmc = sys->pmc; +- u32 mask = 1 << sys->id; + +- pmc_write(pmc, AT91_PMC_SCER, mask); ++ regmap_write(sys->regmap, AT91_PMC_SCER, 1 << sys->id); + + if (!is_pck(sys->id)) + return 0; + +- while (!(pmc_read(pmc, AT91_PMC_SR) & mask)) { +- if (sys->irq) { +- enable_irq(sys->irq); +- wait_event(sys->wait, +- pmc_read(pmc, AT91_PMC_SR) & mask); +- } else +- cpu_relax(); +- } ++ while (!clk_system_ready(sys->regmap, sys->id)) ++ cpu_relax(); ++ + return 0; + } + + static void clk_system_unprepare(struct clk_hw *hw) + { + struct clk_system *sys = to_clk_system(hw); +- struct at91_pmc *pmc = sys->pmc; + +- pmc_write(pmc, AT91_PMC_SCDR, 1 << sys->id); ++ regmap_write(sys->regmap, AT91_PMC_SCDR, 1 << sys->id); + } + + static int clk_system_is_prepared(struct clk_hw *hw) + { + struct clk_system *sys = to_clk_system(hw); +- struct at91_pmc *pmc = sys->pmc; ++ unsigned int status; ++ ++ regmap_read(sys->regmap, AT91_PMC_SCSR, &status); + +- if (!(pmc_read(pmc, AT91_PMC_SCSR) & (1 << sys->id))) ++ if (!(status & (1 << sys->id))) + return 0; + + if (!is_pck(sys->id)) + return 1; + +- return !!(pmc_read(pmc, AT91_PMC_SR) & (1 << sys->id)); ++ regmap_read(sys->regmap, AT91_PMC_SR, &status); ++ ++ return status & (1 << sys->id) ? 1 : 0; + } + + static const struct clk_ops system_ops = { +@@ -100,13 +89,12 @@ + }; + + static struct clk * __init +-at91_clk_register_system(struct at91_pmc *pmc, const char *name, +- const char *parent_name, u8 id, int irq) ++at91_clk_register_system(struct regmap *regmap, const char *name, ++ const char *parent_name, u8 id) + { + struct clk_system *sys; + struct clk *clk = NULL; + struct clk_init_data init; +- int ret; + + if (!parent_name || id > SYSTEM_MAX_ID) + return ERR_PTR(-EINVAL); +@@ -123,44 +111,33 @@ + + sys->id = id; + sys->hw.init = &init; +- sys->pmc = pmc; +- sys->irq = irq; +- if (irq) { +- init_waitqueue_head(&sys->wait); +- irq_set_status_flags(sys->irq, IRQ_NOAUTOEN); +- ret = request_irq(sys->irq, clk_system_irq_handler, +- IRQF_TRIGGER_HIGH, name, sys); +- if (ret) { +- kfree(sys); +- return ERR_PTR(ret); +- } +- } ++ sys->regmap = regmap; + + clk = clk_register(NULL, &sys->hw); +- if (IS_ERR(clk)) { +- if (irq) +- free_irq(sys->irq, sys); ++ if (IS_ERR(clk)) + kfree(sys); +- } + + return clk; + } + +-static void __init +-of_at91_clk_sys_setup(struct device_node *np, struct at91_pmc *pmc) ++static void __init of_at91rm9200_clk_sys_setup(struct device_node *np) + { + int num; +- int irq = 0; + u32 id; + struct clk *clk; + const char *name; + struct device_node *sysclknp; + const char *parent_name; ++ struct regmap *regmap; + + num = of_get_child_count(np); + if (num > (SYSTEM_MAX_ID + 1)) + return; + ++ regmap = syscon_node_to_regmap(of_get_parent(np)); ++ if (IS_ERR(regmap)) ++ return; ++ + for_each_child_of_node(np, sysclknp) { + if (of_property_read_u32(sysclknp, "reg", &id)) + continue; +@@ -168,21 +145,14 @@ + if (of_property_read_string(np, "clock-output-names", &name)) + name = sysclknp->name; + +- if (is_pck(id)) +- irq = irq_of_parse_and_map(sysclknp, 0); +- + parent_name = of_clk_get_parent_name(sysclknp, 0); + +- clk = at91_clk_register_system(pmc, name, parent_name, id, irq); ++ clk = at91_clk_register_system(regmap, name, parent_name, id); + if (IS_ERR(clk)) + continue; + + of_clk_add_provider(sysclknp, of_clk_src_simple_get, clk); + } + } +- +-void __init of_at91rm9200_clk_sys_setup(struct 
device_node *np, +- struct at91_pmc *pmc) +-{ +- of_at91_clk_sys_setup(np, pmc); +-} ++CLK_OF_DECLARE(at91rm9200_clk_sys, "atmel,at91rm9200-clk-system", ++ of_at91rm9200_clk_sys_setup); +diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-usb.c linux-4.4.13/drivers/clk/at91/clk-usb.c +--- linux-4.4.13.orig/drivers/clk/at91/clk-usb.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/clk/at91/clk-usb.c 2016-06-22 21:55:07.998523867 +0200 +@@ -12,8 +12,8 @@ + #include + #include + #include +-#include +-#include ++#include ++#include + + #include "pmc.h" + +@@ -27,7 +27,7 @@ + + struct at91sam9x5_clk_usb { + struct clk_hw hw; +- struct at91_pmc *pmc; ++ struct regmap *regmap; + }; + + #define to_at91sam9x5_clk_usb(hw) \ +@@ -35,7 +35,7 @@ + + struct at91rm9200_clk_usb { + struct clk_hw hw; +- struct at91_pmc *pmc; ++ struct regmap *regmap; + u32 divisors[4]; + }; + +@@ -45,13 +45,12 @@ + static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw, + unsigned long parent_rate) + { +- u32 tmp; +- u8 usbdiv; + struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); +- struct at91_pmc *pmc = usb->pmc; ++ unsigned int usbr; ++ u8 usbdiv; + +- tmp = pmc_read(pmc, AT91_PMC_USB); +- usbdiv = (tmp & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT; ++ regmap_read(usb->regmap, AT91_PMC_USB, &usbr); ++ usbdiv = (usbr & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT; + + return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1)); + } +@@ -109,33 +108,31 @@ + + static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index) + { +- u32 tmp; + struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); +- struct at91_pmc *pmc = usb->pmc; + + if (index > 1) + return -EINVAL; +- tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_USBS; +- if (index) +- tmp |= AT91_PMC_USBS; +- pmc_write(pmc, AT91_PMC_USB, tmp); ++ ++ regmap_update_bits(usb->regmap, AT91_PMC_USB, AT91_PMC_USBS, ++ index ? 
AT91_PMC_USBS : 0); ++ + return 0; + } + + static u8 at91sam9x5_clk_usb_get_parent(struct clk_hw *hw) + { + struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); +- struct at91_pmc *pmc = usb->pmc; ++ unsigned int usbr; + +- return pmc_read(pmc, AT91_PMC_USB) & AT91_PMC_USBS; ++ regmap_read(usb->regmap, AT91_PMC_USB, &usbr); ++ ++ return usbr & AT91_PMC_USBS; + } + + static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) + { +- u32 tmp; + struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); +- struct at91_pmc *pmc = usb->pmc; + unsigned long div; + + if (!rate) +@@ -145,9 +142,8 @@ + if (div > SAM9X5_USB_MAX_DIV + 1 || !div) + return -EINVAL; + +- tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_OHCIUSBDIV; +- tmp |= (div - 1) << SAM9X5_USB_DIV_SHIFT; +- pmc_write(pmc, AT91_PMC_USB, tmp); ++ regmap_update_bits(usb->regmap, AT91_PMC_USB, AT91_PMC_OHCIUSBDIV, ++ (div - 1) << SAM9X5_USB_DIV_SHIFT); + + return 0; + } +@@ -163,28 +159,28 @@ + static int at91sam9n12_clk_usb_enable(struct clk_hw *hw) + { + struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); +- struct at91_pmc *pmc = usb->pmc; + +- pmc_write(pmc, AT91_PMC_USB, +- pmc_read(pmc, AT91_PMC_USB) | AT91_PMC_USBS); ++ regmap_update_bits(usb->regmap, AT91_PMC_USB, AT91_PMC_USBS, ++ AT91_PMC_USBS); ++ + return 0; + } + + static void at91sam9n12_clk_usb_disable(struct clk_hw *hw) + { + struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); +- struct at91_pmc *pmc = usb->pmc; + +- pmc_write(pmc, AT91_PMC_USB, +- pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_USBS); ++ regmap_update_bits(usb->regmap, AT91_PMC_USB, AT91_PMC_USBS, 0); + } + + static int at91sam9n12_clk_usb_is_enabled(struct clk_hw *hw) + { + struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); +- struct at91_pmc *pmc = usb->pmc; ++ unsigned int usbr; + +- return !!(pmc_read(pmc, AT91_PMC_USB) & AT91_PMC_USBS); ++ regmap_read(usb->regmap, AT91_PMC_USB, &usbr); ++ ++ return usbr & AT91_PMC_USBS; + } + + static const struct clk_ops at91sam9n12_usb_ops = { +@@ -197,7 +193,7 @@ + }; + + static struct clk * __init +-at91sam9x5_clk_register_usb(struct at91_pmc *pmc, const char *name, ++at91sam9x5_clk_register_usb(struct regmap *regmap, const char *name, + const char **parent_names, u8 num_parents) + { + struct at91sam9x5_clk_usb *usb; +@@ -216,7 +212,7 @@ + CLK_SET_RATE_PARENT; + + usb->hw.init = &init; +- usb->pmc = pmc; ++ usb->regmap = regmap; + + clk = clk_register(NULL, &usb->hw); + if (IS_ERR(clk)) +@@ -226,7 +222,7 @@ + } + + static struct clk * __init +-at91sam9n12_clk_register_usb(struct at91_pmc *pmc, const char *name, ++at91sam9n12_clk_register_usb(struct regmap *regmap, const char *name, + const char *parent_name) + { + struct at91sam9x5_clk_usb *usb; +@@ -244,7 +240,7 @@ + init.flags = CLK_SET_RATE_GATE | CLK_SET_RATE_PARENT; + + usb->hw.init = &init; +- usb->pmc = pmc; ++ usb->regmap = regmap; + + clk = clk_register(NULL, &usb->hw); + if (IS_ERR(clk)) +@@ -257,12 +253,12 @@ + unsigned long parent_rate) + { + struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw); +- struct at91_pmc *pmc = usb->pmc; +- u32 tmp; ++ unsigned int pllbr; + u8 usbdiv; + +- tmp = pmc_read(pmc, AT91_CKGR_PLLBR); +- usbdiv = (tmp & AT91_PMC_USBDIV) >> RM9200_USB_DIV_SHIFT; ++ regmap_read(usb->regmap, AT91_CKGR_PLLBR, &pllbr); ++ ++ usbdiv = (pllbr & AT91_PMC_USBDIV) >> RM9200_USB_DIV_SHIFT; + if (usb->divisors[usbdiv]) + return parent_rate / usb->divisors[usbdiv]; + +@@ -310,10 +306,8 @@ + static int 
at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate) + { +- u32 tmp; + int i; + struct at91rm9200_clk_usb *usb = to_at91rm9200_clk_usb(hw); +- struct at91_pmc *pmc = usb->pmc; + unsigned long div; + + if (!rate) +@@ -323,10 +317,10 @@ + + for (i = 0; i < RM9200_USB_DIV_TAB_SIZE; i++) { + if (usb->divisors[i] == div) { +- tmp = pmc_read(pmc, AT91_CKGR_PLLBR) & +- ~AT91_PMC_USBDIV; +- tmp |= i << RM9200_USB_DIV_SHIFT; +- pmc_write(pmc, AT91_CKGR_PLLBR, tmp); ++ regmap_update_bits(usb->regmap, AT91_CKGR_PLLBR, ++ AT91_PMC_USBDIV, ++ i << RM9200_USB_DIV_SHIFT); ++ + return 0; + } + } +@@ -341,7 +335,7 @@ + }; + + static struct clk * __init +-at91rm9200_clk_register_usb(struct at91_pmc *pmc, const char *name, ++at91rm9200_clk_register_usb(struct regmap *regmap, const char *name, + const char *parent_name, const u32 *divisors) + { + struct at91rm9200_clk_usb *usb; +@@ -359,7 +353,7 @@ + init.flags = CLK_SET_RATE_PARENT; + + usb->hw.init = &init; +- usb->pmc = pmc; ++ usb->regmap = regmap; + memcpy(usb->divisors, divisors, sizeof(usb->divisors)); + + clk = clk_register(NULL, &usb->hw); +@@ -369,13 +363,13 @@ + return clk; + } + +-void __init of_at91sam9x5_clk_usb_setup(struct device_node *np, +- struct at91_pmc *pmc) ++static void __init of_at91sam9x5_clk_usb_setup(struct device_node *np) + { + struct clk *clk; + int num_parents; + const char *parent_names[USB_SOURCE_MAX]; + const char *name = np->name; ++ struct regmap *regmap; + + num_parents = of_clk_get_parent_count(np); + if (num_parents <= 0 || num_parents > USB_SOURCE_MAX) +@@ -385,19 +379,26 @@ + + of_property_read_string(np, "clock-output-names", &name); + +- clk = at91sam9x5_clk_register_usb(pmc, name, parent_names, num_parents); ++ regmap = syscon_node_to_regmap(of_get_parent(np)); ++ if (IS_ERR(regmap)) ++ return; ++ ++ clk = at91sam9x5_clk_register_usb(regmap, name, parent_names, ++ num_parents); + if (IS_ERR(clk)) + return; + + of_clk_add_provider(np, of_clk_src_simple_get, clk); + } ++CLK_OF_DECLARE(at91sam9x5_clk_usb, "atmel,at91sam9x5-clk-usb", ++ of_at91sam9x5_clk_usb_setup); + +-void __init of_at91sam9n12_clk_usb_setup(struct device_node *np, +- struct at91_pmc *pmc) ++static void __init of_at91sam9n12_clk_usb_setup(struct device_node *np) + { + struct clk *clk; + const char *parent_name; + const char *name = np->name; ++ struct regmap *regmap; + + parent_name = of_clk_get_parent_name(np, 0); + if (!parent_name) +@@ -405,20 +406,26 @@ + + of_property_read_string(np, "clock-output-names", &name); + +- clk = at91sam9n12_clk_register_usb(pmc, name, parent_name); ++ regmap = syscon_node_to_regmap(of_get_parent(np)); ++ if (IS_ERR(regmap)) ++ return; ++ ++ clk = at91sam9n12_clk_register_usb(regmap, name, parent_name); + if (IS_ERR(clk)) + return; + + of_clk_add_provider(np, of_clk_src_simple_get, clk); + } ++CLK_OF_DECLARE(at91sam9n12_clk_usb, "atmel,at91sam9n12-clk-usb", ++ of_at91sam9n12_clk_usb_setup); + +-void __init of_at91rm9200_clk_usb_setup(struct device_node *np, +- struct at91_pmc *pmc) ++static void __init of_at91rm9200_clk_usb_setup(struct device_node *np) + { + struct clk *clk; + const char *parent_name; + const char *name = np->name; + u32 divisors[4] = {0, 0, 0, 0}; ++ struct regmap *regmap; + + parent_name = of_clk_get_parent_name(np, 0); + if (!parent_name) +@@ -430,9 +437,15 @@ + + of_property_read_string(np, "clock-output-names", &name); + +- clk = at91rm9200_clk_register_usb(pmc, name, parent_name, divisors); ++ regmap = syscon_node_to_regmap(of_get_parent(np)); ++ if 
(IS_ERR(regmap)) ++ return; ++ ++ clk = at91rm9200_clk_register_usb(regmap, name, parent_name, divisors); + if (IS_ERR(clk)) + return; + + of_clk_add_provider(np, of_clk_src_simple_get, clk); + } ++CLK_OF_DECLARE(at91rm9200_clk_usb, "atmel,at91rm9200-clk-usb", ++ of_at91rm9200_clk_usb_setup); +diff -Nur linux-4.4.13.orig/drivers/clk/at91/clk-utmi.c linux-4.4.13/drivers/clk/at91/clk-utmi.c +--- linux-4.4.13.orig/drivers/clk/at91/clk-utmi.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/clk/at91/clk-utmi.c 2016-06-22 21:55:07.998523867 +0200 +@@ -11,14 +11,9 @@ + #include + #include + #include +-#include +-#include + #include +-#include +-#include +-#include +-#include +-#include ++#include ++#include + + #include "pmc.h" + +@@ -26,37 +21,30 @@ + + struct clk_utmi { + struct clk_hw hw; +- struct at91_pmc *pmc; +- unsigned int irq; +- wait_queue_head_t wait; ++ struct regmap *regmap; + }; + + #define to_clk_utmi(hw) container_of(hw, struct clk_utmi, hw) + +-static irqreturn_t clk_utmi_irq_handler(int irq, void *dev_id) ++static inline bool clk_utmi_ready(struct regmap *regmap) + { +- struct clk_utmi *utmi = (struct clk_utmi *)dev_id; ++ unsigned int status; + +- wake_up(&utmi->wait); +- disable_irq_nosync(utmi->irq); ++ regmap_read(regmap, AT91_PMC_SR, &status); + +- return IRQ_HANDLED; ++ return status & AT91_PMC_LOCKU; + } + + static int clk_utmi_prepare(struct clk_hw *hw) + { + struct clk_utmi *utmi = to_clk_utmi(hw); +- struct at91_pmc *pmc = utmi->pmc; +- u32 tmp = pmc_read(pmc, AT91_CKGR_UCKR) | AT91_PMC_UPLLEN | +- AT91_PMC_UPLLCOUNT | AT91_PMC_BIASEN; +- +- pmc_write(pmc, AT91_CKGR_UCKR, tmp); +- +- while (!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU)) { +- enable_irq(utmi->irq); +- wait_event(utmi->wait, +- pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU); +- } ++ unsigned int uckr = AT91_PMC_UPLLEN | AT91_PMC_UPLLCOUNT | ++ AT91_PMC_BIASEN; ++ ++ regmap_update_bits(utmi->regmap, AT91_CKGR_UCKR, uckr, uckr); ++ ++ while (!clk_utmi_ready(utmi->regmap)) ++ cpu_relax(); + + return 0; + } +@@ -64,18 +52,15 @@ + static int clk_utmi_is_prepared(struct clk_hw *hw) + { + struct clk_utmi *utmi = to_clk_utmi(hw); +- struct at91_pmc *pmc = utmi->pmc; + +- return !!(pmc_read(pmc, AT91_PMC_SR) & AT91_PMC_LOCKU); ++ return clk_utmi_ready(utmi->regmap); + } + + static void clk_utmi_unprepare(struct clk_hw *hw) + { + struct clk_utmi *utmi = to_clk_utmi(hw); +- struct at91_pmc *pmc = utmi->pmc; +- u32 tmp = pmc_read(pmc, AT91_CKGR_UCKR) & ~AT91_PMC_UPLLEN; + +- pmc_write(pmc, AT91_CKGR_UCKR, tmp); ++ regmap_update_bits(utmi->regmap, AT91_CKGR_UCKR, AT91_PMC_UPLLEN, 0); + } + + static unsigned long clk_utmi_recalc_rate(struct clk_hw *hw, +@@ -93,10 +78,9 @@ + }; + + static struct clk * __init +-at91_clk_register_utmi(struct at91_pmc *pmc, unsigned int irq, ++at91_clk_register_utmi(struct regmap *regmap, + const char *name, const char *parent_name) + { +- int ret; + struct clk_utmi *utmi; + struct clk *clk = NULL; + struct clk_init_data init; +@@ -112,52 +96,36 @@ + init.flags = CLK_SET_RATE_GATE; + + utmi->hw.init = &init; +- utmi->pmc = pmc; +- utmi->irq = irq; +- init_waitqueue_head(&utmi->wait); +- irq_set_status_flags(utmi->irq, IRQ_NOAUTOEN); +- ret = request_irq(utmi->irq, clk_utmi_irq_handler, +- IRQF_TRIGGER_HIGH, "clk-utmi", utmi); +- if (ret) { +- kfree(utmi); +- return ERR_PTR(ret); +- } ++ utmi->regmap = regmap; + + clk = clk_register(NULL, &utmi->hw); +- if (IS_ERR(clk)) { +- free_irq(utmi->irq, utmi); ++ if (IS_ERR(clk)) + kfree(utmi); +- } + + return clk; + } + +-static void 
__init +-of_at91_clk_utmi_setup(struct device_node *np, struct at91_pmc *pmc) ++static void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np) + { +- unsigned int irq; + struct clk *clk; + const char *parent_name; + const char *name = np->name; ++ struct regmap *regmap; + + parent_name = of_clk_get_parent_name(np, 0); + + of_property_read_string(np, "clock-output-names", &name); + +- irq = irq_of_parse_and_map(np, 0); +- if (!irq) ++ regmap = syscon_node_to_regmap(of_get_parent(np)); ++ if (IS_ERR(regmap)) + return; + +- clk = at91_clk_register_utmi(pmc, irq, name, parent_name); ++ clk = at91_clk_register_utmi(regmap, name, parent_name); + if (IS_ERR(clk)) + return; + + of_clk_add_provider(np, of_clk_src_simple_get, clk); + return; + } +- +-void __init of_at91sam9x5_clk_utmi_setup(struct device_node *np, +- struct at91_pmc *pmc) +-{ +- of_at91_clk_utmi_setup(np, pmc); +-} ++CLK_OF_DECLARE(at91sam9x5_clk_utmi, "atmel,at91sam9x5-clk-utmi", ++ of_at91sam9x5_clk_utmi_setup); +diff -Nur linux-4.4.13.orig/drivers/clk/at91/pmc.c linux-4.4.13/drivers/clk/at91/pmc.c +--- linux-4.4.13.orig/drivers/clk/at91/pmc.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/clk/at91/pmc.c 2016-06-22 21:55:07.998523867 +0200 +@@ -12,36 +12,13 @@ + #include + #include + #include +-#include +-#include +-#include +-#include +-#include +-#include +-#include ++#include ++#include + + #include + + #include "pmc.h" + +-void __iomem *at91_pmc_base; +-EXPORT_SYMBOL_GPL(at91_pmc_base); +- +-void at91rm9200_idle(void) +-{ +- /* +- * Disable the processor clock. The processor will be automatically +- * re-enabled by an interrupt or by a reset. +- */ +- at91_pmc_write(AT91_PMC_SCDR, AT91_PMC_PCK); +-} +- +-void at91sam9_idle(void) +-{ +- at91_pmc_write(AT91_PMC_SCDR, AT91_PMC_PCK); +- cpu_do_idle(); +-} +- + int of_at91_get_clk_range(struct device_node *np, const char *propname, + struct clk_range *range) + { +@@ -64,402 +41,3 @@ + return 0; + } + EXPORT_SYMBOL_GPL(of_at91_get_clk_range); +- +-static void pmc_irq_mask(struct irq_data *d) +-{ +- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d); +- +- pmc_write(pmc, AT91_PMC_IDR, 1 << d->hwirq); +-} +- +-static void pmc_irq_unmask(struct irq_data *d) +-{ +- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d); +- +- pmc_write(pmc, AT91_PMC_IER, 1 << d->hwirq); +-} +- +-static int pmc_irq_set_type(struct irq_data *d, unsigned type) +-{ +- if (type != IRQ_TYPE_LEVEL_HIGH) { +- pr_warn("PMC: type not supported (support only IRQ_TYPE_LEVEL_HIGH type)\n"); +- return -EINVAL; +- } +- +- return 0; +-} +- +-static void pmc_irq_suspend(struct irq_data *d) +-{ +- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d); +- +- pmc->imr = pmc_read(pmc, AT91_PMC_IMR); +- pmc_write(pmc, AT91_PMC_IDR, pmc->imr); +-} +- +-static void pmc_irq_resume(struct irq_data *d) +-{ +- struct at91_pmc *pmc = irq_data_get_irq_chip_data(d); +- +- pmc_write(pmc, AT91_PMC_IER, pmc->imr); +-} +- +-static struct irq_chip pmc_irq = { +- .name = "PMC", +- .irq_disable = pmc_irq_mask, +- .irq_mask = pmc_irq_mask, +- .irq_unmask = pmc_irq_unmask, +- .irq_set_type = pmc_irq_set_type, +- .irq_suspend = pmc_irq_suspend, +- .irq_resume = pmc_irq_resume, +-}; +- +-static struct lock_class_key pmc_lock_class; +- +-static int pmc_irq_map(struct irq_domain *h, unsigned int virq, +- irq_hw_number_t hw) +-{ +- struct at91_pmc *pmc = h->host_data; +- +- irq_set_lockdep_class(virq, &pmc_lock_class); +- +- irq_set_chip_and_handler(virq, &pmc_irq, +- handle_level_irq); +- irq_set_chip_data(virq, pmc); +- +- 
return 0; +-} +- +-static int pmc_irq_domain_xlate(struct irq_domain *d, +- struct device_node *ctrlr, +- const u32 *intspec, unsigned int intsize, +- irq_hw_number_t *out_hwirq, +- unsigned int *out_type) +-{ +- struct at91_pmc *pmc = d->host_data; +- const struct at91_pmc_caps *caps = pmc->caps; +- +- if (WARN_ON(intsize < 1)) +- return -EINVAL; +- +- *out_hwirq = intspec[0]; +- +- if (!(caps->available_irqs & (1 << *out_hwirq))) +- return -EINVAL; +- +- *out_type = IRQ_TYPE_LEVEL_HIGH; +- +- return 0; +-} +- +-static const struct irq_domain_ops pmc_irq_ops = { +- .map = pmc_irq_map, +- .xlate = pmc_irq_domain_xlate, +-}; +- +-static irqreturn_t pmc_irq_handler(int irq, void *data) +-{ +- struct at91_pmc *pmc = (struct at91_pmc *)data; +- unsigned long sr; +- int n; +- +- sr = pmc_read(pmc, AT91_PMC_SR) & pmc_read(pmc, AT91_PMC_IMR); +- if (!sr) +- return IRQ_NONE; +- +- for_each_set_bit(n, &sr, BITS_PER_LONG) +- generic_handle_irq(irq_find_mapping(pmc->irqdomain, n)); +- +- return IRQ_HANDLED; +-} +- +-static const struct at91_pmc_caps at91rm9200_caps = { +- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB | +- AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY | +- AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY | +- AT91_PMC_PCK3RDY, +-}; +- +-static const struct at91_pmc_caps at91sam9260_caps = { +- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB | +- AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY | +- AT91_PMC_PCK1RDY, +-}; +- +-static const struct at91_pmc_caps at91sam9g45_caps = { +- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY | +- AT91_PMC_LOCKU | AT91_PMC_PCK0RDY | +- AT91_PMC_PCK1RDY, +-}; +- +-static const struct at91_pmc_caps at91sam9n12_caps = { +- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_LOCKB | +- AT91_PMC_MCKRDY | AT91_PMC_PCK0RDY | +- AT91_PMC_PCK1RDY | AT91_PMC_MOSCSELS | +- AT91_PMC_MOSCRCS | AT91_PMC_CFDEV, +-}; +- +-static const struct at91_pmc_caps at91sam9x5_caps = { +- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY | +- AT91_PMC_LOCKU | AT91_PMC_PCK0RDY | +- AT91_PMC_PCK1RDY | AT91_PMC_MOSCSELS | +- AT91_PMC_MOSCRCS | AT91_PMC_CFDEV, +-}; +- +-static const struct at91_pmc_caps sama5d2_caps = { +- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY | +- AT91_PMC_LOCKU | AT91_PMC_PCK0RDY | +- AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY | +- AT91_PMC_MOSCSELS | AT91_PMC_MOSCRCS | +- AT91_PMC_CFDEV | AT91_PMC_GCKRDY, +-}; +- +-static const struct at91_pmc_caps sama5d3_caps = { +- .available_irqs = AT91_PMC_MOSCS | AT91_PMC_LOCKA | AT91_PMC_MCKRDY | +- AT91_PMC_LOCKU | AT91_PMC_PCK0RDY | +- AT91_PMC_PCK1RDY | AT91_PMC_PCK2RDY | +- AT91_PMC_MOSCSELS | AT91_PMC_MOSCRCS | +- AT91_PMC_CFDEV, +-}; +- +-static struct at91_pmc *__init at91_pmc_init(struct device_node *np, +- void __iomem *regbase, int virq, +- const struct at91_pmc_caps *caps) +-{ +- struct at91_pmc *pmc; +- +- if (!regbase || !virq || !caps) +- return NULL; +- +- at91_pmc_base = regbase; +- +- pmc = kzalloc(sizeof(*pmc), GFP_KERNEL); +- if (!pmc) +- return NULL; +- +- spin_lock_init(&pmc->lock); +- pmc->regbase = regbase; +- pmc->virq = virq; +- pmc->caps = caps; +- +- pmc->irqdomain = irq_domain_add_linear(np, 32, &pmc_irq_ops, pmc); +- +- if (!pmc->irqdomain) +- goto out_free_pmc; +- +- pmc_write(pmc, AT91_PMC_IDR, 0xffffffff); +- if (request_irq(pmc->virq, pmc_irq_handler, +- IRQF_SHARED | IRQF_COND_SUSPEND, "pmc", pmc)) +- goto out_remove_irqdomain; +- +- return pmc; +- +-out_remove_irqdomain: +- irq_domain_remove(pmc->irqdomain); 
+-out_free_pmc: +- kfree(pmc); +- +- return NULL; +-} +- +-static const struct of_device_id pmc_clk_ids[] __initconst = { +- /* Slow oscillator */ +- { +- .compatible = "atmel,at91sam9260-clk-slow", +- .data = of_at91sam9260_clk_slow_setup, +- }, +- /* Main clock */ +- { +- .compatible = "atmel,at91rm9200-clk-main-osc", +- .data = of_at91rm9200_clk_main_osc_setup, +- }, +- { +- .compatible = "atmel,at91sam9x5-clk-main-rc-osc", +- .data = of_at91sam9x5_clk_main_rc_osc_setup, +- }, +- { +- .compatible = "atmel,at91rm9200-clk-main", +- .data = of_at91rm9200_clk_main_setup, +- }, +- { +- .compatible = "atmel,at91sam9x5-clk-main", +- .data = of_at91sam9x5_clk_main_setup, +- }, +- /* PLL clocks */ +- { +- .compatible = "atmel,at91rm9200-clk-pll", +- .data = of_at91rm9200_clk_pll_setup, +- }, +- { +- .compatible = "atmel,at91sam9g45-clk-pll", +- .data = of_at91sam9g45_clk_pll_setup, +- }, +- { +- .compatible = "atmel,at91sam9g20-clk-pllb", +- .data = of_at91sam9g20_clk_pllb_setup, +- }, +- { +- .compatible = "atmel,sama5d3-clk-pll", +- .data = of_sama5d3_clk_pll_setup, +- }, +- { +- .compatible = "atmel,at91sam9x5-clk-plldiv", +- .data = of_at91sam9x5_clk_plldiv_setup, +- }, +- /* Master clock */ +- { +- .compatible = "atmel,at91rm9200-clk-master", +- .data = of_at91rm9200_clk_master_setup, +- }, +- { +- .compatible = "atmel,at91sam9x5-clk-master", +- .data = of_at91sam9x5_clk_master_setup, +- }, +- /* System clocks */ +- { +- .compatible = "atmel,at91rm9200-clk-system", +- .data = of_at91rm9200_clk_sys_setup, +- }, +- /* Peripheral clocks */ +- { +- .compatible = "atmel,at91rm9200-clk-peripheral", +- .data = of_at91rm9200_clk_periph_setup, +- }, +- { +- .compatible = "atmel,at91sam9x5-clk-peripheral", +- .data = of_at91sam9x5_clk_periph_setup, +- }, +- /* Programmable clocks */ +- { +- .compatible = "atmel,at91rm9200-clk-programmable", +- .data = of_at91rm9200_clk_prog_setup, +- }, +- { +- .compatible = "atmel,at91sam9g45-clk-programmable", +- .data = of_at91sam9g45_clk_prog_setup, +- }, +- { +- .compatible = "atmel,at91sam9x5-clk-programmable", +- .data = of_at91sam9x5_clk_prog_setup, +- }, +- /* UTMI clock */ +-#if defined(CONFIG_HAVE_AT91_UTMI) +- { +- .compatible = "atmel,at91sam9x5-clk-utmi", +- .data = of_at91sam9x5_clk_utmi_setup, +- }, +-#endif +- /* USB clock */ +-#if defined(CONFIG_HAVE_AT91_USB_CLK) +- { +- .compatible = "atmel,at91rm9200-clk-usb", +- .data = of_at91rm9200_clk_usb_setup, +- }, +- { +- .compatible = "atmel,at91sam9x5-clk-usb", +- .data = of_at91sam9x5_clk_usb_setup, +- }, +- { +- .compatible = "atmel,at91sam9n12-clk-usb", +- .data = of_at91sam9n12_clk_usb_setup, +- }, +-#endif +- /* SMD clock */ +-#if defined(CONFIG_HAVE_AT91_SMD) +- { +- .compatible = "atmel,at91sam9x5-clk-smd", +- .data = of_at91sam9x5_clk_smd_setup, +- }, +-#endif +-#if defined(CONFIG_HAVE_AT91_H32MX) +- { +- .compatible = "atmel,sama5d4-clk-h32mx", +- .data = of_sama5d4_clk_h32mx_setup, +- }, +-#endif +-#if defined(CONFIG_HAVE_AT91_GENERATED_CLK) +- { +- .compatible = "atmel,sama5d2-clk-generated", +- .data = of_sama5d2_clk_generated_setup, +- }, +-#endif +- { /*sentinel*/ } +-}; +- +-static void __init of_at91_pmc_setup(struct device_node *np, +- const struct at91_pmc_caps *caps) +-{ +- struct at91_pmc *pmc; +- struct device_node *childnp; +- void (*clk_setup)(struct device_node *, struct at91_pmc *); +- const struct of_device_id *clk_id; +- void __iomem *regbase = of_iomap(np, 0); +- int virq; +- +- if (!regbase) +- return; +- +- virq = irq_of_parse_and_map(np, 0); +- if (!virq) +- return; +- +- 
pmc = at91_pmc_init(np, regbase, virq, caps); +- if (!pmc) +- return; +- for_each_child_of_node(np, childnp) { +- clk_id = of_match_node(pmc_clk_ids, childnp); +- if (!clk_id) +- continue; +- clk_setup = clk_id->data; +- clk_setup(childnp, pmc); +- } +-} +- +-static void __init of_at91rm9200_pmc_setup(struct device_node *np) +-{ +- of_at91_pmc_setup(np, &at91rm9200_caps); +-} +-CLK_OF_DECLARE(at91rm9200_clk_pmc, "atmel,at91rm9200-pmc", +- of_at91rm9200_pmc_setup); +- +-static void __init of_at91sam9260_pmc_setup(struct device_node *np) +-{ +- of_at91_pmc_setup(np, &at91sam9260_caps); +-} +-CLK_OF_DECLARE(at91sam9260_clk_pmc, "atmel,at91sam9260-pmc", +- of_at91sam9260_pmc_setup); +- +-static void __init of_at91sam9g45_pmc_setup(struct device_node *np) +-{ +- of_at91_pmc_setup(np, &at91sam9g45_caps); +-} +-CLK_OF_DECLARE(at91sam9g45_clk_pmc, "atmel,at91sam9g45-pmc", +- of_at91sam9g45_pmc_setup); +- +-static void __init of_at91sam9n12_pmc_setup(struct device_node *np) +-{ +- of_at91_pmc_setup(np, &at91sam9n12_caps); +-} +-CLK_OF_DECLARE(at91sam9n12_clk_pmc, "atmel,at91sam9n12-pmc", +- of_at91sam9n12_pmc_setup); +- +-static void __init of_at91sam9x5_pmc_setup(struct device_node *np) +-{ +- of_at91_pmc_setup(np, &at91sam9x5_caps); +-} +-CLK_OF_DECLARE(at91sam9x5_clk_pmc, "atmel,at91sam9x5-pmc", +- of_at91sam9x5_pmc_setup); +- +-static void __init of_sama5d2_pmc_setup(struct device_node *np) +-{ +- of_at91_pmc_setup(np, &sama5d2_caps); +-} +-CLK_OF_DECLARE(sama5d2_clk_pmc, "atmel,sama5d2-pmc", +- of_sama5d2_pmc_setup); +- +-static void __init of_sama5d3_pmc_setup(struct device_node *np) +-{ +- of_at91_pmc_setup(np, &sama5d3_caps); +-} +-CLK_OF_DECLARE(sama5d3_clk_pmc, "atmel,sama5d3-pmc", +- of_sama5d3_pmc_setup); +diff -Nur linux-4.4.13.orig/drivers/clk/at91/pmc.h linux-4.4.13/drivers/clk/at91/pmc.h +--- linux-4.4.13.orig/drivers/clk/at91/pmc.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/clk/at91/pmc.h 2016-06-22 21:55:07.998523867 +0200 +@@ -14,8 +14,11 @@ + + #include + #include ++#include + #include + ++extern spinlock_t pmc_pcr_lock; ++ + struct clk_range { + unsigned long min; + unsigned long max; +@@ -23,102 +26,7 @@ + + #define CLK_RANGE(MIN, MAX) {.min = MIN, .max = MAX,} + +-struct at91_pmc_caps { +- u32 available_irqs; +-}; +- +-struct at91_pmc { +- void __iomem *regbase; +- int virq; +- spinlock_t lock; +- const struct at91_pmc_caps *caps; +- struct irq_domain *irqdomain; +- u32 imr; +-}; +- +-static inline void pmc_lock(struct at91_pmc *pmc) +-{ +- spin_lock(&pmc->lock); +-} +- +-static inline void pmc_unlock(struct at91_pmc *pmc) +-{ +- spin_unlock(&pmc->lock); +-} +- +-static inline u32 pmc_read(struct at91_pmc *pmc, int offset) +-{ +- return readl(pmc->regbase + offset); +-} +- +-static inline void pmc_write(struct at91_pmc *pmc, int offset, u32 value) +-{ +- writel(value, pmc->regbase + offset); +-} +- + int of_at91_get_clk_range(struct device_node *np, const char *propname, + struct clk_range *range); + +-void of_at91sam9260_clk_slow_setup(struct device_node *np, +- struct at91_pmc *pmc); +- +-void of_at91rm9200_clk_main_osc_setup(struct device_node *np, +- struct at91_pmc *pmc); +-void of_at91sam9x5_clk_main_rc_osc_setup(struct device_node *np, +- struct at91_pmc *pmc); +-void of_at91rm9200_clk_main_setup(struct device_node *np, +- struct at91_pmc *pmc); +-void of_at91sam9x5_clk_main_setup(struct device_node *np, +- struct at91_pmc *pmc); +- +-void of_at91rm9200_clk_pll_setup(struct device_node *np, +- struct at91_pmc *pmc); +-void 
of_at91sam9g45_clk_pll_setup(struct device_node *np, +- struct at91_pmc *pmc); +-void of_at91sam9g20_clk_pllb_setup(struct device_node *np, +- struct at91_pmc *pmc); +-void of_sama5d3_clk_pll_setup(struct device_node *np, +- struct at91_pmc *pmc); +-void of_at91sam9x5_clk_plldiv_setup(struct device_node *np, +- struct at91_pmc *pmc); +- +-void of_at91rm9200_clk_master_setup(struct device_node *np, +- struct at91_pmc *pmc); +-void of_at91sam9x5_clk_master_setup(struct device_node *np, +- struct at91_pmc *pmc); +- +-void of_at91rm9200_clk_sys_setup(struct device_node *np, +- struct at91_pmc *pmc); +- +-void of_at91rm9200_clk_periph_setup(struct device_node *np, +- struct at91_pmc *pmc); +-void of_at91sam9x5_clk_periph_setup(struct device_node *np, +- struct at91_pmc *pmc); +- +-void of_at91rm9200_clk_prog_setup(struct device_node *np, +- struct at91_pmc *pmc); +-void of_at91sam9g45_clk_prog_setup(struct device_node *np, +- struct at91_pmc *pmc); +-void of_at91sam9x5_clk_prog_setup(struct device_node *np, +- struct at91_pmc *pmc); +- +-void of_at91sam9x5_clk_utmi_setup(struct device_node *np, +- struct at91_pmc *pmc); +- +-void of_at91rm9200_clk_usb_setup(struct device_node *np, +- struct at91_pmc *pmc); +-void of_at91sam9x5_clk_usb_setup(struct device_node *np, +- struct at91_pmc *pmc); +-void of_at91sam9n12_clk_usb_setup(struct device_node *np, +- struct at91_pmc *pmc); +- +-void of_at91sam9x5_clk_smd_setup(struct device_node *np, +- struct at91_pmc *pmc); +- +-void of_sama5d4_clk_h32mx_setup(struct device_node *np, +- struct at91_pmc *pmc); +- +-void of_sama5d2_clk_generated_setup(struct device_node *np, +- struct at91_pmc *pmc); +- + #endif /* __PMC_H_ */ +diff -Nur linux-4.4.13.orig/drivers/clocksource/tcb_clksrc.c linux-4.4.13/drivers/clocksource/tcb_clksrc.c +--- linux-4.4.13.orig/drivers/clocksource/tcb_clksrc.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/clocksource/tcb_clksrc.c 2016-06-22 21:55:07.998523867 +0200 +@@ -23,8 +23,7 @@ + * this 32 bit free-running counter. the second channel is not used. + * + * - The third channel may be used to provide a 16-bit clockevent +- * source, used in either periodic or oneshot mode. This runs +- * at 32 KiHZ, and can handle delays of up to two seconds. ++ * source, used in either periodic or oneshot mode. + * + * A boot clocksource and clockevent source are also currently needed, + * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so +@@ -74,6 +73,8 @@ + struct tc_clkevt_device { + struct clock_event_device clkevt; + struct clk *clk; ++ bool clk_enabled; ++ u32 freq; + void __iomem *regs; + }; + +@@ -82,15 +83,26 @@ + return container_of(clkevt, struct tc_clkevt_device, clkevt); + } + +-/* For now, we always use the 32K clock ... this optimizes for NO_HZ, +- * because using one of the divided clocks would usually mean the +- * tick rate can never be less than several dozen Hz (vs 0.5 Hz). +- * +- * A divided clock could be good for high resolution timers, since +- * 30.5 usec resolution can seem "low". 
+- */ + static u32 timer_clock; + ++static void tc_clk_disable(struct clock_event_device *d) ++{ ++ struct tc_clkevt_device *tcd = to_tc_clkevt(d); ++ ++ clk_disable(tcd->clk); ++ tcd->clk_enabled = false; ++} ++ ++static void tc_clk_enable(struct clock_event_device *d) ++{ ++ struct tc_clkevt_device *tcd = to_tc_clkevt(d); ++ ++ if (tcd->clk_enabled) ++ return; ++ clk_enable(tcd->clk); ++ tcd->clk_enabled = true; ++} ++ + static int tc_shutdown(struct clock_event_device *d) + { + struct tc_clkevt_device *tcd = to_tc_clkevt(d); +@@ -98,8 +110,14 @@ + + __raw_writel(0xff, regs + ATMEL_TC_REG(2, IDR)); + __raw_writel(ATMEL_TC_CLKDIS, regs + ATMEL_TC_REG(2, CCR)); ++ return 0; ++} ++ ++static int tc_shutdown_clk_off(struct clock_event_device *d) ++{ ++ tc_shutdown(d); + if (!clockevent_state_detached(d)) +- clk_disable(tcd->clk); ++ tc_clk_disable(d); + + return 0; + } +@@ -112,9 +130,9 @@ + if (clockevent_state_oneshot(d) || clockevent_state_periodic(d)) + tc_shutdown(d); + +- clk_enable(tcd->clk); ++ tc_clk_enable(d); + +- /* slow clock, count up to RC, then irq and stop */ ++ /* count up to RC, then irq and stop */ + __raw_writel(timer_clock | ATMEL_TC_CPCSTOP | ATMEL_TC_WAVE | + ATMEL_TC_WAVESEL_UP_AUTO, regs + ATMEL_TC_REG(2, CMR)); + __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); +@@ -134,12 +152,12 @@ + /* By not making the gentime core emulate periodic mode on top + * of oneshot, we get lower overhead and improved accuracy. + */ +- clk_enable(tcd->clk); ++ tc_clk_enable(d); + +- /* slow clock, count up to RC, then irq and restart */ ++ /* count up to RC, then irq and restart */ + __raw_writel(timer_clock | ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO, + regs + ATMEL_TC_REG(2, CMR)); +- __raw_writel((32768 + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); ++ __raw_writel((tcd->freq + HZ / 2) / HZ, tcaddr + ATMEL_TC_REG(2, RC)); + + /* Enable clock and interrupts on RC compare */ + __raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER)); +@@ -166,9 +184,13 @@ + .features = CLOCK_EVT_FEAT_PERIODIC | + CLOCK_EVT_FEAT_ONESHOT, + /* Should be lower than at91rm9200's system timer */ ++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK + .rating = 125, ++#else ++ .rating = 200, ++#endif + .set_next_event = tc_next_event, +- .set_state_shutdown = tc_shutdown, ++ .set_state_shutdown = tc_shutdown_clk_off, + .set_state_periodic = tc_set_periodic, + .set_state_oneshot = tc_set_oneshot, + }, +@@ -188,8 +210,9 @@ + return IRQ_NONE; + } + +-static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx) ++static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx) + { ++ unsigned divisor = atmel_tc_divisors[divisor_idx]; + int ret; + struct clk *t2_clk = tc->clk[2]; + int irq = tc->irq[2]; +@@ -210,7 +233,11 @@ + clkevt.regs = tc->regs; + clkevt.clk = t2_clk; + +- timer_clock = clk32k_divisor_idx; ++ timer_clock = divisor_idx; ++ if (!divisor) ++ clkevt.freq = 32768; ++ else ++ clkevt.freq = clk_get_rate(t2_clk) / divisor; + + clkevt.clkevt.cpumask = cpumask_of(0); + +@@ -221,7 +248,7 @@ + return ret; + } + +- clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff); ++ clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff); + + return ret; + } +@@ -358,7 +385,11 @@ + goto err_disable_t1; + + /* channel 2: periodic and oneshot timer support */ ++#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK + ret = setup_clkevents(tc, clk32k_divisor_idx); ++#else ++ ret = setup_clkevents(tc, best_divisor_idx); ++#endif + if (ret) + goto err_unregister_clksrc; + 
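The tcb_clksrc hunks above decouple the clockevent from the hardwired 32 kHz slow clock: unless CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK is set, the event source now runs from a divided master clock, so clkevt.freq and the RC compare value are computed rather than assumed. A minimal userspace sketch of that arithmetic follows; the clock rate and divisor are made-up example values, where the driver would obtain them from clk_get_rate() and atmel_tc_divisors[].

#include <stdio.h>

#define HZ 100	/* example tick rate, illustrative only */

int main(void)
{
	unsigned long t2_clk_rate = 132096000;	/* assumed MCK-derived rate */
	unsigned int divisor = 128;		/* assumed atmel_tc_divisors[] entry */
	unsigned int freq, rc;

	/* divisor == 0 selects the 32768 Hz slow clock, as in setup_clkevents() */
	freq = divisor ? t2_clk_rate / divisor : 32768;

	/* periodic mode: count up to RC, rounded to the nearest tick */
	rc = (freq + HZ / 2) / HZ;

	printf("clkevt.freq = %u Hz, RC = %u (must fit in 16 bits)\n", freq, rc);
	return 0;
}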
+diff -Nur linux-4.4.13.orig/drivers/clocksource/timer-atmel-pit.c linux-4.4.13/drivers/clocksource/timer-atmel-pit.c +--- linux-4.4.13.orig/drivers/clocksource/timer-atmel-pit.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/clocksource/timer-atmel-pit.c 2016-06-22 21:55:07.998523867 +0200 +@@ -46,6 +46,7 @@ + u32 cycle; + u32 cnt; + unsigned int irq; ++ bool irq_requested; + struct clk *mck; + }; + +@@ -96,15 +97,29 @@ + + /* disable irq, leaving the clocksource active */ + pit_write(data->base, AT91_PIT_MR, (data->cycle - 1) | AT91_PIT_PITEN); ++ if (data->irq_requested) { ++ free_irq(data->irq, data); ++ data->irq_requested = false; ++ } + return 0; + } + ++static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id); + /* + * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16) + */ + static int pit_clkevt_set_periodic(struct clock_event_device *dev) + { + struct pit_data *data = clkevt_to_pit_data(dev); ++ int ret; ++ ++ ret = request_irq(data->irq, at91sam926x_pit_interrupt, ++ IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, ++ "at91_tick", data); ++ if (ret) ++ panic(pr_fmt("Unable to setup IRQ\n")); ++ ++ data->irq_requested = true; + + /* update clocksource counter */ + data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR)); +@@ -181,7 +196,6 @@ + { + unsigned long pit_rate; + unsigned bits; +- int ret; + + /* + * Use our actual MCK to figure out how many MCK/16 ticks per +@@ -206,13 +220,6 @@ + data->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS; + clocksource_register_hz(&data->clksrc, pit_rate); + +- /* Set up irq handler */ +- ret = request_irq(data->irq, at91sam926x_pit_interrupt, +- IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, +- "at91_tick", data); +- if (ret) +- panic(pr_fmt("Unable to setup IRQ\n")); +- + /* Set up and register clockevents */ + data->clkevt.name = "pit"; + data->clkevt.features = CLOCK_EVT_FEAT_PERIODIC; +diff -Nur linux-4.4.13.orig/drivers/clocksource/timer-atmel-st.c linux-4.4.13/drivers/clocksource/timer-atmel-st.c +--- linux-4.4.13.orig/drivers/clocksource/timer-atmel-st.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/clocksource/timer-atmel-st.c 2016-06-22 21:55:07.998523867 +0200 +@@ -115,18 +115,29 @@ + last_crtr = read_CRTR(); + } + ++static int atmel_st_irq; ++ + static int clkevt32k_shutdown(struct clock_event_device *evt) + { + clkdev32k_disable_and_flush_irq(); + irqmask = 0; + regmap_write(regmap_st, AT91_ST_IER, irqmask); ++ free_irq(atmel_st_irq, regmap_st); + return 0; + } + + static int clkevt32k_set_oneshot(struct clock_event_device *dev) + { ++ int ret; ++ + clkdev32k_disable_and_flush_irq(); + ++ ret = request_irq(atmel_st_irq, at91rm9200_timer_interrupt, ++ IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, ++ "at91_tick", regmap_st); ++ if (ret) ++ panic(pr_fmt("Unable to setup IRQ\n")); ++ + /* + * ALM for oneshot irqs, set by next_event() + * before 32 seconds have passed. 
+@@ -139,8 +150,16 @@ + + static int clkevt32k_set_periodic(struct clock_event_device *dev) + { ++ int ret; ++ + clkdev32k_disable_and_flush_irq(); + ++ ret = request_irq(atmel_st_irq, at91rm9200_timer_interrupt, ++ IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, ++ "at91_tick", regmap_st); ++ if (ret) ++ panic(pr_fmt("Unable to setup IRQ\n")); ++ + /* PIT for periodic irqs; fixed rate of 1/HZ */ + irqmask = AT91_ST_PITS; + regmap_write(regmap_st, AT91_ST_PIMR, timer_latch); +@@ -198,7 +217,7 @@ + { + struct clk *sclk; + unsigned int sclk_rate, val; +- int irq, ret; ++ int ret; + + regmap_st = syscon_node_to_regmap(node); + if (IS_ERR(regmap_st)) +@@ -210,17 +229,10 @@ + regmap_read(regmap_st, AT91_ST_SR, &val); + + /* Get the interrupts property */ +- irq = irq_of_parse_and_map(node, 0); +- if (!irq) ++ atmel_st_irq = irq_of_parse_and_map(node, 0); ++ if (!atmel_st_irq) + panic(pr_fmt("Unable to get IRQ from DT\n")); + +- /* Make IRQs happen for the system timer */ +- ret = request_irq(irq, at91rm9200_timer_interrupt, +- IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, +- "at91_tick", regmap_st); +- if (ret) +- panic(pr_fmt("Unable to setup IRQ\n")); +- + sclk = of_clk_get(node, 0); + if (IS_ERR(sclk)) + panic(pr_fmt("Unable to get slow clock\n")); +diff -Nur linux-4.4.13.orig/drivers/cpufreq/Kconfig.x86 linux-4.4.13/drivers/cpufreq/Kconfig.x86 +--- linux-4.4.13.orig/drivers/cpufreq/Kconfig.x86 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/cpufreq/Kconfig.x86 2016-06-22 21:55:07.998523867 +0200 +@@ -123,7 +123,7 @@ + + config X86_POWERNOW_K8 + tristate "AMD Opteron/Athlon64 PowerNow!" +- depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ ++ depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ && !PREEMPT_RT_BASE + help + This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors. + Support for K10 and newer processors is now in acpi-cpufreq. 
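A recurring move in the at91 clk and timer hunks earlier in this file is replacing open-coded pmc_lock()/pmc_read()/pmc_write() read-modify-write sequences with regmap accessors, which serialize the update internally. A rough userspace model of the regmap_update_bits() semantics those hunks rely on; the register file, register index, and bit position below are stand-ins, not the real PMC layout:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[16];	/* stand-in for the PMC register window */

/* model: rewrite only the masked bits (the kernel does this under the
 * regmap's internal lock, against real MMIO/syscon registers) */
static void regmap_update_bits(unsigned int reg, uint32_t mask, uint32_t val)
{
	regs[reg] = (regs[reg] & ~mask) | (val & mask);
}

int main(void)
{
	const unsigned int MCKR = 3;		/* made-up register index */
	const uint32_t PLLADIV2 = 1u << 12;	/* made-up bit position */

	regs[MCKR] = 0x0002;
	regmap_update_bits(MCKR, PLLADIV2, PLLADIV2);	/* set the divider bit */
	printf("MCKR = 0x%04x\n", regs[MCKR]);
	regmap_update_bits(MCKR, PLLADIV2, 0);		/* clear it again */
	printf("MCKR = 0x%04x\n", regs[MCKR]);
	return 0;
}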
+diff -Nur linux-4.4.13.orig/drivers/cpuidle/coupled.c linux-4.4.13/drivers/cpuidle/coupled.c +--- linux-4.4.13.orig/drivers/cpuidle/coupled.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/cpuidle/coupled.c 2016-06-22 21:55:07.998523867 +0200 +@@ -119,7 +119,6 @@ + + #define CPUIDLE_COUPLED_NOT_IDLE (-1) + +-static DEFINE_MUTEX(cpuidle_coupled_lock); + static DEFINE_PER_CPU(struct call_single_data, cpuidle_coupled_poke_cb); + + /* +diff -Nur linux-4.4.13.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c linux-4.4.13/drivers/gpu/drm/i915/i915_gem_execbuffer.c +--- linux-4.4.13.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/gpu/drm/i915/i915_gem_execbuffer.c 2016-06-22 21:55:07.998523867 +0200 +@@ -1264,7 +1264,9 @@ + if (ret) + return ret; + ++#ifndef CONFIG_PREEMPT_RT_BASE + trace_i915_gem_ring_dispatch(params->request, params->dispatch_flags); ++#endif + + i915_gem_execbuffer_move_to_active(vmas, params->request); + i915_gem_execbuffer_retire_commands(params); +diff -Nur linux-4.4.13.orig/drivers/gpu/drm/i915/i915_gem_shrinker.c linux-4.4.13/drivers/gpu/drm/i915/i915_gem_shrinker.c +--- linux-4.4.13.orig/drivers/gpu/drm/i915/i915_gem_shrinker.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/gpu/drm/i915/i915_gem_shrinker.c 2016-06-22 21:55:07.998523867 +0200 +@@ -39,7 +39,7 @@ + if (!mutex_is_locked(mutex)) + return false; + +-#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) ++#if (defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES)) && !defined(CONFIG_PREEMPT_RT_BASE) + return mutex->owner == task; + #else + /* Since UP may be pre-empted, we cannot assume that we own the lock */ +diff -Nur linux-4.4.13.orig/drivers/gpu/drm/i915/i915_irq.c linux-4.4.13/drivers/gpu/drm/i915/i915_irq.c +--- linux-4.4.13.orig/drivers/gpu/drm/i915/i915_irq.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/gpu/drm/i915/i915_irq.c 2016-06-22 21:55:08.002523867 +0200 +@@ -812,6 +812,7 @@ + spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); + + /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */ ++ preempt_disable_rt(); + + /* Get optional system timestamp before query. */ + if (stime) +@@ -863,6 +864,7 @@ + *etime = ktime_get(); + + /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. 
*/
++ preempt_enable_rt();
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
+
+diff -Nur linux-4.4.13.orig/drivers/gpu/drm/i915/intel_display.c linux-4.4.13/drivers/gpu/drm/i915/intel_display.c
+--- linux-4.4.13.orig/drivers/gpu/drm/i915/intel_display.c 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/drivers/gpu/drm/i915/intel_display.c 2016-06-22 21:55:08.002523867 +0200
+@@ -11376,7 +11376,7 @@
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_unpin_work *work;
+
+- WARN_ON(!in_interrupt());
++ WARN_ON_NONRT(!in_interrupt());
+
+ if (crtc == NULL)
+ return;
+diff -Nur linux-4.4.13.orig/drivers/gpu/drm/i915/intel_sprite.c linux-4.4.13/drivers/gpu/drm/i915/intel_sprite.c
+--- linux-4.4.13.orig/drivers/gpu/drm/i915/intel_sprite.c 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/drivers/gpu/drm/i915/intel_sprite.c 2016-06-22 21:55:08.002523867 +0200
+@@ -38,6 +38,7 @@
+ #include "intel_drv.h"
+ #include <drm/i915_drm.h>
+ #include "i915_drv.h"
++#include <linux/locallock.h>
+
+ static bool
+ format_is_yuv(uint32_t format)
+@@ -64,6 +65,8 @@
+ 1000 * adjusted_mode->crtc_htotal);
+ }
+
++static DEFINE_LOCAL_IRQ_LOCK(pipe_update_lock);
++
+ /**
+ * intel_pipe_update_start() - start update of a set of display registers
+ * @crtc: the crtc of which the registers are going to be updated
+@@ -96,7 +99,7 @@
+ min = vblank_start - usecs_to_scanlines(adjusted_mode, 100);
+ max = vblank_start - 1;
+
+- local_irq_disable();
++ local_lock_irq(pipe_update_lock);
+
+ if (min <= 0 || max <= 0)
+ return;
+@@ -126,11 +129,11 @@
+ break;
+ }
+
+- local_irq_enable();
++ local_unlock_irq(pipe_update_lock);
+
+ timeout = schedule_timeout(timeout);
+
+- local_irq_disable();
++ local_lock_irq(pipe_update_lock);
+ }
+
+ finish_wait(wq, &wait);
+@@ -164,7 +167,7 @@
+
+ trace_i915_pipe_update_end(crtc, end_vbl_count, scanline_end);
+
+- local_irq_enable();
++ local_unlock_irq(pipe_update_lock);
+
+ if (crtc->debug.start_vbl_count &&
+ crtc->debug.start_vbl_count != end_vbl_count) {
+diff -Nur linux-4.4.13.orig/drivers/gpu/drm/radeon/radeon_display.c linux-4.4.13/drivers/gpu/drm/radeon/radeon_display.c
+--- linux-4.4.13.orig/drivers/gpu/drm/radeon/radeon_display.c 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/drivers/gpu/drm/radeon/radeon_display.c 2016-06-22 21:55:08.002523867 +0200
+@@ -1862,6 +1862,7 @@
+ struct radeon_device *rdev = dev->dev_private;
+
+ /* preempt_disable_rt() should go right here in PREEMPT_RT patchset. */
++ preempt_disable_rt();
+
+ /* Get optional system timestamp before query. */
+ if (stime)
+@@ -1954,6 +1955,7 @@
+ *etime = ktime_get();
+
+ /* preempt_enable_rt() should go right here in PREEMPT_RT patchset. */
++ preempt_enable_rt();
+
+ /* Decode into vertical and horizontal scanout position. 
*/ + *vpos = position & 0x1fff; +diff -Nur linux-4.4.13.orig/drivers/i2c/busses/i2c-omap.c linux-4.4.13/drivers/i2c/busses/i2c-omap.c +--- linux-4.4.13.orig/drivers/i2c/busses/i2c-omap.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/i2c/busses/i2c-omap.c 2016-06-22 21:55:08.006523867 +0200 +@@ -995,15 +995,12 @@ + u16 mask; + u16 stat; + +- spin_lock(&omap->lock); +- mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG); + stat = omap_i2c_read_reg(omap, OMAP_I2C_STAT_REG); ++ mask = omap_i2c_read_reg(omap, OMAP_I2C_IE_REG); + + if (stat & mask) + ret = IRQ_WAKE_THREAD; + +- spin_unlock(&omap->lock); +- + return ret; + } + +diff -Nur linux-4.4.13.orig/drivers/ide/alim15x3.c linux-4.4.13/drivers/ide/alim15x3.c +--- linux-4.4.13.orig/drivers/ide/alim15x3.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/ide/alim15x3.c 2016-06-22 21:55:08.006523867 +0200 +@@ -234,7 +234,7 @@ + + isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + if (m5229_revision < 0xC2) { + /* +@@ -325,7 +325,7 @@ + } + pci_dev_put(north); + pci_dev_put(isa_dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + return 0; + } + +diff -Nur linux-4.4.13.orig/drivers/ide/hpt366.c linux-4.4.13/drivers/ide/hpt366.c +--- linux-4.4.13.orig/drivers/ide/hpt366.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/ide/hpt366.c 2016-06-22 21:55:08.006523867 +0200 +@@ -1241,7 +1241,7 @@ + + dma_old = inb(base + 2); + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + dma_new = dma_old; + pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma); +@@ -1252,7 +1252,7 @@ + if (dma_new != dma_old) + outb(dma_new, base + 2); + +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n", + hwif->name, base, base + 7); +diff -Nur linux-4.4.13.orig/drivers/ide/ide-io.c linux-4.4.13/drivers/ide/ide-io.c +--- linux-4.4.13.orig/drivers/ide/ide-io.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/ide/ide-io.c 2016-06-22 21:55:08.006523867 +0200 +@@ -659,7 +659,7 @@ + /* disable_irq_nosync ?? */ + disable_irq(hwif->irq); + /* local CPU only, as if we were handling an interrupt */ +- local_irq_disable(); ++ local_irq_disable_nort(); + if (hwif->polling) { + startstop = handler(drive); + } else if (drive_is_ready(drive)) { +diff -Nur linux-4.4.13.orig/drivers/ide/ide-iops.c linux-4.4.13/drivers/ide/ide-iops.c +--- linux-4.4.13.orig/drivers/ide/ide-iops.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/ide/ide-iops.c 2016-06-22 21:55:08.006523867 +0200 +@@ -129,12 +129,12 @@ + if ((stat & ATA_BUSY) == 0) + break; + +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + *rstat = stat; + return -EBUSY; + } + } +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + /* + * Allow status to settle, then read it again. 
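All the local_irq_*_nort() conversions in the IDE hunks above (and in the gameport, IPoIB and network-driver hunks that follow) rely on helpers that another part of patch-realtime introduces; that part is not visible in this excerpt. The sketch below is an approximation of their shape, not the patch's literal text: on a non-RT kernel they compile to the ordinary IRQ-disabling primitives, while on PREEMPT_RT_FULL they degrade to barriers/flag snapshots so the section stays preemptible under threaded interrupt handling:

#include <linux/compiler.h>
#include <linux/irqflags.h>

/* Approximate shape of the *_nort helpers; details may differ. */
#ifdef CONFIG_PREEMPT_RT_FULL
# define local_irq_disable_nort()	barrier()
# define local_irq_enable_nort()	barrier()
# define local_irq_save_nort(flags)	\
	do { local_save_flags(flags); } while (0)
# define local_irq_restore_nort(flags)	(void)(flags)
#else
# define local_irq_disable_nort()	local_irq_disable()
# define local_irq_enable_nort()	local_irq_enable()
# define local_irq_save_nort(flags)	local_irq_save(flags)
# define local_irq_restore_nort(flags)	local_irq_restore(flags)
#endif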
+diff -Nur linux-4.4.13.orig/drivers/ide/ide-io-std.c linux-4.4.13/drivers/ide/ide-io-std.c +--- linux-4.4.13.orig/drivers/ide/ide-io-std.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/ide/ide-io-std.c 2016-06-22 21:55:08.006523867 +0200 +@@ -175,7 +175,7 @@ + unsigned long uninitialized_var(flags); + + if ((io_32bit & 2) && !mmio) { +- local_irq_save(flags); ++ local_irq_save_nort(flags); + ata_vlb_sync(io_ports->nsect_addr); + } + +@@ -186,7 +186,7 @@ + insl(data_addr, buf, words); + + if ((io_32bit & 2) && !mmio) +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + if (((len + 1) & 3) < 2) + return; +@@ -219,7 +219,7 @@ + unsigned long uninitialized_var(flags); + + if ((io_32bit & 2) && !mmio) { +- local_irq_save(flags); ++ local_irq_save_nort(flags); + ata_vlb_sync(io_ports->nsect_addr); + } + +@@ -230,7 +230,7 @@ + outsl(data_addr, buf, words); + + if ((io_32bit & 2) && !mmio) +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + if (((len + 1) & 3) < 2) + return; +diff -Nur linux-4.4.13.orig/drivers/ide/ide-probe.c linux-4.4.13/drivers/ide/ide-probe.c +--- linux-4.4.13.orig/drivers/ide/ide-probe.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/ide/ide-probe.c 2016-06-22 21:55:08.006523867 +0200 +@@ -196,10 +196,10 @@ + int bswap = 1; + + /* local CPU only; some systems need this */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + /* read 512 bytes of id info */ + hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + drive->dev_flags |= IDE_DFLAG_ID_READ; + #ifdef DEBUG +diff -Nur linux-4.4.13.orig/drivers/ide/ide-taskfile.c linux-4.4.13/drivers/ide/ide-taskfile.c +--- linux-4.4.13.orig/drivers/ide/ide-taskfile.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/ide/ide-taskfile.c 2016-06-22 21:55:08.006523867 +0200 +@@ -250,7 +250,7 @@ + + page_is_high = PageHighMem(page); + if (page_is_high) +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + buf = kmap_atomic(page) + offset; + +@@ -271,7 +271,7 @@ + kunmap_atomic(buf); + + if (page_is_high) +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + len -= nr_bytes; + } +@@ -414,7 +414,7 @@ + } + + if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0) +- local_irq_disable(); ++ local_irq_disable_nort(); + + ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE); + +diff -Nur linux-4.4.13.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c linux-4.4.13/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +--- linux-4.4.13.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/infiniband/ulp/ipoib/ipoib_multicast.c 2016-06-22 21:55:08.006523867 +0200 +@@ -857,7 +857,7 @@ + + ipoib_dbg_mcast(priv, "restarting multicast task\n"); + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + netif_addr_lock(dev); + spin_lock(&priv->lock); + +@@ -939,7 +939,7 @@ + + spin_unlock(&priv->lock); + netif_addr_unlock(dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + /* + * make sure the in-flight joins have finished before we attempt +diff -Nur linux-4.4.13.orig/drivers/input/gameport/gameport.c linux-4.4.13/drivers/input/gameport/gameport.c +--- linux-4.4.13.orig/drivers/input/gameport/gameport.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/input/gameport/gameport.c 2016-06-22 21:55:08.006523867 +0200 +@@ -91,13 +91,13 @@ + tx = ~0; + + for (i = 0; i < 50; i++) { +- local_irq_save(flags); ++ 
local_irq_save_nort(flags); + t1 = ktime_get_ns(); + for (t = 0; t < 50; t++) + gameport_read(gameport); + t2 = ktime_get_ns(); + t3 = ktime_get_ns(); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + udelay(i * 10); + t = (t2 - t1) - (t3 - t2); + if (t < tx) +@@ -124,12 +124,12 @@ + tx = 1 << 30; + + for(i = 0; i < 50; i++) { +- local_irq_save(flags); ++ local_irq_save_nort(flags); + GET_TIME(t1); + for (t = 0; t < 50; t++) gameport_read(gameport); + GET_TIME(t2); + GET_TIME(t3); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + udelay(i * 10); + if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t; + } +@@ -148,11 +148,11 @@ + tx = 1 << 30; + + for(i = 0; i < 50; i++) { +- local_irq_save(flags); ++ local_irq_save_nort(flags); + t1 = rdtsc(); + for (t = 0; t < 50; t++) gameport_read(gameport); + t2 = rdtsc(); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + udelay(i * 10); + if (t2 - t1 < tx) tx = t2 - t1; + } +diff -Nur linux-4.4.13.orig/drivers/iommu/amd_iommu.c linux-4.4.13/drivers/iommu/amd_iommu.c +--- linux-4.4.13.orig/drivers/iommu/amd_iommu.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/iommu/amd_iommu.c 2016-06-22 21:55:08.006523867 +0200 +@@ -2017,10 +2017,10 @@ + int ret; + + /* +- * Must be called with IRQs disabled. Warn here to detect early +- * when its not. ++ * Must be called with IRQs disabled on a non RT kernel. Warn here to ++ * detect early when its not. + */ +- WARN_ON(!irqs_disabled()); ++ WARN_ON_NONRT(!irqs_disabled()); + + /* lock domain */ + spin_lock(&domain->lock); +@@ -2183,10 +2183,10 @@ + struct protection_domain *domain; + + /* +- * Must be called with IRQs disabled. Warn here to detect early +- * when its not. ++ * Must be called with IRQs disabled on a non RT kernel. Warn here to ++ * detect early when its not. + */ +- WARN_ON(!irqs_disabled()); ++ WARN_ON_NONRT(!irqs_disabled()); + + if (WARN_ON(!dev_data->domain)) + return; +diff -Nur linux-4.4.13.orig/drivers/leds/trigger/Kconfig linux-4.4.13/drivers/leds/trigger/Kconfig +--- linux-4.4.13.orig/drivers/leds/trigger/Kconfig 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/leds/trigger/Kconfig 2016-06-22 21:55:08.006523867 +0200 +@@ -61,7 +61,7 @@ + + config LEDS_TRIGGER_CPU + bool "LED CPU Trigger" +- depends on LEDS_TRIGGERS ++ depends on LEDS_TRIGGERS && !PREEMPT_RT_BASE + help + This allows LEDs to be controlled by active CPUs. This shows + the active CPUs across an array of LEDs so you can see which +diff -Nur linux-4.4.13.orig/drivers/md/bcache/Kconfig linux-4.4.13/drivers/md/bcache/Kconfig +--- linux-4.4.13.orig/drivers/md/bcache/Kconfig 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/md/bcache/Kconfig 2016-06-22 21:55:08.006523867 +0200 +@@ -1,6 +1,7 @@ + + config BCACHE + tristate "Block device as cache" ++ depends on !PREEMPT_RT_FULL + ---help--- + Allows a block device to be used as cache for other devices; uses + a btree for indexing and the layout is optimized for SSDs. 
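The WARN_ON_NONRT() in the amd_iommu hunk above, and the BUG_ON_NONRT() in the dm.c hunk that follows, come from assertion variants defined elsewhere in patch-realtime. The idea, sketched approximately here (the real definitions may differ in detail): the invariant "interrupts are disabled at this point" only holds on non-RT kernels, so the check must compile away when PREEMPT_RT_BASE is enabled:

#include <linux/bug.h>

/* Approximate shape of the NONRT assertion variants used by this patch. */
#ifdef CONFIG_PREEMPT_RT_BASE
# define WARN_ON_NONRT(condition)	do { } while (0)
# define BUG_ON_NONRT(condition)	do { } while (0)
#else
# define WARN_ON_NONRT(condition)	WARN_ON(condition)
# define BUG_ON_NONRT(condition)	BUG_ON(condition)
#endif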
+diff -Nur linux-4.4.13.orig/drivers/md/dm.c linux-4.4.13/drivers/md/dm.c +--- linux-4.4.13.orig/drivers/md/dm.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/md/dm.c 2016-06-22 21:55:08.006523867 +0200 +@@ -2127,7 +2127,7 @@ + /* Establish tio->ti before queuing work (map_tio_request) */ + tio->ti = ti; + queue_kthread_work(&md->kworker, &tio->work); +- BUG_ON(!irqs_disabled()); ++ BUG_ON_NONRT(!irqs_disabled()); + } + + goto out; +diff -Nur linux-4.4.13.orig/drivers/md/raid5.c linux-4.4.13/drivers/md/raid5.c +--- linux-4.4.13.orig/drivers/md/raid5.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/md/raid5.c 2016-06-22 21:55:08.010523867 +0200 +@@ -1920,8 +1920,9 @@ + struct raid5_percpu *percpu; + unsigned long cpu; + +- cpu = get_cpu(); ++ cpu = get_cpu_light(); + percpu = per_cpu_ptr(conf->percpu, cpu); ++ spin_lock(&percpu->lock); + if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { + ops_run_biofill(sh); + overlap_clear++; +@@ -1977,7 +1978,8 @@ + if (test_and_clear_bit(R5_Overlap, &dev->flags)) + wake_up(&sh->raid_conf->wait_for_overlap); + } +- put_cpu(); ++ spin_unlock(&percpu->lock); ++ put_cpu_light(); + } + + static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp) +@@ -6414,6 +6416,7 @@ + __func__, cpu); + break; + } ++ spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock); + } + put_online_cpus(); + +diff -Nur linux-4.4.13.orig/drivers/md/raid5.h linux-4.4.13/drivers/md/raid5.h +--- linux-4.4.13.orig/drivers/md/raid5.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/md/raid5.h 2016-06-22 21:55:08.010523867 +0200 +@@ -504,6 +504,7 @@ + int recovery_disabled; + /* per cpu variables */ + struct raid5_percpu { ++ spinlock_t lock; /* Protection for -RT */ + struct page *spare_page; /* Used when checking P/Q in raid6 */ + struct flex_array *scribble; /* space for constructing buffer + * lists and performing address +diff -Nur linux-4.4.13.orig/drivers/media/platform/vsp1/vsp1_video.c linux-4.4.13/drivers/media/platform/vsp1/vsp1_video.c +--- linux-4.4.13.orig/drivers/media/platform/vsp1/vsp1_video.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/media/platform/vsp1/vsp1_video.c 2016-06-22 21:55:08.010523867 +0200 +@@ -520,7 +520,7 @@ + bool stopped; + + spin_lock_irqsave(&pipe->irqlock, flags); +- stopped = pipe->state == VSP1_PIPELINE_STOPPED, ++ stopped = pipe->state == VSP1_PIPELINE_STOPPED; + spin_unlock_irqrestore(&pipe->irqlock, flags); + + return stopped; +diff -Nur linux-4.4.13.orig/drivers/misc/hwlat_detector.c linux-4.4.13/drivers/misc/hwlat_detector.c +--- linux-4.4.13.orig/drivers/misc/hwlat_detector.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.4.13/drivers/misc/hwlat_detector.c 2016-06-22 21:55:08.010523867 +0200 +@@ -0,0 +1,1240 @@ ++/* ++ * hwlat_detector.c - A simple Hardware Latency detector. ++ * ++ * Use this module to detect large system latencies induced by the behavior of ++ * certain underlying system hardware or firmware, independent of Linux itself. ++ * The code was developed originally to detect the presence of SMIs on Intel ++ * and AMD systems, although there is no dependency upon x86 herein. ++ * ++ * The classical example usage of this module is in detecting the presence of ++ * SMIs or System Management Interrupts on Intel and AMD systems. An SMI is a ++ * somewhat special form of hardware interrupt spawned from earlier CPU debug ++ * modes in which the (BIOS/EFI/etc.) 
firmware arranges for the South Bridge
++ * LPC (or other device) to generate a special interrupt under certain
++ * circumstances, for example, upon expiration of a special SMI timer device,
++ * due to certain external thermal readings, on certain I/O address accesses,
++ * and other situations. An SMI hits a special CPU pin, triggers a special
++ * SMI mode (complete with special memory map), and the OS is unaware.
++ *
++ * Although certain hardware-inducing latencies are necessary (for example,
++ * a modern system often requires an SMI handler for correct thermal control
++ * and remote management) they can wreak havoc upon any OS-level performance
++ * guarantees toward low-latency, especially when the OS is not even made
++ * aware of the presence of these interrupts. For this reason, we need a
++ * somewhat brute force mechanism to detect these interrupts. In this case,
++ * we do it by hogging all of the CPU(s) for configurable timer intervals,
++ * sampling the built-in CPU timer, looking for discontiguous readings.
++ *
++ * WARNING: This implementation necessarily introduces latencies. Therefore,
++ * you should NEVER use this module in a production environment
++ * requiring any kind of low-latency performance guarantee(s).
++ *
++ * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc.
++ *
++ * Includes useful feedback from Clark Williams
++ *
++ * This file is licensed under the terms of the GNU General Public
++ * License version 2. This program is licensed "as is" without any
++ * warranty of any kind, whether express or implied.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/ring_buffer.h>
++#include <linux/time.h>
++#include <linux/hrtimer.h>
++#include <linux/kthread.h>
++#include <linux/debugfs.h>
++#include <linux/seq_file.h>
++#include <linux/uaccess.h>
++#include <linux/version.h>
++#include <linux/delay.h>
++#include <linux/slab.h>
++#include <linux/trace_clock.h>
++
++#define BUF_SIZE_DEFAULT 262144UL /* 8K*(sizeof(entry)) */
++#define BUF_FLAGS (RB_FL_OVERWRITE) /* no block on full */
++#define U64STR_SIZE 22 /* 20 digits max */
++
++#define VERSION "1.0.0"
++#define BANNER "hwlat_detector: "
++#define DRVNAME "hwlat_detector"
++#define DEFAULT_SAMPLE_WINDOW 1000000 /* 1s */
++#define DEFAULT_SAMPLE_WIDTH 500000 /* 0.5s */
++#define DEFAULT_LAT_THRESHOLD 10 /* 10us */
++
++/* Module metadata */
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Jon Masters ");
++MODULE_DESCRIPTION("A simple hardware latency detector");
++MODULE_VERSION(VERSION);
++
++/* Module parameters */
++
++static int debug;
++static int enabled;
++static int threshold;
++
++module_param(debug, int, 0); /* enable debug */
++module_param(enabled, int, 0); /* enable detector */
++module_param(threshold, int, 0); /* latency threshold */
++
++/* Buffering and sampling */
++
++static struct ring_buffer *ring_buffer; /* sample buffer */
++static DEFINE_MUTEX(ring_buffer_mutex); /* lock changes */
++static unsigned long buf_size = BUF_SIZE_DEFAULT;
++static struct task_struct *kthread; /* sampling thread */
++
++/* DebugFS filesystem entries */
++
++static struct dentry *debug_dir; /* debugfs directory */
++static struct dentry *debug_max; /* maximum TSC delta */
++static struct dentry *debug_count; /* total detect count */
++static struct dentry *debug_sample_width; /* sample width us */
++static struct dentry *debug_sample_window; /* sample window us */
++static struct dentry *debug_sample; /* raw samples us */
++static struct dentry *debug_threshold; /* threshold us */
++static struct dentry *debug_enable; /* enable/disable */
++
++/* Individual samples and global state */
++
++struct sample; /* latency sample */
++struct data; /* Global state */
++
++/* Sampling functions */
++static int __buffer_add_sample(struct sample *sample);
++static struct sample *buffer_get_sample(struct sample *sample);
++
++/* Threading and state */
++static int kthread_fn(void *unused);
++static int start_kthread(void);
++static int stop_kthread(void);
++static void __reset_stats(void);
++static int init_stats(void);
++
++/* Debugfs interface */
++static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos, const u64 *entry);
++static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
++ size_t cnt, loff_t *ppos, u64 *entry);
++static int debug_sample_fopen(struct inode *inode, struct file *filp);
++static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos);
++static int debug_sample_release(struct inode *inode, struct file *filp);
++static int debug_enable_fopen(struct inode *inode, struct file *filp);
++static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos);
++static ssize_t debug_enable_fwrite(struct file *file,
++ const char __user *user_buffer,
++ size_t user_size, loff_t *offset);
++
++/* Initialization functions */
++static int init_debugfs(void);
++static void free_debugfs(void);
++static int detector_init(void);
++static void detector_exit(void);
++
++/* Individual latency samples are stored here when detected and packed into
++ * the ring_buffer circular buffer, where they are overwritten when
++ * more than buf_size/sizeof(sample) samples are received. */
++struct sample {
++ u64 seqnum; /* unique sequence */
++ u64 duration; /* ktime delta */
++ u64 outer_duration; /* ktime delta (outer loop) */
++ struct timespec timestamp; /* wall time */
++ unsigned long lost;
++};
++
++/* keep the global state somewhere. */
++static struct data {
++
++ struct mutex lock; /* protect changes */
++
++ u64 count; /* total since reset */
++ u64 max_sample; /* max hardware latency */
++ u64 threshold; /* sample threshold level */
++
++ u64 sample_window; /* total sampling window (on+off) */
++ u64 sample_width; /* active sampling portion of window */
++
++ atomic_t sample_open; /* whether the sample file is open */
++
++ wait_queue_head_t wq; /* waitqueue for new sample values */
++
++} data;
++
++/**
++ * __buffer_add_sample - add a new latency sample recording to the ring buffer
++ * @sample: The new latency sample value
++ *
++ * This receives a new latency sample and records it in a global ring buffer.
++ * No additional locking is used in this case.
++ */
++static int __buffer_add_sample(struct sample *sample)
++{
++ return ring_buffer_write(ring_buffer,
++ sizeof(struct sample), sample);
++}
++
++/**
++ * buffer_get_sample - remove a hardware latency sample from the ring buffer
++ * @sample: Pre-allocated storage for the sample
++ *
++ * This retrieves a hardware latency sample from the global circular buffer
++ */
++static struct sample *buffer_get_sample(struct sample *sample)
++{
++ struct ring_buffer_event *e = NULL;
++ struct sample *s = NULL;
++ unsigned int cpu = 0;
++
++ if (!sample)
++ return NULL;
++
++ mutex_lock(&ring_buffer_mutex);
++ for_each_online_cpu(cpu) {
++ e = ring_buffer_consume(ring_buffer, cpu, NULL, &sample->lost);
++ if (e)
++ break;
++ }
++
++ if (e) {
++ s = ring_buffer_event_data(e);
++ memcpy(sample, s, sizeof(struct sample));
++ } else
++ sample = NULL;
++ mutex_unlock(&ring_buffer_mutex);
++
++ return sample;
++}
++
++#ifndef CONFIG_TRACING
++#define time_type ktime_t
++#define time_get() ktime_get()
++#define time_to_us(x) ktime_to_us(x)
++#define time_sub(a, b) ktime_sub(a, b)
++#define init_time(a, b) (a).tv64 = b
++#define time_u64(a) ((a).tv64)
++#else
++#define time_type u64
++#define time_get() trace_clock_local()
++#define time_to_us(x) div_u64(x, 1000)
++#define time_sub(a, b) ((a) - (b))
++#define init_time(a, b) (a = b)
++#define time_u64(a) a
++#endif
++/**
++ * get_sample - sample the CPU TSC and look for likely hardware latencies
++ *
++ * Used to repeatedly capture the CPU TSC (or similar), looking for potential
++ * hardware-induced latency. Called with interrupts disabled and with
++ * data.lock held.
++ */
++static int get_sample(void)
++{
++ time_type start, t1, t2, last_t2;
++ s64 diff, total = 0;
++ u64 sample = 0;
++ u64 outer_sample = 0;
++ int ret = -1;
++
++ init_time(last_t2, 0);
++ start = time_get(); /* start timestamp */
++
++ do {
++
++ t1 = time_get(); /* we'll look for a discontinuity */
++ t2 = time_get();
++
++ if (time_u64(last_t2)) {
++ /* Check the delta from outer loop (t2 to next t1) */
++ diff = time_to_us(time_sub(t1, last_t2));
++ /* This shouldn't happen */
++ if (diff < 0) {
++ pr_err(BANNER "time running backwards\n");
++ goto out;
++ }
++ if (diff > outer_sample)
++ outer_sample = diff;
++ }
++ last_t2 = t2;
++
++ total = time_to_us(time_sub(t2, start)); /* sample width */
++
++ /* This checks the inner loop (t1 to t2) */
++ diff = time_to_us(time_sub(t2, t1)); /* current diff */
++
++ /* This shouldn't happen */
++ if (diff < 0) {
++ pr_err(BANNER "time running backwards\n");
++ goto out;
++ }
++
++ if (diff > sample)
++ sample = diff; /* only want highest value */
++
++ } while (total <= data.sample_width);
++
++ ret = 0;
++
++ /* If we exceed the threshold value, we have found a hardware latency */
++ if (sample > data.threshold || outer_sample > data.threshold) {
++ struct sample s;
++
++ ret = 1;
++
++ data.count++;
++ s.seqnum = data.count;
++ s.duration = sample;
++ s.outer_duration = outer_sample;
++ s.timestamp = CURRENT_TIME;
++ __buffer_add_sample(&s);
++
++ /* Keep a running maximum ever recorded hardware latency */
++ if (sample > data.max_sample)
++ data.max_sample = sample;
++ }
++
++out:
++ return ret;
++}
++
++/*
++ * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
++ * @unused: A required part of the kthread API.
++ *
++ * Used to periodically sample the CPU TSC via a call to get_sample. We
++ * disable interrupts, which does (intentionally) introduce latency since we
++ * need to ensure nothing else might be running (and thus pre-empting).
++ * Obviously this should never be used in production environments.
++ *
++ * Currently this runs on whichever CPU it was scheduled on, but most
++ * real-world hardware latency situations occur across several CPUs,
++ * but we might later generalize this if we find there are any actual
++ * systems with alternate SMI delivery or other hardware latencies.
++ */
++static int kthread_fn(void *unused)
++{
++ int ret;
++ u64 interval;
++
++ while (!kthread_should_stop()) {
++
++ mutex_lock(&data.lock);
++
++ local_irq_disable();
++ ret = get_sample();
++ local_irq_enable();
++
++ if (ret > 0)
++ wake_up(&data.wq); /* wake up reader(s) */
++
++ interval = data.sample_window - data.sample_width;
++ do_div(interval, USEC_PER_MSEC); /* modifies interval value */
++
++ mutex_unlock(&data.lock);
++
++ if (msleep_interruptible(interval))
++ break;
++ }
++
++ return 0;
++}
++
++/**
++ * start_kthread - Kick off the hardware latency sampling/detector kthread
++ *
++ * This starts a kernel thread that will sit and sample the CPU timestamp
++ * counter (TSC or similar) and look for potential hardware latencies.
++ */
++static int start_kthread(void)
++{
++ kthread = kthread_run(kthread_fn, NULL,
++ DRVNAME);
++ if (IS_ERR(kthread)) {
++ pr_err(BANNER "could not start sampling thread\n");
++ enabled = 0;
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++/**
++ * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
++ *
++ * This kicks the running hardware latency sampling/detector kernel thread and
++ * tells it to stop sampling now. Use this on unload and at system shutdown.
++ */
++static int stop_kthread(void)
++{
++ int ret;
++
++ ret = kthread_stop(kthread);
++
++ return ret;
++}
++
++/**
++ * __reset_stats - Reset statistics for the hardware latency detector
++ *
++ * We use data to store various statistics and global state. We call this
++ * function in order to reset those when "enable" is toggled on or off, and
++ * also at initialization. Should be called with data.lock held.
++ */
++static void __reset_stats(void)
++{
++ data.count = 0;
++ data.max_sample = 0;
++ ring_buffer_reset(ring_buffer); /* flush out old sample entries */
++}
++
++/**
++ * init_stats - Setup global state statistics for the hardware latency detector
++ *
++ * We use data to store various statistics and global state. We also use
++ * a global ring buffer (ring_buffer) to keep raw samples of detected hardware
++ * induced system latencies. This function initializes these structures and
++ * allocates the global ring buffer also.
++ */ ++static int init_stats(void) ++{ ++ int ret = -ENOMEM; ++ ++ mutex_init(&data.lock); ++ init_waitqueue_head(&data.wq); ++ atomic_set(&data.sample_open, 0); ++ ++ ring_buffer = ring_buffer_alloc(buf_size, BUF_FLAGS); ++ ++ if (WARN(!ring_buffer, KERN_ERR BANNER ++ "failed to allocate ring buffer!\n")) ++ goto out; ++ ++ __reset_stats(); ++ data.threshold = threshold ?: DEFAULT_LAT_THRESHOLD; /* threshold us */ ++ data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */ ++ data.sample_width = DEFAULT_SAMPLE_WIDTH; /* width us */ ++ ++ ret = 0; ++ ++out: ++ return ret; ++ ++} ++ ++/* ++ * simple_data_read - Wrapper read function for global state debugfs entries ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * @entry: The entry to read from ++ * ++ * This function provides a generic read implementation for the global state ++ * "data" structure debugfs filesystem entries. It would be nice to use ++ * simple_attr_read directly, but we need to make sure that the data.lock ++ * is held during the actual read. ++ */ ++static ssize_t simple_data_read(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos, const u64 *entry) ++{ ++ char buf[U64STR_SIZE]; ++ u64 val = 0; ++ int len = 0; ++ ++ memset(buf, 0, sizeof(buf)); ++ ++ if (!entry) ++ return -EFAULT; ++ ++ mutex_lock(&data.lock); ++ val = *entry; ++ mutex_unlock(&data.lock); ++ ++ len = snprintf(buf, sizeof(buf), "%llu\n", (unsigned long long)val); ++ ++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, len); ++ ++} ++ ++/* ++ * simple_data_write - Wrapper write function for global state debugfs entries ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to write value from ++ * @cnt: The maximum number of bytes to write ++ * @ppos: The current "file" position ++ * @entry: The entry to write to ++ * ++ * This function provides a generic write implementation for the global state ++ * "data" structure debugfs filesystem entries. It would be nice to use ++ * simple_attr_write directly, but we need to make sure that the data.lock ++ * is held during the actual write. ++ */ ++static ssize_t simple_data_write(struct file *filp, const char __user *ubuf, ++ size_t cnt, loff_t *ppos, u64 *entry) ++{ ++ char buf[U64STR_SIZE]; ++ int csize = min(cnt, sizeof(buf)); ++ u64 val = 0; ++ int err = 0; ++ ++ memset(buf, '\0', sizeof(buf)); ++ if (copy_from_user(buf, ubuf, csize)) ++ return -EFAULT; ++ ++ buf[U64STR_SIZE-1] = '\0'; /* just in case */ ++ err = kstrtoull(buf, 10, &val); ++ if (err) ++ return -EINVAL; ++ ++ mutex_lock(&data.lock); ++ *entry = val; ++ mutex_unlock(&data.lock); ++ ++ return csize; ++} ++ ++/** ++ * debug_count_fopen - Open function for "count" debugfs entry ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "count" debugfs ++ * interface to the hardware latency detector. 
++ */ ++static int debug_count_fopen(struct inode *inode, struct file *filp) ++{ ++ return 0; ++} ++ ++/** ++ * debug_count_fread - Read function for "count" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "count" debugfs ++ * interface to the hardware latency detector. Can be used to read the ++ * number of latency readings exceeding the configured threshold since ++ * the detector was last reset (e.g. by writing a zero into "count"). ++ */ ++static ssize_t debug_count_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ return simple_data_read(filp, ubuf, cnt, ppos, &data.count); ++} ++ ++/** ++ * debug_count_fwrite - Write function for "count" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "count" debugfs ++ * interface to the hardware latency detector. Can be used to write a ++ * desired value, especially to zero the total count. ++ */ ++static ssize_t debug_count_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) ++{ ++ return simple_data_write(filp, ubuf, cnt, ppos, &data.count); ++} ++ ++/** ++ * debug_enable_fopen - Dummy open function for "enable" debugfs interface ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "enable" debugfs ++ * interface to the hardware latency detector. ++ */ ++static int debug_enable_fopen(struct inode *inode, struct file *filp) ++{ ++ return 0; ++} ++ ++/** ++ * debug_enable_fread - Read function for "enable" debugfs interface ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "enable" debugfs ++ * interface to the hardware latency detector. Can be used to determine ++ * whether the detector is currently enabled ("0\n" or "1\n" returned). ++ */ ++static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ char buf[4]; ++ ++ if ((cnt < sizeof(buf)) || (*ppos)) ++ return 0; ++ ++ buf[0] = enabled ? '1' : '0'; ++ buf[1] = '\n'; ++ buf[2] = '\0'; ++ if (copy_to_user(ubuf, buf, strlen(buf))) ++ return -EFAULT; ++ return *ppos = strlen(buf); ++} ++ ++/** ++ * debug_enable_fwrite - Write function for "enable" debugfs interface ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "enable" debugfs ++ * interface to the hardware latency detector. 
Can be used to enable or ++ * disable the detector, which will have the side-effect of possibly ++ * also resetting the global stats and kicking off the measuring ++ * kthread (on an enable) or the converse (upon a disable). ++ */ ++static ssize_t debug_enable_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) ++{ ++ char buf[4]; ++ int csize = min(cnt, sizeof(buf)); ++ long val = 0; ++ int err = 0; ++ ++ memset(buf, '\0', sizeof(buf)); ++ if (copy_from_user(buf, ubuf, csize)) ++ return -EFAULT; ++ ++ buf[sizeof(buf)-1] = '\0'; /* just in case */ ++ err = kstrtoul(buf, 10, &val); ++ if (err) ++ return -EINVAL; ++ ++ if (val) { ++ if (enabled) ++ goto unlock; ++ enabled = 1; ++ __reset_stats(); ++ if (start_kthread()) ++ return -EFAULT; ++ } else { ++ if (!enabled) ++ goto unlock; ++ enabled = 0; ++ err = stop_kthread(); ++ if (err) { ++ pr_err(BANNER "cannot stop kthread\n"); ++ return -EFAULT; ++ } ++ wake_up(&data.wq); /* reader(s) should return */ ++ } ++unlock: ++ return csize; ++} ++ ++/** ++ * debug_max_fopen - Open function for "max" debugfs entry ++ * @inode: The in-kernel inode representation of the debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function provides an open implementation for the "max" debugfs ++ * interface to the hardware latency detector. ++ */ ++static int debug_max_fopen(struct inode *inode, struct file *filp) ++{ ++ return 0; ++} ++ ++/** ++ * debug_max_fread - Read function for "max" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The userspace provided buffer to read value into ++ * @cnt: The maximum number of bytes to read ++ * @ppos: The current "file" position ++ * ++ * This function provides a read implementation for the "max" debugfs ++ * interface to the hardware latency detector. Can be used to determine ++ * the maximum latency value observed since it was last reset. ++ */ ++static ssize_t debug_max_fread(struct file *filp, char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ return simple_data_read(filp, ubuf, cnt, ppos, &data.max_sample); ++} ++ ++/** ++ * debug_max_fwrite - Write function for "max" debugfs entry ++ * @filp: The active open file structure for the debugfs "file" ++ * @ubuf: The user buffer that contains the value to write ++ * @cnt: The maximum number of bytes to write to "file" ++ * @ppos: The current position in the debugfs "file" ++ * ++ * This function provides a write implementation for the "max" debugfs ++ * interface to the hardware latency detector. Can be used to reset the ++ * maximum or set it to some other desired value - if, then, subsequent ++ * measurements exceed this value, the maximum will be updated. ++ */ ++static ssize_t debug_max_fwrite(struct file *filp, ++ const char __user *ubuf, ++ size_t cnt, ++ loff_t *ppos) ++{ ++ return simple_data_write(filp, ubuf, cnt, ppos, &data.max_sample); ++} ++ ++ ++/** ++ * debug_sample_fopen - An open function for "sample" debugfs interface ++ * @inode: The in-kernel inode representation of this debugfs "file" ++ * @filp: The active open file structure for the debugfs "file" ++ * ++ * This function handles opening the "sample" file within the hardware ++ * latency detector debugfs directory interface. This file is used to read ++ * raw samples from the global ring_buffer and allows the user to see a ++ * running latency history. Can be opened blocking or non-blocking, ++ * affecting whether it behaves as a buffer read pipe, or does not. 
++ * Implements simple locking to prevent multiple simultaneous use.
++ */
++static int debug_sample_fopen(struct inode *inode, struct file *filp)
++{
++ if (!atomic_add_unless(&data.sample_open, 1, 1))
++ return -EBUSY;
++ else
++ return 0;
++}
++
++/**
++ * debug_sample_fread - A read function for "sample" debugfs interface
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that will contain the samples read
++ * @cnt: The maximum bytes to read from the debugfs "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function handles reading from the "sample" file within the hardware
++ * latency detector debugfs directory interface. This file is used to read
++ * raw samples from the global ring_buffer and allows the user to see a
++ * running latency history. By default this will block pending a new
++ * value written into the sample buffer, unless there are already a
++ * number of value(s) waiting in the buffer, or the sample file was
++ * previously opened in a non-blocking mode of operation.
++ */
++static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ int len = 0;
++ char buf[64];
++ struct sample *sample = NULL;
++
++ if (!enabled)
++ return 0;
++
++ sample = kzalloc(sizeof(struct sample), GFP_KERNEL);
++ if (!sample)
++ return -ENOMEM;
++
++ while (!buffer_get_sample(sample)) {
++
++ DEFINE_WAIT(wait);
++
++ if (filp->f_flags & O_NONBLOCK) {
++ len = -EAGAIN;
++ goto out;
++ }
++
++ prepare_to_wait(&data.wq, &wait, TASK_INTERRUPTIBLE);
++ schedule();
++ finish_wait(&data.wq, &wait);
++
++ if (signal_pending(current)) {
++ len = -EINTR;
++ goto out;
++ }
++
++ if (!enabled) { /* enable was toggled */
++ len = 0;
++ goto out;
++ }
++ }
++
++ len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\t%llu\n",
++ sample->timestamp.tv_sec,
++ sample->timestamp.tv_nsec,
++ sample->duration,
++ sample->outer_duration);
++
++
++ /* handling partial reads is more trouble than it's worth */
++ if (len > cnt)
++ goto out;
++
++ if (copy_to_user(ubuf, buf, len))
++ len = -EFAULT;
++
++out:
++ kfree(sample);
++ return len;
++}
++
++/**
++ * debug_sample_release - Release function for "sample" debugfs interface
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function completes the close of the debugfs interface "sample" file.
++ * Frees the sample_open "lock" so that other users may open the interface.
++ */
++static int debug_sample_release(struct inode *inode, struct file *filp)
++{
++ atomic_dec(&data.sample_open);
++
++ return 0;
++}
++
++/**
++ * debug_threshold_fopen - Open function for "threshold" debugfs entry
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "threshold" debugfs
++ * interface to the hardware latency detector.
++ */
++static int debug_threshold_fopen(struct inode *inode, struct file *filp)
++{
++ return 0;
++}
++
++/**
++ * debug_threshold_fread - Read function for "threshold" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "threshold" debugfs
++ * interface to the hardware latency detector. It can be used to determine
++ * the current threshold level at which a latency will be recorded in the
++ * global ring buffer, typically on the order of 10us.
++ */
++static ssize_t debug_threshold_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ return simple_data_read(filp, ubuf, cnt, ppos, &data.threshold);
++}
++
++/**
++ * debug_threshold_fwrite - Write function for "threshold" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "threshold" debugfs
++ * interface to the hardware latency detector. It can be used to configure
++ * the threshold level at which any subsequently detected latencies will
++ * be recorded into the global ring buffer.
++ */
++static ssize_t debug_threshold_fwrite(struct file *filp,
++ const char __user *ubuf,
++ size_t cnt,
++ loff_t *ppos)
++{
++ int ret;
++
++ ret = simple_data_write(filp, ubuf, cnt, ppos, &data.threshold);
++
++ if (enabled)
++ wake_up_process(kthread);
++
++ return ret;
++}
++
++/**
++ * debug_width_fopen - Open function for "width" debugfs entry
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "width" debugfs
++ * interface to the hardware latency detector.
++ */
++static int debug_width_fopen(struct inode *inode, struct file *filp)
++{
++ return 0;
++}
++
++/**
++ * debug_width_fread - Read function for "width" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "width" debugfs
++ * interface to the hardware latency detector. It can be used to determine
++ * for how many us of the total window us we will actively sample for any
++ * hardware-induced latency periods. Obviously, it is not possible to
++ * sample constantly and have the system respond to a sample reader, or,
++ * worse, without having the system appear to have gone out to lunch.
++ */
++static ssize_t debug_width_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_width);
++}
++
++/**
++ * debug_width_fwrite - Write function for "width" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "width" debugfs
++ * interface to the hardware latency detector. It can be used to configure
++ * for how many us of the total window us we will actively sample for any
++ * hardware-induced latency periods. Obviously, it is not possible to
++ * sample constantly and have the system respond to a sample reader, or,
++ * worse, without having the system appear to have gone out to lunch. It
++ * is enforced that width is less than the total window size.
++ */
++static ssize_t debug_width_fwrite(struct file *filp,
++ const char __user *ubuf,
++ size_t cnt,
++ loff_t *ppos)
++{
++ char buf[U64STR_SIZE];
++ int csize = min(cnt, sizeof(buf));
++ u64 val = 0;
++ int err = 0;
++
++ memset(buf, '\0', sizeof(buf));
++ if (copy_from_user(buf, ubuf, csize))
++ return -EFAULT;
++
++ buf[U64STR_SIZE-1] = '\0'; /* just in case */
++ err = kstrtoull(buf, 10, &val);
++ if (err)
++ return -EINVAL;
++
++ mutex_lock(&data.lock);
++ if (val < data.sample_window)
++ data.sample_width = val;
++ else {
++ mutex_unlock(&data.lock);
++ return -EINVAL;
++ }
++ mutex_unlock(&data.lock);
++
++ if (enabled)
++ wake_up_process(kthread);
++
++ return csize;
++}
++
++/**
++ * debug_window_fopen - Open function for "window" debugfs entry
++ * @inode: The in-kernel inode representation of the debugfs "file"
++ * @filp: The active open file structure for the debugfs "file"
++ *
++ * This function provides an open implementation for the "window" debugfs
++ * interface to the hardware latency detector. The window is the total time
++ * in us that will be considered one sample period. Conceptually, windows
++ * occur back-to-back and contain a sample width period during which
++ * actual sampling occurs.
++ */
++static int debug_window_fopen(struct inode *inode, struct file *filp)
++{
++ return 0;
++}
++
++/**
++ * debug_window_fread - Read function for "window" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The userspace provided buffer to read value into
++ * @cnt: The maximum number of bytes to read
++ * @ppos: The current "file" position
++ *
++ * This function provides a read implementation for the "window" debugfs
++ * interface to the hardware latency detector. The window is the total time
++ * in us that will be considered one sample period. Conceptually, windows
++ * occur back-to-back and contain a sample width period during which
++ * actual sampling occurs. Can be used to read the total window size.
++ */
++static ssize_t debug_window_fread(struct file *filp, char __user *ubuf,
++ size_t cnt, loff_t *ppos)
++{
++ return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_window);
++}
++
++/**
++ * debug_window_fwrite - Write function for "window" debugfs entry
++ * @filp: The active open file structure for the debugfs "file"
++ * @ubuf: The user buffer that contains the value to write
++ * @cnt: The maximum number of bytes to write to "file"
++ * @ppos: The current position in the debugfs "file"
++ *
++ * This function provides a write implementation for the "window" debugfs
++ * interface to the hardware latency detector. The window is the total time
++ * in us that will be considered one sample period. Conceptually, windows
++ * occur back-to-back and contain a sample width period during which
++ * actual sampling occurs. Can be used to write a new total window size. It
++ * is enforced that any value written must be greater than the sample width
++ * size, or an error results.
++ */
++static ssize_t debug_window_fwrite(struct file *filp,
++ const char __user *ubuf,
++ size_t cnt,
++ loff_t *ppos)
++{
++ char buf[U64STR_SIZE];
++ int csize = min(cnt, sizeof(buf));
++ u64 val = 0;
++ int err = 0;
++
++ memset(buf, '\0', sizeof(buf));
++ if (copy_from_user(buf, ubuf, csize))
++ return -EFAULT;
++
++ buf[U64STR_SIZE-1] = '\0'; /* just in case */
++ err = kstrtoull(buf, 10, &val);
++ if (err)
++ return -EINVAL;
++
++ mutex_lock(&data.lock);
++ if (data.sample_width < val)
++ data.sample_window = val;
++ else {
++ mutex_unlock(&data.lock);
++ return -EINVAL;
++ }
++ mutex_unlock(&data.lock);
++
++ return csize;
++}
++
++/*
++ * Function pointers for the "count" debugfs file operations
++ */
++static const struct file_operations count_fops = {
++ .open = debug_count_fopen,
++ .read = debug_count_fread,
++ .write = debug_count_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "enable" debugfs file operations
++ */
++static const struct file_operations enable_fops = {
++ .open = debug_enable_fopen,
++ .read = debug_enable_fread,
++ .write = debug_enable_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "max" debugfs file operations
++ */
++static const struct file_operations max_fops = {
++ .open = debug_max_fopen,
++ .read = debug_max_fread,
++ .write = debug_max_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "sample" debugfs file operations
++ */
++static const struct file_operations sample_fops = {
++ .open = debug_sample_fopen,
++ .read = debug_sample_fread,
++ .release = debug_sample_release,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "threshold" debugfs file operations
++ */
++static const struct file_operations threshold_fops = {
++ .open = debug_threshold_fopen,
++ .read = debug_threshold_fread,
++ .write = debug_threshold_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "width" debugfs file operations
++ */
++static const struct file_operations width_fops = {
++ .open = debug_width_fopen,
++ .read = debug_width_fread,
++ .write = debug_width_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/*
++ * Function pointers for the "window" debugfs file operations
++ */
++static const struct file_operations window_fops = {
++ .open = debug_window_fopen,
++ .read = debug_window_fread,
++ .write = debug_window_fwrite,
++ .owner = THIS_MODULE,
++};
++
++/**
++ * init_debugfs - A function to initialize the debugfs interface files
++ *
++ * This function creates entries in debugfs for "hwlat_detector", including
++ * files to read values from the detector, current samples, and the
++ * maximum sample that has been captured since the hardware latency
++ * detector was started.
++ */
++static int init_debugfs(void)
++{
++ int ret = -ENOMEM;
++
++ debug_dir = debugfs_create_dir(DRVNAME, NULL);
++ if (!debug_dir)
++ goto err_debug_dir;
++
++ debug_sample = debugfs_create_file("sample", 0444,
++ debug_dir, NULL,
++ &sample_fops);
++ if (!debug_sample)
++ goto err_sample;
++
++ debug_count = debugfs_create_file("count", 0444,
++ debug_dir, NULL,
++ &count_fops);
++ if (!debug_count)
++ goto err_count;
++
++ debug_max = debugfs_create_file("max", 0444,
++ debug_dir, NULL,
++ &max_fops);
++ if (!debug_max)
++ goto err_max;
++
++ debug_sample_window = debugfs_create_file("window", 0644,
++ debug_dir, NULL,
++ &window_fops);
++ if (!debug_sample_window)
++ goto err_window;
++
++ debug_sample_width = debugfs_create_file("width", 0644,
++ debug_dir, NULL,
++ &width_fops);
++ if (!debug_sample_width)
++ goto err_width;
++
++ debug_threshold = debugfs_create_file("threshold", 0644,
++ debug_dir, NULL,
++ &threshold_fops);
++ if (!debug_threshold)
++ goto err_threshold;
++
++ debug_enable = debugfs_create_file("enable", 0644,
++ debug_dir, &enabled,
++ &enable_fops);
++ if (!debug_enable)
++ goto err_enable;
++
++ else {
++ ret = 0;
++ goto out;
++ }
++
++err_enable:
++ debugfs_remove(debug_threshold);
++err_threshold:
++ debugfs_remove(debug_sample_width);
++err_width:
++ debugfs_remove(debug_sample_window);
++err_window:
++ debugfs_remove(debug_max);
++err_max:
++ debugfs_remove(debug_count);
++err_count:
++ debugfs_remove(debug_sample);
++err_sample:
++ debugfs_remove(debug_dir);
++err_debug_dir:
++out:
++ return ret;
++}
++
++/**
++ * free_debugfs - A function to cleanup the debugfs file interface
++ */
++static void free_debugfs(void)
++{
++ /* could also use a debugfs_remove_recursive */
++ debugfs_remove(debug_enable);
++ debugfs_remove(debug_threshold);
++ debugfs_remove(debug_sample_width);
++ debugfs_remove(debug_sample_window);
++ debugfs_remove(debug_max);
++ debugfs_remove(debug_count);
++ debugfs_remove(debug_sample);
++ debugfs_remove(debug_dir);
++}
++
++/**
++ * detector_init - Standard module initialization code
++ */
++static int detector_init(void)
++{
++ int ret = -ENOMEM;
++
++ pr_info(BANNER "version %s\n", VERSION);
++
++ ret = init_stats();
++ if (ret)
++ goto out;
++
++ ret = init_debugfs();
++ if (ret)
++ goto err_stats;
++
++ if (enabled)
++ ret = start_kthread();
++
++ goto out;
++
++err_stats:
++ ring_buffer_free(ring_buffer);
++out:
++ return ret;
++
++}
++
++/**
++ * detector_exit - Standard module cleanup code
++ */
++static void detector_exit(void)
++{
++ int err;
++
++ if (enabled) {
++ enabled = 0;
++ err = stop_kthread();
++ if (err)
++ pr_err(BANNER "cannot stop kthread\n");
++ }
++
++ free_debugfs();
++ ring_buffer_free(ring_buffer); /* free up the ring buffer */
++
++}
++
++module_init(detector_init);
++module_exit(detector_exit);
+diff -Nur linux-4.4.13.orig/drivers/misc/Kconfig linux-4.4.13/drivers/misc/Kconfig
+--- linux-4.4.13.orig/drivers/misc/Kconfig 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/drivers/misc/Kconfig 2016-06-22 21:55:08.010523867 +0200
+@@ -54,6 +54,7 @@
+ config ATMEL_TCLIB
+ bool "Atmel AT32/AT91 Timer/Counter Library"
+ depends on (AVR32 || ARCH_AT91)
++ default y if PREEMPT_RT_FULL
+ help
+ Select this if you want a library to allocate the Timer/Counter
+ blocks found on many Atmel processors. This facilitates using
+@@ -69,8 +70,7 @@
+ are combined to make a single 32-bit timer.
+
+ When GENERIC_CLOCKEVENTS is defined, the third timer channel
+- may be used as a clock event device supporting oneshot mode
+- (delays of up to two seconds) based on the 32 KiHz clock.
++ may be used as a clock event device supporting oneshot mode.
+
+ config ATMEL_TCB_CLKSRC_BLOCK
+ int
+@@ -84,6 +84,15 @@
+ TC can be used for other purposes, such as PWM generation and
+ interval timing.
+
++config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
++ bool "TC Block use 32 KiHz clock"
++ depends on ATMEL_TCB_CLKSRC
++ default y if !PREEMPT_RT_FULL
++ help
++ Select this to use 32 KiHz base clock rate as TC block clock
++ source for clock events.
++
++
+ config DUMMY_IRQ
+ tristate "Dummy IRQ handler"
+ default n
+@@ -113,6 +122,35 @@
+ for information on the specific driver level and support statement
+ for your IBM server.
+
++config HWLAT_DETECTOR
++ tristate "Testing module to detect hardware-induced latencies"
++ depends on DEBUG_FS
++ depends on RING_BUFFER
++ default m
++ ---help---
++ A simple hardware latency detector. Use this module to detect
++ large latencies introduced by the behavior of the underlying
++ system firmware external to Linux. We do this using periodic
++ use of stop_machine to grab all available CPUs and measure
++ for unexplainable gaps in the CPU timestamp counter(s). By
++ default, the module is not enabled until the "enable" file
++ within the "hwlat_detector" debugfs directory is toggled.
++
++ This module is often used to detect SMI (System Management
++ Interrupts) on x86 systems, though it is not x86 specific. To
++ this end, we default to using a sample window of 1 second,
++ during which we will sample for 0.5 seconds. If an SMI or
++ similar event occurs during that time, it is recorded
++ into an 8K samples global ring buffer until retrieved.
++
++ WARNING: This software should never be enabled (it can be built
++ but should not be turned on after it is loaded) in a production
++ environment where high latencies are a concern since the
++ sampling mechanism actually introduces latencies for
++ regular tasks while the CPU(s) are being held. 
++ ++ If unsure, say N ++ + config PHANTOM + tristate "Sensable PHANToM (PCI)" + depends on PCI +diff -Nur linux-4.4.13.orig/drivers/misc/Makefile linux-4.4.13/drivers/misc/Makefile +--- linux-4.4.13.orig/drivers/misc/Makefile 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/misc/Makefile 2016-06-22 21:55:08.010523867 +0200 +@@ -39,6 +39,7 @@ + obj-$(CONFIG_HMC6352) += hmc6352.o + obj-y += eeprom/ + obj-y += cb710/ ++obj-$(CONFIG_HWLAT_DETECTOR) += hwlat_detector.o + obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o + obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o + obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o +diff -Nur linux-4.4.13.orig/drivers/mmc/host/mmci.c linux-4.4.13/drivers/mmc/host/mmci.c +--- linux-4.4.13.orig/drivers/mmc/host/mmci.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/mmc/host/mmci.c 2016-06-22 21:55:08.010523867 +0200 +@@ -1155,15 +1155,12 @@ + struct sg_mapping_iter *sg_miter = &host->sg_miter; + struct variant_data *variant = host->variant; + void __iomem *base = host->base; +- unsigned long flags; + u32 status; + + status = readl(base + MMCISTATUS); + + dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status); + +- local_irq_save(flags); +- + do { + unsigned int remain, len; + char *buffer; +@@ -1203,8 +1200,6 @@ + + sg_miter_stop(sg_miter); + +- local_irq_restore(flags); +- + /* + * If we have less than the fifo 'half-full' threshold to transfer, + * trigger a PIO interrupt as soon as any data is available. +diff -Nur linux-4.4.13.orig/drivers/net/ethernet/3com/3c59x.c linux-4.4.13/drivers/net/ethernet/3com/3c59x.c +--- linux-4.4.13.orig/drivers/net/ethernet/3com/3c59x.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/net/ethernet/3com/3c59x.c 2016-06-22 21:55:08.010523867 +0200 +@@ -842,9 +842,9 @@ + { + struct vortex_private *vp = netdev_priv(dev); + unsigned long flags; +- local_irq_save(flags); ++ local_irq_save_nort(flags); + (vp->full_bus_master_rx ? 
boomerang_interrupt:vortex_interrupt)(dev->irq,dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + #endif + +@@ -1916,12 +1916,12 @@ + * Block interrupts because vortex_interrupt does a bare spin_lock() + */ + unsigned long flags; +- local_irq_save(flags); ++ local_irq_save_nort(flags); + if (vp->full_bus_master_tx) + boomerang_interrupt(dev->irq, dev); + else + vortex_interrupt(dev->irq, dev); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + } + +diff -Nur linux-4.4.13.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c linux-4.4.13/drivers/net/ethernet/atheros/atl1c/atl1c_main.c +--- linux-4.4.13.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/net/ethernet/atheros/atl1c/atl1c_main.c 2016-06-22 21:55:08.010523867 +0200 +@@ -2221,11 +2221,7 @@ + } + + tpd_req = atl1c_cal_tpd_req(skb); +- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) { +- if (netif_msg_pktdata(adapter)) +- dev_info(&adapter->pdev->dev, "tx locked\n"); +- return NETDEV_TX_LOCKED; +- } ++ spin_lock_irqsave(&adapter->tx_lock, flags); + + if (atl1c_tpd_avail(adapter, type) < tpd_req) { + /* no enough descriptor, just stop queue */ +diff -Nur linux-4.4.13.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c linux-4.4.13/drivers/net/ethernet/atheros/atl1e/atl1e_main.c +--- linux-4.4.13.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/net/ethernet/atheros/atl1e/atl1e_main.c 2016-06-22 21:55:08.010523867 +0200 +@@ -1880,8 +1880,7 @@ + return NETDEV_TX_OK; + } + tpd_req = atl1e_cal_tdp_req(skb); +- if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) +- return NETDEV_TX_LOCKED; ++ spin_lock_irqsave(&adapter->tx_lock, flags); + + if (atl1e_tpd_avail(adapter) < tpd_req) { + /* no enough descriptor, just stop queue */ +diff -Nur linux-4.4.13.orig/drivers/net/ethernet/chelsio/cxgb/sge.c linux-4.4.13/drivers/net/ethernet/chelsio/cxgb/sge.c +--- linux-4.4.13.orig/drivers/net/ethernet/chelsio/cxgb/sge.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/net/ethernet/chelsio/cxgb/sge.c 2016-06-22 21:55:08.014523867 +0200 +@@ -1664,8 +1664,7 @@ + struct cmdQ *q = &sge->cmdQ[qid]; + unsigned int credits, pidx, genbit, count, use_sched_skb = 0; + +- if (!spin_trylock(&q->lock)) +- return NETDEV_TX_LOCKED; ++ spin_lock(&q->lock); + + reclaim_completed_tx(sge, q); + +diff -Nur linux-4.4.13.orig/drivers/net/ethernet/neterion/s2io.c linux-4.4.13/drivers/net/ethernet/neterion/s2io.c +--- linux-4.4.13.orig/drivers/net/ethernet/neterion/s2io.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/net/ethernet/neterion/s2io.c 2016-06-22 21:55:08.014523867 +0200 +@@ -4084,12 +4084,7 @@ + [skb->priority & (MAX_TX_FIFOS - 1)]; + fifo = &mac_control->fifos[queue]; + +- if (do_spin_lock) +- spin_lock_irqsave(&fifo->tx_lock, flags); +- else { +- if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags))) +- return NETDEV_TX_LOCKED; +- } ++ spin_lock_irqsave(&fifo->tx_lock, flags); + + if (sp->config.multiq) { + if (__netif_subqueue_stopped(dev, fifo->fifo_no)) { +diff -Nur linux-4.4.13.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c linux-4.4.13/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c +--- linux-4.4.13.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c 2016-06-22 21:55:08.014523867 +0200 +@@ -2137,10 +2137,8 @@ + struct 
pch_gbe_tx_ring *tx_ring = adapter->tx_ring; + unsigned long flags; + +- if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) { +- /* Collision - tell upper layer to requeue */ +- return NETDEV_TX_LOCKED; +- } ++ spin_lock_irqsave(&tx_ring->tx_lock, flags); ++ + if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) { + netif_stop_queue(netdev); + spin_unlock_irqrestore(&tx_ring->tx_lock, flags); +diff -Nur linux-4.4.13.orig/drivers/net/ethernet/realtek/8139too.c linux-4.4.13/drivers/net/ethernet/realtek/8139too.c +--- linux-4.4.13.orig/drivers/net/ethernet/realtek/8139too.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/net/ethernet/realtek/8139too.c 2016-06-22 21:55:08.014523867 +0200 +@@ -2229,7 +2229,7 @@ + struct rtl8139_private *tp = netdev_priv(dev); + const int irq = tp->pci_dev->irq; + +- disable_irq(irq); ++ disable_irq_nosync(irq); + rtl8139_interrupt(irq, dev); + enable_irq(irq); + } +diff -Nur linux-4.4.13.orig/drivers/net/ethernet/tehuti/tehuti.c linux-4.4.13/drivers/net/ethernet/tehuti/tehuti.c +--- linux-4.4.13.orig/drivers/net/ethernet/tehuti/tehuti.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/net/ethernet/tehuti/tehuti.c 2016-06-22 21:55:08.014523867 +0200 +@@ -1629,13 +1629,8 @@ + unsigned long flags; + + ENTER; +- local_irq_save(flags); +- if (!spin_trylock(&priv->tx_lock)) { +- local_irq_restore(flags); +- DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n", +- BDX_DRV_NAME, ndev->name); +- return NETDEV_TX_LOCKED; +- } ++ ++ spin_lock_irqsave(&priv->tx_lock, flags); + + /* build tx descriptor */ + BDX_ASSERT(f->m.wptr >= f->m.memsz); /* started with valid wptr */ +diff -Nur linux-4.4.13.orig/drivers/net/rionet.c linux-4.4.13/drivers/net/rionet.c +--- linux-4.4.13.orig/drivers/net/rionet.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/net/rionet.c 2016-06-22 21:55:08.014523867 +0200 +@@ -174,11 +174,7 @@ + unsigned long flags; + int add_num = 1; + +- local_irq_save(flags); +- if (!spin_trylock(&rnet->tx_lock)) { +- local_irq_restore(flags); +- return NETDEV_TX_LOCKED; +- } ++ spin_lock_irqsave(&rnet->tx_lock, flags); + + if (is_multicast_ether_addr(eth->h_dest)) + add_num = nets[rnet->mport->id].nact; +diff -Nur linux-4.4.13.orig/drivers/net/wireless/orinoco/orinoco_usb.c linux-4.4.13/drivers/net/wireless/orinoco/orinoco_usb.c +--- linux-4.4.13.orig/drivers/net/wireless/orinoco/orinoco_usb.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/net/wireless/orinoco/orinoco_usb.c 2016-06-22 21:55:08.014523867 +0200 +@@ -697,7 +697,7 @@ + while (!ctx->done.done && msecs--) + udelay(1000); + } else { +- wait_event_interruptible(ctx->done.wait, ++ swait_event_interruptible(ctx->done.wait, + ctx->done.done); + } + break; +diff -Nur linux-4.4.13.orig/drivers/pci/access.c linux-4.4.13/drivers/pci/access.c +--- linux-4.4.13.orig/drivers/pci/access.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/pci/access.c 2016-06-22 21:55:08.014523867 +0200 +@@ -561,7 +561,7 @@ + WARN_ON(!dev->block_cfg_access); + + dev->block_cfg_access = 0; +- wake_up_all(&pci_cfg_wait); ++ wake_up_all_locked(&pci_cfg_wait); + raw_spin_unlock_irqrestore(&pci_lock, flags); + } + EXPORT_SYMBOL_GPL(pci_cfg_access_unlock); +diff -Nur linux-4.4.13.orig/drivers/scsi/fcoe/fcoe.c linux-4.4.13/drivers/scsi/fcoe/fcoe.c +--- linux-4.4.13.orig/drivers/scsi/fcoe/fcoe.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/scsi/fcoe/fcoe.c 2016-06-22 21:55:08.018523867 +0200 +@@ -1286,7 +1286,7 @@ + struct sk_buff *skb; + #ifdef CONFIG_SMP + 
struct fcoe_percpu_s *p0; +- unsigned targ_cpu = get_cpu(); ++ unsigned targ_cpu = get_cpu_light(); + #endif /* CONFIG_SMP */ + + FCOE_DBG("Destroying receive thread for CPU %d\n", cpu); +@@ -1342,7 +1342,7 @@ + kfree_skb(skb); + spin_unlock_bh(&p->fcoe_rx_list.lock); + } +- put_cpu(); ++ put_cpu_light(); + #else + /* + * This a non-SMP scenario where the singular Rx thread is +@@ -1566,11 +1566,11 @@ + static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen) + { + struct fcoe_percpu_s *fps; +- int rc; ++ int rc, cpu = get_cpu_light(); + +- fps = &get_cpu_var(fcoe_percpu); ++ fps = &per_cpu(fcoe_percpu, cpu); + rc = fcoe_get_paged_crc_eof(skb, tlen, fps); +- put_cpu_var(fcoe_percpu); ++ put_cpu_light(); + + return rc; + } +@@ -1766,11 +1766,11 @@ + return 0; + } + +- stats = per_cpu_ptr(lport->stats, get_cpu()); ++ stats = per_cpu_ptr(lport->stats, get_cpu_light()); + stats->InvalidCRCCount++; + if (stats->InvalidCRCCount < 5) + printk(KERN_WARNING "fcoe: dropping frame with CRC error\n"); +- put_cpu(); ++ put_cpu_light(); + return -EINVAL; + } + +@@ -1846,13 +1846,13 @@ + goto drop; + + if (!fcoe_filter_frames(lport, fp)) { +- put_cpu(); ++ put_cpu_light(); + fc_exch_recv(lport, fp); + return; + } + drop: + stats->ErrorFrames++; +- put_cpu(); ++ put_cpu_light(); + kfree_skb(skb); + } + +diff -Nur linux-4.4.13.orig/drivers/scsi/fcoe/fcoe_ctlr.c linux-4.4.13/drivers/scsi/fcoe/fcoe_ctlr.c +--- linux-4.4.13.orig/drivers/scsi/fcoe/fcoe_ctlr.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/scsi/fcoe/fcoe_ctlr.c 2016-06-22 21:55:08.018523867 +0200 +@@ -831,7 +831,7 @@ + + INIT_LIST_HEAD(&del_list); + +- stats = per_cpu_ptr(fip->lp->stats, get_cpu()); ++ stats = per_cpu_ptr(fip->lp->stats, get_cpu_light()); + + list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { + deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2; +@@ -867,7 +867,7 @@ + sel_time = fcf->time; + } + } +- put_cpu(); ++ put_cpu_light(); + + list_for_each_entry_safe(fcf, next, &del_list, list) { + /* Removes fcf from current list */ +diff -Nur linux-4.4.13.orig/drivers/scsi/libfc/fc_exch.c linux-4.4.13/drivers/scsi/libfc/fc_exch.c +--- linux-4.4.13.orig/drivers/scsi/libfc/fc_exch.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/scsi/libfc/fc_exch.c 2016-06-22 21:55:08.018523867 +0200 +@@ -814,10 +814,10 @@ + } + memset(ep, 0, sizeof(*ep)); + +- cpu = get_cpu(); ++ cpu = get_cpu_light(); + pool = per_cpu_ptr(mp->pool, cpu); + spin_lock_bh(&pool->lock); +- put_cpu(); ++ put_cpu_light(); + + /* peek cache of free slot */ + if (pool->left != FC_XID_UNKNOWN) { +diff -Nur linux-4.4.13.orig/drivers/scsi/libsas/sas_ata.c linux-4.4.13/drivers/scsi/libsas/sas_ata.c +--- linux-4.4.13.orig/drivers/scsi/libsas/sas_ata.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/scsi/libsas/sas_ata.c 2016-06-22 21:55:08.018523867 +0200 +@@ -190,7 +190,7 @@ + /* TODO: audit callers to ensure they are ready for qc_issue to + * unconditionally re-enable interrupts + */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + spin_unlock(ap->lock); + + /* If the device fell off, no sense in issuing commands */ +@@ -255,7 +255,7 @@ + + out: + spin_lock(ap->lock); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + return ret; + } + +diff -Nur linux-4.4.13.orig/drivers/scsi/qla2xxx/qla_inline.h linux-4.4.13/drivers/scsi/qla2xxx/qla_inline.h +--- linux-4.4.13.orig/drivers/scsi/qla2xxx/qla_inline.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/scsi/qla2xxx/qla_inline.h 
2016-06-22 21:55:08.018523867 +0200 +@@ -59,12 +59,12 @@ + { + unsigned long flags; + struct qla_hw_data *ha = rsp->hw; +- local_irq_save(flags); ++ local_irq_save_nort(flags); + if (IS_P3P_TYPE(ha)) + qla82xx_poll(0, rsp); + else + ha->isp_ops->intr_handler(0, rsp); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + + static inline uint8_t * +diff -Nur linux-4.4.13.orig/drivers/thermal/x86_pkg_temp_thermal.c linux-4.4.13/drivers/thermal/x86_pkg_temp_thermal.c +--- linux-4.4.13.orig/drivers/thermal/x86_pkg_temp_thermal.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/thermal/x86_pkg_temp_thermal.c 2016-06-22 21:55:08.018523867 +0200 +@@ -29,6 +29,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -352,7 +353,7 @@ + } + } + +-static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val) ++static void platform_thermal_notify_work(struct swork_event *event) + { + unsigned long flags; + int cpu = smp_processor_id(); +@@ -369,7 +370,7 @@ + pkg_work_scheduled[phy_id]) { + disable_pkg_thres_interrupt(); + spin_unlock_irqrestore(&pkg_work_lock, flags); +- return -EINVAL; ++ return; + } + pkg_work_scheduled[phy_id] = 1; + spin_unlock_irqrestore(&pkg_work_lock, flags); +@@ -378,9 +379,48 @@ + schedule_delayed_work_on(cpu, + &per_cpu(pkg_temp_thermal_threshold_work, cpu), + msecs_to_jiffies(notify_delay_ms)); ++} ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++static struct swork_event notify_work; ++ ++static int thermal_notify_work_init(void) ++{ ++ int err; ++ ++ err = swork_get(); ++ if (err) ++ return err; ++ ++ INIT_SWORK(&notify_work, platform_thermal_notify_work); + return 0; + } + ++static void thermal_notify_work_cleanup(void) ++{ ++ swork_put(); ++} ++ ++static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val) ++{ ++ swork_queue(&notify_work); ++ return 0; ++} ++ ++#else /* !CONFIG_PREEMPT_RT_FULL */ ++ ++static int thermal_notify_work_init(void) { return 0; } ++ ++static void thermal_notify_work_cleanup(void) { } ++ ++static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val) ++{ ++ platform_thermal_notify_work(NULL); ++ ++ return 0; ++} ++#endif /* CONFIG_PREEMPT_RT_FULL */ ++ + static int find_siblings_cpu(int cpu) + { + int i; +@@ -584,6 +624,9 @@ + if (!x86_match_cpu(pkg_temp_thermal_ids)) + return -ENODEV; + ++ if (!thermal_notify_work_init()) ++ return -ENODEV; ++ + spin_lock_init(&pkg_work_lock); + platform_thermal_package_notify = + pkg_temp_thermal_platform_thermal_notify; +@@ -608,7 +651,7 @@ + kfree(pkg_work_scheduled); + platform_thermal_package_notify = NULL; + platform_thermal_package_rate_control = NULL; +- ++ thermal_notify_work_cleanup(); + return -ENODEV; + } + +@@ -633,6 +676,7 @@ + mutex_unlock(&phy_dev_list_mutex); + platform_thermal_package_notify = NULL; + platform_thermal_package_rate_control = NULL; ++ thermal_notify_work_cleanup(); + for_each_online_cpu(i) + cancel_delayed_work_sync( + &per_cpu(pkg_temp_thermal_threshold_work, i)); +diff -Nur linux-4.4.13.orig/drivers/tty/serial/8250/8250_core.c linux-4.4.13/drivers/tty/serial/8250/8250_core.c +--- linux-4.4.13.orig/drivers/tty/serial/8250/8250_core.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/tty/serial/8250/8250_core.c 2016-06-22 21:55:08.018523867 +0200 +@@ -58,7 +58,16 @@ + + static unsigned int skip_txen_test; /* force skip of txen test at init time */ + +-#define PASS_LIMIT 512 ++/* ++ * On -rt we can have more delays, and legitimately ++ * so - so don't drop work spuriously and spam the ++ * syslog: ++ */ ++#ifdef
CONFIG_PREEMPT_RT_FULL ++# define PASS_LIMIT 1000000 ++#else ++# define PASS_LIMIT 512 ++#endif + + #include + /* +diff -Nur linux-4.4.13.orig/drivers/tty/serial/8250/8250_port.c linux-4.4.13/drivers/tty/serial/8250/8250_port.c +--- linux-4.4.13.orig/drivers/tty/serial/8250/8250_port.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/tty/serial/8250/8250_port.c 2016-06-22 21:55:08.018523867 +0200 +@@ -35,6 +35,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -2843,9 +2844,9 @@ + + serial8250_rpm_get(up); + +- if (port->sysrq) ++ if (port->sysrq || oops_in_progress) + locked = 0; +- else if (oops_in_progress) ++ else if (in_kdb_printk()) + locked = spin_trylock_irqsave(&port->lock, flags); + else + spin_lock_irqsave(&port->lock, flags); +diff -Nur linux-4.4.13.orig/drivers/tty/serial/amba-pl011.c linux-4.4.13/drivers/tty/serial/amba-pl011.c +--- linux-4.4.13.orig/drivers/tty/serial/amba-pl011.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/tty/serial/amba-pl011.c 2016-06-22 21:55:08.018523867 +0200 +@@ -2067,13 +2067,19 @@ + + clk_enable(uap->clk); + +- local_irq_save(flags); ++ /* ++ * local_irq_save(flags); ++ * ++ * This local_irq_save() is nonsense. If we come in via sysrq ++ * handling then interrupts are already disabled. Aside of ++ * that the port.sysrq check is racy on SMP regardless. ++ */ + if (uap->port.sysrq) + locked = 0; + else if (oops_in_progress) +- locked = spin_trylock(&uap->port.lock); ++ locked = spin_trylock_irqsave(&uap->port.lock, flags); + else +- spin_lock(&uap->port.lock); ++ spin_lock_irqsave(&uap->port.lock, flags); + + /* + * First save the CR then disable the interrupts +@@ -2098,8 +2104,7 @@ + writew(old_cr, uap->port.membase + UART011_CR); + + if (locked) +- spin_unlock(&uap->port.lock); +- local_irq_restore(flags); ++ spin_unlock_irqrestore(&uap->port.lock, flags); + + clk_disable(uap->clk); + } +diff -Nur linux-4.4.13.orig/drivers/tty/serial/omap-serial.c linux-4.4.13/drivers/tty/serial/omap-serial.c +--- linux-4.4.13.orig/drivers/tty/serial/omap-serial.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/tty/serial/omap-serial.c 2016-06-22 21:55:08.018523867 +0200 +@@ -1257,13 +1257,10 @@ + + pm_runtime_get_sync(up->dev); + +- local_irq_save(flags); +- if (up->port.sysrq) +- locked = 0; +- else if (oops_in_progress) +- locked = spin_trylock(&up->port.lock); ++ if (up->port.sysrq || oops_in_progress) ++ locked = spin_trylock_irqsave(&up->port.lock, flags); + else +- spin_lock(&up->port.lock); ++ spin_lock_irqsave(&up->port.lock, flags); + + /* + * First save the IER then disable the interrupts +@@ -1292,8 +1289,7 @@ + pm_runtime_mark_last_busy(up->dev); + pm_runtime_put_autosuspend(up->dev); + if (locked) +- spin_unlock(&up->port.lock); +- local_irq_restore(flags); ++ spin_unlock_irqrestore(&up->port.lock, flags); + } + + static int __init +diff -Nur linux-4.4.13.orig/drivers/tty/serial/sc16is7xx.c linux-4.4.13/drivers/tty/serial/sc16is7xx.c +--- linux-4.4.13.orig/drivers/tty/serial/sc16is7xx.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/tty/serial/sc16is7xx.c 2016-06-22 21:55:08.018523867 +0200 +@@ -1230,7 +1230,7 @@ + + /* Setup interrupt */ + ret = devm_request_irq(dev, irq, sc16is7xx_irq, +- IRQF_ONESHOT | flags, dev_name(dev), s); ++ flags, dev_name(dev), s); + if (!ret) + return 0; + +diff -Nur linux-4.4.13.orig/drivers/usb/core/hcd.c linux-4.4.13/drivers/usb/core/hcd.c +--- linux-4.4.13.orig/drivers/usb/core/hcd.c 2016-06-08 03:14:51.000000000 +0200 ++++ 
linux-4.4.13/drivers/usb/core/hcd.c 2016-06-22 21:55:08.022523867 +0200 +@@ -1735,9 +1735,9 @@ + * and no one may trigger the above deadlock situation when + * running complete() in tasklet. + */ +- local_irq_save(flags); ++ local_irq_save_nort(flags); + urb->complete(urb); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + + usb_anchor_resume_wakeups(anchor); + atomic_dec(&urb->use_count); +diff -Nur linux-4.4.13.orig/drivers/usb/gadget/function/f_fs.c linux-4.4.13/drivers/usb/gadget/function/f_fs.c +--- linux-4.4.13.orig/drivers/usb/gadget/function/f_fs.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/usb/gadget/function/f_fs.c 2016-06-22 21:55:08.022523867 +0200 +@@ -1404,7 +1404,7 @@ + pr_info("%s(): freeing\n", __func__); + ffs_data_clear(ffs); + BUG_ON(waitqueue_active(&ffs->ev.waitq) || +- waitqueue_active(&ffs->ep0req_completion.wait)); ++ swait_active(&ffs->ep0req_completion.wait)); + kfree(ffs->dev_name); + kfree(ffs); + } +diff -Nur linux-4.4.13.orig/drivers/usb/gadget/legacy/inode.c linux-4.4.13/drivers/usb/gadget/legacy/inode.c +--- linux-4.4.13.orig/drivers/usb/gadget/legacy/inode.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/usb/gadget/legacy/inode.c 2016-06-22 21:55:08.022523867 +0200 +@@ -345,7 +345,7 @@ + spin_unlock_irq (&epdata->dev->lock); + + if (likely (value == 0)) { +- value = wait_event_interruptible (done.wait, done.done); ++ value = swait_event_interruptible (done.wait, done.done); + if (value != 0) { + spin_lock_irq (&epdata->dev->lock); + if (likely (epdata->ep != NULL)) { +@@ -354,7 +354,7 @@ + usb_ep_dequeue (epdata->ep, epdata->req); + spin_unlock_irq (&epdata->dev->lock); + +- wait_event (done.wait, done.done); ++ swait_event (done.wait, done.done); + if (epdata->status == -ECONNRESET) + epdata->status = -EINTR; + } else { +diff -Nur linux-4.4.13.orig/drivers/usb/gadget/udc/atmel_usba_udc.c linux-4.4.13/drivers/usb/gadget/udc/atmel_usba_udc.c +--- linux-4.4.13.orig/drivers/usb/gadget/udc/atmel_usba_udc.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/usb/gadget/udc/atmel_usba_udc.c 2016-06-22 21:55:08.022523867 +0200 +@@ -17,7 +17,9 @@ + #include + #include + #include ++#include + #include ++#include + #include + #include + #include +@@ -1888,20 +1890,15 @@ + #ifdef CONFIG_OF + static void at91sam9rl_toggle_bias(struct usba_udc *udc, int is_on) + { +- unsigned int uckr = at91_pmc_read(AT91_CKGR_UCKR); +- +- if (is_on) +- at91_pmc_write(AT91_CKGR_UCKR, uckr | AT91_PMC_BIASEN); +- else +- at91_pmc_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN)); ++ regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN, ++ is_on ? 
AT91_PMC_BIASEN : 0); + } + + static void at91sam9g45_pulse_bias(struct usba_udc *udc) + { +- unsigned int uckr = at91_pmc_read(AT91_CKGR_UCKR); +- +- at91_pmc_write(AT91_CKGR_UCKR, uckr & ~(AT91_PMC_BIASEN)); +- at91_pmc_write(AT91_CKGR_UCKR, uckr | AT91_PMC_BIASEN); ++ regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN, 0); ++ regmap_update_bits(udc->pmc, AT91_CKGR_UCKR, AT91_PMC_BIASEN, ++ AT91_PMC_BIASEN); + } + + static const struct usba_udc_errata at91sam9rl_errata = { +@@ -1938,6 +1935,9 @@ + return ERR_PTR(-EINVAL); + + udc->errata = match->data; ++ udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9g45-pmc"); ++ if (udc->errata && IS_ERR(udc->pmc)) ++ return ERR_CAST(udc->pmc); + + udc->num_ep = 0; + +diff -Nur linux-4.4.13.orig/drivers/usb/gadget/udc/atmel_usba_udc.h linux-4.4.13/drivers/usb/gadget/udc/atmel_usba_udc.h +--- linux-4.4.13.orig/drivers/usb/gadget/udc/atmel_usba_udc.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/drivers/usb/gadget/udc/atmel_usba_udc.h 2016-06-22 21:55:08.022523867 +0200 +@@ -354,6 +354,8 @@ + struct dentry *debugfs_root; + struct dentry *debugfs_regs; + #endif ++ ++ struct regmap *pmc; + }; + + static inline struct usba_ep *to_usba_ep(struct usb_ep *ep) +diff -Nur linux-4.4.13.orig/fs/aio.c linux-4.4.13/fs/aio.c +--- linux-4.4.13.orig/fs/aio.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/fs/aio.c 2016-06-22 21:55:08.022523867 +0200 +@@ -40,6 +40,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -115,7 +116,7 @@ + struct page **ring_pages; + long nr_pages; + +- struct work_struct free_work; ++ struct swork_event free_work; + + /* + * signals when all in-flight requests are done +@@ -253,6 +254,7 @@ + .mount = aio_mount, + .kill_sb = kill_anon_super, + }; ++ BUG_ON(swork_get()); + aio_mnt = kern_mount(&aio_fs); + if (IS_ERR(aio_mnt)) + panic("Failed to create aio fs mount."); +@@ -568,9 +570,9 @@ + return cancel(&kiocb->common); + } + +-static void free_ioctx(struct work_struct *work) ++static void free_ioctx(struct swork_event *sev) + { +- struct kioctx *ctx = container_of(work, struct kioctx, free_work); ++ struct kioctx *ctx = container_of(sev, struct kioctx, free_work); + + pr_debug("freeing %p\n", ctx); + +@@ -589,8 +591,8 @@ + if (ctx->rq_wait && atomic_dec_and_test(&ctx->rq_wait->count)) + complete(&ctx->rq_wait->comp); + +- INIT_WORK(&ctx->free_work, free_ioctx); +- schedule_work(&ctx->free_work); ++ INIT_SWORK(&ctx->free_work, free_ioctx); ++ swork_queue(&ctx->free_work); + } + + /* +@@ -598,9 +600,9 @@ + * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted - + * now it's safe to cancel any that need to be. 
+ */ +-static void free_ioctx_users(struct percpu_ref *ref) ++static void free_ioctx_users_work(struct swork_event *sev) + { +- struct kioctx *ctx = container_of(ref, struct kioctx, users); ++ struct kioctx *ctx = container_of(sev, struct kioctx, free_work); + struct aio_kiocb *req; + + spin_lock_irq(&ctx->ctx_lock); +@@ -619,6 +621,14 @@ + percpu_ref_put(&ctx->reqs); + } + ++static void free_ioctx_users(struct percpu_ref *ref) ++{ ++ struct kioctx *ctx = container_of(ref, struct kioctx, users); ++ ++ INIT_SWORK(&ctx->free_work, free_ioctx_users_work); ++ swork_queue(&ctx->free_work); ++} ++ + static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm) + { + unsigned i, new_nr; +diff -Nur linux-4.4.13.orig/fs/autofs4/autofs_i.h linux-4.4.13/fs/autofs4/autofs_i.h +--- linux-4.4.13.orig/fs/autofs4/autofs_i.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/fs/autofs4/autofs_i.h 2016-06-22 21:55:08.022523867 +0200 +@@ -34,6 +34,7 @@ + #include + #include + #include ++#include + #include + #include + +diff -Nur linux-4.4.13.orig/fs/autofs4/expire.c linux-4.4.13/fs/autofs4/expire.c +--- linux-4.4.13.orig/fs/autofs4/expire.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/fs/autofs4/expire.c 2016-06-22 21:55:08.022523867 +0200 +@@ -150,7 +150,7 @@ + parent = p->d_parent; + if (!spin_trylock(&parent->d_lock)) { + spin_unlock(&p->d_lock); +- cpu_relax(); ++ cpu_chill(); + goto relock; + } + spin_unlock(&p->d_lock); +diff -Nur linux-4.4.13.orig/fs/buffer.c linux-4.4.13/fs/buffer.c +--- linux-4.4.13.orig/fs/buffer.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/fs/buffer.c 2016-06-22 21:55:08.022523867 +0200 +@@ -305,8 +305,7 @@ + * decide that the page is now completely done. + */ + first = page_buffers(page); +- local_irq_save(flags); +- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); ++ flags = bh_uptodate_lock_irqsave(first); + clear_buffer_async_read(bh); + unlock_buffer(bh); + tmp = bh; +@@ -319,8 +318,7 @@ + } + tmp = tmp->b_this_page; + } while (tmp != bh); +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); ++ bh_uptodate_unlock_irqrestore(first, flags); + + /* + * If none of the buffers had errors and they are all +@@ -332,9 +330,7 @@ + return; + + still_busy: +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); +- return; ++ bh_uptodate_unlock_irqrestore(first, flags); + } + + /* +@@ -362,8 +358,7 @@ + } + + first = page_buffers(page); +- local_irq_save(flags); +- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); ++ flags = bh_uptodate_lock_irqsave(first); + + clear_buffer_async_write(bh); + unlock_buffer(bh); +@@ -375,15 +370,12 @@ + } + tmp = tmp->b_this_page; + } +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); ++ bh_uptodate_unlock_irqrestore(first, flags); + end_page_writeback(page); + return; + + still_busy: +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); +- return; ++ bh_uptodate_unlock_irqrestore(first, flags); + } + EXPORT_SYMBOL(end_buffer_async_write); + +@@ -3325,6 +3317,7 @@ + struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags); + if (ret) { + INIT_LIST_HEAD(&ret->b_assoc_buffers); ++ buffer_head_init_locks(ret); + preempt_disable(); + __this_cpu_inc(bh_accounting.nr); + recalc_bh_state(); +diff -Nur linux-4.4.13.orig/fs/dcache.c linux-4.4.13/fs/dcache.c +--- linux-4.4.13.orig/fs/dcache.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/fs/dcache.c 2016-06-22 21:55:08.022523867 +0200 +@@ -19,6 
+19,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -578,7 +579,7 @@ + + failed: + spin_unlock(&dentry->d_lock); +- cpu_relax(); ++ cpu_chill(); + return dentry; /* try again with same dentry */ + } + +@@ -2391,7 +2392,7 @@ + if (dentry->d_lockref.count == 1) { + if (!spin_trylock(&inode->i_lock)) { + spin_unlock(&dentry->d_lock); +- cpu_relax(); ++ cpu_chill(); + goto again; + } + dentry->d_flags &= ~DCACHE_CANT_MOUNT; +diff -Nur linux-4.4.13.orig/fs/eventpoll.c linux-4.4.13/fs/eventpoll.c +--- linux-4.4.13.orig/fs/eventpoll.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/fs/eventpoll.c 2016-06-22 21:55:08.026523867 +0200 +@@ -505,12 +505,12 @@ + */ + static void ep_poll_safewake(wait_queue_head_t *wq) + { +- int this_cpu = get_cpu(); ++ int this_cpu = get_cpu_light(); + + ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS, + ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu); + +- put_cpu(); ++ put_cpu_light(); + } + + static void ep_remove_wait_queue(struct eppoll_entry *pwq) +diff -Nur linux-4.4.13.orig/fs/exec.c linux-4.4.13/fs/exec.c +--- linux-4.4.13.orig/fs/exec.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/fs/exec.c 2016-06-22 21:55:08.026523867 +0200 +@@ -865,12 +865,14 @@ + } + } + task_lock(tsk); ++ preempt_disable_rt(); + active_mm = tsk->active_mm; + tsk->mm = mm; + tsk->active_mm = mm; + activate_mm(active_mm, mm); + tsk->mm->vmacache_seqnum = 0; + vmacache_flush(tsk); ++ preempt_enable_rt(); + task_unlock(tsk); + if (old_mm) { + up_read(&old_mm->mmap_sem); +diff -Nur linux-4.4.13.orig/fs/f2fs/f2fs.h linux-4.4.13/fs/f2fs/f2fs.h +--- linux-4.4.13.orig/fs/f2fs/f2fs.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/fs/f2fs/f2fs.h 2016-06-22 21:55:08.026523867 +0200 +@@ -24,7 +24,6 @@ + + #ifdef CONFIG_F2FS_CHECK_FS + #define f2fs_bug_on(sbi, condition) BUG_ON(condition) +-#define f2fs_down_write(x, y) down_write_nest_lock(x, y) + #else + #define f2fs_bug_on(sbi, condition) \ + do { \ +@@ -33,7 +32,6 @@ + set_sbi_flag(sbi, SBI_NEED_FSCK); \ + } \ + } while (0) +-#define f2fs_down_write(x, y) down_write(x) + #endif + + /* +@@ -959,7 +957,7 @@ + + static inline void f2fs_lock_all(struct f2fs_sb_info *sbi) + { +- f2fs_down_write(&sbi->cp_rwsem, &sbi->cp_mutex); ++ down_write(&sbi->cp_rwsem); + } + + static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi) +diff -Nur linux-4.4.13.orig/fs/jbd2/checkpoint.c linux-4.4.13/fs/jbd2/checkpoint.c +--- linux-4.4.13.orig/fs/jbd2/checkpoint.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/fs/jbd2/checkpoint.c 2016-06-22 21:55:08.026523867 +0200 +@@ -116,6 +116,8 @@ + nblocks = jbd2_space_needed(journal); + while (jbd2_log_space_left(journal) < nblocks) { + write_unlock(&journal->j_state_lock); ++ if (current->plug) ++ io_schedule(); + mutex_lock(&journal->j_checkpoint_mutex); + + /* +diff -Nur linux-4.4.13.orig/fs/namespace.c linux-4.4.13/fs/namespace.c +--- linux-4.4.13.orig/fs/namespace.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/fs/namespace.c 2016-06-22 21:55:08.026523867 +0200 +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include + #include + #include + #include /* init_rootfs */ +@@ -353,8 +354,11 @@ + * incremented count after it has set MNT_WRITE_HOLD. 
+ */ + smp_mb(); +- while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) +- cpu_relax(); ++ while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) { ++ preempt_enable(); ++ cpu_chill(); ++ preempt_disable(); ++ } + /* + * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will + * be set to match its requirements. So we must not load that until +diff -Nur linux-4.4.13.orig/fs/ntfs/aops.c linux-4.4.13/fs/ntfs/aops.c +--- linux-4.4.13.orig/fs/ntfs/aops.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/fs/ntfs/aops.c 2016-06-22 21:55:08.026523867 +0200 +@@ -107,8 +107,7 @@ + "0x%llx.", (unsigned long long)bh->b_blocknr); + } + first = page_buffers(page); +- local_irq_save(flags); +- bit_spin_lock(BH_Uptodate_Lock, &first->b_state); ++ flags = bh_uptodate_lock_irqsave(first); + clear_buffer_async_read(bh); + unlock_buffer(bh); + tmp = bh; +@@ -123,8 +122,7 @@ + } + tmp = tmp->b_this_page; + } while (tmp != bh); +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); ++ bh_uptodate_unlock_irqrestore(first, flags); + /* + * If none of the buffers had errors then we can set the page uptodate, + * but we first have to perform the post read mst fixups, if the +@@ -145,13 +143,13 @@ + recs = PAGE_CACHE_SIZE / rec_size; + /* Should have been verified before we got here... */ + BUG_ON(!recs); +- local_irq_save(flags); ++ local_irq_save_nort(flags); + kaddr = kmap_atomic(page); + for (i = 0; i < recs; i++) + post_read_mst_fixup((NTFS_RECORD*)(kaddr + + i * rec_size), rec_size); + kunmap_atomic(kaddr); +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + flush_dcache_page(page); + if (likely(page_uptodate && !PageError(page))) + SetPageUptodate(page); +@@ -159,9 +157,7 @@ + unlock_page(page); + return; + still_busy: +- bit_spin_unlock(BH_Uptodate_Lock, &first->b_state); +- local_irq_restore(flags); +- return; ++ bh_uptodate_unlock_irqrestore(first, flags); + } + + /** +diff -Nur linux-4.4.13.orig/fs/timerfd.c linux-4.4.13/fs/timerfd.c +--- linux-4.4.13.orig/fs/timerfd.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/fs/timerfd.c 2016-06-22 21:55:08.026523867 +0200 +@@ -450,7 +450,10 @@ + break; + } + spin_unlock_irq(&ctx->wqh.lock); +- cpu_relax(); ++ if (isalarm(ctx)) ++ hrtimer_wait_for_timer(&ctx->t.alarm.timer); ++ else ++ hrtimer_wait_for_timer(&ctx->t.tmr); + } + + /* +diff -Nur linux-4.4.13.orig/include/acpi/platform/aclinux.h linux-4.4.13/include/acpi/platform/aclinux.h +--- linux-4.4.13.orig/include/acpi/platform/aclinux.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/acpi/platform/aclinux.h 2016-06-22 21:55:08.026523867 +0200 +@@ -127,6 +127,7 @@ + + #define acpi_cache_t struct kmem_cache + #define acpi_spinlock spinlock_t * ++#define acpi_raw_spinlock raw_spinlock_t * + #define acpi_cpu_flags unsigned long + + /* Use native linux version of acpi_os_allocate_zeroed */ +@@ -145,6 +146,20 @@ + #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id + #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock + ++#define acpi_os_create_raw_lock(__handle) \ ++({ \ ++ raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock)); \ ++ \ ++ if (lock) { \ ++ *(__handle) = lock; \ ++ raw_spin_lock_init(*(__handle)); \ ++ } \ ++ lock ? 
AE_OK : AE_NO_MEMORY; \ ++ }) ++ ++#define acpi_os_delete_raw_lock(__handle) kfree(__handle) ++ ++ + /* + * OSL interfaces used by debugger/disassembler + */ +diff -Nur linux-4.4.13.orig/include/asm-generic/bug.h linux-4.4.13/include/asm-generic/bug.h +--- linux-4.4.13.orig/include/asm-generic/bug.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/asm-generic/bug.h 2016-06-22 21:55:08.026523867 +0200 +@@ -206,6 +206,20 @@ + # define WARN_ON_SMP(x) ({0;}) + #endif + ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define BUG_ON_RT(c) BUG_ON(c) ++# define BUG_ON_NONRT(c) do { } while (0) ++# define WARN_ON_RT(condition) WARN_ON(condition) ++# define WARN_ON_NONRT(condition) do { } while (0) ++# define WARN_ON_ONCE_NONRT(condition) do { } while (0) ++#else ++# define BUG_ON_RT(c) do { } while (0) ++# define BUG_ON_NONRT(c) BUG_ON(c) ++# define WARN_ON_RT(condition) do { } while (0) ++# define WARN_ON_NONRT(condition) WARN_ON(condition) ++# define WARN_ON_ONCE_NONRT(condition) WARN_ON_ONCE(condition) ++#endif ++ + #endif /* __ASSEMBLY__ */ + + #endif +diff -Nur linux-4.4.13.orig/include/asm-generic/preempt.h linux-4.4.13/include/asm-generic/preempt.h +--- linux-4.4.13.orig/include/asm-generic/preempt.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/asm-generic/preempt.h 2016-06-22 21:55:08.026523867 +0200 +@@ -7,10 +7,10 @@ + + static __always_inline int preempt_count(void) + { +- return current_thread_info()->preempt_count; ++ return READ_ONCE(current_thread_info()->preempt_count); + } + +-static __always_inline int *preempt_count_ptr(void) ++static __always_inline volatile int *preempt_count_ptr(void) + { + return &current_thread_info()->preempt_count; + } +diff -Nur linux-4.4.13.orig/include/linux/blkdev.h linux-4.4.13/include/linux/blkdev.h +--- linux-4.4.13.orig/include/linux/blkdev.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/blkdev.h 2016-06-22 21:55:08.026523867 +0200 +@@ -89,6 +89,7 @@ + struct list_head queuelist; + union { + struct call_single_data csd; ++ struct work_struct work; + unsigned long fifo_time; + }; + +@@ -455,7 +456,7 @@ + struct throtl_data *td; + #endif + struct rcu_head rcu_head; +- wait_queue_head_t mq_freeze_wq; ++ struct swait_queue_head mq_freeze_wq; + struct percpu_ref q_usage_counter; + struct list_head all_q_node; + +diff -Nur linux-4.4.13.orig/include/linux/blk-mq.h linux-4.4.13/include/linux/blk-mq.h +--- linux-4.4.13.orig/include/linux/blk-mq.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/blk-mq.h 2016-06-22 21:55:08.026523867 +0200 +@@ -212,6 +212,7 @@ + + struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); + struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); ++void __blk_mq_complete_request_remote_work(struct work_struct *work); + + int blk_mq_request_started(struct request *rq); + void blk_mq_start_request(struct request *rq); +diff -Nur linux-4.4.13.orig/include/linux/bottom_half.h linux-4.4.13/include/linux/bottom_half.h +--- linux-4.4.13.orig/include/linux/bottom_half.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/bottom_half.h 2016-06-22 21:55:08.026523867 +0200 +@@ -3,6 +3,39 @@ + + #include + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ ++extern void __local_bh_disable(void); ++extern void _local_bh_enable(void); ++extern void __local_bh_enable(void); ++ ++static inline void local_bh_disable(void) ++{ ++ __local_bh_disable(); ++} ++ ++static inline void __local_bh_disable_ip(unsigned long ip, unsigned
int cnt) ++{ ++ __local_bh_disable(); ++} ++ ++static inline void local_bh_enable(void) ++{ ++ __local_bh_enable(); ++} ++ ++static inline void __local_bh_enable_ip(unsigned long ip, unsigned int cnt) ++{ ++ __local_bh_enable(); ++} ++ ++static inline void local_bh_enable_ip(unsigned long ip) ++{ ++ __local_bh_enable(); ++} ++ ++#else ++ + #ifdef CONFIG_TRACE_IRQFLAGS + extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt); + #else +@@ -30,5 +63,6 @@ + { + __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET); + } ++#endif + + #endif /* _LINUX_BH_H */ +diff -Nur linux-4.4.13.orig/include/linux/buffer_head.h linux-4.4.13/include/linux/buffer_head.h +--- linux-4.4.13.orig/include/linux/buffer_head.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/buffer_head.h 2016-06-22 21:55:08.026523867 +0200 +@@ -75,8 +75,50 @@ + struct address_space *b_assoc_map; /* mapping this buffer is + associated with */ + atomic_t b_count; /* users using this buffer_head */ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ spinlock_t b_uptodate_lock; ++#if IS_ENABLED(CONFIG_JBD2) ++ spinlock_t b_state_lock; ++ spinlock_t b_journal_head_lock; ++#endif ++#endif + }; + ++static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh) ++{ ++ unsigned long flags; ++ ++#ifndef CONFIG_PREEMPT_RT_BASE ++ local_irq_save(flags); ++ bit_spin_lock(BH_Uptodate_Lock, &bh->b_state); ++#else ++ spin_lock_irqsave(&bh->b_uptodate_lock, flags); ++#endif ++ return flags; ++} ++ ++static inline void ++bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags) ++{ ++#ifndef CONFIG_PREEMPT_RT_BASE ++ bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state); ++ local_irq_restore(flags); ++#else ++ spin_unlock_irqrestore(&bh->b_uptodate_lock, flags); ++#endif ++} ++ ++static inline void buffer_head_init_locks(struct buffer_head *bh) ++{ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ spin_lock_init(&bh->b_uptodate_lock); ++#if IS_ENABLED(CONFIG_JBD2) ++ spin_lock_init(&bh->b_state_lock); ++ spin_lock_init(&bh->b_journal_head_lock); ++#endif ++#endif ++} ++ + /* + * macro tricks to expand the set_buffer_foo(), clear_buffer_foo() + * and buffer_foo() functions. 
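The helpers just added to buffer_head.h replace the open-coded local_irq_save() + bit_spin_lock(BH_Uptodate_Lock, ...) sequences that the fs/buffer.c and fs/ntfs/aops.c hunks earlier in this patch convert. A minimal caller-side sketch, assuming a buffer_head initialized via buffer_head_init_locks() as in the alloc_buffer_head() hunk above; end_buffer_async_example() is a hypothetical function, not part of the patch:

static void end_buffer_async_example(struct buffer_head *bh)
{
	unsigned long flags;

	/* !RT: irq-off bit-spinlock on b_state; RT: per-bh sleeping spinlock */
	flags = bh_uptodate_lock_irqsave(bh);
	clear_buffer_async_read(bh);	/* b_state bits are stable here */
	bh_uptodate_unlock_irqrestore(bh, flags);
}

Centralizing the scheme in two inline helpers means a single CONFIG_PREEMPT_RT_BASE switch covers every end_io path instead of each caller repeating the #ifdef.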
+diff -Nur linux-4.4.13.orig/include/linux/cgroup-defs.h linux-4.4.13/include/linux/cgroup-defs.h +--- linux-4.4.13.orig/include/linux/cgroup-defs.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/cgroup-defs.h 2016-06-22 21:55:08.026523867 +0200 +@@ -16,6 +16,7 @@ + #include + #include + #include ++#include + + #ifdef CONFIG_CGROUPS + +@@ -142,6 +143,7 @@ + /* percpu_ref killing and RCU release */ + struct rcu_head rcu_head; + struct work_struct destroy_work; ++ struct swork_event destroy_swork; + }; + + /* +diff -Nur linux-4.4.13.orig/include/linux/clk/at91_pmc.h linux-4.4.13/include/linux/clk/at91_pmc.h +--- linux-4.4.13.orig/include/linux/clk/at91_pmc.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/clk/at91_pmc.h 2016-06-22 21:55:08.026523867 +0200 +@@ -16,18 +16,6 @@ + #ifndef AT91_PMC_H + #define AT91_PMC_H + +-#ifndef __ASSEMBLY__ +-extern void __iomem *at91_pmc_base; +- +-#define at91_pmc_read(field) \ +- readl_relaxed(at91_pmc_base + field) +- +-#define at91_pmc_write(field, value) \ +- writel_relaxed(value, at91_pmc_base + field) +-#else +-.extern at91_pmc_base +-#endif +- + #define AT91_PMC_SCER 0x00 /* System Clock Enable Register */ + #define AT91_PMC_SCDR 0x04 /* System Clock Disable Register */ + +diff -Nur linux-4.4.13.orig/include/linux/completion.h linux-4.4.13/include/linux/completion.h +--- linux-4.4.13.orig/include/linux/completion.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/completion.h 2016-06-22 21:55:08.030523867 +0200 +@@ -7,8 +7,7 @@ + * Atomic wait-for-completion handler data structures. + * See kernel/sched/completion.c for details. + */ +- +-#include ++#include + + /* + * struct completion - structure used to maintain state for a "completion" +@@ -24,11 +23,11 @@ + */ + struct completion { + unsigned int done; +- wait_queue_head_t wait; ++ struct swait_queue_head wait; + }; + + #define COMPLETION_INITIALIZER(work) \ +- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) } ++ { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) } + + #define COMPLETION_INITIALIZER_ONSTACK(work) \ + ({ init_completion(&work); work; }) +@@ -73,7 +72,7 @@ + static inline void init_completion(struct completion *x) + { + x->done = 0; +- init_waitqueue_head(&x->wait); ++ init_swait_queue_head(&x->wait); + } + + /** +diff -Nur linux-4.4.13.orig/include/linux/cpu.h linux-4.4.13/include/linux/cpu.h +--- linux-4.4.13.orig/include/linux/cpu.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/cpu.h 2016-06-22 21:55:08.030523867 +0200 +@@ -231,6 +231,8 @@ + extern void put_online_cpus(void); + extern void cpu_hotplug_disable(void); + extern void cpu_hotplug_enable(void); ++extern void pin_current_cpu(void); ++extern void unpin_current_cpu(void); + #define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) + #define __hotcpu_notifier(fn, pri) __cpu_notifier(fn, pri) + #define register_hotcpu_notifier(nb) register_cpu_notifier(nb) +@@ -248,6 +250,8 @@ + #define put_online_cpus() do { } while (0) + #define cpu_hotplug_disable() do { } while (0) + #define cpu_hotplug_enable() do { } while (0) ++static inline void pin_current_cpu(void) { } ++static inline void unpin_current_cpu(void) { } + #define hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) + #define __hotcpu_notifier(fn, pri) do { (void)(fn); } while (0) + /* These aren't inline functions due to a GCC bug. 
*/ +diff -Nur linux-4.4.13.orig/include/linux/delay.h linux-4.4.13/include/linux/delay.h +--- linux-4.4.13.orig/include/linux/delay.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/delay.h 2016-06-22 21:55:08.030523867 +0200 +@@ -52,4 +52,10 @@ + msleep(seconds * 1000); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++extern void cpu_chill(void); ++#else ++# define cpu_chill() cpu_relax() ++#endif ++ + #endif /* defined(_LINUX_DELAY_H) */ +diff -Nur linux-4.4.13.orig/include/linux/ftrace.h linux-4.4.13/include/linux/ftrace.h +--- linux-4.4.13.orig/include/linux/ftrace.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/ftrace.h 2016-06-22 21:55:08.030523867 +0200 +@@ -694,6 +694,18 @@ + #define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5)) + #define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6)) + ++static inline unsigned long get_lock_parent_ip(void) ++{ ++ unsigned long addr = CALLER_ADDR0; ++ ++ if (!in_lock_functions(addr)) ++ return addr; ++ addr = CALLER_ADDR1; ++ if (!in_lock_functions(addr)) ++ return addr; ++ return CALLER_ADDR2; ++} ++ + #ifdef CONFIG_IRQSOFF_TRACER + extern void time_hardirqs_on(unsigned long a0, unsigned long a1); + extern void time_hardirqs_off(unsigned long a0, unsigned long a1); +diff -Nur linux-4.4.13.orig/include/linux/highmem.h linux-4.4.13/include/linux/highmem.h +--- linux-4.4.13.orig/include/linux/highmem.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/highmem.h 2016-06-22 21:55:08.030523867 +0200 +@@ -7,6 +7,7 @@ + #include + #include + #include ++#include + + #include + +@@ -65,7 +66,7 @@ + + static inline void *kmap_atomic(struct page *page) + { +- preempt_disable(); ++ preempt_disable_nort(); + pagefault_disable(); + return page_address(page); + } +@@ -74,7 +75,7 @@ + static inline void __kunmap_atomic(void *addr) + { + pagefault_enable(); +- preempt_enable(); ++ preempt_enable_nort(); + } + + #define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn)) +@@ -86,32 +87,51 @@ + + #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) + ++#ifndef CONFIG_PREEMPT_RT_FULL + DECLARE_PER_CPU(int, __kmap_atomic_idx); ++#endif + + static inline int kmap_atomic_idx_push(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1; + +-#ifdef CONFIG_DEBUG_HIGHMEM ++# ifdef CONFIG_DEBUG_HIGHMEM + WARN_ON_ONCE(in_irq() && !irqs_disabled()); + BUG_ON(idx >= KM_TYPE_NR); +-#endif ++# endif + return idx; ++#else ++ current->kmap_idx++; ++ BUG_ON(current->kmap_idx > KM_TYPE_NR); ++ return current->kmap_idx - 1; ++#endif + } + + static inline int kmap_atomic_idx(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + return __this_cpu_read(__kmap_atomic_idx) - 1; ++#else ++ return current->kmap_idx - 1; ++#endif + } + + static inline void kmap_atomic_idx_pop(void) + { +-#ifdef CONFIG_DEBUG_HIGHMEM ++#ifndef CONFIG_PREEMPT_RT_FULL ++# ifdef CONFIG_DEBUG_HIGHMEM + int idx = __this_cpu_dec_return(__kmap_atomic_idx); + + BUG_ON(idx < 0); +-#else ++# else + __this_cpu_dec(__kmap_atomic_idx); ++# endif ++#else ++ current->kmap_idx--; ++# ifdef CONFIG_DEBUG_HIGHMEM ++ BUG_ON(current->kmap_idx < 0); ++# endif + #endif + } + +diff -Nur linux-4.4.13.orig/include/linux/hrtimer.h linux-4.4.13/include/linux/hrtimer.h +--- linux-4.4.13.orig/include/linux/hrtimer.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/hrtimer.h 2016-06-22 21:55:08.030523867 +0200 +@@ -87,6 +87,9 @@ + * @function: timer expiry callback function + * @base: pointer to the timer base (per cpu and 
per clock) + * @state: state information (See bit values above) ++ * @cb_entry: list entry to defer timers from hardirq context ++ * @irqsafe: timer can run in hardirq context ++ * @praecox: timer expiry time if expired at the time of programming + * @is_rel: Set if the timer was armed relative + * @start_pid: timer statistics field to store the pid of the task which + * started the timer +@@ -103,6 +106,11 @@ + enum hrtimer_restart (*function)(struct hrtimer *); + struct hrtimer_clock_base *base; + u8 state; ++ struct list_head cb_entry; ++ int irqsafe; ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ ktime_t praecox; ++#endif + u8 is_rel; + #ifdef CONFIG_TIMER_STATS + int start_pid; +@@ -123,11 +131,7 @@ + struct task_struct *task; + }; + +-#ifdef CONFIG_64BIT + # define HRTIMER_CLOCK_BASE_ALIGN 64 +-#else +-# define HRTIMER_CLOCK_BASE_ALIGN 32 +-#endif + + /** + * struct hrtimer_clock_base - the timer base for a specific clock +@@ -136,6 +140,7 @@ + * timer to a base on another cpu. + * @clockid: clock id for per_cpu support + * @active: red black tree root node for the active timers ++ * @expired: list head for deferred timers. + * @get_time: function to retrieve the current time of the clock + * @offset: offset of this clock to the monotonic base + */ +@@ -144,6 +149,7 @@ + int index; + clockid_t clockid; + struct timerqueue_head active; ++ struct list_head expired; + ktime_t (*get_time)(void); + ktime_t offset; + } __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN))); +@@ -187,6 +193,7 @@ + raw_spinlock_t lock; + seqcount_t seq; + struct hrtimer *running; ++ struct hrtimer *running_soft; + unsigned int cpu; + unsigned int active_bases; + unsigned int clock_was_set_seq; +@@ -203,6 +210,9 @@ + unsigned int nr_hangs; + unsigned int max_hang_time; + #endif ++#ifdef CONFIG_PREEMPT_RT_BASE ++ wait_queue_head_t wait; ++#endif + struct hrtimer_clock_base clock_base[HRTIMER_MAX_CLOCK_BASES]; + } ____cacheline_aligned; + +@@ -412,6 +422,13 @@ + hrtimer_start_expires(timer, HRTIMER_MODE_ABS); + } + ++/* Softirq preemption could deadlock timer removal */ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ extern void hrtimer_wait_for_timer(const struct hrtimer *timer); ++#else ++# define hrtimer_wait_for_timer(timer) do { cpu_relax(); } while (0) ++#endif ++ + /* Query timers: */ + extern ktime_t __hrtimer_get_remaining(const struct hrtimer *timer, bool adjust); + +@@ -436,7 +453,7 @@ + * Helper function to check, whether the timer is running the callback + * function + */ +-static inline int hrtimer_callback_running(struct hrtimer *timer) ++static inline int hrtimer_callback_running(const struct hrtimer *timer) + { + return timer->base->cpu_base->running == timer; + } +diff -Nur linux-4.4.13.orig/include/linux/idr.h linux-4.4.13/include/linux/idr.h +--- linux-4.4.13.orig/include/linux/idr.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/idr.h 2016-06-22 21:55:08.030523867 +0200 +@@ -95,10 +95,14 @@ + * Each idr_preload() should be matched with an invocation of this + * function. See idr_preload() for details. 
+ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++void idr_preload_end(void); ++#else + static inline void idr_preload_end(void) + { + preempt_enable(); + } ++#endif + + /** + * idr_find - return pointer for given id +diff -Nur linux-4.4.13.orig/include/linux/init_task.h linux-4.4.13/include/linux/init_task.h +--- linux-4.4.13.orig/include/linux/init_task.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/init_task.h 2016-06-22 21:55:08.030523867 +0200 +@@ -148,9 +148,15 @@ + # define INIT_PERF_EVENTS(tsk) + #endif + ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define INIT_TIMER_LIST .posix_timer_list = NULL, ++#else ++# define INIT_TIMER_LIST ++#endif ++ + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN + # define INIT_VTIME(tsk) \ +- .vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock), \ ++ .vtime_seqcount = SEQCNT_ZERO(tsk.vtime_seqcount), \ + .vtime_snap = 0, \ + .vtime_snap_whence = VTIME_SYS, + #else +@@ -239,6 +245,7 @@ + .cpu_timers = INIT_CPU_TIMERS(tsk.cpu_timers), \ + .pi_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock), \ + .timer_slack_ns = 50000, /* 50 usec default slack */ \ ++ INIT_TIMER_LIST \ + .pids = { \ + [PIDTYPE_PID] = INIT_PID_LINK(PIDTYPE_PID), \ + [PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID), \ +diff -Nur linux-4.4.13.orig/include/linux/interrupt.h linux-4.4.13/include/linux/interrupt.h +--- linux-4.4.13.orig/include/linux/interrupt.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/interrupt.h 2016-06-22 21:55:08.030523867 +0200 +@@ -61,6 +61,7 @@ + * interrupt handler after suspending interrupts. For system + * wakeup devices users need to implement wakeup detection in + * their interrupt handlers. ++ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT) + */ + #define IRQF_SHARED 0x00000080 + #define IRQF_PROBE_SHARED 0x00000100 +@@ -74,6 +75,7 @@ + #define IRQF_NO_THREAD 0x00010000 + #define IRQF_EARLY_RESUME 0x00020000 + #define IRQF_COND_SUSPEND 0x00040000 ++#define IRQF_NO_SOFTIRQ_CALL 0x00080000 + + #define IRQF_TIMER (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD) + +@@ -186,7 +188,7 @@ + #ifdef CONFIG_LOCKDEP + # define local_irq_enable_in_hardirq() do { } while (0) + #else +-# define local_irq_enable_in_hardirq() local_irq_enable() ++# define local_irq_enable_in_hardirq() local_irq_enable_nort() + #endif + + extern void disable_irq_nosync(unsigned int irq); +@@ -206,6 +208,7 @@ + * @irq: Interrupt to which notification applies + * @kref: Reference count, for internal use + * @work: Work item, for internal use ++ * @list: List item for deferred callbacks + * @notify: Function to be called on change. This will be + * called in process context. + * @release: Function to be called on release. 
This will be +@@ -217,6 +220,7 @@ + unsigned int irq; + struct kref kref; + struct work_struct work; ++ struct list_head list; + void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask); + void (*release)(struct kref *ref); + }; +@@ -379,9 +383,13 @@ + bool state); + + #ifdef CONFIG_IRQ_FORCED_THREADING ++# ifndef CONFIG_PREEMPT_RT_BASE + extern bool force_irqthreads; ++# else ++# define force_irqthreads (true) ++# endif + #else +-#define force_irqthreads (0) ++#define force_irqthreads (false) + #endif + + #ifndef __ARCH_SET_SOFTIRQ_PENDING +@@ -438,9 +446,10 @@ + void (*action)(struct softirq_action *); + }; + ++#ifndef CONFIG_PREEMPT_RT_FULL + asmlinkage void do_softirq(void); + asmlinkage void __do_softirq(void); +- ++static inline void thread_do_softirq(void) { do_softirq(); } + #ifdef __ARCH_HAS_DO_SOFTIRQ + void do_softirq_own_stack(void); + #else +@@ -449,13 +458,25 @@ + __do_softirq(); + } + #endif ++#else ++extern void thread_do_softirq(void); ++#endif + + extern void open_softirq(int nr, void (*action)(struct softirq_action *)); + extern void softirq_init(void); + extern void __raise_softirq_irqoff(unsigned int nr); ++#ifdef CONFIG_PREEMPT_RT_FULL ++extern void __raise_softirq_irqoff_ksoft(unsigned int nr); ++#else ++static inline void __raise_softirq_irqoff_ksoft(unsigned int nr) ++{ ++ __raise_softirq_irqoff(nr); ++} ++#endif + + extern void raise_softirq_irqoff(unsigned int nr); + extern void raise_softirq(unsigned int nr); ++extern void softirq_check_pending_idle(void); + + DECLARE_PER_CPU(struct task_struct *, ksoftirqd); + +@@ -477,8 +498,9 @@ + to be executed on some cpu at least once after this. + * If the tasklet is already scheduled, but its execution is still not + started, it will be executed only once. +- * If this tasklet is already running on another CPU (or schedule is called +- from tasklet itself), it is rescheduled for later. ++ * If this tasklet is already running on another CPU, it is rescheduled ++ for later. ++ * Schedule must not be called from the tasklet itself (a lockup occurs) + * Tasklet is strictly serialized wrt itself, but not + wrt another tasklets. If client needs some intertask synchronization, + he makes it with spinlocks. 
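The amended tasklet comment above changes the documented contract: a tasklet may no longer reschedule itself from its own handler, and cross-task synchronization remains the client's job via spinlocks. A minimal sketch of that spinlock pattern, using illustrative names (my_lock, my_counter and my_tasklet_fn are not part of the patch):

static DEFINE_SPINLOCK(my_lock);
static unsigned long my_counter;

static void my_tasklet_fn(unsigned long data)
{
	/* serializes against process context and tasklets on other CPUs */
	spin_lock(&my_lock);
	my_counter++;
	spin_unlock(&my_lock);
}

static DECLARE_TASKLET(my_tasklet, my_tasklet_fn, 0);

Process-context users would take the same lock before touching my_counter (spin_lock_bh() on !RT, where the handler runs in softirq context), and, per the comment, must not call tasklet_schedule(&my_tasklet) from inside my_tasklet_fn() itself.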
+@@ -503,27 +525,36 @@ + enum + { + TASKLET_STATE_SCHED, /* Tasklet is scheduled for execution */ +- TASKLET_STATE_RUN /* Tasklet is running (SMP only) */ ++ TASKLET_STATE_RUN, /* Tasklet is running (SMP only) */ ++ TASKLET_STATE_PENDING /* Tasklet is pending */ + }; + +-#ifdef CONFIG_SMP ++#define TASKLET_STATEF_SCHED (1 << TASKLET_STATE_SCHED) ++#define TASKLET_STATEF_RUN (1 << TASKLET_STATE_RUN) ++#define TASKLET_STATEF_PENDING (1 << TASKLET_STATE_PENDING) ++ ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) + static inline int tasklet_trylock(struct tasklet_struct *t) + { + return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state); + } + ++static inline int tasklet_tryunlock(struct tasklet_struct *t) ++{ ++ return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN; ++} ++ + static inline void tasklet_unlock(struct tasklet_struct *t) + { + smp_mb__before_atomic(); + clear_bit(TASKLET_STATE_RUN, &(t)->state); + } + +-static inline void tasklet_unlock_wait(struct tasklet_struct *t) +-{ +- while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); } +-} ++extern void tasklet_unlock_wait(struct tasklet_struct *t); ++ + #else + #define tasklet_trylock(t) 1 ++#define tasklet_tryunlock(t) 1 + #define tasklet_unlock_wait(t) do { } while (0) + #define tasklet_unlock(t) do { } while (0) + #endif +@@ -572,12 +603,7 @@ + smp_mb(); + } + +-static inline void tasklet_enable(struct tasklet_struct *t) +-{ +- smp_mb__before_atomic(); +- atomic_dec(&t->count); +-} +- ++extern void tasklet_enable(struct tasklet_struct *t); + extern void tasklet_kill(struct tasklet_struct *t); + extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu); + extern void tasklet_init(struct tasklet_struct *t, +@@ -608,6 +634,12 @@ + tasklet_kill(&ttimer->tasklet); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++extern void softirq_early_init(void); ++#else ++static inline void softirq_early_init(void) { } ++#endif ++ + /* + * Autoprobing for irqs: + * +diff -Nur linux-4.4.13.orig/include/linux/irqdesc.h linux-4.4.13/include/linux/irqdesc.h +--- linux-4.4.13.orig/include/linux/irqdesc.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/irqdesc.h 2016-06-22 21:55:08.030523867 +0200 +@@ -61,6 +61,7 @@ + unsigned int irqs_unhandled; + atomic_t threads_handled; + int threads_handled_last; ++ u64 random_ip; + raw_spinlock_t lock; + struct cpumask *percpu_enabled; + #ifdef CONFIG_SMP +diff -Nur linux-4.4.13.orig/include/linux/irqflags.h linux-4.4.13/include/linux/irqflags.h +--- linux-4.4.13.orig/include/linux/irqflags.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/irqflags.h 2016-06-22 21:55:08.030523867 +0200 +@@ -25,8 +25,6 @@ + # define trace_softirqs_enabled(p) ((p)->softirqs_enabled) + # define trace_hardirq_enter() do { current->hardirq_context++; } while (0) + # define trace_hardirq_exit() do { current->hardirq_context--; } while (0) +-# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) +-# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) + # define INIT_TRACE_IRQFLAGS .softirqs_enabled = 1, + #else + # define trace_hardirqs_on() do { } while (0) +@@ -39,9 +37,15 @@ + # define trace_softirqs_enabled(p) 0 + # define trace_hardirq_enter() do { } while (0) + # define trace_hardirq_exit() do { } while (0) ++# define INIT_TRACE_IRQFLAGS ++#endif ++ ++#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL) ++# define lockdep_softirq_enter() do { current->softirq_context++; } while (0) 
++# define lockdep_softirq_exit() do { current->softirq_context--; } while (0) ++#else + # define lockdep_softirq_enter() do { } while (0) + # define lockdep_softirq_exit() do { } while (0) +-# define INIT_TRACE_IRQFLAGS + #endif + + #if defined(CONFIG_IRQSOFF_TRACER) || \ +@@ -148,4 +152,23 @@ + + #define irqs_disabled_flags(flags) raw_irqs_disabled_flags(flags) + ++/* ++ * local_irq* variants depending on RT/!RT ++ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define local_irq_disable_nort() do { } while (0) ++# define local_irq_enable_nort() do { } while (0) ++# define local_irq_save_nort(flags) local_save_flags(flags) ++# define local_irq_restore_nort(flags) (void)(flags) ++# define local_irq_disable_rt() local_irq_disable() ++# define local_irq_enable_rt() local_irq_enable() ++#else ++# define local_irq_disable_nort() local_irq_disable() ++# define local_irq_enable_nort() local_irq_enable() ++# define local_irq_save_nort(flags) local_irq_save(flags) ++# define local_irq_restore_nort(flags) local_irq_restore(flags) ++# define local_irq_disable_rt() do { } while (0) ++# define local_irq_enable_rt() do { } while (0) ++#endif ++ + #endif +diff -Nur linux-4.4.13.orig/include/linux/irq.h linux-4.4.13/include/linux/irq.h +--- linux-4.4.13.orig/include/linux/irq.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/irq.h 2016-06-22 21:55:08.030523867 +0200 +@@ -72,6 +72,7 @@ + * IRQ_IS_POLLED - Always polled by another interrupt. Exclude + * it from the spurious interrupt detection + * mechanism and from core side polling. ++ * IRQ_NO_SOFTIRQ_CALL - No softirq processing in the irq thread context (RT) + * IRQ_DISABLE_UNLAZY - Disable lazy irq disable + */ + enum { +@@ -99,13 +100,14 @@ + IRQ_PER_CPU_DEVID = (1 << 17), + IRQ_IS_POLLED = (1 << 18), + IRQ_DISABLE_UNLAZY = (1 << 19), ++ IRQ_NO_SOFTIRQ_CALL = (1 << 20), + }; + + #define IRQF_MODIFY_MASK \ + (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ + IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ + IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ +- IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY) ++ IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_NO_SOFTIRQ_CALL) + + #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) + +diff -Nur linux-4.4.13.orig/include/linux/irq_work.h linux-4.4.13/include/linux/irq_work.h +--- linux-4.4.13.orig/include/linux/irq_work.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/irq_work.h 2016-06-22 21:55:08.030523867 +0200 +@@ -16,6 +16,7 @@ + #define IRQ_WORK_BUSY 2UL + #define IRQ_WORK_FLAGS 3UL + #define IRQ_WORK_LAZY 4UL /* Doesn't want IPI, wait for tick */ ++#define IRQ_WORK_HARD_IRQ 8UL /* Run hard IRQ context, even on RT */ + + struct irq_work { + unsigned long flags; +@@ -51,4 +52,10 @@ + static inline void irq_work_run(void) { } + #endif + ++#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL) ++void irq_work_tick_soft(void); ++#else ++static inline void irq_work_tick_soft(void) { } ++#endif ++ + #endif /* _LINUX_IRQ_WORK_H */ +diff -Nur linux-4.4.13.orig/include/linux/jbd2.h linux-4.4.13/include/linux/jbd2.h +--- linux-4.4.13.orig/include/linux/jbd2.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/jbd2.h 2016-06-22 21:55:08.030523867 +0200 +@@ -352,32 +352,56 @@ + + static inline void jbd_lock_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_lock(BH_State, &bh->b_state); ++#else ++ spin_lock(&bh->b_state_lock); ++#endif + } + + static inline int jbd_trylock_bh_state(struct 
buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + return bit_spin_trylock(BH_State, &bh->b_state); ++#else ++ return spin_trylock(&bh->b_state_lock); ++#endif + } + + static inline int jbd_is_locked_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + return bit_spin_is_locked(BH_State, &bh->b_state); ++#else ++ return spin_is_locked(&bh->b_state_lock); ++#endif + } + + static inline void jbd_unlock_bh_state(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_unlock(BH_State, &bh->b_state); ++#else ++ spin_unlock(&bh->b_state_lock); ++#endif + } + + static inline void jbd_lock_bh_journal_head(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_lock(BH_JournalHead, &bh->b_state); ++#else ++ spin_lock(&bh->b_journal_head_lock); ++#endif + } + + static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_unlock(BH_JournalHead, &bh->b_state); ++#else ++ spin_unlock(&bh->b_journal_head_lock); ++#endif + } + + #define J_ASSERT(assert) BUG_ON(!(assert)) +diff -Nur linux-4.4.13.orig/include/linux/kdb.h linux-4.4.13/include/linux/kdb.h +--- linux-4.4.13.orig/include/linux/kdb.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/kdb.h 2016-06-22 21:55:08.030523867 +0200 +@@ -167,6 +167,7 @@ + extern __printf(1, 2) int kdb_printf(const char *, ...); + typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...); + ++#define in_kdb_printk() (kdb_trap_printk) + extern void kdb_init(int level); + + /* Access to kdb specific polling devices */ +@@ -201,6 +202,7 @@ + extern int kdb_unregister(char *); + #else /* ! CONFIG_KGDB_KDB */ + static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; } ++#define in_kdb_printk() (0) + static inline void kdb_init(int level) {} + static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, + char *help, short minlen) { return 0; } +diff -Nur linux-4.4.13.orig/include/linux/kernel.h linux-4.4.13/include/linux/kernel.h +--- linux-4.4.13.orig/include/linux/kernel.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/kernel.h 2016-06-22 21:55:08.030523867 +0200 +@@ -188,6 +188,9 @@ + */ + # define might_sleep() \ + do { __might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) ++ ++# define might_sleep_no_state_check() \ ++ do { ___might_sleep(__FILE__, __LINE__, 0); might_resched(); } while (0) + # define sched_annotate_sleep() (current->task_state_change = 0) + #else + static inline void ___might_sleep(const char *file, int line, +@@ -195,6 +198,7 @@ + static inline void __might_sleep(const char *file, int line, + int preempt_offset) { } + # define might_sleep() do { might_resched(); } while (0) ++# define might_sleep_no_state_check() do { might_resched(); } while (0) + # define sched_annotate_sleep() do { } while (0) + #endif + +@@ -255,6 +259,7 @@ + __printf(1, 2) + void panic(const char *fmt, ...) + __noreturn __cold; ++void nmi_panic(struct pt_regs *regs, const char *msg); + extern void oops_enter(void); + extern void oops_exit(void); + void print_oops_end_marker(void); +@@ -446,6 +451,14 @@ + extern bool crash_kexec_post_notifiers; + + /* ++ * panic_cpu is used for synchronizing panic() and crash_kexec() execution. It ++ * holds a CPU number which is executing panic() currently. A value of ++ * PANIC_CPU_INVALID means no CPU has entered panic() or crash_kexec(). 
++ */ ++extern atomic_t panic_cpu; ++#define PANIC_CPU_INVALID -1 ++ ++/* + * Only to be used by arch init code. If the user over-wrote the default + * CONFIG_PANIC_TIMEOUT, honor it. + */ +@@ -473,6 +486,7 @@ + SYSTEM_HALT, + SYSTEM_POWER_OFF, + SYSTEM_RESTART, ++ SYSTEM_SUSPEND, + } system_state; + + #define TAINT_PROPRIETARY_MODULE 0 +diff -Nur linux-4.4.13.orig/include/linux/kvm_host.h linux-4.4.13/include/linux/kvm_host.h +--- linux-4.4.13.orig/include/linux/kvm_host.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/kvm_host.h 2016-06-22 21:55:08.030523867 +0200 +@@ -25,6 +25,7 @@ + #include + #include + #include ++#include + #include + + #include +@@ -243,7 +244,7 @@ + int fpu_active; + int guest_fpu_loaded, guest_xcr0_loaded; + unsigned char fpu_counter; +- wait_queue_head_t wq; ++ struct swait_queue_head wq; + struct pid *pid; + int sigset_active; + sigset_t sigset; +@@ -794,7 +795,7 @@ + } + #endif + +-static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) ++static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu) + { + #ifdef __KVM_HAVE_ARCH_WQP + return vcpu->arch.wqp; +diff -Nur linux-4.4.13.orig/include/linux/lglock.h linux-4.4.13/include/linux/lglock.h +--- linux-4.4.13.orig/include/linux/lglock.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/lglock.h 2016-06-22 21:55:08.030523867 +0200 +@@ -34,13 +34,30 @@ + #endif + + struct lglock { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ struct rt_mutex __percpu *lock; ++#else + arch_spinlock_t __percpu *lock; ++#endif + #ifdef CONFIG_DEBUG_LOCK_ALLOC + struct lock_class_key lock_key; + struct lockdep_map lock_dep_map; + #endif + }; + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define DEFINE_LGLOCK(name) \ ++ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \ ++ = __RT_MUTEX_INITIALIZER( name ## _lock); \ ++ struct lglock name = { .lock = &name ## _lock } ++ ++# define DEFINE_STATIC_LGLOCK(name) \ ++ static DEFINE_PER_CPU(struct rt_mutex, name ## _lock) \ ++ = __RT_MUTEX_INITIALIZER( name ## _lock); \ ++ static struct lglock name = { .lock = &name ## _lock } ++ ++#else ++ + #define DEFINE_LGLOCK(name) \ + static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ + = __ARCH_SPIN_LOCK_UNLOCKED; \ +@@ -50,6 +67,7 @@ + static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \ + = __ARCH_SPIN_LOCK_UNLOCKED; \ + static struct lglock name = { .lock = &name ## _lock } ++#endif + + void lg_lock_init(struct lglock *lg, char *name); + +@@ -64,6 +82,12 @@ + void lg_global_lock(struct lglock *lg); + void lg_global_unlock(struct lglock *lg); + ++#ifndef CONFIG_PREEMPT_RT_FULL ++#define lg_global_trylock_relax(name) lg_global_lock(name) ++#else ++void lg_global_trylock_relax(struct lglock *lg); ++#endif ++ + #else + /* When !CONFIG_SMP, map lglock to spinlock */ + #define lglock spinlock +diff -Nur linux-4.4.13.orig/include/linux/list_bl.h linux-4.4.13/include/linux/list_bl.h +--- linux-4.4.13.orig/include/linux/list_bl.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/list_bl.h 2016-06-22 21:55:08.030523867 +0200 +@@ -2,6 +2,7 @@ + #define _LINUX_LIST_BL_H + + #include ++#include + #include + + /* +@@ -32,13 +33,24 @@ + + struct hlist_bl_head { + struct hlist_bl_node *first; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ raw_spinlock_t lock; ++#endif + }; + + struct hlist_bl_node { + struct hlist_bl_node *next, **pprev; + }; +-#define INIT_HLIST_BL_HEAD(ptr) \ +- ((ptr)->first = NULL) ++ ++#ifdef CONFIG_PREEMPT_RT_BASE ++#define INIT_HLIST_BL_HEAD(h) \ ++do { \ ++ (h)->first 
= NULL; \ ++ raw_spin_lock_init(&(h)->lock); \ ++} while (0) ++#else ++#define INIT_HLIST_BL_HEAD(h) (h)->first = NULL ++#endif + + static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h) + { +@@ -118,12 +130,26 @@ + + static inline void hlist_bl_lock(struct hlist_bl_head *b) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + bit_spin_lock(0, (unsigned long *)b); ++#else ++ raw_spin_lock(&b->lock); ++#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) ++ __set_bit(0, (unsigned long *)b); ++#endif ++#endif + } + + static inline void hlist_bl_unlock(struct hlist_bl_head *b) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + __bit_spin_unlock(0, (unsigned long *)b); ++#else ++#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK) ++ __clear_bit(0, (unsigned long *)b); ++#endif ++ raw_spin_unlock(&b->lock); ++#endif + } + + static inline bool hlist_bl_is_locked(struct hlist_bl_head *b) +diff -Nur linux-4.4.13.orig/include/linux/locallock.h linux-4.4.13/include/linux/locallock.h +--- linux-4.4.13.orig/include/linux/locallock.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.4.13/include/linux/locallock.h 2016-06-22 21:55:08.030523867 +0200 +@@ -0,0 +1,270 @@ ++#ifndef _LINUX_LOCALLOCK_H ++#define _LINUX_LOCALLOCK_H ++ ++#include ++#include ++ ++#ifdef CONFIG_PREEMPT_RT_BASE ++ ++#ifdef CONFIG_DEBUG_SPINLOCK ++# define LL_WARN(cond) WARN_ON(cond) ++#else ++# define LL_WARN(cond) do { } while (0) ++#endif ++ ++/* ++ * per cpu lock based substitute for local_irq_*() ++ */ ++struct local_irq_lock { ++ spinlock_t lock; ++ struct task_struct *owner; ++ int nestcnt; ++ unsigned long flags; ++}; ++ ++#define DEFINE_LOCAL_IRQ_LOCK(lvar) \ ++ DEFINE_PER_CPU(struct local_irq_lock, lvar) = { \ ++ .lock = __SPIN_LOCK_UNLOCKED((lvar).lock) } ++ ++#define DECLARE_LOCAL_IRQ_LOCK(lvar) \ ++ DECLARE_PER_CPU(struct local_irq_lock, lvar) ++ ++#define local_irq_lock_init(lvar) \ ++ do { \ ++ int __cpu; \ ++ for_each_possible_cpu(__cpu) \ ++ spin_lock_init(&per_cpu(lvar, __cpu).lock); \ ++ } while (0) ++ ++/* ++ * spin_lock|trylock|unlock_local flavour that does not migrate disable ++ * used for __local_lock|trylock|unlock where get_local_var/put_local_var ++ * already takes care of the migrate_disable/enable ++ * for CONFIG_PREEMPT_BASE map to the normal spin_* calls. 
++ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define spin_lock_local(lock) rt_spin_lock__no_mg(lock) ++# define spin_trylock_local(lock) rt_spin_trylock__no_mg(lock) ++# define spin_unlock_local(lock) rt_spin_unlock__no_mg(lock) ++#else ++# define spin_lock_local(lock) spin_lock(lock) ++# define spin_trylock_local(lock) spin_trylock(lock) ++# define spin_unlock_local(lock) spin_unlock(lock) ++#endif ++ ++static inline void __local_lock(struct local_irq_lock *lv) ++{ ++ if (lv->owner != current) { ++ spin_lock_local(&lv->lock); ++ LL_WARN(lv->owner); ++ LL_WARN(lv->nestcnt); ++ lv->owner = current; ++ } ++ lv->nestcnt++; ++} ++ ++#define local_lock(lvar) \ ++ do { __local_lock(&get_local_var(lvar)); } while (0) ++ ++static inline int __local_trylock(struct local_irq_lock *lv) ++{ ++ if (lv->owner != current && spin_trylock_local(&lv->lock)) { ++ LL_WARN(lv->owner); ++ LL_WARN(lv->nestcnt); ++ lv->owner = current; ++ lv->nestcnt = 1; ++ return 1; ++ } ++ return 0; ++} ++ ++#define local_trylock(lvar) \ ++ ({ \ ++ int __locked; \ ++ __locked = __local_trylock(&get_local_var(lvar)); \ ++ if (!__locked) \ ++ put_local_var(lvar); \ ++ __locked; \ ++ }) ++ ++static inline void __local_unlock(struct local_irq_lock *lv) ++{ ++ LL_WARN(lv->nestcnt == 0); ++ LL_WARN(lv->owner != current); ++ if (--lv->nestcnt) ++ return; ++ ++ lv->owner = NULL; ++ spin_unlock_local(&lv->lock); ++} ++ ++#define local_unlock(lvar) \ ++ do { \ ++ __local_unlock(this_cpu_ptr(&lvar)); \ ++ put_local_var(lvar); \ ++ } while (0) ++ ++static inline void __local_lock_irq(struct local_irq_lock *lv) ++{ ++ spin_lock_irqsave(&lv->lock, lv->flags); ++ LL_WARN(lv->owner); ++ LL_WARN(lv->nestcnt); ++ lv->owner = current; ++ lv->nestcnt = 1; ++} ++ ++#define local_lock_irq(lvar) \ ++ do { __local_lock_irq(&get_local_var(lvar)); } while (0) ++ ++#define local_lock_irq_on(lvar, cpu) \ ++ do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0) ++ ++static inline void __local_unlock_irq(struct local_irq_lock *lv) ++{ ++ LL_WARN(!lv->nestcnt); ++ LL_WARN(lv->owner != current); ++ lv->owner = NULL; ++ lv->nestcnt = 0; ++ spin_unlock_irq(&lv->lock); ++} ++ ++#define local_unlock_irq(lvar) \ ++ do { \ ++ __local_unlock_irq(this_cpu_ptr(&lvar)); \ ++ put_local_var(lvar); \ ++ } while (0) ++ ++#define local_unlock_irq_on(lvar, cpu) \ ++ do { \ ++ __local_unlock_irq(&per_cpu(lvar, cpu)); \ ++ } while (0) ++ ++static inline int __local_lock_irqsave(struct local_irq_lock *lv) ++{ ++ if (lv->owner != current) { ++ __local_lock_irq(lv); ++ return 0; ++ } else { ++ lv->nestcnt++; ++ return 1; ++ } ++} ++ ++#define local_lock_irqsave(lvar, _flags) \ ++ do { \ ++ if (__local_lock_irqsave(&get_local_var(lvar))) \ ++ put_local_var(lvar); \ ++ _flags = __this_cpu_read(lvar.flags); \ ++ } while (0) ++ ++#define local_lock_irqsave_on(lvar, _flags, cpu) \ ++ do { \ ++ __local_lock_irqsave(&per_cpu(lvar, cpu)); \ ++ _flags = per_cpu(lvar, cpu).flags; \ ++ } while (0) ++ ++static inline int __local_unlock_irqrestore(struct local_irq_lock *lv, ++ unsigned long flags) ++{ ++ LL_WARN(!lv->nestcnt); ++ LL_WARN(lv->owner != current); ++ if (--lv->nestcnt) ++ return 0; ++ ++ lv->owner = NULL; ++ spin_unlock_irqrestore(&lv->lock, lv->flags); ++ return 1; ++} ++ ++#define local_unlock_irqrestore(lvar, flags) \ ++ do { \ ++ if (__local_unlock_irqrestore(this_cpu_ptr(&lvar), flags)) \ ++ put_local_var(lvar); \ ++ } while (0) ++ ++#define local_unlock_irqrestore_on(lvar, flags, cpu) \ ++ do { \ ++ __local_unlock_irqrestore(&per_cpu(lvar, cpu), flags); \ ++ } while (0) ++ 
++#define local_spin_trylock_irq(lvar, lock) \ ++ ({ \ ++ int __locked; \ ++ local_lock_irq(lvar); \ ++ __locked = spin_trylock(lock); \ ++ if (!__locked) \ ++ local_unlock_irq(lvar); \ ++ __locked; \ ++ }) ++ ++#define local_spin_lock_irq(lvar, lock) \ ++ do { \ ++ local_lock_irq(lvar); \ ++ spin_lock(lock); \ ++ } while (0) ++ ++#define local_spin_unlock_irq(lvar, lock) \ ++ do { \ ++ spin_unlock(lock); \ ++ local_unlock_irq(lvar); \ ++ } while (0) ++ ++#define local_spin_lock_irqsave(lvar, lock, flags) \ ++ do { \ ++ local_lock_irqsave(lvar, flags); \ ++ spin_lock(lock); \ ++ } while (0) ++ ++#define local_spin_unlock_irqrestore(lvar, lock, flags) \ ++ do { \ ++ spin_unlock(lock); \ ++ local_unlock_irqrestore(lvar, flags); \ ++ } while (0) ++ ++#define get_locked_var(lvar, var) \ ++ (*({ \ ++ local_lock(lvar); \ ++ this_cpu_ptr(&var); \ ++ })) ++ ++#define put_locked_var(lvar, var) local_unlock(lvar); ++ ++#define local_lock_cpu(lvar) \ ++ ({ \ ++ local_lock(lvar); \ ++ smp_processor_id(); \ ++ }) ++ ++#define local_unlock_cpu(lvar) local_unlock(lvar) ++ ++#else /* PREEMPT_RT_BASE */ ++ ++#define DEFINE_LOCAL_IRQ_LOCK(lvar) __typeof__(const int) lvar ++#define DECLARE_LOCAL_IRQ_LOCK(lvar) extern __typeof__(const int) lvar ++ ++static inline void local_irq_lock_init(int lvar) { } ++ ++#define local_lock(lvar) preempt_disable() ++#define local_unlock(lvar) preempt_enable() ++#define local_lock_irq(lvar) local_irq_disable() ++#define local_unlock_irq(lvar) local_irq_enable() ++#define local_lock_irqsave(lvar, flags) local_irq_save(flags) ++#define local_unlock_irqrestore(lvar, flags) local_irq_restore(flags) ++ ++#define local_spin_trylock_irq(lvar, lock) spin_trylock_irq(lock) ++#define local_spin_lock_irq(lvar, lock) spin_lock_irq(lock) ++#define local_spin_unlock_irq(lvar, lock) spin_unlock_irq(lock) ++#define local_spin_lock_irqsave(lvar, lock, flags) \ ++ spin_lock_irqsave(lock, flags) ++#define local_spin_unlock_irqrestore(lvar, lock, flags) \ ++ spin_unlock_irqrestore(lock, flags) ++ ++#define get_locked_var(lvar, var) get_cpu_var(var) ++#define put_locked_var(lvar, var) put_cpu_var(var) ++ ++#define local_lock_cpu(lvar) get_cpu() ++#define local_unlock_cpu(lvar) put_cpu() ++ ++#endif ++ ++#endif +diff -Nur linux-4.4.13.orig/include/linux/mm_types.h linux-4.4.13/include/linux/mm_types.h +--- linux-4.4.13.orig/include/linux/mm_types.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/mm_types.h 2016-06-22 21:55:08.030523867 +0200 +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -504,6 +505,9 @@ + bool tlb_flush_pending; + #endif + struct uprobes_state uprobes_state; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct rcu_head delayed_drop; ++#endif + #ifdef CONFIG_X86_INTEL_MPX + /* address of the bounds directory */ + void __user *bd_addr; +diff -Nur linux-4.4.13.orig/include/linux/mutex.h linux-4.4.13/include/linux/mutex.h +--- linux-4.4.13.orig/include/linux/mutex.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/mutex.h 2016-06-22 21:55:08.034523867 +0200 +@@ -19,6 +19,17 @@ + #include + #include + ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ ++ , .dep_map = { .name = #lockname } ++#else ++# define __DEP_MAP_MUTEX_INITIALIZER(lockname) ++#endif ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++# include ++#else ++ + /* + * Simple, straightforward mutexes with strict semantics: + * +@@ -99,13 +110,6 @@ + static inline void mutex_destroy(struct mutex *lock) {} + #endif + 
+-#ifdef CONFIG_DEBUG_LOCK_ALLOC +-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \ +- , .dep_map = { .name = #lockname } +-#else +-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) +-#endif +- + #define __MUTEX_INITIALIZER(lockname) \ + { .count = ATOMIC_INIT(1) \ + , .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \ +@@ -173,6 +177,8 @@ + extern int mutex_trylock(struct mutex *lock); + extern void mutex_unlock(struct mutex *lock); + ++#endif /* !PREEMPT_RT_FULL */ ++ + extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock); + + #endif /* __LINUX_MUTEX_H */ +diff -Nur linux-4.4.13.orig/include/linux/mutex_rt.h linux-4.4.13/include/linux/mutex_rt.h +--- linux-4.4.13.orig/include/linux/mutex_rt.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.4.13/include/linux/mutex_rt.h 2016-06-22 21:55:08.034523867 +0200 +@@ -0,0 +1,84 @@ ++#ifndef __LINUX_MUTEX_RT_H ++#define __LINUX_MUTEX_RT_H ++ ++#ifndef __LINUX_MUTEX_H ++#error "Please include mutex.h" ++#endif ++ ++#include ++ ++/* FIXME: Just for __lockfunc */ ++#include ++ ++struct mutex { ++ struct rt_mutex lock; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++}; ++ ++#define __MUTEX_INITIALIZER(mutexname) \ ++ { \ ++ .lock = __RT_MUTEX_INITIALIZER(mutexname.lock) \ ++ __DEP_MAP_MUTEX_INITIALIZER(mutexname) \ ++ } ++ ++#define DEFINE_MUTEX(mutexname) \ ++ struct mutex mutexname = __MUTEX_INITIALIZER(mutexname) ++ ++extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key); ++extern void __lockfunc _mutex_lock(struct mutex *lock); ++extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock); ++extern int __lockfunc _mutex_lock_killable(struct mutex *lock); ++extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass); ++extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock); ++extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass); ++extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass); ++extern int __lockfunc _mutex_trylock(struct mutex *lock); ++extern void __lockfunc _mutex_unlock(struct mutex *lock); ++ ++#define mutex_is_locked(l) rt_mutex_is_locked(&(l)->lock) ++#define mutex_lock(l) _mutex_lock(l) ++#define mutex_lock_interruptible(l) _mutex_lock_interruptible(l) ++#define mutex_lock_killable(l) _mutex_lock_killable(l) ++#define mutex_trylock(l) _mutex_trylock(l) ++#define mutex_unlock(l) _mutex_unlock(l) ++#define mutex_destroy(l) rt_mutex_destroy(&(l)->lock) ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define mutex_lock_nested(l, s) _mutex_lock_nested(l, s) ++# define mutex_lock_interruptible_nested(l, s) \ ++ _mutex_lock_interruptible_nested(l, s) ++# define mutex_lock_killable_nested(l, s) \ ++ _mutex_lock_killable_nested(l, s) ++ ++# define mutex_lock_nest_lock(lock, nest_lock) \ ++do { \ ++ typecheck(struct lockdep_map *, &(nest_lock)->dep_map); \ ++ _mutex_lock_nest_lock(lock, &(nest_lock)->dep_map); \ ++} while (0) ++ ++#else ++# define mutex_lock_nested(l, s) _mutex_lock(l) ++# define mutex_lock_interruptible_nested(l, s) \ ++ _mutex_lock_interruptible(l) ++# define mutex_lock_killable_nested(l, s) \ ++ _mutex_lock_killable(l) ++# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock) ++#endif ++ ++# define mutex_init(mutex) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ rt_mutex_init(&(mutex)->lock); \ ++ __mutex_do_init((mutex), #mutex, &__key); \ ++} while (0) ++ ++# define __mutex_init(mutex, name, 
key) \ ++do { \ ++ rt_mutex_init(&(mutex)->lock); \ ++ __mutex_do_init((mutex), name, key); \ ++} while (0) ++ ++#endif +diff -Nur linux-4.4.13.orig/include/linux/netdevice.h linux-4.4.13/include/linux/netdevice.h +--- linux-4.4.13.orig/include/linux/netdevice.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/netdevice.h 2016-06-22 21:55:08.034523867 +0200 +@@ -2249,11 +2249,20 @@ + void synchronize_net(void); + int init_dummy_netdev(struct net_device *dev); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static inline int dev_recursion_level(void) ++{ ++ return current->xmit_recursion; ++} ++ ++#else ++ + DECLARE_PER_CPU(int, xmit_recursion); + static inline int dev_recursion_level(void) + { + return this_cpu_read(xmit_recursion); + } ++#endif + + struct net_device *dev_get_by_index(struct net *net, int ifindex); + struct net_device *__dev_get_by_index(struct net *net, int ifindex); +@@ -2564,6 +2573,7 @@ + unsigned int dropped; + struct sk_buff_head input_pkt_queue; + struct napi_struct backlog; ++ struct sk_buff_head tofree_queue; + + }; + +diff -Nur linux-4.4.13.orig/include/linux/netfilter/x_tables.h linux-4.4.13/include/linux/netfilter/x_tables.h +--- linux-4.4.13.orig/include/linux/netfilter/x_tables.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/netfilter/x_tables.h 2016-06-22 21:55:08.034523867 +0200 +@@ -4,6 +4,7 @@ + + #include + #include ++#include + #include + + /** +@@ -282,6 +283,8 @@ + */ + DECLARE_PER_CPU(seqcount_t, xt_recseq); + ++DECLARE_LOCAL_IRQ_LOCK(xt_write_lock); ++ + /* xt_tee_enabled - true if x_tables needs to handle reentrancy + * + * Enabled if current ip(6)tables ruleset has at least one -j TEE rule. +@@ -302,6 +305,9 @@ + { + unsigned int addend; + ++ /* RT protection */ ++ local_lock(xt_write_lock); ++ + /* + * Low order bit of sequence is set if we already + * called xt_write_recseq_begin(). +@@ -332,6 +338,7 @@ + /* this is kind of a write_seqcount_end(), but addend is 0 or 1 */ + smp_wmb(); + __this_cpu_add(xt_recseq.sequence, addend); ++ local_unlock(xt_write_lock); + } + + /* +diff -Nur linux-4.4.13.orig/include/linux/notifier.h linux-4.4.13/include/linux/notifier.h +--- linux-4.4.13.orig/include/linux/notifier.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/notifier.h 2016-06-22 21:55:08.034523867 +0200 +@@ -6,7 +6,7 @@ + * + * Alan Cox + */ +- ++ + #ifndef _LINUX_NOTIFIER_H + #define _LINUX_NOTIFIER_H + #include +@@ -42,9 +42,7 @@ + * in srcu_notifier_call_chain(): no cache bounces and no memory barriers. + * As compensation, srcu_notifier_chain_unregister() is rather expensive. + * SRCU notifier chains should be used when the chain will be called very +- * often but notifier_blocks will seldom be removed. Also, SRCU notifier +- * chains are slightly more difficult to use because they require special +- * runtime initialization. ++ * often but notifier_blocks will seldom be removed. 
+ */ + + typedef int (*notifier_fn_t)(struct notifier_block *nb, +@@ -88,7 +86,7 @@ + (name)->head = NULL; \ + } while (0) + +-/* srcu_notifier_heads must be initialized and cleaned up dynamically */ ++/* srcu_notifier_heads must be cleaned up dynamically */ + extern void srcu_init_notifier_head(struct srcu_notifier_head *nh); + #define srcu_cleanup_notifier_head(name) \ + cleanup_srcu_struct(&(name)->srcu); +@@ -101,7 +99,13 @@ + .head = NULL } + #define RAW_NOTIFIER_INIT(name) { \ + .head = NULL } +-/* srcu_notifier_heads cannot be initialized statically */ ++ ++#define SRCU_NOTIFIER_INIT(name, pcpu) \ ++ { \ ++ .mutex = __MUTEX_INITIALIZER(name.mutex), \ ++ .head = NULL, \ ++ .srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu), \ ++ } + + #define ATOMIC_NOTIFIER_HEAD(name) \ + struct atomic_notifier_head name = \ +@@ -113,6 +117,18 @@ + struct raw_notifier_head name = \ + RAW_NOTIFIER_INIT(name) + ++#define _SRCU_NOTIFIER_HEAD(name, mod) \ ++ static DEFINE_PER_CPU(struct srcu_struct_array, \ ++ name##_head_srcu_array); \ ++ mod struct srcu_notifier_head name = \ ++ SRCU_NOTIFIER_INIT(name, name##_head_srcu_array) ++ ++#define SRCU_NOTIFIER_HEAD(name) \ ++ _SRCU_NOTIFIER_HEAD(name, ) ++ ++#define SRCU_NOTIFIER_HEAD_STATIC(name) \ ++ _SRCU_NOTIFIER_HEAD(name, static) ++ + #ifdef __KERNEL__ + + extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh, +@@ -182,12 +198,12 @@ + + /* + * Declared notifiers so far. I can imagine quite a few more chains +- * over time (eg laptop power reset chains, reboot chain (to clean ++ * over time (eg laptop power reset chains, reboot chain (to clean + * device units up), device [un]mount chain, module load/unload chain, +- * low memory chain, screenblank chain (for plug in modular screenblankers) ++ * low memory chain, screenblank chain (for plug in modular screenblankers) + * VC switch chains (for loadable kernel svgalib VC switch helpers) etc... + */ +- ++ + /* CPU notfiers are defined in include/linux/cpu.h. 
*/ + + /* netdevice notifiers are defined in include/linux/netdevice.h */ +diff -Nur linux-4.4.13.orig/include/linux/percpu.h linux-4.4.13/include/linux/percpu.h +--- linux-4.4.13.orig/include/linux/percpu.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/percpu.h 2016-06-22 21:55:08.034523867 +0200 +@@ -24,6 +24,35 @@ + PERCPU_MODULE_RESERVE) + #endif + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ ++#define get_local_var(var) (*({ \ ++ migrate_disable(); \ ++ this_cpu_ptr(&var); })) ++ ++#define put_local_var(var) do { \ ++ (void)&(var); \ ++ migrate_enable(); \ ++} while (0) ++ ++# define get_local_ptr(var) ({ \ ++ migrate_disable(); \ ++ this_cpu_ptr(var); }) ++ ++# define put_local_ptr(var) do { \ ++ (void)(var); \ ++ migrate_enable(); \ ++} while (0) ++ ++#else ++ ++#define get_local_var(var) get_cpu_var(var) ++#define put_local_var(var) put_cpu_var(var) ++#define get_local_ptr(var) get_cpu_ptr(var) ++#define put_local_ptr(var) put_cpu_ptr(var) ++ ++#endif ++ + /* minimum unit size, also is the maximum supported allocation size */ + #define PCPU_MIN_UNIT_SIZE PFN_ALIGN(32 << 10) + +diff -Nur linux-4.4.13.orig/include/linux/pid.h linux-4.4.13/include/linux/pid.h +--- linux-4.4.13.orig/include/linux/pid.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/pid.h 2016-06-22 21:55:08.034523867 +0200 +@@ -2,6 +2,7 @@ + #define _LINUX_PID_H + + #include ++#include + + enum pid_type + { +diff -Nur linux-4.4.13.orig/include/linux/preempt.h linux-4.4.13/include/linux/preempt.h +--- linux-4.4.13.orig/include/linux/preempt.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/preempt.h 2016-06-22 21:55:08.034523867 +0200 +@@ -50,7 +50,11 @@ + #define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT) + #define NMI_OFFSET (1UL << NMI_SHIFT) + +-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET) ++#else ++# define SOFTIRQ_DISABLE_OFFSET (0) ++#endif + + /* We use the MSB mostly because its available */ + #define PREEMPT_NEED_RESCHED 0x80000000 +@@ -59,9 +63,15 @@ + #include + + #define hardirq_count() (preempt_count() & HARDIRQ_MASK) +-#define softirq_count() (preempt_count() & SOFTIRQ_MASK) + #define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \ + | NMI_MASK)) ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define softirq_count() (preempt_count() & SOFTIRQ_MASK) ++# define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) ++#else ++# define softirq_count() (0UL) ++extern int in_serving_softirq(void); ++#endif + + /* + * Are we doing bottom half or hardware interrupt processing? +@@ -72,7 +82,6 @@ + #define in_irq() (hardirq_count()) + #define in_softirq() (softirq_count()) + #define in_interrupt() (irq_count()) +-#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET) + + /* + * Are we in NMI context? 
+@@ -91,7 +100,11 @@ + /* + * The preempt_count offset after spin_lock() + */ ++#if !defined(CONFIG_PREEMPT_RT_FULL) + #define PREEMPT_LOCK_OFFSET PREEMPT_DISABLE_OFFSET ++#else ++#define PREEMPT_LOCK_OFFSET 0 ++#endif + + /* + * The preempt_count offset needed for things like: +@@ -140,6 +153,20 @@ + #define preempt_count_inc() preempt_count_add(1) + #define preempt_count_dec() preempt_count_sub(1) + ++#ifdef CONFIG_PREEMPT_LAZY ++#define add_preempt_lazy_count(val) do { preempt_lazy_count() += (val); } while (0) ++#define sub_preempt_lazy_count(val) do { preempt_lazy_count() -= (val); } while (0) ++#define inc_preempt_lazy_count() add_preempt_lazy_count(1) ++#define dec_preempt_lazy_count() sub_preempt_lazy_count(1) ++#define preempt_lazy_count() (current_thread_info()->preempt_lazy_count) ++#else ++#define add_preempt_lazy_count(val) do { } while (0) ++#define sub_preempt_lazy_count(val) do { } while (0) ++#define inc_preempt_lazy_count() do { } while (0) ++#define dec_preempt_lazy_count() do { } while (0) ++#define preempt_lazy_count() (0) ++#endif ++ + #ifdef CONFIG_PREEMPT_COUNT + + #define preempt_disable() \ +@@ -148,13 +175,25 @@ + barrier(); \ + } while (0) + ++#define preempt_lazy_disable() \ ++do { \ ++ inc_preempt_lazy_count(); \ ++ barrier(); \ ++} while (0) ++ + #define sched_preempt_enable_no_resched() \ + do { \ + barrier(); \ + preempt_count_dec(); \ + } while (0) + +-#define preempt_enable_no_resched() sched_preempt_enable_no_resched() ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define preempt_enable_no_resched() sched_preempt_enable_no_resched() ++# define preempt_check_resched_rt() preempt_check_resched() ++#else ++# define preempt_enable_no_resched() preempt_enable() ++# define preempt_check_resched_rt() barrier(); ++#endif + + #define preemptible() (preempt_count() == 0 && !irqs_disabled()) + +@@ -179,6 +218,13 @@ + __preempt_schedule(); \ + } while (0) + ++#define preempt_lazy_enable() \ ++do { \ ++ dec_preempt_lazy_count(); \ ++ barrier(); \ ++ preempt_check_resched(); \ ++} while (0) ++ + #else /* !CONFIG_PREEMPT */ + #define preempt_enable() \ + do { \ +@@ -224,6 +270,7 @@ + #define preempt_disable_notrace() barrier() + #define preempt_enable_no_resched_notrace() barrier() + #define preempt_enable_notrace() barrier() ++#define preempt_check_resched_rt() barrier() + #define preemptible() 0 + + #endif /* CONFIG_PREEMPT_COUNT */ +@@ -244,10 +291,31 @@ + } while (0) + #define preempt_fold_need_resched() \ + do { \ +- if (tif_need_resched()) \ ++ if (tif_need_resched_now()) \ + set_preempt_need_resched(); \ + } while (0) + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define preempt_disable_rt() preempt_disable() ++# define preempt_enable_rt() preempt_enable() ++# define preempt_disable_nort() barrier() ++# define preempt_enable_nort() barrier() ++# ifdef CONFIG_SMP ++ extern void migrate_disable(void); ++ extern void migrate_enable(void); ++# else /* CONFIG_SMP */ ++# define migrate_disable() barrier() ++# define migrate_enable() barrier() ++# endif /* CONFIG_SMP */ ++#else ++# define preempt_disable_rt() barrier() ++# define preempt_enable_rt() barrier() ++# define preempt_disable_nort() preempt_disable() ++# define preempt_enable_nort() preempt_enable() ++# define migrate_disable() preempt_disable() ++# define migrate_enable() preempt_enable() ++#endif ++ + #ifdef CONFIG_PREEMPT_NOTIFIERS + + struct preempt_notifier; +diff -Nur linux-4.4.13.orig/include/linux/printk.h linux-4.4.13/include/linux/printk.h +--- linux-4.4.13.orig/include/linux/printk.h 2016-06-08 03:14:51.000000000 
+0200 ++++ linux-4.4.13/include/linux/printk.h 2016-06-22 21:55:08.034523867 +0200 +@@ -117,9 +117,11 @@ + #ifdef CONFIG_EARLY_PRINTK + extern asmlinkage __printf(1, 2) + void early_printk(const char *fmt, ...); ++extern void printk_kill(void); + #else + static inline __printf(1, 2) __cold + void early_printk(const char *s, ...) { } ++static inline void printk_kill(void) { } + #endif + + typedef __printf(1, 0) int (*printk_func_t)(const char *fmt, va_list args); +diff -Nur linux-4.4.13.orig/include/linux/radix-tree.h linux-4.4.13/include/linux/radix-tree.h +--- linux-4.4.13.orig/include/linux/radix-tree.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/radix-tree.h 2016-06-22 21:55:08.034523867 +0200 +@@ -277,8 +277,13 @@ + unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root, + void ***results, unsigned long *indices, + unsigned long first_index, unsigned int max_items); ++#ifndef CONFIG_PREEMPT_RT_FULL + int radix_tree_preload(gfp_t gfp_mask); + int radix_tree_maybe_preload(gfp_t gfp_mask); ++#else ++static inline int radix_tree_preload(gfp_t gm) { return 0; } ++static inline int radix_tree_maybe_preload(gfp_t gfp_mask) { return 0; } ++#endif + void radix_tree_init(void); + void *radix_tree_tag_set(struct radix_tree_root *root, + unsigned long index, unsigned int tag); +@@ -303,7 +308,7 @@ + + static inline void radix_tree_preload_end(void) + { +- preempt_enable(); ++ preempt_enable_nort(); + } + + /** +diff -Nur linux-4.4.13.orig/include/linux/random.h linux-4.4.13/include/linux/random.h +--- linux-4.4.13.orig/include/linux/random.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/random.h 2016-06-22 21:55:08.034523867 +0200 +@@ -20,7 +20,7 @@ + extern void add_device_randomness(const void *, unsigned int); + extern void add_input_randomness(unsigned int type, unsigned int code, + unsigned int value); +-extern void add_interrupt_randomness(int irq, int irq_flags); ++extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip); + + extern void get_random_bytes(void *buf, int nbytes); + extern int add_random_ready_callback(struct random_ready_callback *rdy); +diff -Nur linux-4.4.13.orig/include/linux/rbtree.h linux-4.4.13/include/linux/rbtree.h +--- linux-4.4.13.orig/include/linux/rbtree.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/rbtree.h 2016-06-22 21:55:08.034523867 +0200 +@@ -31,7 +31,6 @@ + + #include + #include +-#include + + struct rb_node { + unsigned long __rb_parent_color; +@@ -86,14 +85,8 @@ + *rb_link = node; + } + +-static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent, +- struct rb_node **rb_link) +-{ +- node->__rb_parent_color = (unsigned long)parent; +- node->rb_left = node->rb_right = NULL; +- +- rcu_assign_pointer(*rb_link, node); +-} ++void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent, ++ struct rb_node **rb_link); + + #define rb_entry_safe(ptr, type, member) \ + ({ typeof(ptr) ____ptr = (ptr); \ +diff -Nur linux-4.4.13.orig/include/linux/rcupdate.h linux-4.4.13/include/linux/rcupdate.h +--- linux-4.4.13.orig/include/linux/rcupdate.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/rcupdate.h 2016-06-22 21:55:08.034523867 +0200 +@@ -169,6 +169,9 @@ + + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + ++#ifdef CONFIG_PREEMPT_RT_FULL ++#define call_rcu_bh call_rcu ++#else + /** + * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period. + * @head: structure to be used for queueing the RCU updates. 
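The rcupdate.h hunk above folds the BH flavour of RCU into the normal one on PREEMPT_RT_FULL (call_rcu_bh becomes plain call_rcu), since softirqs run in thread context there and a separate BH grace period buys nothing. Callers need no change; a minimal sketch of the usual pattern (struct demo_node is hypothetical):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_node {
	int value;
	struct rcu_head rcu;
};

static void demo_node_free(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_node, rcu));
}

static void demo_node_retire(struct demo_node *n)
{
	/* On RT this is literally call_rcu(); on !RT it still
	 * waits for a BH grace period as before. */
	call_rcu_bh(&n->rcu, demo_node_free);
}
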
+@@ -192,6 +195,7 @@ + */ + void call_rcu_bh(struct rcu_head *head, + rcu_callback_t func); ++#endif + + /** + * call_rcu_sched() - Queue an RCU for invocation after sched grace period. +@@ -292,6 +296,11 @@ + * types of kernel builds, the rcu_read_lock() nesting depth is unknowable. + */ + #define rcu_preempt_depth() (current->rcu_read_lock_nesting) ++#ifndef CONFIG_PREEMPT_RT_FULL ++#define sched_rcu_preempt_depth() rcu_preempt_depth() ++#else ++static inline int sched_rcu_preempt_depth(void) { return 0; } ++#endif + + #else /* #ifdef CONFIG_PREEMPT_RCU */ + +@@ -317,6 +326,8 @@ + return 0; + } + ++#define sched_rcu_preempt_depth() rcu_preempt_depth() ++ + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + + /* Internal to kernel */ +@@ -489,7 +500,14 @@ + int debug_lockdep_rcu_enabled(void); + + int rcu_read_lock_held(void); ++#ifdef CONFIG_PREEMPT_RT_FULL ++static inline int rcu_read_lock_bh_held(void) ++{ ++ return rcu_read_lock_held(); ++} ++#else + int rcu_read_lock_bh_held(void); ++#endif + + /** + * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section? +@@ -937,10 +955,14 @@ + static inline void rcu_read_lock_bh(void) + { + local_bh_disable(); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ rcu_read_lock(); ++#else + __acquire(RCU_BH); + rcu_lock_acquire(&rcu_bh_lock_map); + RCU_LOCKDEP_WARN(!rcu_is_watching(), + "rcu_read_lock_bh() used illegally while idle"); ++#endif + } + + /* +@@ -950,10 +972,14 @@ + */ + static inline void rcu_read_unlock_bh(void) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ rcu_read_unlock(); ++#else + RCU_LOCKDEP_WARN(!rcu_is_watching(), + "rcu_read_unlock_bh() used illegally while idle"); + rcu_lock_release(&rcu_bh_lock_map); + __release(RCU_BH); ++#endif + local_bh_enable(); + } + +diff -Nur linux-4.4.13.orig/include/linux/rcutree.h linux-4.4.13/include/linux/rcutree.h +--- linux-4.4.13.orig/include/linux/rcutree.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/rcutree.h 2016-06-22 21:55:08.034523867 +0200 +@@ -44,7 +44,11 @@ + rcu_note_context_switch(); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define synchronize_rcu_bh synchronize_rcu ++#else + void synchronize_rcu_bh(void); ++#endif + void synchronize_sched_expedited(void); + void synchronize_rcu_expedited(void); + +@@ -72,7 +76,11 @@ + } + + void rcu_barrier(void); ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define rcu_barrier_bh rcu_barrier ++#else + void rcu_barrier_bh(void); ++#endif + void rcu_barrier_sched(void); + unsigned long get_state_synchronize_rcu(void); + void cond_synchronize_rcu(unsigned long oldstate); +@@ -85,12 +93,10 @@ + unsigned long rcu_batches_started_bh(void); + unsigned long rcu_batches_started_sched(void); + unsigned long rcu_batches_completed(void); +-unsigned long rcu_batches_completed_bh(void); + unsigned long rcu_batches_completed_sched(void); + void show_rcu_gp_kthreads(void); + + void rcu_force_quiescent_state(void); +-void rcu_bh_force_quiescent_state(void); + void rcu_sched_force_quiescent_state(void); + + void rcu_idle_enter(void); +@@ -105,6 +111,14 @@ + + bool rcu_is_watching(void); + ++#ifndef CONFIG_PREEMPT_RT_FULL ++void rcu_bh_force_quiescent_state(void); ++unsigned long rcu_batches_completed_bh(void); ++#else ++# define rcu_bh_force_quiescent_state rcu_force_quiescent_state ++# define rcu_batches_completed_bh rcu_batches_completed ++#endif ++ + void rcu_all_qs(void); + + #endif /* __LINUX_RCUTREE_H */ +diff -Nur linux-4.4.13.orig/include/linux/rtmutex.h linux-4.4.13/include/linux/rtmutex.h +--- linux-4.4.13.orig/include/linux/rtmutex.h 
2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/rtmutex.h 2016-06-22 21:55:08.034523867 +0200 +@@ -13,11 +13,15 @@ + #define __LINUX_RT_MUTEX_H + + #include ++#include + #include +-#include + + extern int max_lock_depth; /* for sysctl */ + ++#ifdef CONFIG_DEBUG_MUTEXES ++#include ++#endif ++ + /** + * The rt_mutex structure + * +@@ -31,8 +35,8 @@ + struct rb_root waiters; + struct rb_node *waiters_leftmost; + struct task_struct *owner; +-#ifdef CONFIG_DEBUG_RT_MUTEXES + int save_state; ++#ifdef CONFIG_DEBUG_RT_MUTEXES + const char *name, *file; + int line; + void *magic; +@@ -55,22 +59,33 @@ + # define rt_mutex_debug_check_no_locks_held(task) do { } while (0) + #endif + ++# define rt_mutex_init(mutex) \ ++ do { \ ++ raw_spin_lock_init(&(mutex)->wait_lock); \ ++ __rt_mutex_init(mutex, #mutex); \ ++ } while (0) ++ + #ifdef CONFIG_DEBUG_RT_MUTEXES + # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \ + , .name = #mutexname, .file = __FILE__, .line = __LINE__ +-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, __func__) + extern void rt_mutex_debug_task_free(struct task_struct *tsk); + #else + # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) +-# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL) + # define rt_mutex_debug_task_free(t) do { } while (0) + #endif + +-#define __RT_MUTEX_INITIALIZER(mutexname) \ +- { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ ++#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ ++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ + , .waiters = RB_ROOT \ + , .owner = NULL \ +- __DEBUG_RT_MUTEX_INITIALIZER(mutexname)} ++ __DEBUG_RT_MUTEX_INITIALIZER(mutexname) ++ ++#define __RT_MUTEX_INITIALIZER(mutexname) \ ++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) } ++ ++#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \ ++ { __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \ ++ , .save_state = 1 } + + #define DEFINE_RT_MUTEX(mutexname) \ + struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname) +@@ -91,6 +106,7 @@ + + extern void rt_mutex_lock(struct rt_mutex *lock); + extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); ++extern int rt_mutex_lock_killable(struct rt_mutex *lock); + extern int rt_mutex_timed_lock(struct rt_mutex *lock, + struct hrtimer_sleeper *timeout); + +diff -Nur linux-4.4.13.orig/include/linux/rwlock_rt.h linux-4.4.13/include/linux/rwlock_rt.h +--- linux-4.4.13.orig/include/linux/rwlock_rt.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.4.13/include/linux/rwlock_rt.h 2016-06-22 21:55:08.034523867 +0200 +@@ -0,0 +1,99 @@ ++#ifndef __LINUX_RWLOCK_RT_H ++#define __LINUX_RWLOCK_RT_H ++ ++#ifndef __LINUX_SPINLOCK_H ++#error Do not include directly. 
Use spinlock.h ++#endif ++ ++#define rwlock_init(rwl) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ rt_mutex_init(&(rwl)->lock); \ ++ __rt_rwlock_init(rwl, #rwl, &__key); \ ++} while (0) ++ ++extern void __lockfunc rt_write_lock(rwlock_t *rwlock); ++extern void __lockfunc rt_read_lock(rwlock_t *rwlock); ++extern int __lockfunc rt_write_trylock(rwlock_t *rwlock); ++extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags); ++extern int __lockfunc rt_read_trylock(rwlock_t *rwlock); ++extern void __lockfunc rt_write_unlock(rwlock_t *rwlock); ++extern void __lockfunc rt_read_unlock(rwlock_t *rwlock); ++extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock); ++extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock); ++extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key); ++ ++#define read_trylock(lock) __cond_lock(lock, rt_read_trylock(lock)) ++#define write_trylock(lock) __cond_lock(lock, rt_write_trylock(lock)) ++ ++#define write_trylock_irqsave(lock, flags) \ ++ __cond_lock(lock, rt_write_trylock_irqsave(lock, &flags)) ++ ++#define read_lock_irqsave(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = rt_read_lock_irqsave(lock); \ ++ } while (0) ++ ++#define write_lock_irqsave(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = rt_write_lock_irqsave(lock); \ ++ } while (0) ++ ++#define read_lock(lock) rt_read_lock(lock) ++ ++#define read_lock_bh(lock) \ ++ do { \ ++ local_bh_disable(); \ ++ rt_read_lock(lock); \ ++ } while (0) ++ ++#define read_lock_irq(lock) read_lock(lock) ++ ++#define write_lock(lock) rt_write_lock(lock) ++ ++#define write_lock_bh(lock) \ ++ do { \ ++ local_bh_disable(); \ ++ rt_write_lock(lock); \ ++ } while (0) ++ ++#define write_lock_irq(lock) write_lock(lock) ++ ++#define read_unlock(lock) rt_read_unlock(lock) ++ ++#define read_unlock_bh(lock) \ ++ do { \ ++ rt_read_unlock(lock); \ ++ local_bh_enable(); \ ++ } while (0) ++ ++#define read_unlock_irq(lock) read_unlock(lock) ++ ++#define write_unlock(lock) rt_write_unlock(lock) ++ ++#define write_unlock_bh(lock) \ ++ do { \ ++ rt_write_unlock(lock); \ ++ local_bh_enable(); \ ++ } while (0) ++ ++#define write_unlock_irq(lock) write_unlock(lock) ++ ++#define read_unlock_irqrestore(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ (void) flags; \ ++ rt_read_unlock(lock); \ ++ } while (0) ++ ++#define write_unlock_irqrestore(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ (void) flags; \ ++ rt_write_unlock(lock); \ ++ } while (0) ++ ++#endif +diff -Nur linux-4.4.13.orig/include/linux/rwlock_types.h linux-4.4.13/include/linux/rwlock_types.h +--- linux-4.4.13.orig/include/linux/rwlock_types.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/rwlock_types.h 2016-06-22 21:55:08.034523867 +0200 +@@ -1,6 +1,10 @@ + #ifndef __LINUX_RWLOCK_TYPES_H + #define __LINUX_RWLOCK_TYPES_H + ++#if !defined(__LINUX_SPINLOCK_TYPES_H) ++# error "Do not include directly, include spinlock_types.h" ++#endif ++ + /* + * include/linux/rwlock_types.h - generic rwlock type definitions + * and initializers +@@ -43,6 +47,7 @@ + RW_DEP_MAP_INIT(lockname) } + #endif + +-#define DEFINE_RWLOCK(x) rwlock_t x = __RW_LOCK_UNLOCKED(x) ++#define DEFINE_RWLOCK(name) \ ++ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) + + #endif /* __LINUX_RWLOCK_TYPES_H */ +diff -Nur linux-4.4.13.orig/include/linux/rwlock_types_rt.h 
linux-4.4.13/include/linux/rwlock_types_rt.h +--- linux-4.4.13.orig/include/linux/rwlock_types_rt.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.4.13/include/linux/rwlock_types_rt.h 2016-06-22 21:55:08.034523867 +0200 +@@ -0,0 +1,33 @@ ++#ifndef __LINUX_RWLOCK_TYPES_RT_H ++#define __LINUX_RWLOCK_TYPES_RT_H ++ ++#ifndef __LINUX_SPINLOCK_TYPES_H ++#error "Do not include directly. Include spinlock_types.h instead" ++#endif ++ ++/* ++ * rwlocks - rtmutex which allows single reader recursion ++ */ ++typedef struct { ++ struct rt_mutex lock; ++ int read_depth; ++ unsigned int break_lock; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++} rwlock_t; ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } ++#else ++# define RW_DEP_MAP_INIT(lockname) ++#endif ++ ++#define __RW_LOCK_UNLOCKED(name) \ ++ { .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock), \ ++ RW_DEP_MAP_INIT(name) } ++ ++#define DEFINE_RWLOCK(name) \ ++ rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name) ++ ++#endif +diff -Nur linux-4.4.13.orig/include/linux/rwsem.h linux-4.4.13/include/linux/rwsem.h +--- linux-4.4.13.orig/include/linux/rwsem.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/rwsem.h 2016-06-22 21:55:08.034523867 +0200 +@@ -18,6 +18,10 @@ + #include + #endif + ++#ifdef CONFIG_PREEMPT_RT_FULL ++#include ++#else /* PREEMPT_RT_FULL */ ++ + struct rw_semaphore; + + #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK +@@ -177,4 +181,6 @@ + # define up_read_non_owner(sem) up_read(sem) + #endif + ++#endif /* !PREEMPT_RT_FULL */ ++ + #endif /* _LINUX_RWSEM_H */ +diff -Nur linux-4.4.13.orig/include/linux/rwsem_rt.h linux-4.4.13/include/linux/rwsem_rt.h +--- linux-4.4.13.orig/include/linux/rwsem_rt.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.4.13/include/linux/rwsem_rt.h 2016-06-22 21:55:08.034523867 +0200 +@@ -0,0 +1,152 @@ ++#ifndef _LINUX_RWSEM_RT_H ++#define _LINUX_RWSEM_RT_H ++ ++#ifndef _LINUX_RWSEM_H ++#error "Include rwsem.h" ++#endif ++ ++/* ++ * RW-semaphores are a spinlock plus a reader-depth count. ++ * ++ * Note that the semantics are different from the usual ++ * Linux rw-sems, in PREEMPT_RT mode we do not allow ++ * multiple readers to hold the lock at once, we only allow ++ * a read-lock owner to read-lock recursively. This is ++ * better for latency, makes the implementation inherently ++ * fair and makes it simpler as well. 
++ */ ++ ++#include ++ ++struct rw_semaphore { ++ struct rt_mutex lock; ++ int read_depth; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++}; ++ ++#define __RWSEM_INITIALIZER(name) \ ++ { .lock = __RT_MUTEX_INITIALIZER(name.lock), \ ++ RW_DEP_MAP_INIT(name) } ++ ++#define DECLARE_RWSEM(lockname) \ ++ struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname) ++ ++extern void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name, ++ struct lock_class_key *key); ++ ++#define __rt_init_rwsem(sem, name, key) \ ++ do { \ ++ rt_mutex_init(&(sem)->lock); \ ++ __rt_rwsem_init((sem), (name), (key));\ ++ } while (0) ++ ++#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key) ++ ++# define rt_init_rwsem(sem) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ __rt_init_rwsem((sem), #sem, &__key); \ ++} while (0) ++ ++extern void rt_down_write(struct rw_semaphore *rwsem); ++extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass); ++extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass); ++extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem, ++ struct lockdep_map *nest); ++extern void rt__down_read(struct rw_semaphore *rwsem); ++extern void rt_down_read(struct rw_semaphore *rwsem); ++extern int rt_down_write_trylock(struct rw_semaphore *rwsem); ++extern int rt__down_read_trylock(struct rw_semaphore *rwsem); ++extern int rt_down_read_trylock(struct rw_semaphore *rwsem); ++extern void __rt_up_read(struct rw_semaphore *rwsem); ++extern void rt_up_read(struct rw_semaphore *rwsem); ++extern void rt_up_write(struct rw_semaphore *rwsem); ++extern void rt_downgrade_write(struct rw_semaphore *rwsem); ++ ++#define init_rwsem(sem) rt_init_rwsem(sem) ++#define rwsem_is_locked(s) rt_mutex_is_locked(&(s)->lock) ++ ++static inline int rwsem_is_contended(struct rw_semaphore *sem) ++{ ++ /* rt_mutex_has_waiters() */ ++ return !RB_EMPTY_ROOT(&sem->lock.waiters); ++} ++ ++static inline void __down_read(struct rw_semaphore *sem) ++{ ++ rt__down_read(sem); ++} ++ ++static inline void down_read(struct rw_semaphore *sem) ++{ ++ rt_down_read(sem); ++} ++ ++static inline int __down_read_trylock(struct rw_semaphore *sem) ++{ ++ return rt__down_read_trylock(sem); ++} ++ ++static inline int down_read_trylock(struct rw_semaphore *sem) ++{ ++ return rt_down_read_trylock(sem); ++} ++ ++static inline void down_write(struct rw_semaphore *sem) ++{ ++ rt_down_write(sem); ++} ++ ++static inline int down_write_trylock(struct rw_semaphore *sem) ++{ ++ return rt_down_write_trylock(sem); ++} ++ ++static inline void __up_read(struct rw_semaphore *sem) ++{ ++ __rt_up_read(sem); ++} ++ ++static inline void up_read(struct rw_semaphore *sem) ++{ ++ rt_up_read(sem); ++} ++ ++static inline void up_write(struct rw_semaphore *sem) ++{ ++ rt_up_write(sem); ++} ++ ++static inline void downgrade_write(struct rw_semaphore *sem) ++{ ++ rt_downgrade_write(sem); ++} ++ ++static inline void down_read_nested(struct rw_semaphore *sem, int subclass) ++{ ++ return rt_down_read_nested(sem, subclass); ++} ++ ++static inline void down_write_nested(struct rw_semaphore *sem, int subclass) ++{ ++ rt_down_write_nested(sem, subclass); ++} ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++static inline void down_write_nest_lock(struct rw_semaphore *sem, ++ struct rw_semaphore *nest_lock) ++{ ++ rt_down_write_nested_lock(sem, &nest_lock->dep_map); ++} ++ ++#else ++ ++static inline void down_write_nest_lock(struct rw_semaphore *sem, ++ struct rw_semaphore *nest_lock) ++{ ++ 
rt_down_write_nested_lock(sem, NULL); ++} ++#endif ++#endif +diff -Nur linux-4.4.13.orig/include/linux/sched.h linux-4.4.13/include/linux/sched.h +--- linux-4.4.13.orig/include/linux/sched.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/sched.h 2016-06-22 21:55:08.038523867 +0200 +@@ -26,6 +26,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -182,8 +183,6 @@ + static inline void update_cpu_load_nohz(void) { } + #endif + +-extern unsigned long get_parent_ip(unsigned long addr); +- + extern void dump_cpu_task(int cpu); + + struct seq_file; +@@ -242,10 +241,7 @@ + TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \ + __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD) + +-#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0) + #define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0) +-#define task_is_stopped_or_traced(task) \ +- ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0) + #define task_contributes_to_load(task) \ + ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \ + (task->flags & PF_FROZEN) == 0 && \ +@@ -311,6 +307,11 @@ + + #endif + ++#define __set_current_state_no_track(state_value) \ ++ do { current->state = (state_value); } while (0) ++#define set_current_state_no_track(state_value) \ ++ set_mb(current->state, (state_value)) ++ + /* Task command name length */ + #define TASK_COMM_LEN 16 + +@@ -970,8 +971,18 @@ + struct wake_q_head name = { WAKE_Q_TAIL, &name.first } + + extern void wake_q_add(struct wake_q_head *head, +- struct task_struct *task); +-extern void wake_up_q(struct wake_q_head *head); ++ struct task_struct *task); ++extern void __wake_up_q(struct wake_q_head *head, bool sleeper); ++ ++static inline void wake_up_q(struct wake_q_head *head) ++{ ++ __wake_up_q(head, false); ++} ++ ++static inline void wake_up_q_sleeper(struct wake_q_head *head) ++{ ++ __wake_up_q(head, true); ++} + + /* + * sched-domains (multiprocessor balancing) declarations: +@@ -1379,6 +1390,7 @@ + + struct task_struct { + volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ ++ volatile long saved_state; /* saved state for "spinlock sleepers" */ + void *stack; + atomic_t usage; + unsigned int flags; /* per process flags, defined below */ +@@ -1415,6 +1427,12 @@ + #endif + + unsigned int policy; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ int migrate_disable; ++# ifdef CONFIG_SCHED_DEBUG ++ int migrate_disable_atomic; ++# endif ++#endif + int nr_cpus_allowed; + cpumask_t cpus_allowed; + +@@ -1522,11 +1540,14 @@ + cputime_t gtime; + struct prev_cputime prev_cputime; + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +- seqlock_t vtime_seqlock; ++ seqcount_t vtime_seqcount; + unsigned long long vtime_snap; + enum { +- VTIME_SLEEPING = 0, ++ /* Task is sleeping or running in a CPU with VTIME inactive */ ++ VTIME_INACTIVE = 0, ++ /* Task runs in userspace in a CPU with VTIME active */ + VTIME_USER, ++ /* Task runs in kernelspace in a CPU with VTIME active */ + VTIME_SYS, + } vtime_snap_whence; + #endif +@@ -1538,6 +1559,9 @@ + + struct task_cputime cputime_expires; + struct list_head cpu_timers[3]; ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct task_struct *posix_timer_list; ++#endif + + /* process credentials */ + const struct cred __rcu *real_cred; /* objective and real subjective task +@@ -1568,10 +1592,15 @@ + /* signal handlers */ + struct signal_struct *signal; + struct sighand_struct *sighand; ++ struct sigqueue *sigqueue_cache; + + sigset_t blocked, real_blocked; + sigset_t saved_sigmask; /* restored if set_restore_sigmask() was used */ + struct 
sigpending pending; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* TODO: move me into ->restart_block ? */ ++ struct siginfo forced_info; ++#endif + + unsigned long sas_ss_sp; + size_t sas_ss_size; +@@ -1795,6 +1824,12 @@ + unsigned long trace; + /* bitmask and counter of trace recursion */ + unsigned long trace_recursion; ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ u64 preempt_timestamp_hist; ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ long timer_offset; ++#endif ++#endif + #endif /* CONFIG_TRACING */ + #ifdef CONFIG_MEMCG + struct mem_cgroup *memcg_in_oom; +@@ -1811,9 +1846,23 @@ + unsigned int sequential_io; + unsigned int sequential_io_avg; + #endif ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct rcu_head put_rcu; ++ int softirq_nestcnt; ++ unsigned int softirqs_raised; ++#endif ++#ifdef CONFIG_PREEMPT_RT_FULL ++# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32 ++ int kmap_idx; ++ pte_t kmap_pte[KM_TYPE_NR]; ++# endif ++#endif + #ifdef CONFIG_DEBUG_ATOMIC_SLEEP + unsigned long task_state_change; + #endif ++#ifdef CONFIG_PREEMPT_RT_FULL ++ int xmit_recursion; ++#endif + int pagefault_disabled; + /* CPU-specific state of this task */ + struct thread_struct thread; +@@ -1831,9 +1880,6 @@ + # define arch_task_struct_size (sizeof(struct task_struct)) + #endif + +-/* Future-safe accessor for struct task_struct's cpus_allowed. */ +-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) +- + #define TNF_MIGRATED 0x01 + #define TNF_NO_GROUP 0x02 + #define TNF_SHARED 0x04 +@@ -2023,6 +2069,15 @@ + extern void free_task(struct task_struct *tsk); + #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0) + ++#ifdef CONFIG_PREEMPT_RT_BASE ++extern void __put_task_struct_cb(struct rcu_head *rhp); ++ ++static inline void put_task_struct(struct task_struct *t) ++{ ++ if (atomic_dec_and_test(&t->usage)) ++ call_rcu(&t->put_rcu, __put_task_struct_cb); ++} ++#else + extern void __put_task_struct(struct task_struct *t); + + static inline void put_task_struct(struct task_struct *t) +@@ -2030,6 +2085,7 @@ + if (atomic_dec_and_test(&t->usage)) + __put_task_struct(t); + } ++#endif + + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN + extern void task_cputime(struct task_struct *t, +@@ -2068,6 +2124,7 @@ + /* + * Per process flags + */ ++#define PF_IN_SOFTIRQ 0x00000001 /* Task is serving softirq */ + #define PF_EXITING 0x00000004 /* getting shut down */ + #define PF_EXITPIDONE 0x00000008 /* pi exit done on shut down */ + #define PF_VCPU 0x00000010 /* I'm a virtual CPU */ +@@ -2232,6 +2289,10 @@ + + extern int set_cpus_allowed_ptr(struct task_struct *p, + const struct cpumask *new_mask); ++int migrate_me(void); ++void tell_sched_cpu_down_begin(int cpu); ++void tell_sched_cpu_down_done(int cpu); ++ + #else + static inline void do_set_cpus_allowed(struct task_struct *p, + const struct cpumask *new_mask) +@@ -2244,6 +2305,9 @@ + return -EINVAL; + return 0; + } ++static inline int migrate_me(void) { return 0; } ++static inline void tell_sched_cpu_down_begin(int cpu) { } ++static inline void tell_sched_cpu_down_done(int cpu) { } + #endif + + #ifdef CONFIG_NO_HZ_COMMON +@@ -2453,6 +2517,7 @@ + + extern int wake_up_state(struct task_struct *tsk, unsigned int state); + extern int wake_up_process(struct task_struct *tsk); ++extern int wake_up_lock_sleeper(struct task_struct * tsk); + extern void wake_up_new_task(struct task_struct *tsk); + #ifdef CONFIG_SMP + extern void kick_process(struct task_struct *tsk); +@@ -2576,12 +2641,24 @@ + + /* mmdrop drops the mm and the page tables */ + extern void __mmdrop(struct mm_struct *); ++ + 
static inline void mmdrop(struct mm_struct * mm) + { + if (unlikely(atomic_dec_and_test(&mm->mm_count))) + __mmdrop(mm); + } + ++#ifdef CONFIG_PREEMPT_RT_BASE ++extern void __mmdrop_delayed(struct rcu_head *rhp); ++static inline void mmdrop_delayed(struct mm_struct *mm) ++{ ++ if (atomic_dec_and_test(&mm->mm_count)) ++ call_rcu(&mm->delayed_drop, __mmdrop_delayed); ++} ++#else ++# define mmdrop_delayed(mm) mmdrop(mm) ++#endif ++ + /* mmput gets rid of the mappings and all user-space */ + extern void mmput(struct mm_struct *); + /* Grab a reference to a task's mm, if it is not already going away */ +@@ -2891,6 +2968,43 @@ + return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED)); + } + ++#ifdef CONFIG_PREEMPT_LAZY ++static inline void set_tsk_need_resched_lazy(struct task_struct *tsk) ++{ ++ set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); ++} ++ ++static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) ++{ ++ clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY); ++} ++ ++static inline int test_tsk_need_resched_lazy(struct task_struct *tsk) ++{ ++ return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY)); ++} ++ ++static inline int need_resched_lazy(void) ++{ ++ return test_thread_flag(TIF_NEED_RESCHED_LAZY); ++} ++ ++static inline int need_resched_now(void) ++{ ++ return test_thread_flag(TIF_NEED_RESCHED); ++} ++ ++#else ++static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { } ++static inline int need_resched_lazy(void) { return 0; } ++ ++static inline int need_resched_now(void) ++{ ++ return test_thread_flag(TIF_NEED_RESCHED); ++} ++ ++#endif ++ + static inline int restart_syscall(void) + { + set_tsk_thread_flag(current, TIF_SIGPENDING); +@@ -2922,6 +3036,51 @@ + return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p); + } + ++static inline bool __task_is_stopped_or_traced(struct task_struct *task) ++{ ++ if (task->state & (__TASK_STOPPED | __TASK_TRACED)) ++ return true; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED)) ++ return true; ++#endif ++ return false; ++} ++ ++static inline bool task_is_stopped_or_traced(struct task_struct *task) ++{ ++ bool traced_stopped; ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&task->pi_lock, flags); ++ traced_stopped = __task_is_stopped_or_traced(task); ++ raw_spin_unlock_irqrestore(&task->pi_lock, flags); ++#else ++ traced_stopped = __task_is_stopped_or_traced(task); ++#endif ++ return traced_stopped; ++} ++ ++static inline bool task_is_traced(struct task_struct *task) ++{ ++ bool traced = false; ++ ++ if (task->state & __TASK_TRACED) ++ return true; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* in case the task is sleeping on tasklist_lock */ ++ raw_spin_lock_irq(&task->pi_lock); ++ if (task->state & __TASK_TRACED) ++ traced = true; ++ else if (task->saved_state & __TASK_TRACED) ++ traced = true; ++ raw_spin_unlock_irq(&task->pi_lock); ++#endif ++ return traced; ++} ++ + /* + * cond_resched() and cond_resched_lock(): latency reduction via + * explicit rescheduling in places that are safe. 
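A hedged toy model of the saved_state logic in the task_is_traced()/task_is_stopped_or_traced() helpers above (plain C11, userspace, all names invented -- not kernel code): when a __TASK_TRACED task blocks on an rtmutex-based "sleeping spinlock", RT parks the real state in saved_state and runs the task as TASK_UNINTERRUPTIBLE, so checking ->state alone would falsely report "not traced".

#include <stdbool.h>
#include <stdio.h>

#define TOY_TASK_TRACED          8
#define TOY_TASK_UNINTERRUPTIBLE 2

struct toy_task {
	long state;       /* what the scheduler currently sees     */
	long saved_state; /* real state, parked by sleeping locks  */
};

static bool toy_task_is_traced(const struct toy_task *t)
{
	/* mirrors the CONFIG_PREEMPT_RT_FULL branch above */
	return (t->state & TOY_TASK_TRACED) ||
	       (t->saved_state & TOY_TASK_TRACED);
}

int main(void)
{
	struct toy_task t = { .state = TOY_TASK_TRACED, .saved_state = 0 };

	/* the task now blocks on a sleeping spinlock: RT parks its state */
	t.saved_state = t.state;
	t.state = TOY_TASK_UNINTERRUPTIBLE;

	printf("traced while blocked on a lock: %d\n", toy_task_is_traced(&t));
	return 0;
}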
The return +@@ -2943,12 +3102,16 @@ + __cond_resched_lock(lock); \ + }) + ++#ifndef CONFIG_PREEMPT_RT_FULL + extern int __cond_resched_softirq(void); + + #define cond_resched_softirq() ({ \ + ___might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET); \ + __cond_resched_softirq(); \ + }) ++#else ++# define cond_resched_softirq() cond_resched() ++#endif + + static inline void cond_resched_rcu(void) + { +@@ -3110,6 +3273,31 @@ + + #endif /* CONFIG_SMP */ + ++static inline int __migrate_disabled(struct task_struct *p) ++{ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ return p->migrate_disable; ++#else ++ return 0; ++#endif ++} ++ ++/* Future-safe accessor for struct task_struct's cpus_allowed. */ ++static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p) ++{ ++ if (__migrate_disabled(p)) ++ return cpumask_of(task_cpu(p)); ++ ++ return &p->cpus_allowed; ++} ++ ++static inline int tsk_nr_cpus_allowed(struct task_struct *p) ++{ ++ if (__migrate_disabled(p)) ++ return 1; ++ return p->nr_cpus_allowed; ++} ++ + extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask); + extern long sched_getaffinity(pid_t pid, struct cpumask *mask); + +diff -Nur linux-4.4.13.orig/include/linux/seqlock.h linux-4.4.13/include/linux/seqlock.h +--- linux-4.4.13.orig/include/linux/seqlock.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/seqlock.h 2016-06-22 21:55:08.038523867 +0200 +@@ -220,20 +220,30 @@ + return __read_seqcount_retry(s, start); + } + +- +- +-static inline void raw_write_seqcount_begin(seqcount_t *s) ++static inline void __raw_write_seqcount_begin(seqcount_t *s) + { + s->sequence++; + smp_wmb(); + } + +-static inline void raw_write_seqcount_end(seqcount_t *s) ++static inline void raw_write_seqcount_begin(seqcount_t *s) ++{ ++ preempt_disable_rt(); ++ __raw_write_seqcount_begin(s); ++} ++ ++static inline void __raw_write_seqcount_end(seqcount_t *s) + { + smp_wmb(); + s->sequence++; + } + ++static inline void raw_write_seqcount_end(seqcount_t *s) ++{ ++ __raw_write_seqcount_end(s); ++ preempt_enable_rt(); ++} ++ + /** + * raw_write_seqcount_barrier - do a seq write barrier + * @s: pointer to seqcount_t +@@ -425,10 +435,32 @@ + /* + * Read side functions for starting and finalizing a read side section. + */ ++#ifndef CONFIG_PREEMPT_RT_FULL + static inline unsigned read_seqbegin(const seqlock_t *sl) + { + return read_seqcount_begin(&sl->seqcount); + } ++#else ++/* ++ * Starvation safe read side for RT ++ */ ++static inline unsigned read_seqbegin(seqlock_t *sl) ++{ ++ unsigned ret; ++ ++repeat: ++ ret = ACCESS_ONCE(sl->seqcount.sequence); ++ if (unlikely(ret & 1)) { ++ /* ++ * Take the lock and let the writer proceed (i.e. evtl ++ * boost it), otherwise we could loop here forever. 
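A hedged userspace analogue of this starvation-safe reader, and of the write-side switch to the __raw helpers in the hunks that follow (C11 plus pthreads, toy names, not the kernel implementation): where the kernel uses spin_unlock_wait(), the sketch takes and drops the writer's lock, which is what lets priority inheritance boost the writer.

#include <pthread.h>
#include <stdatomic.h>

struct toy_seqlock {
	atomic_uint     sequence; /* odd while a writer is active        */
	pthread_mutex_t lock;     /* stands in for the PI-aware spinlock */
};

static unsigned toy_read_seqbegin(struct toy_seqlock *sl)
{
	unsigned ret;
repeat:
	ret = atomic_load_explicit(&sl->sequence, memory_order_acquire);
	if (ret & 1) {
		/* writer active: sleep on its lock (boosting it under
		 * PI) instead of burning CPU in an unbounded spin */
		pthread_mutex_lock(&sl->lock);
		pthread_mutex_unlock(&sl->lock);
		goto repeat;
	}
	return ret;
}

static void toy_write_seqlock(struct toy_seqlock *sl)
{
	pthread_mutex_lock(&sl->lock);
	/* __raw flavour: the lock already serializes writers, so no
	 * extra preemption games are needed inside it */
	atomic_fetch_add_explicit(&sl->sequence, 1, memory_order_release);
}

static void toy_write_sequnlock(struct toy_seqlock *sl)
{
	atomic_fetch_add_explicit(&sl->sequence, 1, memory_order_release);
	pthread_mutex_unlock(&sl->lock);
}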
++ */ ++ spin_unlock_wait(&sl->lock); ++ goto repeat; ++ } ++ return ret; ++} ++#endif + + static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start) + { +@@ -443,36 +475,36 @@ + static inline void write_seqlock(seqlock_t *sl) + { + spin_lock(&sl->lock); +- write_seqcount_begin(&sl->seqcount); ++ __raw_write_seqcount_begin(&sl->seqcount); + } + + static inline void write_sequnlock(seqlock_t *sl) + { +- write_seqcount_end(&sl->seqcount); ++ __raw_write_seqcount_end(&sl->seqcount); + spin_unlock(&sl->lock); + } + + static inline void write_seqlock_bh(seqlock_t *sl) + { + spin_lock_bh(&sl->lock); +- write_seqcount_begin(&sl->seqcount); ++ __raw_write_seqcount_begin(&sl->seqcount); + } + + static inline void write_sequnlock_bh(seqlock_t *sl) + { +- write_seqcount_end(&sl->seqcount); ++ __raw_write_seqcount_end(&sl->seqcount); + spin_unlock_bh(&sl->lock); + } + + static inline void write_seqlock_irq(seqlock_t *sl) + { + spin_lock_irq(&sl->lock); +- write_seqcount_begin(&sl->seqcount); ++ __raw_write_seqcount_begin(&sl->seqcount); + } + + static inline void write_sequnlock_irq(seqlock_t *sl) + { +- write_seqcount_end(&sl->seqcount); ++ __raw_write_seqcount_end(&sl->seqcount); + spin_unlock_irq(&sl->lock); + } + +@@ -481,7 +513,7 @@ + unsigned long flags; + + spin_lock_irqsave(&sl->lock, flags); +- write_seqcount_begin(&sl->seqcount); ++ __raw_write_seqcount_begin(&sl->seqcount); + return flags; + } + +@@ -491,7 +523,7 @@ + static inline void + write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags) + { +- write_seqcount_end(&sl->seqcount); ++ __raw_write_seqcount_end(&sl->seqcount); + spin_unlock_irqrestore(&sl->lock, flags); + } + +diff -Nur linux-4.4.13.orig/include/linux/signal.h linux-4.4.13/include/linux/signal.h +--- linux-4.4.13.orig/include/linux/signal.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/signal.h 2016-06-22 21:55:08.038523867 +0200 +@@ -233,6 +233,7 @@ + } + + extern void flush_sigqueue(struct sigpending *queue); ++extern void flush_task_sigqueue(struct task_struct *tsk); + + /* Test if 'sig' is valid signal. 
Use this instead of testing _NSIG directly */ + static inline int valid_signal(unsigned long sig) +diff -Nur linux-4.4.13.orig/include/linux/skbuff.h linux-4.4.13/include/linux/skbuff.h +--- linux-4.4.13.orig/include/linux/skbuff.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/skbuff.h 2016-06-22 21:55:08.038523867 +0200 +@@ -203,6 +203,7 @@ + + __u32 qlen; + spinlock_t lock; ++ raw_spinlock_t raw_lock; + }; + + struct sk_buff; +@@ -1464,6 +1465,12 @@ + __skb_queue_head_init(list); + } + ++static inline void skb_queue_head_init_raw(struct sk_buff_head *list) ++{ ++ raw_spin_lock_init(&list->raw_lock); ++ __skb_queue_head_init(list); ++} ++ + static inline void skb_queue_head_init_class(struct sk_buff_head *list, + struct lock_class_key *class) + { +diff -Nur linux-4.4.13.orig/include/linux/smp.h linux-4.4.13/include/linux/smp.h +--- linux-4.4.13.orig/include/linux/smp.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/smp.h 2016-06-22 21:55:08.038523867 +0200 +@@ -185,6 +185,9 @@ + #define get_cpu() ({ preempt_disable(); smp_processor_id(); }) + #define put_cpu() preempt_enable() + ++#define get_cpu_light() ({ migrate_disable(); smp_processor_id(); }) ++#define put_cpu_light() migrate_enable() ++ + /* + * Callback to arch code if there's nosmp or maxcpus=0 on the + * boot command line: +diff -Nur linux-4.4.13.orig/include/linux/spinlock_api_smp.h linux-4.4.13/include/linux/spinlock_api_smp.h +--- linux-4.4.13.orig/include/linux/spinlock_api_smp.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/spinlock_api_smp.h 2016-06-22 21:55:08.038523867 +0200 +@@ -189,6 +189,8 @@ + return 0; + } + +-#include ++#ifndef CONFIG_PREEMPT_RT_FULL ++# include ++#endif + + #endif /* __LINUX_SPINLOCK_API_SMP_H */ +diff -Nur linux-4.4.13.orig/include/linux/spinlock.h linux-4.4.13/include/linux/spinlock.h +--- linux-4.4.13.orig/include/linux/spinlock.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/spinlock.h 2016-06-22 21:55:08.038523867 +0200 +@@ -271,7 +271,11 @@ + #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock)) + + /* Include rwlock functions */ +-#include ++#ifdef CONFIG_PREEMPT_RT_FULL ++# include ++#else ++# include ++#endif + + /* + * Pull the _spin_*()/_read_*()/_write_*() functions/declarations: +@@ -282,6 +286,10 @@ + # include + #endif + ++#ifdef CONFIG_PREEMPT_RT_FULL ++# include ++#else /* PREEMPT_RT_FULL */ ++ + /* + * Map the spin_lock functions to the raw variants for PREEMPT_RT=n + */ +@@ -416,4 +424,6 @@ + #define atomic_dec_and_lock(atomic, lock) \ + __cond_lock(lock, _atomic_dec_and_lock(atomic, lock)) + ++#endif /* !PREEMPT_RT_FULL */ ++ + #endif /* __LINUX_SPINLOCK_H */ +diff -Nur linux-4.4.13.orig/include/linux/spinlock_rt.h linux-4.4.13/include/linux/spinlock_rt.h +--- linux-4.4.13.orig/include/linux/spinlock_rt.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.4.13/include/linux/spinlock_rt.h 2016-06-22 21:55:08.038523867 +0200 +@@ -0,0 +1,163 @@ ++#ifndef __LINUX_SPINLOCK_RT_H ++#define __LINUX_SPINLOCK_RT_H ++ ++#ifndef __LINUX_SPINLOCK_H ++#error Do not include directly. 
Use spinlock.h ++#endif ++ ++#include ++ ++extern void ++__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key); ++ ++#define spin_lock_init(slock) \ ++do { \ ++ static struct lock_class_key __key; \ ++ \ ++ rt_mutex_init(&(slock)->lock); \ ++ __rt_spin_lock_init(slock, #slock, &__key); \ ++} while (0) ++ ++void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock); ++void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock); ++int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock); ++ ++extern void __lockfunc rt_spin_lock(spinlock_t *lock); ++extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock); ++extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass); ++extern void __lockfunc rt_spin_unlock(spinlock_t *lock); ++extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock); ++extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags); ++extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock); ++extern int __lockfunc rt_spin_trylock(spinlock_t *lock); ++extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock); ++ ++/* ++ * lockdep-less calls, for derived types like rwlock: ++ * (for trylock they can use rt_mutex_trylock() directly. ++ */ ++extern void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock); ++extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock); ++extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock); ++extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock); ++ ++#define spin_lock(lock) rt_spin_lock(lock) ++ ++#define spin_lock_bh(lock) \ ++ do { \ ++ local_bh_disable(); \ ++ rt_spin_lock(lock); \ ++ } while (0) ++ ++#define spin_lock_irq(lock) spin_lock(lock) ++ ++#define spin_do_trylock(lock) __cond_lock(lock, rt_spin_trylock(lock)) ++ ++#define spin_trylock(lock) \ ++({ \ ++ int __locked; \ ++ __locked = spin_do_trylock(lock); \ ++ __locked; \ ++}) ++ ++#ifdef CONFIG_LOCKDEP ++# define spin_lock_nested(lock, subclass) \ ++ do { \ ++ rt_spin_lock_nested(lock, subclass); \ ++ } while (0) ++ ++#define spin_lock_bh_nested(lock, subclass) \ ++ do { \ ++ local_bh_disable(); \ ++ rt_spin_lock_nested(lock, subclass); \ ++ } while (0) ++ ++# define spin_lock_irqsave_nested(lock, flags, subclass) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = 0; \ ++ rt_spin_lock_nested(lock, subclass); \ ++ } while (0) ++#else ++# define spin_lock_nested(lock, subclass) spin_lock(lock) ++# define spin_lock_bh_nested(lock, subclass) spin_lock_bh(lock) ++ ++# define spin_lock_irqsave_nested(lock, flags, subclass) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = 0; \ ++ spin_lock(lock); \ ++ } while (0) ++#endif ++ ++#define spin_lock_irqsave(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ flags = 0; \ ++ spin_lock(lock); \ ++ } while (0) ++ ++static inline unsigned long spin_lock_trace_flags(spinlock_t *lock) ++{ ++ unsigned long flags = 0; ++#ifdef CONFIG_TRACE_IRQFLAGS ++ flags = rt_spin_lock_trace_flags(lock); ++#else ++ spin_lock(lock); /* lock_local */ ++#endif ++ return flags; ++} ++ ++/* FIXME: we need rt_spin_lock_nest_lock */ ++#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0) ++ ++#define spin_unlock(lock) rt_spin_unlock(lock) ++ ++#define spin_unlock_bh(lock) \ ++ do { \ ++ rt_spin_unlock(lock); \ ++ local_bh_enable(); \ ++ } while (0) ++ ++#define spin_unlock_irq(lock) spin_unlock(lock) ++ ++#define spin_unlock_irqrestore(lock, flags) \ ++ do { \ ++ typecheck(unsigned long, flags); \ ++ (void) 
flags; \ ++ spin_unlock(lock); \ ++ } while (0) ++ ++#define spin_trylock_bh(lock) __cond_lock(lock, rt_spin_trylock_bh(lock)) ++#define spin_trylock_irq(lock) spin_trylock(lock) ++ ++#define spin_trylock_irqsave(lock, flags) \ ++ rt_spin_trylock_irqsave(lock, &(flags)) ++ ++#define spin_unlock_wait(lock) rt_spin_unlock_wait(lock) ++ ++#ifdef CONFIG_GENERIC_LOCKBREAK ++# define spin_is_contended(lock) ((lock)->break_lock) ++#else ++# define spin_is_contended(lock) (((void)(lock), 0)) ++#endif ++ ++static inline int spin_can_lock(spinlock_t *lock) ++{ ++ return !rt_mutex_is_locked(&lock->lock); ++} ++ ++static inline int spin_is_locked(spinlock_t *lock) ++{ ++ return rt_mutex_is_locked(&lock->lock); ++} ++ ++static inline void assert_spin_locked(spinlock_t *lock) ++{ ++ BUG_ON(!spin_is_locked(lock)); ++} ++ ++#define atomic_dec_and_lock(atomic, lock) \ ++ atomic_dec_and_spin_lock(atomic, lock) ++ ++#endif +diff -Nur linux-4.4.13.orig/include/linux/spinlock_types.h linux-4.4.13/include/linux/spinlock_types.h +--- linux-4.4.13.orig/include/linux/spinlock_types.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/spinlock_types.h 2016-06-22 21:55:08.038523867 +0200 +@@ -9,80 +9,15 @@ + * Released under the General Public License (GPL). + */ + +-#if defined(CONFIG_SMP) +-# include +-#else +-# include +-#endif +- +-#include +- +-typedef struct raw_spinlock { +- arch_spinlock_t raw_lock; +-#ifdef CONFIG_GENERIC_LOCKBREAK +- unsigned int break_lock; +-#endif +-#ifdef CONFIG_DEBUG_SPINLOCK +- unsigned int magic, owner_cpu; +- void *owner; +-#endif +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +- struct lockdep_map dep_map; +-#endif +-} raw_spinlock_t; +- +-#define SPINLOCK_MAGIC 0xdead4ead +- +-#define SPINLOCK_OWNER_INIT ((void *)-1L) +- +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +-# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } +-#else +-# define SPIN_DEP_MAP_INIT(lockname) +-#endif ++#include + +-#ifdef CONFIG_DEBUG_SPINLOCK +-# define SPIN_DEBUG_INIT(lockname) \ +- .magic = SPINLOCK_MAGIC, \ +- .owner_cpu = -1, \ +- .owner = SPINLOCK_OWNER_INIT, ++#ifndef CONFIG_PREEMPT_RT_FULL ++# include ++# include + #else +-# define SPIN_DEBUG_INIT(lockname) ++# include ++# include ++# include + #endif + +-#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ +- { \ +- .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ +- SPIN_DEBUG_INIT(lockname) \ +- SPIN_DEP_MAP_INIT(lockname) } +- +-#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ +- (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) +- +-#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) +- +-typedef struct spinlock { +- union { +- struct raw_spinlock rlock; +- +-#ifdef CONFIG_DEBUG_LOCK_ALLOC +-# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) +- struct { +- u8 __padding[LOCK_PADSIZE]; +- struct lockdep_map dep_map; +- }; +-#endif +- }; +-} spinlock_t; +- +-#define __SPIN_LOCK_INITIALIZER(lockname) \ +- { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } +- +-#define __SPIN_LOCK_UNLOCKED(lockname) \ +- (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) +- +-#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) +- +-#include +- + #endif /* __LINUX_SPINLOCK_TYPES_H */ +diff -Nur linux-4.4.13.orig/include/linux/spinlock_types_nort.h linux-4.4.13/include/linux/spinlock_types_nort.h +--- linux-4.4.13.orig/include/linux/spinlock_types_nort.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.4.13/include/linux/spinlock_types_nort.h 2016-06-22 21:55:08.038523867 +0200 +@@ -0,0 +1,33 @@ ++#ifndef 
__LINUX_SPINLOCK_TYPES_NORT_H ++#define __LINUX_SPINLOCK_TYPES_NORT_H ++ ++#ifndef __LINUX_SPINLOCK_TYPES_H ++#error "Do not include directly. Include spinlock_types.h instead" ++#endif ++ ++/* ++ * The non RT version maps spinlocks to raw_spinlocks ++ */ ++typedef struct spinlock { ++ union { ++ struct raw_spinlock rlock; ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map)) ++ struct { ++ u8 __padding[LOCK_PADSIZE]; ++ struct lockdep_map dep_map; ++ }; ++#endif ++ }; ++} spinlock_t; ++ ++#define __SPIN_LOCK_INITIALIZER(lockname) \ ++ { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } } ++ ++#define __SPIN_LOCK_UNLOCKED(lockname) \ ++ (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname) ++ ++#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x) ++ ++#endif +diff -Nur linux-4.4.13.orig/include/linux/spinlock_types_raw.h linux-4.4.13/include/linux/spinlock_types_raw.h +--- linux-4.4.13.orig/include/linux/spinlock_types_raw.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.4.13/include/linux/spinlock_types_raw.h 2016-06-22 21:55:08.038523867 +0200 +@@ -0,0 +1,56 @@ ++#ifndef __LINUX_SPINLOCK_TYPES_RAW_H ++#define __LINUX_SPINLOCK_TYPES_RAW_H ++ ++#if defined(CONFIG_SMP) ++# include ++#else ++# include ++#endif ++ ++#include ++ ++typedef struct raw_spinlock { ++ arch_spinlock_t raw_lock; ++#ifdef CONFIG_GENERIC_LOCKBREAK ++ unsigned int break_lock; ++#endif ++#ifdef CONFIG_DEBUG_SPINLOCK ++ unsigned int magic, owner_cpu; ++ void *owner; ++#endif ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++} raw_spinlock_t; ++ ++#define SPINLOCK_MAGIC 0xdead4ead ++ ++#define SPINLOCK_OWNER_INIT ((void *)-1L) ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname } ++#else ++# define SPIN_DEP_MAP_INIT(lockname) ++#endif ++ ++#ifdef CONFIG_DEBUG_SPINLOCK ++# define SPIN_DEBUG_INIT(lockname) \ ++ .magic = SPINLOCK_MAGIC, \ ++ .owner_cpu = -1, \ ++ .owner = SPINLOCK_OWNER_INIT, ++#else ++# define SPIN_DEBUG_INIT(lockname) ++#endif ++ ++#define __RAW_SPIN_LOCK_INITIALIZER(lockname) \ ++ { \ ++ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \ ++ SPIN_DEBUG_INIT(lockname) \ ++ SPIN_DEP_MAP_INIT(lockname) } ++ ++#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \ ++ (raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname) ++ ++#define DEFINE_RAW_SPINLOCK(x) raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x) ++ ++#endif +diff -Nur linux-4.4.13.orig/include/linux/spinlock_types_rt.h linux-4.4.13/include/linux/spinlock_types_rt.h +--- linux-4.4.13.orig/include/linux/spinlock_types_rt.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.4.13/include/linux/spinlock_types_rt.h 2016-06-22 21:55:08.038523867 +0200 +@@ -0,0 +1,51 @@ ++#ifndef __LINUX_SPINLOCK_TYPES_RT_H ++#define __LINUX_SPINLOCK_TYPES_RT_H ++ ++#ifndef __LINUX_SPINLOCK_TYPES_H ++#error "Do not include directly. 
Include spinlock_types.h instead" ++#endif ++ ++#include ++ ++/* ++ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field: ++ */ ++typedef struct spinlock { ++ struct rt_mutex lock; ++ unsigned int break_lock; ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ struct lockdep_map dep_map; ++#endif ++} spinlock_t; ++ ++#ifdef CONFIG_DEBUG_RT_MUTEXES ++# define __RT_SPIN_INITIALIZER(name) \ ++ { \ ++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ ++ .save_state = 1, \ ++ .file = __FILE__, \ ++ .line = __LINE__ , \ ++ } ++#else ++# define __RT_SPIN_INITIALIZER(name) \ ++ { \ ++ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \ ++ .save_state = 1, \ ++ } ++#endif ++ ++/* ++.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock) ++*/ ++ ++#define __SPIN_LOCK_UNLOCKED(name) \ ++ { .lock = __RT_SPIN_INITIALIZER(name.lock), \ ++ SPIN_DEP_MAP_INIT(name) } ++ ++#define __DEFINE_SPINLOCK(name) \ ++ spinlock_t name = __SPIN_LOCK_UNLOCKED(name) ++ ++#define DEFINE_SPINLOCK(name) \ ++ spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name) ++ ++#endif +diff -Nur linux-4.4.13.orig/include/linux/srcu.h linux-4.4.13/include/linux/srcu.h +--- linux-4.4.13.orig/include/linux/srcu.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/srcu.h 2016-06-22 21:55:08.038523867 +0200 +@@ -84,10 +84,10 @@ + + void process_srcu(struct work_struct *work); + +-#define __SRCU_STRUCT_INIT(name) \ ++#define __SRCU_STRUCT_INIT(name, pcpu_name) \ + { \ + .completed = -300, \ +- .per_cpu_ref = &name##_srcu_array, \ ++ .per_cpu_ref = &pcpu_name, \ + .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \ + .running = false, \ + .batch_queue = RCU_BATCH_INIT(name.batch_queue), \ +@@ -104,7 +104,7 @@ + */ + #define __DEFINE_SRCU(name, is_static) \ + static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\ +- is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name) ++ is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array) + #define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */) + #define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static) + +diff -Nur linux-4.4.13.orig/include/linux/suspend.h linux-4.4.13/include/linux/suspend.h +--- linux-4.4.13.orig/include/linux/suspend.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/suspend.h 2016-06-22 21:55:08.038523867 +0200 +@@ -194,6 +194,12 @@ + void (*end)(void); + }; + ++#if defined(CONFIG_SUSPEND) || defined(CONFIG_HIBERNATION) ++extern bool pm_in_action; ++#else ++# define pm_in_action false ++#endif ++ + #ifdef CONFIG_SUSPEND + /** + * suspend_set_ops - set platform dependent suspend operations +diff -Nur linux-4.4.13.orig/include/linux/swait.h linux-4.4.13/include/linux/swait.h +--- linux-4.4.13.orig/include/linux/swait.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.4.13/include/linux/swait.h 2016-06-22 21:55:08.038523867 +0200 +@@ -0,0 +1,173 @@ ++#ifndef _LINUX_SWAIT_H ++#define _LINUX_SWAIT_H ++ ++#include ++#include ++#include ++#include ++ ++/* ++ * Simple wait queues ++ * ++ * While these are very similar to the other/complex wait queues (wait.h) the ++ * most important difference is that the simple waitqueue allows for ++ * deterministic behaviour -- IOW it has strictly bounded IRQ and lock hold ++ * times. 
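A hedged usage sketch of this API, using exactly the names declared later in this header (kernel context assumed, so it is illustrative rather than standalone-compilable; a real user also needs ordering between the condition store and the wakeup):

static DECLARE_SWAIT_QUEUE_HEAD(toy_done_wq);
static bool toy_done;

static void toy_wait_for_done(void)
{
	/* sleeps TASK_UNINTERRUPTIBLE until the condition holds */
	swait_event(toy_done_wq, toy_done);
}

static void toy_mark_done(void)
{
	toy_done = true;
	/* the queue head uses a raw spinlock and wakeups are bounded,
	 * so this stays usable where sleeping locks are not */
	swake_up(&toy_done_wq);
}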
++ * ++ * In order to make this so, we had to drop a fair number of features of the ++ * other waitqueue code; notably: ++ * ++ * - mixing INTERRUPTIBLE and UNINTERRUPTIBLE sleeps on the same waitqueue; ++ * all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right ++ * sleeper state. ++ * ++ * - the exclusive mode; because this requires preserving the list order ++ * and this is hard. ++ * ++ * - custom wake functions; because you cannot give any guarantees about ++ * random code. ++ * ++ * As a side effect of this; the data structures are slimmer. ++ * ++ * One would recommend using this wait queue where possible. ++ */ ++ ++struct task_struct; ++ ++struct swait_queue_head { ++ raw_spinlock_t lock; ++ struct list_head task_list; ++}; ++ ++struct swait_queue { ++ struct task_struct *task; ++ struct list_head task_list; ++}; ++ ++#define __SWAITQUEUE_INITIALIZER(name) { \ ++ .task = current, \ ++ .task_list = LIST_HEAD_INIT((name).task_list), \ ++} ++ ++#define DECLARE_SWAITQUEUE(name) \ ++ struct swait_queue name = __SWAITQUEUE_INITIALIZER(name) ++ ++#define __SWAIT_QUEUE_HEAD_INITIALIZER(name) { \ ++ .lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock), \ ++ .task_list = LIST_HEAD_INIT((name).task_list), \ ++} ++ ++#define DECLARE_SWAIT_QUEUE_HEAD(name) \ ++ struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INITIALIZER(name) ++ ++extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name, ++ struct lock_class_key *key); ++ ++#define init_swait_queue_head(q) \ ++ do { \ ++ static struct lock_class_key __key; \ ++ __init_swait_queue_head((q), #q, &__key); \ ++ } while (0) ++ ++#ifdef CONFIG_LOCKDEP ++# define __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name) \ ++ ({ init_swait_queue_head(&name); name; }) ++# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \ ++ struct swait_queue_head name = __SWAIT_QUEUE_HEAD_INIT_ONSTACK(name) ++#else ++# define DECLARE_SWAIT_QUEUE_HEAD_ONSTACK(name) \ ++ DECLARE_SWAIT_QUEUE_HEAD(name) ++#endif ++ ++static inline int swait_active(struct swait_queue_head *q) ++{ ++ return !list_empty(&q->task_list); ++} ++ ++extern void swake_up(struct swait_queue_head *q); ++extern void swake_up_all(struct swait_queue_head *q); ++extern void swake_up_locked(struct swait_queue_head *q); ++extern void swake_up_all_locked(struct swait_queue_head *q); ++ ++extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait); ++extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state); ++extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state); ++ ++extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait); ++extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait); ++ ++/* as per ___wait_event() but for swait, therefore "exclusive == 0" */ ++#define ___swait_event(wq, condition, state, ret, cmd) \ ++({ \ ++ struct swait_queue __wait; \ ++ long __ret = ret; \ ++ \ ++ INIT_LIST_HEAD(&__wait.task_list); \ ++ for (;;) { \ ++ long __int = prepare_to_swait_event(&wq, &__wait, state);\ ++ \ ++ if (condition) \ ++ break; \ ++ \ ++ if (___wait_is_interruptible(state) && __int) { \ ++ __ret = __int; \ ++ break; \ ++ } \ ++ \ ++ cmd; \ ++ } \ ++ finish_swait(&wq, &__wait); \ ++ __ret; \ ++}) ++ ++#define __swait_event(wq, condition) \ ++ (void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \ ++ schedule()) ++ ++#define swait_event(wq, condition) \ ++do { \ ++ if (condition) \ ++ break; \ ++ __swait_event(wq, condition); \ ++} 
while (0) ++ ++#define __swait_event_timeout(wq, condition, timeout) \ ++ ___swait_event(wq, ___wait_cond_timeout(condition), \ ++ TASK_UNINTERRUPTIBLE, timeout, \ ++ __ret = schedule_timeout(__ret)) ++ ++#define swait_event_timeout(wq, condition, timeout) \ ++({ \ ++ long __ret = timeout; \ ++ if (!___wait_cond_timeout(condition)) \ ++ __ret = __swait_event_timeout(wq, condition, timeout); \ ++ __ret; \ ++}) ++ ++#define __swait_event_interruptible(wq, condition) \ ++ ___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \ ++ schedule()) ++ ++#define swait_event_interruptible(wq, condition) \ ++({ \ ++ int __ret = 0; \ ++ if (!(condition)) \ ++ __ret = __swait_event_interruptible(wq, condition); \ ++ __ret; \ ++}) ++ ++#define __swait_event_interruptible_timeout(wq, condition, timeout) \ ++ ___swait_event(wq, ___wait_cond_timeout(condition), \ ++ TASK_INTERRUPTIBLE, timeout, \ ++ __ret = schedule_timeout(__ret)) ++ ++#define swait_event_interruptible_timeout(wq, condition, timeout) \ ++({ \ ++ long __ret = timeout; \ ++ if (!___wait_cond_timeout(condition)) \ ++ __ret = __swait_event_interruptible_timeout(wq, \ ++ condition, timeout); \ ++ __ret; \ ++}) ++ ++#endif /* _LINUX_SWAIT_H */ +diff -Nur linux-4.4.13.orig/include/linux/swap.h linux-4.4.13/include/linux/swap.h +--- linux-4.4.13.orig/include/linux/swap.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/swap.h 2016-06-22 21:55:08.038523867 +0200 +@@ -11,6 +11,7 @@ + #include + #include + #include ++#include + #include + + struct notifier_block; +@@ -252,7 +253,8 @@ + void *workingset_eviction(struct address_space *mapping, struct page *page); + bool workingset_refault(void *shadow); + void workingset_activation(struct page *page); +-extern struct list_lru workingset_shadow_nodes; ++extern struct list_lru __workingset_shadow_nodes; ++DECLARE_LOCAL_IRQ_LOCK(workingset_shadow_lock); + + static inline unsigned int workingset_node_pages(struct radix_tree_node *node) + { +@@ -296,6 +298,7 @@ + + + /* linux/mm/swap.c */ ++DECLARE_LOCAL_IRQ_LOCK(swapvec_lock); + extern void lru_cache_add(struct page *); + extern void lru_cache_add_anon(struct page *page); + extern void lru_cache_add_file(struct page *page); +diff -Nur linux-4.4.13.orig/include/linux/thread_info.h linux-4.4.13/include/linux/thread_info.h +--- linux-4.4.13.orig/include/linux/thread_info.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/thread_info.h 2016-06-22 21:55:08.038523867 +0200 +@@ -102,7 +102,17 @@ + #define test_thread_flag(flag) \ + test_ti_thread_flag(current_thread_info(), flag) + +-#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) ++#ifdef CONFIG_PREEMPT_LAZY ++#define tif_need_resched() (test_thread_flag(TIF_NEED_RESCHED) || \ ++ test_thread_flag(TIF_NEED_RESCHED_LAZY)) ++#define tif_need_resched_now() (test_thread_flag(TIF_NEED_RESCHED)) ++#define tif_need_resched_lazy() test_thread_flag(TIF_NEED_RESCHED_LAZY)) ++ ++#else ++#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED) ++#define tif_need_resched_now() test_thread_flag(TIF_NEED_RESCHED) ++#define tif_need_resched_lazy() 0 ++#endif + + #if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK + /* +diff -Nur linux-4.4.13.orig/include/linux/timer.h linux-4.4.13/include/linux/timer.h +--- linux-4.4.13.orig/include/linux/timer.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/timer.h 2016-06-22 21:55:08.038523867 +0200 +@@ -225,7 +225,7 @@ + + extern int try_to_del_timer_sync(struct timer_list *timer); + +-#ifdef 
CONFIG_SMP ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) + extern int del_timer_sync(struct timer_list *timer); + #else + # define del_timer_sync(t) del_timer(t) +diff -Nur linux-4.4.13.orig/include/linux/trace_events.h linux-4.4.13/include/linux/trace_events.h +--- linux-4.4.13.orig/include/linux/trace_events.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/trace_events.h 2016-06-22 21:55:08.038523867 +0200 +@@ -66,6 +66,9 @@ + unsigned char flags; + unsigned char preempt_count; + int pid; ++ unsigned short migrate_disable; ++ unsigned short padding; ++ unsigned char preempt_lazy_count; + }; + + #define TRACE_EVENT_TYPE_MAX \ +diff -Nur linux-4.4.13.orig/include/linux/uaccess.h linux-4.4.13/include/linux/uaccess.h +--- linux-4.4.13.orig/include/linux/uaccess.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/uaccess.h 2016-06-22 21:55:08.038523867 +0200 +@@ -24,6 +24,7 @@ + */ + static inline void pagefault_disable(void) + { ++ migrate_disable(); + pagefault_disabled_inc(); + /* + * make sure to have issued the store before a pagefault +@@ -40,6 +41,7 @@ + */ + barrier(); + pagefault_disabled_dec(); ++ migrate_enable(); + } + + /* +diff -Nur linux-4.4.13.orig/include/linux/uprobes.h linux-4.4.13/include/linux/uprobes.h +--- linux-4.4.13.orig/include/linux/uprobes.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/uprobes.h 2016-06-22 21:55:08.038523867 +0200 +@@ -27,6 +27,7 @@ + #include + #include + #include ++#include + + struct vm_area_struct; + struct mm_struct; +diff -Nur linux-4.4.13.orig/include/linux/vmstat.h linux-4.4.13/include/linux/vmstat.h +--- linux-4.4.13.orig/include/linux/vmstat.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/vmstat.h 2016-06-22 21:55:08.042523867 +0200 +@@ -33,7 +33,9 @@ + */ + static inline void __count_vm_event(enum vm_event_item item) + { ++ preempt_disable_rt(); + raw_cpu_inc(vm_event_states.event[item]); ++ preempt_enable_rt(); + } + + static inline void count_vm_event(enum vm_event_item item) +@@ -43,7 +45,9 @@ + + static inline void __count_vm_events(enum vm_event_item item, long delta) + { ++ preempt_disable_rt(); + raw_cpu_add(vm_event_states.event[item], delta); ++ preempt_enable_rt(); + } + + static inline void count_vm_events(enum vm_event_item item, long delta) +diff -Nur linux-4.4.13.orig/include/linux/wait.h linux-4.4.13/include/linux/wait.h +--- linux-4.4.13.orig/include/linux/wait.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/linux/wait.h 2016-06-22 21:55:08.042523867 +0200 +@@ -8,6 +8,7 @@ + #include + #include + #include ++#include + + typedef struct __wait_queue wait_queue_t; + typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key); +diff -Nur linux-4.4.13.orig/include/linux/work-simple.h linux-4.4.13/include/linux/work-simple.h +--- linux-4.4.13.orig/include/linux/work-simple.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.4.13/include/linux/work-simple.h 2016-06-22 21:55:08.042523867 +0200 +@@ -0,0 +1,24 @@ ++#ifndef _LINUX_SWORK_H ++#define _LINUX_SWORK_H ++ ++#include ++ ++struct swork_event { ++ struct list_head item; ++ unsigned long flags; ++ void (*func)(struct swork_event *); ++}; ++ ++static inline void INIT_SWORK(struct swork_event *event, ++ void (*func)(struct swork_event *)) ++{ ++ event->flags = 0; ++ event->func = func; ++} ++ ++bool swork_queue(struct swork_event *sev); ++ ++int swork_get(void); ++void swork_put(void); ++ ++#endif /* _LINUX_SWORK_H */ +diff -Nur 
linux-4.4.13.orig/include/net/dst.h linux-4.4.13/include/net/dst.h +--- linux-4.4.13.orig/include/net/dst.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/net/dst.h 2016-06-22 21:55:08.042523867 +0200 +@@ -437,7 +437,7 @@ + static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n, + struct sk_buff *skb) + { +- const struct hh_cache *hh; ++ struct hh_cache *hh; + + if (dst->pending_confirm) { + unsigned long now = jiffies; +diff -Nur linux-4.4.13.orig/include/net/neighbour.h linux-4.4.13/include/net/neighbour.h +--- linux-4.4.13.orig/include/net/neighbour.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/net/neighbour.h 2016-06-22 21:55:08.042523867 +0200 +@@ -446,7 +446,7 @@ + } + #endif + +-static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb) ++static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb) + { + unsigned int seq; + int hh_len; +@@ -501,7 +501,7 @@ + + #define NEIGH_CB(skb) ((struct neighbour_cb *)(skb)->cb) + +-static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n, ++static inline void neigh_ha_snapshot(char *dst, struct neighbour *n, + const struct net_device *dev) + { + unsigned int seq; +diff -Nur linux-4.4.13.orig/include/net/netns/ipv4.h linux-4.4.13/include/net/netns/ipv4.h +--- linux-4.4.13.orig/include/net/netns/ipv4.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/net/netns/ipv4.h 2016-06-22 21:55:08.042523867 +0200 +@@ -70,6 +70,7 @@ + + int sysctl_icmp_echo_ignore_all; + int sysctl_icmp_echo_ignore_broadcasts; ++ int sysctl_icmp_echo_sysrq; + int sysctl_icmp_ignore_bogus_error_responses; + int sysctl_icmp_ratelimit; + int sysctl_icmp_ratemask; +diff -Nur linux-4.4.13.orig/include/trace/events/hist.h linux-4.4.13/include/trace/events/hist.h +--- linux-4.4.13.orig/include/trace/events/hist.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.4.13/include/trace/events/hist.h 2016-06-22 21:55:08.042523867 +0200 +@@ -0,0 +1,73 @@ ++#undef TRACE_SYSTEM ++#define TRACE_SYSTEM hist ++ ++#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ) ++#define _TRACE_HIST_H ++ ++#include "latency_hist.h" ++#include ++ ++#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST) ++#define trace_preemptirqsoff_hist(a, b) ++#define trace_preemptirqsoff_hist_rcuidle(a, b) ++#else ++TRACE_EVENT(preemptirqsoff_hist, ++ ++ TP_PROTO(int reason, int starthist), ++ ++ TP_ARGS(reason, starthist), ++ ++ TP_STRUCT__entry( ++ __field(int, reason) ++ __field(int, starthist) ++ ), ++ ++ TP_fast_assign( ++ __entry->reason = reason; ++ __entry->starthist = starthist; ++ ), ++ ++ TP_printk("reason=%s starthist=%s", getaction(__entry->reason), ++ __entry->starthist ? "start" : "stop") ++); ++#endif ++ ++#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST ++#define trace_hrtimer_interrupt(a, b, c, d) ++#else ++TRACE_EVENT(hrtimer_interrupt, ++ ++ TP_PROTO(int cpu, long long offset, struct task_struct *curr, ++ struct task_struct *task), ++ ++ TP_ARGS(cpu, offset, curr, task), ++ ++ TP_STRUCT__entry( ++ __field(int, cpu) ++ __field(long long, offset) ++ __array(char, ccomm, TASK_COMM_LEN) ++ __field(int, cprio) ++ __array(char, tcomm, TASK_COMM_LEN) ++ __field(int, tprio) ++ ), ++ ++ TP_fast_assign( ++ __entry->cpu = cpu; ++ __entry->offset = offset; ++ memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN); ++ __entry->cprio = curr->prio; ++ memcpy(__entry->tcomm, task != NULL ? task->comm : "", ++ task != NULL ? 
TASK_COMM_LEN : 7); ++ __entry->tprio = task != NULL ? task->prio : -1; ++ ), ++ ++ TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]", ++ __entry->cpu, __entry->offset, __entry->ccomm, ++ __entry->cprio, __entry->tcomm, __entry->tprio) ++); ++#endif ++ ++#endif /* _TRACE_HIST_H */ ++ ++/* This part must be outside protection */ ++#include +diff -Nur linux-4.4.13.orig/include/trace/events/latency_hist.h linux-4.4.13/include/trace/events/latency_hist.h +--- linux-4.4.13.orig/include/trace/events/latency_hist.h 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.4.13/include/trace/events/latency_hist.h 2016-06-22 21:55:08.042523867 +0200 +@@ -0,0 +1,29 @@ ++#ifndef _LATENCY_HIST_H ++#define _LATENCY_HIST_H ++ ++enum hist_action { ++ IRQS_ON, ++ PREEMPT_ON, ++ TRACE_STOP, ++ IRQS_OFF, ++ PREEMPT_OFF, ++ TRACE_START, ++}; ++ ++static char *actions[] = { ++ "IRQS_ON", ++ "PREEMPT_ON", ++ "TRACE_STOP", ++ "IRQS_OFF", ++ "PREEMPT_OFF", ++ "TRACE_START", ++}; ++ ++static inline char *getaction(int action) ++{ ++ if (action >= 0 && action <= sizeof(actions)/sizeof(actions[0])) ++ return actions[action]; ++ return "unknown"; ++} ++ ++#endif /* _LATENCY_HIST_H */ +diff -Nur linux-4.4.13.orig/include/trace/events/writeback.h linux-4.4.13/include/trace/events/writeback.h +--- linux-4.4.13.orig/include/trace/events/writeback.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/include/trace/events/writeback.h 2016-06-22 21:55:08.042523867 +0200 +@@ -134,58 +134,28 @@ + #ifdef CREATE_TRACE_POINTS + #ifdef CONFIG_CGROUP_WRITEBACK + +-static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb) ++static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb) + { +- return kernfs_path_len(wb->memcg_css->cgroup->kn) + 1; ++ return wb->memcg_css->cgroup->kn->ino; + } + +-static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb) +-{ +- struct cgroup *cgrp = wb->memcg_css->cgroup; +- char *path; +- +- path = cgroup_path(cgrp, buf, kernfs_path_len(cgrp->kn) + 1); +- WARN_ON_ONCE(path != buf); +-} +- +-static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc) +-{ +- if (wbc->wb) +- return __trace_wb_cgroup_size(wbc->wb); +- else +- return 2; +-} +- +-static inline void __trace_wbc_assign_cgroup(char *buf, +- struct writeback_control *wbc) ++static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc) + { + if (wbc->wb) +- __trace_wb_assign_cgroup(buf, wbc->wb); ++ return __trace_wb_assign_cgroup(wbc->wb); + else +- strcpy(buf, "/"); ++ return -1U; + } +- + #else /* CONFIG_CGROUP_WRITEBACK */ + +-static inline size_t __trace_wb_cgroup_size(struct bdi_writeback *wb) +-{ +- return 2; +-} +- +-static inline void __trace_wb_assign_cgroup(char *buf, struct bdi_writeback *wb) +-{ +- strcpy(buf, "/"); +-} +- +-static inline size_t __trace_wbc_cgroup_size(struct writeback_control *wbc) ++static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb) + { +- return 2; ++ return -1U; + } + +-static inline void __trace_wbc_assign_cgroup(char *buf, +- struct writeback_control *wbc) ++static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc) + { +- strcpy(buf, "/"); ++ return -1U; + } + + #endif /* CONFIG_CGROUP_WRITEBACK */ +@@ -201,7 +171,7 @@ + __array(char, name, 32) + __field(unsigned long, ino) + __field(int, sync_mode) +- __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc)) ++ __field(unsigned int, cgroup_ino) + ), + + TP_fast_assign( +@@ -209,14 +179,14 @@ + 
dev_name(inode_to_bdi(inode)->dev), 32); + __entry->ino = inode->i_ino; + __entry->sync_mode = wbc->sync_mode; +- __trace_wbc_assign_cgroup(__get_str(cgroup), wbc); ++ __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); + ), + +- TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup=%s", ++ TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%u", + __entry->name, + __entry->ino, + __entry->sync_mode, +- __get_str(cgroup) ++ __entry->cgroup_ino + ) + ); + +@@ -246,7 +216,7 @@ + __field(int, range_cyclic) + __field(int, for_background) + __field(int, reason) +- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb)) ++ __field(unsigned int, cgroup_ino) + ), + TP_fast_assign( + strncpy(__entry->name, +@@ -258,10 +228,10 @@ + __entry->range_cyclic = work->range_cyclic; + __entry->for_background = work->for_background; + __entry->reason = work->reason; +- __trace_wb_assign_cgroup(__get_str(cgroup), wb); ++ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); + ), + TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d " +- "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup=%s", ++ "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%u", + __entry->name, + MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev), + __entry->nr_pages, +@@ -270,7 +240,7 @@ + __entry->range_cyclic, + __entry->for_background, + __print_symbolic(__entry->reason, WB_WORK_REASON), +- __get_str(cgroup) ++ __entry->cgroup_ino + ) + ); + #define DEFINE_WRITEBACK_WORK_EVENT(name) \ +@@ -300,15 +270,15 @@ + TP_ARGS(wb), + TP_STRUCT__entry( + __array(char, name, 32) +- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb)) ++ __field(unsigned int, cgroup_ino) + ), + TP_fast_assign( + strncpy(__entry->name, dev_name(wb->bdi->dev), 32); +- __trace_wb_assign_cgroup(__get_str(cgroup), wb); ++ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); + ), +- TP_printk("bdi %s: cgroup=%s", ++ TP_printk("bdi %s: cgroup_ino=%u", + __entry->name, +- __get_str(cgroup) ++ __entry->cgroup_ino + ) + ); + #define DEFINE_WRITEBACK_EVENT(name) \ +@@ -347,7 +317,7 @@ + __field(int, range_cyclic) + __field(long, range_start) + __field(long, range_end) +- __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc)) ++ __field(unsigned int, cgroup_ino) + ), + + TP_fast_assign( +@@ -361,12 +331,12 @@ + __entry->range_cyclic = wbc->range_cyclic; + __entry->range_start = (long)wbc->range_start; + __entry->range_end = (long)wbc->range_end; +- __trace_wbc_assign_cgroup(__get_str(cgroup), wbc); ++ __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); + ), + + TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d " + "bgrd=%d reclm=%d cyclic=%d " +- "start=0x%lx end=0x%lx cgroup=%s", ++ "start=0x%lx end=0x%lx cgroup_ino=%u", + __entry->name, + __entry->nr_to_write, + __entry->pages_skipped, +@@ -377,7 +347,7 @@ + __entry->range_cyclic, + __entry->range_start, + __entry->range_end, +- __get_str(cgroup) ++ __entry->cgroup_ino + ) + ) + +@@ -398,7 +368,7 @@ + __field(long, age) + __field(int, moved) + __field(int, reason) +- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb)) ++ __field(unsigned int, cgroup_ino) + ), + TP_fast_assign( + unsigned long *older_than_this = work->older_than_this; +@@ -408,15 +378,15 @@ + (jiffies - *older_than_this) * 1000 / HZ : -1; + __entry->moved = moved; + __entry->reason = work->reason; +- __trace_wb_assign_cgroup(__get_str(cgroup), wb); ++ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); + ), +- TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup=%s", ++ TP_printk("bdi %s: older=%lu age=%ld 
enqueue=%d reason=%s cgroup_ino=%u", + __entry->name, + __entry->older, /* older_than_this in jiffies */ + __entry->age, /* older_than_this in relative milliseconds */ + __entry->moved, + __print_symbolic(__entry->reason, WB_WORK_REASON), +- __get_str(cgroup) ++ __entry->cgroup_ino + ) + ); + +@@ -484,7 +454,7 @@ + __field(unsigned long, dirty_ratelimit) + __field(unsigned long, task_ratelimit) + __field(unsigned long, balanced_dirty_ratelimit) +- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb)) ++ __field(unsigned int, cgroup_ino) + ), + + TP_fast_assign( +@@ -496,13 +466,13 @@ + __entry->task_ratelimit = KBps(task_ratelimit); + __entry->balanced_dirty_ratelimit = + KBps(wb->balanced_dirty_ratelimit); +- __trace_wb_assign_cgroup(__get_str(cgroup), wb); ++ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); + ), + + TP_printk("bdi %s: " + "write_bw=%lu awrite_bw=%lu dirty_rate=%lu " + "dirty_ratelimit=%lu task_ratelimit=%lu " +- "balanced_dirty_ratelimit=%lu cgroup=%s", ++ "balanced_dirty_ratelimit=%lu cgroup_ino=%u", + __entry->bdi, + __entry->write_bw, /* write bandwidth */ + __entry->avg_write_bw, /* avg write bandwidth */ +@@ -510,7 +480,7 @@ + __entry->dirty_ratelimit, /* base ratelimit */ + __entry->task_ratelimit, /* ratelimit with position control */ + __entry->balanced_dirty_ratelimit, /* the balanced ratelimit */ +- __get_str(cgroup) ++ __entry->cgroup_ino + ) + ); + +@@ -548,7 +518,7 @@ + __field( long, pause) + __field(unsigned long, period) + __field( long, think) +- __dynamic_array(char, cgroup, __trace_wb_cgroup_size(wb)) ++ __field(unsigned int, cgroup_ino) + ), + + TP_fast_assign( +@@ -571,7 +541,7 @@ + __entry->period = period * 1000 / HZ; + __entry->pause = pause * 1000 / HZ; + __entry->paused = (jiffies - start_time) * 1000 / HZ; +- __trace_wb_assign_cgroup(__get_str(cgroup), wb); ++ __entry->cgroup_ino = __trace_wb_assign_cgroup(wb); + ), + + +@@ -580,7 +550,7 @@ + "bdi_setpoint=%lu bdi_dirty=%lu " + "dirty_ratelimit=%lu task_ratelimit=%lu " + "dirtied=%u dirtied_pause=%u " +- "paused=%lu pause=%ld period=%lu think=%ld cgroup=%s", ++ "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%u", + __entry->bdi, + __entry->limit, + __entry->setpoint, +@@ -595,7 +565,7 @@ + __entry->pause, /* ms */ + __entry->period, /* ms */ + __entry->think, /* ms */ +- __get_str(cgroup) ++ __entry->cgroup_ino + ) + ); + +@@ -609,8 +579,7 @@ + __field(unsigned long, ino) + __field(unsigned long, state) + __field(unsigned long, dirtied_when) +- __dynamic_array(char, cgroup, +- __trace_wb_cgroup_size(inode_to_wb(inode))) ++ __field(unsigned int, cgroup_ino) + ), + + TP_fast_assign( +@@ -619,16 +588,16 @@ + __entry->ino = inode->i_ino; + __entry->state = inode->i_state; + __entry->dirtied_when = inode->dirtied_when; +- __trace_wb_assign_cgroup(__get_str(cgroup), inode_to_wb(inode)); ++ __entry->cgroup_ino = __trace_wb_assign_cgroup(inode_to_wb(inode)); + ), + +- TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup=%s", ++ TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%u", + __entry->name, + __entry->ino, + show_inode_state(__entry->state), + __entry->dirtied_when, + (jiffies - __entry->dirtied_when) / HZ, +- __get_str(cgroup) ++ __entry->cgroup_ino + ) + ); + +@@ -684,7 +653,7 @@ + __field(unsigned long, writeback_index) + __field(long, nr_to_write) + __field(unsigned long, wrote) +- __dynamic_array(char, cgroup, __trace_wbc_cgroup_size(wbc)) ++ __field(unsigned int, cgroup_ino) + ), + + TP_fast_assign( +@@ -696,11 +665,11 @@ + 
__entry->writeback_index = inode->i_mapping->writeback_index; + __entry->nr_to_write = nr_to_write; + __entry->wrote = nr_to_write - wbc->nr_to_write; +- __trace_wbc_assign_cgroup(__get_str(cgroup), wbc); ++ __entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc); + ), + + TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu " +- "index=%lu to_write=%ld wrote=%lu cgroup=%s", ++ "index=%lu to_write=%ld wrote=%lu cgroup_ino=%u", + __entry->name, + __entry->ino, + show_inode_state(__entry->state), +@@ -709,7 +678,7 @@ + __entry->writeback_index, + __entry->nr_to_write, + __entry->wrote, +- __get_str(cgroup) ++ __entry->cgroup_ino + ) + ); + +diff -Nur linux-4.4.13.orig/init/Kconfig linux-4.4.13/init/Kconfig +--- linux-4.4.13.orig/init/Kconfig 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/init/Kconfig 2016-06-22 21:55:08.042523867 +0200 +@@ -498,7 +498,7 @@ + + config RCU_EXPERT + bool "Make expert-level adjustments to RCU configuration" +- default n ++ default y if PREEMPT_RT_FULL + help + This option needs to be enabled if you wish to make + expert-level adjustments to RCU configuration. By default, +@@ -614,7 +614,7 @@ + + config RCU_FAST_NO_HZ + bool "Accelerate last non-dyntick-idle CPU's grace periods" +- depends on NO_HZ_COMMON && SMP && RCU_EXPERT ++ depends on NO_HZ_COMMON && SMP && RCU_EXPERT && !PREEMPT_RT_FULL + default n + help + This option permits CPUs to enter dynticks-idle state even if +@@ -641,7 +641,7 @@ + config RCU_BOOST + bool "Enable RCU priority boosting" + depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT +- default n ++ default y if PREEMPT_RT_FULL + help + This option boosts the priority of preempted RCU readers that + block the current preemptible RCU grace period for too long. +@@ -1106,6 +1106,7 @@ + config RT_GROUP_SCHED + bool "Group scheduling for SCHED_RR/FIFO" + depends on CGROUP_SCHED ++ depends on !PREEMPT_RT_FULL + default n + help + This feature lets you explicitly allocate real CPU bandwidth +@@ -1719,6 +1720,7 @@ + + config SLAB + bool "SLAB" ++ depends on !PREEMPT_RT_FULL + help + The regular slab allocator that is established and known to work + well in all environments. It organizes cache hot objects in +@@ -1737,6 +1739,7 @@ + config SLOB + depends on EXPERT + bool "SLOB (Simple Allocator)" ++ depends on !PREEMPT_RT_FULL + help + SLOB replaces the stock allocator with a drastically simpler + allocator. 
SLOB is generally more space efficient but +@@ -1746,7 +1749,7 @@ + + config SLUB_CPU_PARTIAL + default y +- depends on SLUB && SMP ++ depends on SLUB && SMP && !PREEMPT_RT_FULL + bool "SLUB per cpu partial cache" + help + Per cpu partial caches accellerate objects allocation and freeing +diff -Nur linux-4.4.13.orig/init/main.c linux-4.4.13/init/main.c +--- linux-4.4.13.orig/init/main.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/init/main.c 2016-06-22 21:55:08.042523867 +0200 +@@ -530,6 +530,7 @@ + setup_command_line(command_line); + setup_nr_cpu_ids(); + setup_per_cpu_areas(); ++ softirq_early_init(); + smp_prepare_boot_cpu(); /* arch-specific boot-cpu hooks */ + + build_all_zonelists(NULL, NULL); +diff -Nur linux-4.4.13.orig/init/Makefile linux-4.4.13/init/Makefile +--- linux-4.4.13.orig/init/Makefile 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/init/Makefile 2016-06-22 21:55:08.042523867 +0200 +@@ -33,4 +33,4 @@ + include/generated/compile.h: FORCE + @$($(quiet)chk_compile.h) + $(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \ +- "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)" ++ "$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)" +diff -Nur linux-4.4.13.orig/ipc/msg.c linux-4.4.13/ipc/msg.c +--- linux-4.4.13.orig/ipc/msg.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/ipc/msg.c 2016-06-22 21:55:08.042523867 +0200 +@@ -183,20 +183,14 @@ + } + } + +-static void expunge_all(struct msg_queue *msq, int res) ++static void expunge_all(struct msg_queue *msq, int res, ++ struct wake_q_head *wake_q) + { + struct msg_receiver *msr, *t; + + list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { +- msr->r_msg = NULL; /* initialize expunge ordering */ +- wake_up_process(msr->r_tsk); +- /* +- * Ensure that the wakeup is visible before setting r_msg as +- * the receiving end depends on it: either spinning on a nil, +- * or dealing with -EAGAIN cases. See lockless receive part 1 +- * and 2 in do_msgrcv(). +- */ +- smp_wmb(); /* barrier (B) */ ++ ++ wake_q_add(wake_q, msr->r_tsk); + msr->r_msg = ERR_PTR(res); + } + } +@@ -213,11 +207,13 @@ + { + struct msg_msg *msg, *t; + struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm); ++ WAKE_Q(wake_q); + +- expunge_all(msq, -EIDRM); ++ expunge_all(msq, -EIDRM, &wake_q); + ss_wakeup(&msq->q_senders, 1); + msg_rmid(ns, msq); + ipc_unlock_object(&msq->q_perm); ++ wake_up_q(&wake_q); + rcu_read_unlock(); + + list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) { +@@ -342,6 +338,7 @@ + struct kern_ipc_perm *ipcp; + struct msqid64_ds uninitialized_var(msqid64); + struct msg_queue *msq; ++ WAKE_Q(wake_q); + int err; + + if (cmd == IPC_SET) { +@@ -389,7 +386,7 @@ + /* sleeping receivers might be excluded by + * stricter permissions. + */ +- expunge_all(msq, -EAGAIN); ++ expunge_all(msq, -EAGAIN, &wake_q); + /* sleeping senders might be able to send + * due to a larger queue size. 
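Every conversion in this file follows the same shape: record the wakeup into a wake_q under the IPC lock, then execute it after the unlock, so a woken receiver never immediately trips over the (on RT, sleeping) lock it was woken under. A hedged userspace analogue with C11/POSIX primitives and invented names:

#include <semaphore.h>

#define TOY_WAKE_Q_MAX 8

struct toy_wake_q {
	sem_t *pending[TOY_WAKE_Q_MAX];
	int    n;
};

static void toy_wake_q_add(struct toy_wake_q *q, sem_t *task)
{
	if (q->n < TOY_WAKE_Q_MAX)
		q->pending[q->n++] = task; /* record only, do not wake yet */
}

static void toy_wake_up_q(struct toy_wake_q *q)
{
	for (int i = 0; i < q->n; i++)
		sem_post(q->pending[i]);   /* wake with no locks held */
	q->n = 0;
}

/*
 * usage, mirroring the expunge_all()/pipelined_send() conversions above:
 *
 *	lock(queue);
 *	toy_wake_q_add(&wq, &receiver_sem);
 *	unlock(queue);
 *	toy_wake_up_q(&wq);
 */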
+ */ +@@ -402,6 +399,7 @@ + + out_unlock0: + ipc_unlock_object(&msq->q_perm); ++ wake_up_q(&wake_q); + out_unlock1: + rcu_read_unlock(); + out_up: +@@ -566,7 +564,8 @@ + return 0; + } + +-static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) ++static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg, ++ struct wake_q_head *wake_q) + { + struct msg_receiver *msr, *t; + +@@ -577,27 +576,13 @@ + + list_del(&msr->r_list); + if (msr->r_maxsize < msg->m_ts) { +- /* initialize pipelined send ordering */ +- msr->r_msg = NULL; +- wake_up_process(msr->r_tsk); +- /* barrier (B) see barrier comment below */ +- smp_wmb(); ++ wake_q_add(wake_q, msr->r_tsk); + msr->r_msg = ERR_PTR(-E2BIG); + } else { +- msr->r_msg = NULL; + msq->q_lrpid = task_pid_vnr(msr->r_tsk); + msq->q_rtime = get_seconds(); +- wake_up_process(msr->r_tsk); +- /* +- * Ensure that the wakeup is visible before +- * setting r_msg, as the receiving can otherwise +- * exit - once r_msg is set, the receiver can +- * continue. See lockless receive part 1 and 2 +- * in do_msgrcv(). Barrier (B). +- */ +- smp_wmb(); ++ wake_q_add(wake_q, msr->r_tsk); + msr->r_msg = msg; +- + return 1; + } + } +@@ -613,6 +598,7 @@ + struct msg_msg *msg; + int err; + struct ipc_namespace *ns; ++ WAKE_Q(wake_q); + + ns = current->nsproxy->ipc_ns; + +@@ -698,7 +684,7 @@ + msq->q_lspid = task_tgid_vnr(current); + msq->q_stime = get_seconds(); + +- if (!pipelined_send(msq, msg)) { ++ if (!pipelined_send(msq, msg, &wake_q)) { + /* no one is waiting for this message, enqueue it */ + list_add_tail(&msg->m_list, &msq->q_messages); + msq->q_cbytes += msgsz; +@@ -712,6 +698,7 @@ + + out_unlock0: + ipc_unlock_object(&msq->q_perm); ++ wake_up_q(&wake_q); + out_unlock1: + rcu_read_unlock(); + if (msg != NULL) +@@ -932,57 +919,25 @@ + rcu_read_lock(); + + /* Lockless receive, part 2: +- * Wait until pipelined_send or expunge_all are outside of +- * wake_up_process(). There is a race with exit(), see +- * ipc/mqueue.c for the details. The correct serialization +- * ensures that a receiver cannot continue without the wakeup +- * being visibible _before_ setting r_msg: ++ * The work in pipelined_send() and expunge_all(): ++ * - Set pointer to message ++ * - Queue the receiver task for later wakeup ++ * - Wake up the process after the lock is dropped. + * +- * CPU 0 CPU 1 +- * +- * smp_rmb(); (A) <-- pair -. +- * r_msg> | msr->r_msg = NULL; +- * | wake_up_process(); +- * `------> smp_wmb(); (B) +- * msr->r_msg = msg; +- * +- * Where (A) orders the message value read and where (B) orders +- * the write to the r_msg -- done in both pipelined_send and +- * expunge_all. ++ * Should the process wake up before this wakeup (due to a ++ * signal) it will either see the message and continue … + */ +- for (;;) { +- /* +- * Pairs with writer barrier in pipelined_send +- * or expunge_all. +- */ +- smp_rmb(); /* barrier (A) */ +- msg = (struct msg_msg *)msr_d.r_msg; +- if (msg) +- break; + +- /* +- * The cpu_relax() call is a compiler barrier +- * which forces everything in this loop to be +- * re-loaded. +- */ +- cpu_relax(); +- } +- +- /* Lockless receive, part 3: +- * If there is a message or an error then accept it without +- * locking. +- */ ++ msg = (struct msg_msg *)msr_d.r_msg; + if (msg != ERR_PTR(-EAGAIN)) + goto out_unlock1; + +- /* Lockless receive, part 3: +- * Acquire the queue spinlock. +- */ ++ /* ++ * … or see -EAGAIN, acquire the lock to check the message ++ * again. 
++ */ + ipc_lock_object(&msq->q_perm); + +- /* Lockless receive, part 4: +- * Repeat test after acquiring the spinlock. +- */ + msg = (struct msg_msg *)msr_d.r_msg; + if (msg != ERR_PTR(-EAGAIN)) + goto out_unlock0; +diff -Nur linux-4.4.13.orig/ipc/sem.c linux-4.4.13/ipc/sem.c +--- linux-4.4.13.orig/ipc/sem.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/ipc/sem.c 2016-06-22 21:55:08.042523867 +0200 +@@ -690,6 +690,13 @@ + static void wake_up_sem_queue_prepare(struct list_head *pt, + struct sem_queue *q, int error) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ struct task_struct *p = q->sleeper; ++ get_task_struct(p); ++ q->status = error; ++ wake_up_process(p); ++ put_task_struct(p); ++#else + if (list_empty(pt)) { + /* + * Hold preempt off so that we don't get preempted and have the +@@ -701,6 +708,7 @@ + q->pid = error; + + list_add_tail(&q->list, pt); ++#endif + } + + /** +@@ -714,6 +722,7 @@ + */ + static void wake_up_sem_queue_do(struct list_head *pt) + { ++#ifndef CONFIG_PREEMPT_RT_BASE + struct sem_queue *q, *t; + int did_something; + +@@ -726,6 +735,7 @@ + } + if (did_something) + preempt_enable(); ++#endif + } + + static void unlink_queue(struct sem_array *sma, struct sem_queue *q) +diff -Nur linux-4.4.13.orig/kernel/cgroup.c linux-4.4.13/kernel/cgroup.c +--- linux-4.4.13.orig/kernel/cgroup.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/cgroup.c 2016-06-22 21:55:08.042523867 +0200 +@@ -4737,10 +4737,10 @@ + queue_work(cgroup_destroy_wq, &css->destroy_work); + } + +-static void css_release_work_fn(struct work_struct *work) ++static void css_release_work_fn(struct swork_event *sev) + { + struct cgroup_subsys_state *css = +- container_of(work, struct cgroup_subsys_state, destroy_work); ++ container_of(sev, struct cgroup_subsys_state, destroy_swork); + struct cgroup_subsys *ss = css->ss; + struct cgroup *cgrp = css->cgroup; + +@@ -4779,8 +4779,8 @@ + struct cgroup_subsys_state *css = + container_of(ref, struct cgroup_subsys_state, refcnt); + +- INIT_WORK(&css->destroy_work, css_release_work_fn); +- queue_work(cgroup_destroy_wq, &css->destroy_work); ++ INIT_SWORK(&css->destroy_swork, css_release_work_fn); ++ swork_queue(&css->destroy_swork); + } + + static void init_and_link_css(struct cgroup_subsys_state *css, +@@ -5396,6 +5396,7 @@ + */ + cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1); + BUG_ON(!cgroup_destroy_wq); ++ BUG_ON(swork_get()); + + /* + * Used to destroy pidlists and separate to serve as flush domain. 
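[Editor's note: illustration only, not part of the patch. The ipc/msg.c hunks above replace the wake_up_process()-under-lock pattern, and the smp_wmb() ordering dance it required, with deferred wake queues: waiters are collected while the queue lock is held and woken only after it is dropped, so a woken task never immediately contends on the still-held lock (on RT it also avoids waking a task that would block on a sleeping spinlock). A minimal sketch of that pattern follows; "struct my_waiter" and "wake_all_locked()" are hypothetical names, while WAKE_Q()/wake_q_add()/wake_up_q() are the mainline <linux/sched.h> API the hunks rely on.]

  #include <linux/list.h>
  #include <linux/sched.h>
  #include <linux/spinlock.h>

  struct my_waiter {			/* hypothetical waiter record */
  	struct list_head list;
  	struct task_struct *task;
  };

  static void wake_all_locked(spinlock_t *lock, struct list_head *waiters)
  {
  	WAKE_Q(wake_q);
  	struct my_waiter *w, *t;

  	spin_lock(lock);
  	list_for_each_entry_safe(w, t, waiters, list) {
  		list_del(&w->list);
  		wake_q_add(&wake_q, w->task);	/* queue, do not wake yet */
  	}
  	spin_unlock(lock);
  	wake_up_q(&wake_q);	/* wakeups happen with the lock dropped */
  }

[End of editor's note; the patch continues below.]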
+diff -Nur linux-4.4.13.orig/kernel/cpu.c linux-4.4.13/kernel/cpu.c +--- linux-4.4.13.orig/kernel/cpu.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/cpu.c 2016-06-22 21:55:08.046523867 +0200 +@@ -75,8 +75,8 @@ + #endif + } cpu_hotplug = { + .active_writer = NULL, +- .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq), + .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock), ++ .wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq), + #ifdef CONFIG_DEBUG_LOCK_ALLOC + .dep_map = {.name = "cpu_hotplug.lock" }, + #endif +@@ -89,6 +89,289 @@ + #define cpuhp_lock_acquire() lock_map_acquire(&cpu_hotplug.dep_map) + #define cpuhp_lock_release() lock_map_release(&cpu_hotplug.dep_map) + ++/** ++ * hotplug_pcp - per cpu hotplug descriptor ++ * @unplug: set when pin_current_cpu() needs to sync tasks ++ * @sync_tsk: the task that waits for tasks to finish pinned sections ++ * @refcount: counter of tasks in pinned sections ++ * @grab_lock: set when the tasks entering pinned sections should wait ++ * @synced: notifier for @sync_tsk to tell cpu_down it's finished ++ * @mutex: the mutex to make tasks wait (used when @grab_lock is true) ++ * @mutex_init: zero if the mutex hasn't been initialized yet. ++ * ++ * Although @unplug and @sync_tsk may point to the same task, the @unplug ++ * is used as a flag and still exists after @sync_tsk has exited and ++ * @sync_tsk set to NULL. ++ */ ++struct hotplug_pcp { ++ struct task_struct *unplug; ++ struct task_struct *sync_tsk; ++ int refcount; ++ int grab_lock; ++ struct completion synced; ++ struct completion unplug_wait; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* ++ * Note, on PREEMPT_RT, the hotplug lock must save the state of ++ * the task, otherwise the mutex will cause the task to fail ++ * to sleep when required. (Because it's called from migrate_disable()) ++ * ++ * The spinlock_t on PREEMPT_RT is a mutex that saves the task's ++ * state. ++ */ ++ spinlock_t lock; ++#else ++ struct mutex mutex; ++#endif ++ int mutex_init; ++}; ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++# define hotplug_lock(hp) rt_spin_lock__no_mg(&(hp)->lock) ++# define hotplug_unlock(hp) rt_spin_unlock__no_mg(&(hp)->lock) ++#else ++# define hotplug_lock(hp) mutex_lock(&(hp)->mutex) ++# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex) ++#endif ++ ++static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp); ++ ++/** ++ * pin_current_cpu - Prevent the current cpu from being unplugged ++ * ++ * Lightweight version of get_online_cpus() to prevent cpu from being ++ * unplugged when code runs in a migration disabled region. ++ * ++ * Must be called with preemption disabled (preempt_count = 1)! ++ */ ++void pin_current_cpu(void) ++{ ++ struct hotplug_pcp *hp; ++ int force = 0; ++ ++retry: ++ hp = this_cpu_ptr(&hotplug_pcp); ++ ++ if (!hp->unplug || hp->refcount || force || preempt_count() > 1 || ++ hp->unplug == current) { ++ hp->refcount++; ++ return; ++ } ++ if (hp->grab_lock) { ++ preempt_enable(); ++ hotplug_lock(hp); ++ hotplug_unlock(hp); ++ } else { ++ preempt_enable(); ++ /* ++ * Try to push this task off of this CPU. ++ */ ++ if (!migrate_me()) { ++ preempt_disable(); ++ hp = this_cpu_ptr(&hotplug_pcp); ++ if (!hp->grab_lock) { ++ /* ++ * Just let it continue it's already pinned ++ * or about to sleep. ++ */ ++ force = 1; ++ goto retry; ++ } ++ preempt_enable(); ++ } ++ } ++ preempt_disable(); ++ goto retry; ++} ++ ++/** ++ * unpin_current_cpu - Allow unplug of current cpu ++ * ++ * Must be called with preemption or interrupts disabled! 
++ */ ++void unpin_current_cpu(void) ++{ ++ struct hotplug_pcp *hp = this_cpu_ptr(&hotplug_pcp); ++ ++ WARN_ON(hp->refcount <= 0); ++ ++ /* This is safe. sync_unplug_thread is pinned to this cpu */ ++ if (!--hp->refcount && hp->unplug && hp->unplug != current) ++ wake_up_process(hp->unplug); ++} ++ ++static void wait_for_pinned_cpus(struct hotplug_pcp *hp) ++{ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ while (hp->refcount) { ++ schedule_preempt_disabled(); ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ } ++} ++ ++static int sync_unplug_thread(void *data) ++{ ++ struct hotplug_pcp *hp = data; ++ ++ wait_for_completion(&hp->unplug_wait); ++ preempt_disable(); ++ hp->unplug = current; ++ wait_for_pinned_cpus(hp); ++ ++ /* ++ * This thread will synchronize the cpu_down() with threads ++ * that have pinned the CPU. When the pinned CPU count reaches ++ * zero, we inform the cpu_down code to continue to the next step. ++ */ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ preempt_enable(); ++ complete(&hp->synced); ++ ++ /* ++ * If all succeeds, the next step will need tasks to wait till ++ * the CPU is offline before continuing. To do this, the grab_lock ++ * is set and tasks going into pin_current_cpu() will block on the ++ * mutex. But we still need to wait for those that are already in ++ * pinned CPU sections. If the cpu_down() failed, the kthread_should_stop() ++ * will kick this thread out. ++ */ ++ while (!hp->grab_lock && !kthread_should_stop()) { ++ schedule(); ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ } ++ ++ /* Make sure grab_lock is seen before we see a stale completion */ ++ smp_mb(); ++ ++ /* ++ * Now just before cpu_down() enters stop machine, we need to make ++ * sure all tasks that are in pinned CPU sections are out, and new ++ * tasks will now grab the lock, keeping them from entering pinned ++ * CPU sections. ++ */ ++ if (!kthread_should_stop()) { ++ preempt_disable(); ++ wait_for_pinned_cpus(hp); ++ preempt_enable(); ++ complete(&hp->synced); ++ } ++ ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ while (!kthread_should_stop()) { ++ schedule(); ++ set_current_state(TASK_UNINTERRUPTIBLE); ++ } ++ set_current_state(TASK_RUNNING); ++ ++ /* ++ * Force this thread off this CPU as it's going down and ++ * we don't want any more work on this CPU. ++ */ ++ current->flags &= ~PF_NO_SETAFFINITY; ++ set_cpus_allowed_ptr(current, cpu_present_mask); ++ migrate_me(); ++ return 0; ++} ++ ++static void __cpu_unplug_sync(struct hotplug_pcp *hp) ++{ ++ wake_up_process(hp->sync_tsk); ++ wait_for_completion(&hp->synced); ++} ++ ++static void __cpu_unplug_wait(unsigned int cpu) ++{ ++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu); ++ ++ complete(&hp->unplug_wait); ++ wait_for_completion(&hp->synced); ++} ++ ++/* ++ * Start the sync_unplug_thread on the target cpu and wait for it to ++ * complete. 
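[Editor's note: illustration only, not part of the patch. cpu_unplug_begin() and __cpu_unplug_sync() above are built on a completion handshake between the caller and a helper kthread bound to the target CPU: the caller creates and binds the helper, kicks it, and blocks on a completion that the helper fires once it has drained the pinned sections. A compressed sketch of that handshake, using standard completion/kthread APIs; all names are hypothetical.]

  #include <linux/completion.h>
  #include <linux/err.h>
  #include <linux/kthread.h>

  static DECLARE_COMPLETION(helper_synced);	/* hypothetical */

  static int helper_fn(void *data)
  {
  	/* ... drain whatever the caller must wait for ... */
  	complete(&helper_synced);	/* report back to the caller */
  	return 0;
  }

  static int start_and_sync(unsigned int cpu)
  {
  	struct task_struct *tsk;

  	tsk = kthread_create(helper_fn, NULL, "helper/%u", cpu);
  	if (IS_ERR(tsk))
  		return PTR_ERR(tsk);
  	kthread_bind(tsk, cpu);		/* pin the helper to the target CPU */
  	wake_up_process(tsk);
  	wait_for_completion(&helper_synced);	/* block until it checks in */
  	return 0;
  }

[End of editor's note; the patch continues below.]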
++ */
++static int cpu_unplug_begin(unsigned int cpu)
++{
++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
++ int err;
++
++ /* Protected by cpu_hotplug.lock */
++ if (!hp->mutex_init) {
++#ifdef CONFIG_PREEMPT_RT_FULL
++ spin_lock_init(&hp->lock);
++#else
++ mutex_init(&hp->mutex);
++#endif
++ hp->mutex_init = 1;
++ }
++
++ /* Inform the scheduler to migrate tasks off this CPU */
++ tell_sched_cpu_down_begin(cpu);
++
++ init_completion(&hp->synced);
++ init_completion(&hp->unplug_wait);
++
++ hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
++ if (IS_ERR(hp->sync_tsk)) {
++ err = PTR_ERR(hp->sync_tsk);
++ hp->sync_tsk = NULL;
++ return err;
++ }
++ kthread_bind(hp->sync_tsk, cpu);
++
++ /*
++ * Wait for tasks to get out of the pinned sections,
++ * it's still OK if new tasks enter. Some CPU notifiers will
++ * wait for tasks that are going to enter these sections and
++ * we must not have them block.
++ */
++ wake_up_process(hp->sync_tsk);
++ return 0;
++}
++
++static void cpu_unplug_sync(unsigned int cpu)
++{
++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
++
++ init_completion(&hp->synced);
++ /* The completion needs to be initialized before setting grab_lock */
++ smp_wmb();
++
++ /* Grab the mutex before setting grab_lock */
++ hotplug_lock(hp);
++ hp->grab_lock = 1;
++
++ /*
++ * The CPU notifiers have been completed.
++ * Wait for tasks to get out of pinned CPU sections and have new
++ * tasks block until the CPU is completely down.
++ */
++ __cpu_unplug_sync(hp);
++
++ /* All done with the sync thread */
++ kthread_stop(hp->sync_tsk);
++ hp->sync_tsk = NULL;
++}
++
++static void cpu_unplug_done(unsigned int cpu)
++{
++ struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
++
++ hp->unplug = NULL;
++ /* Let all tasks know cpu unplug is finished before cleaning up */
++ smp_wmb();
++
++ if (hp->sync_tsk)
++ kthread_stop(hp->sync_tsk);
++
++ if (hp->grab_lock) {
++ hotplug_unlock(hp);
++ /* protected by cpu_hotplug.lock */
++ hp->grab_lock = 0;
++ }
++ tell_sched_cpu_down_done(cpu);
++}
+
+ void get_online_cpus(void)
+ {
+@@ -338,13 +621,15 @@
+ /* Requires cpu_add_remove_lock to be held */
+ static int _cpu_down(unsigned int cpu, int tasks_frozen)
+ {
+- int err, nr_calls = 0;
++ int mycpu, err, nr_calls = 0;
+ void *hcpu = (void *)(long)cpu;
+ unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
+ struct take_cpu_down_param tcd_param = {
+ .mod = mod,
+ .hcpu = hcpu,
+ };
++ cpumask_var_t cpumask;
++ cpumask_var_t cpumask_org;
+
+ if (num_online_cpus() == 1)
+ return -EBUSY;
+@@ -352,7 +637,34 @@
+ if (!cpu_online(cpu))
+ return -EINVAL;
+
++ /* Move the downtaker off the unplug cpu */
++ if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
++ return -ENOMEM;
++ if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL)) {
++ free_cpumask_var(cpumask);
++ return -ENOMEM;
++ }
++
++ cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
++ cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
++ set_cpus_allowed_ptr(current, cpumask);
++ free_cpumask_var(cpumask);
++ migrate_disable();
++ mycpu = smp_processor_id();
++ if (mycpu == cpu) {
++ printk(KERN_ERR "Yuck! 
Still on unplug CPU\n!"); ++ migrate_enable(); ++ err = -EBUSY; ++ goto restore_cpus; ++ } ++ migrate_enable(); ++ + cpu_hotplug_begin(); ++ err = cpu_unplug_begin(cpu); ++ if (err) { ++ printk("cpu_unplug_begin(%d) failed\n", cpu); ++ goto out_cancel; ++ } + + err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls); + if (err) { +@@ -378,8 +690,12 @@ + else + synchronize_rcu(); + ++ __cpu_unplug_wait(cpu); + smpboot_park_threads(cpu); + ++ /* Notifiers are done. Don't let any more tasks pin this CPU. */ ++ cpu_unplug_sync(cpu); ++ + /* + * Prevent irq alloc/free while the dying cpu reorganizes the + * interrupt affinities. +@@ -424,9 +740,14 @@ + check_for_tasks(cpu); + + out_release: ++ cpu_unplug_done(cpu); ++out_cancel: + cpu_hotplug_done(); + if (!err) + cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu); ++restore_cpus: ++ set_cpus_allowed_ptr(current, cpumask_org); ++ free_cpumask_var(cpumask_org); + return err; + } + +diff -Nur linux-4.4.13.orig/kernel/debug/kdb/kdb_io.c linux-4.4.13/kernel/debug/kdb/kdb_io.c +--- linux-4.4.13.orig/kernel/debug/kdb/kdb_io.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/debug/kdb/kdb_io.c 2016-06-22 21:55:08.046523867 +0200 +@@ -554,7 +554,6 @@ + int linecount; + int colcount; + int logging, saved_loglevel = 0; +- int saved_trap_printk; + int got_printf_lock = 0; + int retlen = 0; + int fnd, len; +@@ -565,8 +564,6 @@ + unsigned long uninitialized_var(flags); + + preempt_disable(); +- saved_trap_printk = kdb_trap_printk; +- kdb_trap_printk = 0; + + /* Serialize kdb_printf if multiple cpus try to write at once. + * But if any cpu goes recursive in kdb, just print the output, +@@ -855,7 +852,6 @@ + } else { + __release(kdb_printf_lock); + } +- kdb_trap_printk = saved_trap_printk; + preempt_enable(); + return retlen; + } +@@ -865,9 +861,11 @@ + va_list ap; + int r; + ++ kdb_trap_printk++; + va_start(ap, fmt); + r = vkdb_printf(KDB_MSGSRC_INTERNAL, fmt, ap); + va_end(ap); ++ kdb_trap_printk--; + + return r; + } +diff -Nur linux-4.4.13.orig/kernel/events/core.c linux-4.4.13/kernel/events/core.c +--- linux-4.4.13.orig/kernel/events/core.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/events/core.c 2016-06-22 21:55:08.046523867 +0200 +@@ -802,6 +802,7 @@ + raw_spin_lock_init(&cpuctx->hrtimer_lock); + hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED); + timer->function = perf_mux_hrtimer_handler; ++ timer->irqsafe = 1; + } + + static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx) +@@ -7219,6 +7220,7 @@ + + hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hwc->hrtimer.function = perf_swevent_hrtimer; ++ hwc->hrtimer.irqsafe = 1; + + /* + * Since hrtimers have a fixed rate, we can do a static freq->period +diff -Nur linux-4.4.13.orig/kernel/exit.c linux-4.4.13/kernel/exit.c +--- linux-4.4.13.orig/kernel/exit.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/exit.c 2016-06-22 21:55:08.046523867 +0200 +@@ -144,7 +144,7 @@ + * Do this under ->siglock, we can race with another thread + * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals. 
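[Editor's note: illustration only, not part of the patch. The _cpu_down() hunk above first migrates the calling task off the dying CPU by rewriting its affinity mask, then restores the saved mask on the way out. A sketch of that save, modify, restore idiom; the function name is hypothetical and tsk_cpus_allowed() is the 4.4-era accessor the hunk itself uses.]

  #include <linux/cpumask.h>
  #include <linux/gfp.h>
  #include <linux/sched.h>

  static int run_away_from(unsigned int cpu)
  {
  	cpumask_var_t mask, org;

  	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
  		return -ENOMEM;
  	if (!alloc_cpumask_var(&org, GFP_KERNEL)) {
  		free_cpumask_var(mask);
  		return -ENOMEM;
  	}
  	cpumask_copy(org, tsk_cpus_allowed(current));	/* remember */
  	cpumask_andnot(mask, cpu_online_mask, cpumask_of(cpu));
  	set_cpus_allowed_ptr(current, mask);	/* move off @cpu */

  	/* ... work that must not run on @cpu ... */

  	set_cpus_allowed_ptr(current, org);	/* put things back */
  	free_cpumask_var(org);
  	free_cpumask_var(mask);
  	return 0;
  }

[End of editor's note; the patch continues below.]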
+ */ +- flush_sigqueue(&tsk->pending); ++ flush_task_sigqueue(tsk); + tsk->sighand = NULL; + spin_unlock(&sighand->siglock); + +diff -Nur linux-4.4.13.orig/kernel/fork.c linux-4.4.13/kernel/fork.c +--- linux-4.4.13.orig/kernel/fork.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/fork.c 2016-06-22 21:55:08.046523867 +0200 +@@ -108,7 +108,7 @@ + + DEFINE_PER_CPU(unsigned long, process_counts) = 0; + +-__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ ++DEFINE_RWLOCK(tasklist_lock); /* outer */ + + #ifdef CONFIG_PROVE_RCU + int lockdep_tasklist_lock_is_held(void) +@@ -244,7 +244,9 @@ + if (atomic_dec_and_test(&sig->sigcnt)) + free_signal_struct(sig); + } +- ++#ifdef CONFIG_PREEMPT_RT_BASE ++static ++#endif + void __put_task_struct(struct task_struct *tsk) + { + WARN_ON(!tsk->exit_state); +@@ -261,7 +263,18 @@ + if (!profile_handoff_task(tsk)) + free_task(tsk); + } ++#ifndef CONFIG_PREEMPT_RT_BASE + EXPORT_SYMBOL_GPL(__put_task_struct); ++#else ++void __put_task_struct_cb(struct rcu_head *rhp) ++{ ++ struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu); ++ ++ __put_task_struct(tsk); ++ ++} ++EXPORT_SYMBOL_GPL(__put_task_struct_cb); ++#endif + + void __init __weak arch_task_cache_init(void) { } + +@@ -689,6 +702,19 @@ + } + EXPORT_SYMBOL_GPL(__mmdrop); + ++#ifdef CONFIG_PREEMPT_RT_BASE ++/* ++ * RCU callback for delayed mm drop. Not strictly rcu, but we don't ++ * want another facility to make this work. ++ */ ++void __mmdrop_delayed(struct rcu_head *rhp) ++{ ++ struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop); ++ ++ __mmdrop(mm); ++} ++#endif ++ + /* + * Decrement the use count and release all resources for an mm. + */ +@@ -1218,6 +1244,9 @@ + */ + static void posix_cpu_timers_init(struct task_struct *tsk) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ tsk->posix_timer_list = NULL; ++#endif + tsk->cputime_expires.prof_exp = 0; + tsk->cputime_expires.virt_exp = 0; + tsk->cputime_expires.sched_exp = 0; +@@ -1343,15 +1372,16 @@ + spin_lock_init(&p->alloc_lock); + + init_sigpending(&p->pending); ++ p->sigqueue_cache = NULL; + + p->utime = p->stime = p->gtime = 0; + p->utimescaled = p->stimescaled = 0; + prev_cputime_init(&p->prev_cputime); + + #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN +- seqlock_init(&p->vtime_seqlock); ++ seqcount_init(&p->vtime_seqcount); + p->vtime_snap = 0; +- p->vtime_snap_whence = VTIME_SLEEPING; ++ p->vtime_snap_whence = VTIME_INACTIVE; + #endif + + #if defined(SPLIT_RSS_COUNTING) +diff -Nur linux-4.4.13.orig/kernel/futex.c linux-4.4.13/kernel/futex.c +--- linux-4.4.13.orig/kernel/futex.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/futex.c 2016-06-22 21:55:08.046523867 +0200 +@@ -815,7 +815,9 @@ + * task still owns the PI-state: + */ + if (head->next != next) { ++ raw_spin_unlock_irq(&curr->pi_lock); + spin_unlock(&hb->lock); ++ raw_spin_lock_irq(&curr->pi_lock); + continue; + } + +@@ -1210,6 +1212,7 @@ + struct futex_pi_state *pi_state = this->pi_state; + u32 uninitialized_var(curval), newval; + WAKE_Q(wake_q); ++ WAKE_Q(wake_sleeper_q); + bool deboost; + int ret = 0; + +@@ -1223,7 +1226,7 @@ + if (pi_state->owner != current) + return -EINVAL; + +- raw_spin_lock(&pi_state->pi_mutex.wait_lock); ++ raw_spin_lock_irq(&pi_state->pi_mutex.wait_lock); + new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); + + /* +@@ -1259,24 +1262,25 @@ + ret = -EINVAL; + } + if (ret) { +- raw_spin_unlock(&pi_state->pi_mutex.wait_lock); ++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock); + return ret; + } + +- 
raw_spin_lock_irq(&pi_state->owner->pi_lock);
++ raw_spin_lock(&pi_state->owner->pi_lock);
+ WARN_ON(list_empty(&pi_state->list));
+ list_del_init(&pi_state->list);
+- raw_spin_unlock_irq(&pi_state->owner->pi_lock);
++ raw_spin_unlock(&pi_state->owner->pi_lock);
+
+- raw_spin_lock_irq(&new_owner->pi_lock);
++ raw_spin_lock(&new_owner->pi_lock);
+ WARN_ON(!list_empty(&pi_state->list));
+ list_add(&pi_state->list, &new_owner->pi_state_list);
+ pi_state->owner = new_owner;
+- raw_spin_unlock_irq(&new_owner->pi_lock);
++ raw_spin_unlock(&new_owner->pi_lock);
+
+- raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
++ raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
+
+- deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
++ deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q,
++ &wake_sleeper_q);
+
+ /*
+ * First unlock HB so the waiter does not spin on it once he got woken
+@@ -1286,6 +1290,7 @@
+ */
+ spin_unlock(&hb->lock);
+ wake_up_q(&wake_q);
++ wake_up_q_sleeper(&wake_sleeper_q);
+ if (deboost)
+ rt_mutex_adjust_prio(current);
+
+@@ -1822,6 +1827,16 @@
+ requeue_pi_wake_futex(this, &key2, hb2);
+ drop_count++;
+ continue;
++ } else if (ret == -EAGAIN) {
++ /*
++ * Waiter was woken by timeout or
++ * signal and has set pi_blocked_on to
++ * PI_WAKEUP_INPROGRESS before we
++ * tried to enqueue it on the rtmutex.
++ */
++ this->pi_state = NULL;
++ free_pi_state(pi_state);
++ continue;
+ } else if (ret) {
+ /* -EDEADLK */
+ this->pi_state = NULL;
+@@ -2139,11 +2154,11 @@
+ * we returned due to timeout or signal without taking the
+ * rt_mutex. Too late.
+ */
+- raw_spin_lock(&q->pi_state->pi_mutex.wait_lock);
++ raw_spin_lock_irq(&q->pi_state->pi_mutex.wait_lock);
+ owner = rt_mutex_owner(&q->pi_state->pi_mutex);
+ if (!owner)
+ owner = rt_mutex_next_owner(&q->pi_state->pi_mutex);
+- raw_spin_unlock(&q->pi_state->pi_mutex.wait_lock);
++ raw_spin_unlock_irq(&q->pi_state->pi_mutex.wait_lock);
+ ret = fixup_pi_state_owner(uaddr, q, owner);
+ goto out;
+ }
+@@ -2691,7 +2706,7 @@
+ struct hrtimer_sleeper timeout, *to = NULL;
+ struct rt_mutex_waiter rt_waiter;
+ struct rt_mutex *pi_mutex = NULL;
+- struct futex_hash_bucket *hb;
++ struct futex_hash_bucket *hb, *hb2;
+ union futex_key key2 = FUTEX_KEY_INIT;
+ struct futex_q q = futex_q_init;
+ int res, ret;
+@@ -2716,10 +2731,7 @@
+ * The waiter is allocated on our stack, manipulated by the requeue
+ * code while we sleep on uaddr.
+ */
+- debug_rt_mutex_init_waiter(&rt_waiter);
+- RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
+- RB_CLEAR_NODE(&rt_waiter.tree_entry);
+- rt_waiter.task = NULL;
++ rt_mutex_init_waiter(&rt_waiter, false);
+
+ ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
+ if (unlikely(ret != 0))
+@@ -2750,20 +2762,55 @@
+ /* Queue the futex_q, drop the hb lock, wait for wakeup. */
+ futex_wait_queue_me(hb, &q, to);
+
+- spin_lock(&hb->lock);
+- ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
+- spin_unlock(&hb->lock);
+- if (ret)
+- goto out_put_keys;
++ /*
++ * On RT we must avoid races with requeue and trying to block
++ * on two mutexes (hb->lock and uaddr2's rtmutex) by
++ * serializing access to pi_blocked_on with pi_lock.
++ */
++ raw_spin_lock_irq(&current->pi_lock);
++ if (current->pi_blocked_on) {
++ /*
++ * We have been requeued or are in the process of
++ * being requeued.
++ */
++ raw_spin_unlock_irq(&current->pi_lock);
++ } else {
++ /*
++ * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
++ * prevents a concurrent requeue from moving us to the
++ * uaddr2 rtmutex. After that we can safely acquire
++ * (and possibly block on) hb->lock.
++ */
++ current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
++ raw_spin_unlock_irq(&current->pi_lock);
++
++ spin_lock(&hb->lock);
++
++ /*
++ * Clean up pi_blocked_on. We might leak it otherwise
++ * when we succeeded with the hb->lock in the fast
++ * path.
++ */
++ raw_spin_lock_irq(&current->pi_lock);
++ current->pi_blocked_on = NULL;
++ raw_spin_unlock_irq(&current->pi_lock);
++
++ ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
++ spin_unlock(&hb->lock);
++ if (ret)
++ goto out_put_keys;
++ }
+
+ /*
+- * In order for us to be here, we know our q.key == key2, and since
+- * we took the hb->lock above, we also know that futex_requeue() has
+- * completed and we no longer have to concern ourselves with a wakeup
+- * race with the atomic proxy lock acquisition by the requeue code. The
+- * futex_requeue dropped our key1 reference and incremented our key2
+- * reference count.
++ * In order to be here, we have either been requeued, are in
++ * the process of being requeued, or requeue successfully
++ * acquired uaddr2 on our behalf. If pi_blocked_on was
++ * non-null above, we may be racing with a requeue. Do not
++ * rely on q->lock_ptr to be hb2->lock until after blocking on
++ * hb->lock or hb2->lock. The futex_requeue dropped our key1
++ * reference and incremented our key2 reference count.
+ */
++ hb2 = hash_futex(&key2);
+
+ /* Check if the requeue code acquired the second futex for us. */
+ if (!q.rt_waiter) {
+@@ -2772,14 +2819,15 @@
+ * did a lock-steal - fix up the PI-state in that case.
+ */
+ if (q.pi_state && (q.pi_state->owner != current)) {
+- spin_lock(q.lock_ptr);
++ spin_lock(&hb2->lock);
++ BUG_ON(&hb2->lock != q.lock_ptr);
+ ret = fixup_pi_state_owner(uaddr2, &q, current);
+ /*
+ * Drop the reference to the pi state which
+ * the requeue_pi() code acquired for us.
+ */
+ free_pi_state(q.pi_state);
+- spin_unlock(q.lock_ptr);
++ spin_unlock(&hb2->lock);
+ }
+ } else {
+ /*
+@@ -2792,7 +2840,8 @@
+ ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
+ debug_rt_mutex_free_waiter(&rt_waiter);
+
+- spin_lock(q.lock_ptr);
++ spin_lock(&hb2->lock);
++ BUG_ON(&hb2->lock != q.lock_ptr);
+ /*
+ * Fixup the pi_state owner and possibly acquire the lock if we
+ * haven't already.
+diff -Nur linux-4.4.13.orig/kernel/irq/handle.c linux-4.4.13/kernel/irq/handle.c
+--- linux-4.4.13.orig/kernel/irq/handle.c 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/kernel/irq/handle.c 2016-06-22 21:55:08.046523867 +0200
+@@ -134,6 +134,8 @@
+
+ irqreturn_t handle_irq_event_percpu(struct irq_desc *desc)
+ {
++ struct pt_regs *regs = get_irq_regs();
++ u64 ip = regs ? instruction_pointer(regs) : 0;
+ irqreturn_t retval = IRQ_NONE;
+ unsigned int flags = 0, irq = desc->irq_data.irq;
+ struct irqaction *action = desc->action;
+@@ -176,7 +178,11 @@
+ action = action->next;
+ }
+
+- add_interrupt_randomness(irq, flags);
++#ifdef CONFIG_PREEMPT_RT_FULL
++ desc->random_ip = ip;
++#else
++ add_interrupt_randomness(irq, flags, ip);
++#endif
+
+ if (!noirqdebug)
+ note_interrupt(desc, retval);
+diff -Nur linux-4.4.13.orig/kernel/irq/irqdesc.c linux-4.4.13/kernel/irq/irqdesc.c
+--- linux-4.4.13.orig/kernel/irq/irqdesc.c 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/kernel/irq/irqdesc.c 2016-06-22 21:55:08.046523867 +0200
+@@ -24,10 +24,27 @@
+ static struct lock_class_key irq_desc_lock_class;
+
+ #if defined(CONFIG_SMP)
++static int __init irq_affinity_setup(char *str)
++{
++ zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
++ cpulist_parse(str, irq_default_affinity);
++ /*
++ * Set at least the boot cpu. We don't want to end up with
++ * bug reports caused by random commandline masks
++ */
++ cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
++ return 1;
++}
++__setup("irqaffinity=", irq_affinity_setup);
++
+ static void __init init_irq_default_affinity(void)
+ {
+- alloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
+- cpumask_setall(irq_default_affinity);
++#ifdef CONFIG_CPUMASK_OFFSTACK
++ if (!irq_default_affinity)
++ zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
++#endif
++ if (cpumask_empty(irq_default_affinity))
++ cpumask_setall(irq_default_affinity);
+ }
+ #else
+ static void __init init_irq_default_affinity(void)
+diff -Nur linux-4.4.13.orig/kernel/irq/manage.c linux-4.4.13/kernel/irq/manage.c
+--- linux-4.4.13.orig/kernel/irq/manage.c 2016-06-08 03:14:51.000000000 +0200
++++ linux-4.4.13/kernel/irq/manage.c 2016-06-22 21:55:08.046523867 +0200
+@@ -22,6 +22,7 @@
+ #include "internals.h"
+
+ #ifdef CONFIG_IRQ_FORCED_THREADING
++# ifndef CONFIG_PREEMPT_RT_BASE
+ __read_mostly bool force_irqthreads;
+
+ static int __init setup_forced_irqthreads(char *arg)
+@@ -30,6 +31,7 @@
+ return 0;
+ }
+ early_param("threadirqs", setup_forced_irqthreads);
++# endif
+ #endif
+
+ static void __synchronize_hardirq(struct irq_desc *desc)
+@@ -181,6 +183,62 @@
+ irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
+ #endif
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++static void _irq_affinity_notify(struct irq_affinity_notify *notify);
++static struct task_struct *set_affinity_helper;
++static LIST_HEAD(affinity_list);
++static DEFINE_RAW_SPINLOCK(affinity_list_lock);
++
++static int set_affinity_thread(void *unused)
++{
++ while (1) {
++ struct irq_affinity_notify *notify;
++ int empty;
++
++ set_current_state(TASK_INTERRUPTIBLE);
++
++ raw_spin_lock_irq(&affinity_list_lock);
++ empty = list_empty(&affinity_list);
++ raw_spin_unlock_irq(&affinity_list_lock);
++
++ if (empty)
++ schedule();
++ if (kthread_should_stop())
++ break;
++ set_current_state(TASK_RUNNING);
++try_next:
++ notify = NULL;
++
++ raw_spin_lock_irq(&affinity_list_lock);
++ if (!list_empty(&affinity_list)) {
++ notify = list_first_entry(&affinity_list,
++ struct irq_affinity_notify, list);
++ list_del_init(&notify->list);
++ }
++ raw_spin_unlock_irq(&affinity_list_lock);
++
++ if (!notify)
++ continue;
++ _irq_affinity_notify(notify);
++ goto try_next;
++ }
++ return 0;
++}
++
++static void init_helper_thread(void)
++{
++ if (set_affinity_helper)
++ return;
++ set_affinity_helper = kthread_run(set_affinity_thread, NULL,
++ "affinity-cb");
++ WARN_ON(IS_ERR(set_affinity_helper));
++}
++#else
++
++static inline void init_helper_thread(void) { }
++
++#endif
++
+ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
+ bool force)
+ {
+@@ -220,7 +278,17 @@
+
+ if (desc->affinity_notify) {
+ kref_get(&desc->affinity_notify->kref);
++
++#ifdef CONFIG_PREEMPT_RT_FULL
++ raw_spin_lock(&affinity_list_lock);
++ if (list_empty(&desc->affinity_notify->list))
++ list_add_tail(&affinity_list,
++ &desc->affinity_notify->list);
++ raw_spin_unlock(&affinity_list_lock);
++ wake_up_process(set_affinity_helper);
++#else
+ schedule_work(&desc->affinity_notify->work);
++#endif
+ }
+ irqd_set(data, IRQD_AFFINITY_SET);
+
+@@ -258,10 +326,8 @@
+ }
+ EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
+
+-static void irq_affinity_notify(struct work_struct *work)
++static void _irq_affinity_notify(struct irq_affinity_notify *notify)
+ {
+- struct irq_affinity_notify *notify =
+- container_of(work, struct irq_affinity_notify, work);
+ struct irq_desc *desc = irq_to_desc(notify->irq);
+ cpumask_var_t cpumask;
+ unsigned long flags;
+@@ -283,6 +349,13 @@
+ kref_put(&notify->kref, notify->release);
+ }
+
++static void irq_affinity_notify(struct work_struct *work)
++{
++ struct irq_affinity_notify *notify =
++ container_of(work, struct irq_affinity_notify, work);
++ _irq_affinity_notify(notify);
++}
++
+ /**
+ * irq_set_affinity_notifier - control notification of IRQ affinity changes
+ * @irq: Interrupt for which to enable/disable notification
+@@ -312,6 +385,8 @@
+ notify->irq = irq;
+ kref_init(&notify->kref);
+ INIT_WORK(&notify->work, irq_affinity_notify);
++ INIT_LIST_HEAD(&notify->list);
++ init_helper_thread();
+ }
+
+ raw_spin_lock_irqsave(&desc->lock, flags);
+@@ -865,7 +940,15 @@
+ local_bh_disable();
+ ret = action->thread_fn(action->irq, action->dev_id);
+ irq_finalize_oneshot(desc, action);
+- local_bh_enable();
++ /*
++ * Interrupts which have real time requirements can be set up
++ * to avoid softirq processing in the thread handler. This is
++ * safe as these interrupts do not raise soft interrupts.
++ */
++ if (irq_settings_no_softirq_call(desc))
++ _local_bh_enable();
++ else
++ local_bh_enable();
+ return ret;
+ }
+
+@@ -962,6 +1045,12 @@
+ if (action_ret == IRQ_WAKE_THREAD)
+ irq_wake_secondary(desc, action);
+
++#ifdef CONFIG_PREEMPT_RT_FULL
++ migrate_disable();
++ add_interrupt_randomness(action->irq, 0,
++ desc->random_ip ^ (unsigned long) action);
++ migrate_enable();
++#endif
+ wake_threads_waitq(desc);
+ }
+
+@@ -1315,6 +1404,9 @@
+ irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+ }
+
++ if (new->flags & IRQF_NO_SOFTIRQ_CALL)
++ irq_settings_set_no_softirq_call(desc);
++
+ /* Set default affinity mask once everything is setup */
+ setup_affinity(desc, mask);
+
+@@ -1968,7 +2060,7 @@
+ * This call sets the internal irqchip state of an interrupt,
+ * depending on the value of @which.
+ *
+- * This function should be called with preemption disabled if the
++ * This function should be called with migration disabled if the
+ * interrupt controller has per-cpu registers.
+ */ + int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, +diff -Nur linux-4.4.13.orig/kernel/irq/settings.h linux-4.4.13/kernel/irq/settings.h +--- linux-4.4.13.orig/kernel/irq/settings.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/irq/settings.h 2016-06-22 21:55:08.046523867 +0200 +@@ -16,6 +16,7 @@ + _IRQ_PER_CPU_DEVID = IRQ_PER_CPU_DEVID, + _IRQ_IS_POLLED = IRQ_IS_POLLED, + _IRQ_DISABLE_UNLAZY = IRQ_DISABLE_UNLAZY, ++ _IRQ_NO_SOFTIRQ_CALL = IRQ_NO_SOFTIRQ_CALL, + _IRQF_MODIFY_MASK = IRQF_MODIFY_MASK, + }; + +@@ -30,6 +31,7 @@ + #define IRQ_PER_CPU_DEVID GOT_YOU_MORON + #define IRQ_IS_POLLED GOT_YOU_MORON + #define IRQ_DISABLE_UNLAZY GOT_YOU_MORON ++#define IRQ_NO_SOFTIRQ_CALL GOT_YOU_MORON + #undef IRQF_MODIFY_MASK + #define IRQF_MODIFY_MASK GOT_YOU_MORON + +@@ -40,6 +42,16 @@ + desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK); + } + ++static inline bool irq_settings_no_softirq_call(struct irq_desc *desc) ++{ ++ return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL; ++} ++ ++static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc) ++{ ++ desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL; ++} ++ + static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) + { + return desc->status_use_accessors & _IRQ_PER_CPU; +diff -Nur linux-4.4.13.orig/kernel/irq/spurious.c linux-4.4.13/kernel/irq/spurious.c +--- linux-4.4.13.orig/kernel/irq/spurious.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/irq/spurious.c 2016-06-22 21:55:08.050523867 +0200 +@@ -444,6 +444,10 @@ + + static int __init irqfixup_setup(char *str) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n"); ++ return 1; ++#endif + irqfixup = 1; + printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n"); + printk(KERN_WARNING "This may impact system performance.\n"); +@@ -456,6 +460,10 @@ + + static int __init irqpoll_setup(char *str) + { ++#ifdef CONFIG_PREEMPT_RT_BASE ++ pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n"); ++ return 1; ++#endif + irqfixup = 2; + printk(KERN_WARNING "Misrouted IRQ fixup and polling support " + "enabled\n"); +diff -Nur linux-4.4.13.orig/kernel/irq_work.c linux-4.4.13/kernel/irq_work.c +--- linux-4.4.13.orig/kernel/irq_work.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/irq_work.c 2016-06-22 21:55:08.050523867 +0200 +@@ -17,6 +17,7 @@ + #include + #include + #include ++#include + #include + + +@@ -65,6 +66,8 @@ + */ + bool irq_work_queue_on(struct irq_work *work, int cpu) + { ++ struct llist_head *list; ++ + /* All work should have been flushed before going offline */ + WARN_ON_ONCE(cpu_is_offline(cpu)); + +@@ -75,7 +78,12 @@ + if (!irq_work_claim(work)) + return false; + +- if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) ++ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ)) ++ list = &per_cpu(lazy_list, cpu); ++ else ++ list = &per_cpu(raised_list, cpu); ++ ++ if (llist_add(&work->llnode, list)) + arch_send_call_function_single_ipi(cpu); + + return true; +@@ -86,6 +94,9 @@ + /* Enqueue the irq work @work on the current CPU */ + bool irq_work_queue(struct irq_work *work) + { ++ struct llist_head *list; ++ bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL); ++ + /* Only queue if not already pending */ + if (!irq_work_claim(work)) + return false; +@@ -93,13 +104,15 @@ + /* Queue the entry and raise the IPI if needed. 
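[Editor's note: illustration only, not part of the patch. With the kernel/irq/manage.c and kernel/irq/settings.h hunks above applied, a threaded handler can opt out of softirq processing when it finishes by passing IRQF_NO_SOFTIRQ_CALL. A hypothetical driver snippet; the flag exists only with this patch applied, and the handler must not raise softirqs itself.]

  #include <linux/interrupt.h>

  static irqreturn_t demo_thread_fn(int irq, void *dev_id)
  {
  	/* RT-critical work; must not raise soft interrupts */
  	return IRQ_HANDLED;
  }

  static int demo_request(unsigned int irq, void *dev)
  {
  	/* IRQF_NO_SOFTIRQ_CALL is defined by this patch, not mainline */
  	return request_threaded_irq(irq, NULL, demo_thread_fn,
  				    IRQF_ONESHOT | IRQF_NO_SOFTIRQ_CALL,
  				    "demo", dev);
  }

[End of editor's note; the patch continues below.]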
*/ + preempt_disable(); + +- /* If the work is "lazy", handle it from next tick if any */ +- if (work->flags & IRQ_WORK_LAZY) { +- if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) && +- tick_nohz_tick_stopped()) +- arch_irq_work_raise(); +- } else { +- if (llist_add(&work->llnode, this_cpu_ptr(&raised_list))) ++ lazy_work = work->flags & IRQ_WORK_LAZY; ++ ++ if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ))) ++ list = this_cpu_ptr(&lazy_list); ++ else ++ list = this_cpu_ptr(&raised_list); ++ ++ if (llist_add(&work->llnode, list)) { ++ if (!lazy_work || tick_nohz_tick_stopped()) + arch_irq_work_raise(); + } + +@@ -116,9 +129,8 @@ + raised = this_cpu_ptr(&raised_list); + lazy = this_cpu_ptr(&lazy_list); + +- if (llist_empty(raised) || arch_irq_work_has_interrupt()) +- if (llist_empty(lazy)) +- return false; ++ if (llist_empty(raised) && llist_empty(lazy)) ++ return false; + + /* All work should have been flushed before going offline */ + WARN_ON_ONCE(cpu_is_offline(smp_processor_id())); +@@ -132,7 +144,7 @@ + struct irq_work *work; + struct llist_node *llnode; + +- BUG_ON(!irqs_disabled()); ++ BUG_ON_NONRT(!irqs_disabled()); + + if (llist_empty(list)) + return; +@@ -169,7 +181,16 @@ + void irq_work_run(void) + { + irq_work_run_list(this_cpu_ptr(&raised_list)); +- irq_work_run_list(this_cpu_ptr(&lazy_list)); ++ if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) { ++ /* ++ * NOTE: we raise softirq via IPI for safety, ++ * and execute in irq_work_tick() to move the ++ * overhead from hard to soft irq context. ++ */ ++ if (!llist_empty(this_cpu_ptr(&lazy_list))) ++ raise_softirq(TIMER_SOFTIRQ); ++ } else ++ irq_work_run_list(this_cpu_ptr(&lazy_list)); + } + EXPORT_SYMBOL_GPL(irq_work_run); + +@@ -179,8 +200,17 @@ + + if (!llist_empty(raised) && !arch_irq_work_has_interrupt()) + irq_work_run_list(raised); ++ ++ if (!IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) ++ irq_work_run_list(this_cpu_ptr(&lazy_list)); ++} ++ ++#if defined(CONFIG_IRQ_WORK) && defined(CONFIG_PREEMPT_RT_FULL) ++void irq_work_tick_soft(void) ++{ + irq_work_run_list(this_cpu_ptr(&lazy_list)); + } ++#endif + + /* + * Synchronize against the irq_work @entry, ensures the entry is not +diff -Nur linux-4.4.13.orig/kernel/Kconfig.locks linux-4.4.13/kernel/Kconfig.locks +--- linux-4.4.13.orig/kernel/Kconfig.locks 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/Kconfig.locks 2016-06-22 21:55:08.050523867 +0200 +@@ -225,11 +225,11 @@ + + config MUTEX_SPIN_ON_OWNER + def_bool y +- depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW ++ depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL + + config RWSEM_SPIN_ON_OWNER + def_bool y +- depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW ++ depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL + + config LOCK_SPIN_ON_OWNER + def_bool y +diff -Nur linux-4.4.13.orig/kernel/Kconfig.preempt linux-4.4.13/kernel/Kconfig.preempt +--- linux-4.4.13.orig/kernel/Kconfig.preempt 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/Kconfig.preempt 2016-06-22 21:55:08.050523867 +0200 +@@ -1,3 +1,16 @@ ++config PREEMPT ++ bool ++ select PREEMPT_COUNT ++ ++config PREEMPT_RT_BASE ++ bool ++ select PREEMPT ++ ++config HAVE_PREEMPT_LAZY ++ bool ++ ++config PREEMPT_LAZY ++ def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL + + choice + prompt "Preemption Model" +@@ -33,9 +46,9 @@ + + Select this if you are building a kernel for a desktop system. 
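[Editor's note: illustration only, not part of the patch. Under the kernel/irq_work.c hunks above, RT diverts unflagged items to the per-cpu lazy list and runs them from TIMER_SOFTIRQ via irq_work_tick_soft(), while items flagged IRQ_WORK_HARD_IRQ stay on raised_list and still run from hard interrupt context. A hypothetical user of that flag; IRQ_WORK_HARD_IRQ is defined by this patch, so treat it as an assumption outside RT trees.]

  #include <linux/irq_work.h>

  static void demo_irq_work_fn(struct irq_work *work)
  {
  	/* keep this short and non-sleeping; it runs from the irq_work path */
  }

  static struct irq_work demo_irq_work = {
  	.flags = IRQ_WORK_HARD_IRQ,	/* patch-defined flag: run in hard irq */
  	.func  = demo_irq_work_fn,
  };

  static void demo_kick(void)
  {
  	irq_work_queue(&demo_irq_work);
  }

[End of editor's note; the patch continues below.]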
+ +-config PREEMPT ++config PREEMPT__LL + bool "Preemptible Kernel (Low-Latency Desktop)" +- select PREEMPT_COUNT ++ select PREEMPT + select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK + help + This option reduces the latency of the kernel by making +@@ -52,6 +65,22 @@ + embedded system with latency requirements in the milliseconds + range. + ++config PREEMPT_RTB ++ bool "Preemptible Kernel (Basic RT)" ++ select PREEMPT_RT_BASE ++ help ++ This option is basically the same as (Low-Latency Desktop) but ++ enables changes which are preliminary for the full preemptible ++ RT kernel. ++ ++config PREEMPT_RT_FULL ++ bool "Fully Preemptible Kernel (RT)" ++ depends on IRQ_FORCED_THREADING ++ select PREEMPT_RT_BASE ++ select PREEMPT_RCU ++ help ++ All and everything ++ + endchoice + + config PREEMPT_COUNT +diff -Nur linux-4.4.13.orig/kernel/ksysfs.c linux-4.4.13/kernel/ksysfs.c +--- linux-4.4.13.orig/kernel/ksysfs.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/ksysfs.c 2016-06-22 21:55:08.050523867 +0200 +@@ -136,6 +136,15 @@ + + #endif /* CONFIG_KEXEC_CORE */ + ++#if defined(CONFIG_PREEMPT_RT_FULL) ++static ssize_t realtime_show(struct kobject *kobj, ++ struct kobj_attribute *attr, char *buf) ++{ ++ return sprintf(buf, "%d\n", 1); ++} ++KERNEL_ATTR_RO(realtime); ++#endif ++ + /* whether file capabilities are enabled */ + static ssize_t fscaps_show(struct kobject *kobj, + struct kobj_attribute *attr, char *buf) +@@ -203,6 +212,9 @@ + &vmcoreinfo_attr.attr, + #endif + &rcu_expedited_attr.attr, ++#ifdef CONFIG_PREEMPT_RT_FULL ++ &realtime_attr.attr, ++#endif + NULL + }; + +diff -Nur linux-4.4.13.orig/kernel/locking/lglock.c linux-4.4.13/kernel/locking/lglock.c +--- linux-4.4.13.orig/kernel/locking/lglock.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/locking/lglock.c 2016-06-22 21:55:08.050523867 +0200 +@@ -4,6 +4,15 @@ + #include + #include + ++#ifndef CONFIG_PREEMPT_RT_FULL ++# define lg_lock_ptr arch_spinlock_t ++# define lg_do_lock(l) arch_spin_lock(l) ++# define lg_do_unlock(l) arch_spin_unlock(l) ++#else ++# define lg_lock_ptr struct rt_mutex ++# define lg_do_lock(l) __rt_spin_lock__no_mg(l) ++# define lg_do_unlock(l) __rt_spin_unlock(l) ++#endif + /* + * Note there is no uninit, so lglocks cannot be defined in + * modules (but it's fine to use them from there) +@@ -12,51 +21,60 @@ + + void lg_lock_init(struct lglock *lg, char *name) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ int i; ++ ++ for_each_possible_cpu(i) { ++ struct rt_mutex *lock = per_cpu_ptr(lg->lock, i); ++ ++ rt_mutex_init(lock); ++ } ++#endif + LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0); + } + EXPORT_SYMBOL(lg_lock_init); + + void lg_local_lock(struct lglock *lg) + { +- arch_spinlock_t *lock; ++ lg_lock_ptr *lock; + +- preempt_disable(); ++ migrate_disable(); + lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); + lock = this_cpu_ptr(lg->lock); +- arch_spin_lock(lock); ++ lg_do_lock(lock); + } + EXPORT_SYMBOL(lg_local_lock); + + void lg_local_unlock(struct lglock *lg) + { +- arch_spinlock_t *lock; ++ lg_lock_ptr *lock; + + lock_release(&lg->lock_dep_map, 1, _RET_IP_); + lock = this_cpu_ptr(lg->lock); +- arch_spin_unlock(lock); +- preempt_enable(); ++ lg_do_unlock(lock); ++ migrate_enable(); + } + EXPORT_SYMBOL(lg_local_unlock); + + void lg_local_lock_cpu(struct lglock *lg, int cpu) + { +- arch_spinlock_t *lock; ++ lg_lock_ptr *lock; + +- preempt_disable(); ++ preempt_disable_nort(); + lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); + lock = 
per_cpu_ptr(lg->lock, cpu); +- arch_spin_lock(lock); ++ lg_do_lock(lock); + } + EXPORT_SYMBOL(lg_local_lock_cpu); + + void lg_local_unlock_cpu(struct lglock *lg, int cpu) + { +- arch_spinlock_t *lock; ++ lg_lock_ptr *lock; + + lock_release(&lg->lock_dep_map, 1, _RET_IP_); + lock = per_cpu_ptr(lg->lock, cpu); +- arch_spin_unlock(lock); +- preempt_enable(); ++ lg_do_unlock(lock); ++ preempt_enable_nort(); + } + EXPORT_SYMBOL(lg_local_unlock_cpu); + +@@ -68,30 +86,30 @@ + if (cpu2 < cpu1) + swap(cpu1, cpu2); + +- preempt_disable(); ++ preempt_disable_nort(); + lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); +- arch_spin_lock(per_cpu_ptr(lg->lock, cpu1)); +- arch_spin_lock(per_cpu_ptr(lg->lock, cpu2)); ++ lg_do_lock(per_cpu_ptr(lg->lock, cpu1)); ++ lg_do_lock(per_cpu_ptr(lg->lock, cpu2)); + } + + void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2) + { + lock_release(&lg->lock_dep_map, 1, _RET_IP_); +- arch_spin_unlock(per_cpu_ptr(lg->lock, cpu1)); +- arch_spin_unlock(per_cpu_ptr(lg->lock, cpu2)); +- preempt_enable(); ++ lg_do_unlock(per_cpu_ptr(lg->lock, cpu1)); ++ lg_do_unlock(per_cpu_ptr(lg->lock, cpu2)); ++ preempt_enable_nort(); + } + + void lg_global_lock(struct lglock *lg) + { + int i; + +- preempt_disable(); ++ preempt_disable_nort(); + lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); + for_each_possible_cpu(i) { +- arch_spinlock_t *lock; ++ lg_lock_ptr *lock; + lock = per_cpu_ptr(lg->lock, i); +- arch_spin_lock(lock); ++ lg_do_lock(lock); + } + } + EXPORT_SYMBOL(lg_global_lock); +@@ -102,10 +120,35 @@ + + lock_release(&lg->lock_dep_map, 1, _RET_IP_); + for_each_possible_cpu(i) { +- arch_spinlock_t *lock; ++ lg_lock_ptr *lock; + lock = per_cpu_ptr(lg->lock, i); +- arch_spin_unlock(lock); ++ lg_do_unlock(lock); + } +- preempt_enable(); ++ preempt_enable_nort(); + } + EXPORT_SYMBOL(lg_global_unlock); ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * HACK: If you use this, you get to keep the pieces. ++ * Used in queue_stop_cpus_work() when stop machinery ++ * is called from inactive CPU, so we can't schedule. ++ */ ++# define lg_do_trylock_relax(l) \ ++ do { \ ++ while (!__rt_spin_trylock(l)) \ ++ cpu_relax(); \ ++ } while (0) ++ ++void lg_global_trylock_relax(struct lglock *lg) ++{ ++ int i; ++ ++ lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_); ++ for_each_possible_cpu(i) { ++ lg_lock_ptr *lock; ++ lock = per_cpu_ptr(lg->lock, i); ++ lg_do_trylock_relax(lock); ++ } ++} ++#endif +diff -Nur linux-4.4.13.orig/kernel/locking/lockdep.c linux-4.4.13/kernel/locking/lockdep.c +--- linux-4.4.13.orig/kernel/locking/lockdep.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/locking/lockdep.c 2016-06-22 21:55:08.050523867 +0200 +@@ -3525,6 +3525,7 @@ + } + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * We dont accurately track softirq state in e.g. 
+ * hardirq contexts (such as on 4KSTACKS), so only +@@ -3539,6 +3540,7 @@ + DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); + } + } ++#endif + + if (!debug_locks) + print_irqtrace_events(current); +diff -Nur linux-4.4.13.orig/kernel/locking/locktorture.c linux-4.4.13/kernel/locking/locktorture.c +--- linux-4.4.13.orig/kernel/locking/locktorture.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/locking/locktorture.c 2016-06-22 21:55:08.050523867 +0200 +@@ -26,7 +26,6 @@ + #include + #include + #include +-#include + #include + #include + #include +diff -Nur linux-4.4.13.orig/kernel/locking/Makefile linux-4.4.13/kernel/locking/Makefile +--- linux-4.4.13.orig/kernel/locking/Makefile 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/locking/Makefile 2016-06-22 21:55:08.050523867 +0200 +@@ -1,5 +1,5 @@ + +-obj-y += mutex.o semaphore.o rwsem.o percpu-rwsem.o ++obj-y += semaphore.o percpu-rwsem.o + + ifdef CONFIG_FUNCTION_TRACER + CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE) +@@ -8,7 +8,11 @@ + CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE) + endif + ++ifneq ($(CONFIG_PREEMPT_RT_FULL),y) ++obj-y += mutex.o + obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o ++obj-y += rwsem.o ++endif + obj-$(CONFIG_LOCKDEP) += lockdep.o + ifeq ($(CONFIG_PROC_FS),y) + obj-$(CONFIG_LOCKDEP) += lockdep_proc.o +@@ -22,7 +26,10 @@ + obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o + obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o + obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o ++ifneq ($(CONFIG_PREEMPT_RT_FULL),y) + obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o + obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o ++endif ++obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o + obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o + obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o +diff -Nur linux-4.4.13.orig/kernel/locking/rt.c linux-4.4.13/kernel/locking/rt.c +--- linux-4.4.13.orig/kernel/locking/rt.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.4.13/kernel/locking/rt.c 2016-06-22 21:55:08.050523867 +0200 +@@ -0,0 +1,474 @@ ++/* ++ * kernel/rt.c ++ * ++ * Real-Time Preemption Support ++ * ++ * started by Ingo Molnar: ++ * ++ * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar ++ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner ++ * ++ * historic credit for proving that Linux spinlocks can be implemented via ++ * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow ++ * and others) who prototyped it on 2.4 and did lots of comparative ++ * research and analysis; TimeSys, for proving that you can implement a ++ * fully preemptible kernel via the use of IRQ threading and mutexes; ++ * Bill Huey for persuasively arguing on lkml that the mutex model is the ++ * right one; and to MontaVista, who ported pmutexes to 2.6. ++ * ++ * This code is a from-scratch implementation and is not based on pmutexes, ++ * but the idea of converting spinlocks to mutexes is used here too. ++ * ++ * lock debugging, locking tree, deadlock detection: ++ * ++ * Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey ++ * Released under the General Public License (GPL). ++ * ++ * Includes portions of the generic R/W semaphore implementation from: ++ * ++ * Copyright (c) 2001 David Howells (dhowells@redhat.com). 
++ * - Derived partially from idea by Andrea Arcangeli ++ * - Derived also from comments by Linus ++ * ++ * Pending ownership of locks and ownership stealing: ++ * ++ * Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt ++ * ++ * (also by Steven Rostedt) ++ * - Converted single pi_lock to individual task locks. ++ * ++ * By Esben Nielsen: ++ * Doing priority inheritance with help of the scheduler. ++ * ++ * Copyright (C) 2006, Timesys Corp., Thomas Gleixner ++ * - major rework based on Esben Nielsens initial patch ++ * - replaced thread_info references by task_struct refs ++ * - removed task->pending_owner dependency ++ * - BKL drop/reacquire for semaphore style locks to avoid deadlocks ++ * in the scheduler return path as discussed with Steven Rostedt ++ * ++ * Copyright (C) 2006, Kihon Technologies Inc. ++ * Steven Rostedt ++ * - debugged and patched Thomas Gleixner's rework. ++ * - added back the cmpxchg to the rework. ++ * - turned atomic require back on for SMP. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "rtmutex_common.h" ++ ++/* ++ * struct mutex functions ++ */ ++void __mutex_do_init(struct mutex *mutex, const char *name, ++ struct lock_class_key *key) ++{ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ /* ++ * Make sure we are not reinitializing a held lock: ++ */ ++ debug_check_no_locks_freed((void *)mutex, sizeof(*mutex)); ++ lockdep_init_map(&mutex->dep_map, name, key, 0); ++#endif ++ mutex->lock.save_state = 0; ++} ++EXPORT_SYMBOL(__mutex_do_init); ++ ++void __lockfunc _mutex_lock(struct mutex *lock) ++{ ++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++ rt_mutex_lock(&lock->lock); ++} ++EXPORT_SYMBOL(_mutex_lock); ++ ++int __lockfunc _mutex_lock_interruptible(struct mutex *lock) ++{ ++ int ret; ++ ++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++ ret = rt_mutex_lock_interruptible(&lock->lock); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_interruptible); ++ ++int __lockfunc _mutex_lock_killable(struct mutex *lock) ++{ ++ int ret; ++ ++ mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++ ret = rt_mutex_lock_killable(&lock->lock); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_killable); ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass) ++{ ++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); ++ rt_mutex_lock(&lock->lock); ++} ++EXPORT_SYMBOL(_mutex_lock_nested); ++ ++void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest) ++{ ++ mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_); ++ rt_mutex_lock(&lock->lock); ++} ++EXPORT_SYMBOL(_mutex_lock_nest_lock); ++ ++int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass) ++{ ++ int ret; ++ ++ mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_); ++ ret = rt_mutex_lock_interruptible(&lock->lock); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_interruptible_nested); ++ ++int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass) ++{ ++ int ret; ++ ++ mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_); ++ ret = rt_mutex_lock_killable(&lock->lock); ++ if (ret) ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_lock_killable_nested); ++#endif ++ ++int 
__lockfunc _mutex_trylock(struct mutex *lock) ++{ ++ int ret = rt_mutex_trylock(&lock->lock); ++ ++ if (ret) ++ mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ ++ return ret; ++} ++EXPORT_SYMBOL(_mutex_trylock); ++ ++void __lockfunc _mutex_unlock(struct mutex *lock) ++{ ++ mutex_release(&lock->dep_map, 1, _RET_IP_); ++ rt_mutex_unlock(&lock->lock); ++} ++EXPORT_SYMBOL(_mutex_unlock); ++ ++/* ++ * rwlock_t functions ++ */ ++int __lockfunc rt_write_trylock(rwlock_t *rwlock) ++{ ++ int ret; ++ ++ migrate_disable(); ++ ret = rt_mutex_trylock(&rwlock->lock); ++ if (ret) ++ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); ++ else ++ migrate_enable(); ++ ++ return ret; ++} ++EXPORT_SYMBOL(rt_write_trylock); ++ ++int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags) ++{ ++ int ret; ++ ++ *flags = 0; ++ ret = rt_write_trylock(rwlock); ++ return ret; ++} ++EXPORT_SYMBOL(rt_write_trylock_irqsave); ++ ++int __lockfunc rt_read_trylock(rwlock_t *rwlock) ++{ ++ struct rt_mutex *lock = &rwlock->lock; ++ int ret = 1; ++ ++ /* ++ * recursive read locks succeed when current owns the lock, ++ * but not when read_depth == 0 which means that the lock is ++ * write locked. ++ */ ++ if (rt_mutex_owner(lock) != current) { ++ migrate_disable(); ++ ret = rt_mutex_trylock(lock); ++ if (ret) ++ rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_); ++ else ++ migrate_enable(); ++ ++ } else if (!rwlock->read_depth) { ++ ret = 0; ++ } ++ ++ if (ret) ++ rwlock->read_depth++; ++ ++ return ret; ++} ++EXPORT_SYMBOL(rt_read_trylock); ++ ++void __lockfunc rt_write_lock(rwlock_t *rwlock) ++{ ++ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); ++ __rt_spin_lock(&rwlock->lock); ++} ++EXPORT_SYMBOL(rt_write_lock); ++ ++void __lockfunc rt_read_lock(rwlock_t *rwlock) ++{ ++ struct rt_mutex *lock = &rwlock->lock; ++ ++ ++ /* ++ * recursive read locks succeed when current owns the lock ++ */ ++ if (rt_mutex_owner(lock) != current) { ++ rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_); ++ __rt_spin_lock(lock); ++ } ++ rwlock->read_depth++; ++} ++ ++EXPORT_SYMBOL(rt_read_lock); ++ ++void __lockfunc rt_write_unlock(rwlock_t *rwlock) ++{ ++ /* NOTE: we always pass in '1' for nested, for simplicity */ ++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_); ++ __rt_spin_unlock(&rwlock->lock); ++ migrate_enable(); ++} ++EXPORT_SYMBOL(rt_write_unlock); ++ ++void __lockfunc rt_read_unlock(rwlock_t *rwlock) ++{ ++ /* Release the lock only when read_depth is down to 0 */ ++ if (--rwlock->read_depth == 0) { ++ rwlock_release(&rwlock->dep_map, 1, _RET_IP_); ++ __rt_spin_unlock(&rwlock->lock); ++ migrate_enable(); ++ } ++} ++EXPORT_SYMBOL(rt_read_unlock); ++ ++unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock) ++{ ++ rt_write_lock(rwlock); ++ ++ return 0; ++} ++EXPORT_SYMBOL(rt_write_lock_irqsave); ++ ++unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock) ++{ ++ rt_read_lock(rwlock); ++ ++ return 0; ++} ++EXPORT_SYMBOL(rt_read_lock_irqsave); ++ ++void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key) ++{ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ /* ++ * Make sure we are not reinitializing a held lock: ++ */ ++ debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock)); ++ lockdep_init_map(&rwlock->dep_map, name, key, 0); ++#endif ++ rwlock->lock.save_state = 1; ++ rwlock->read_depth = 0; ++} ++EXPORT_SYMBOL(__rt_rwlock_init); ++ ++/* ++ * rw_semaphores ++ */ ++ ++void rt_up_write(struct rw_semaphore *rwsem) ++{ ++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_); ++ 
rt_mutex_unlock(&rwsem->lock); ++} ++EXPORT_SYMBOL(rt_up_write); ++ ++void __rt_up_read(struct rw_semaphore *rwsem) ++{ ++ if (--rwsem->read_depth == 0) ++ rt_mutex_unlock(&rwsem->lock); ++} ++ ++void rt_up_read(struct rw_semaphore *rwsem) ++{ ++ rwsem_release(&rwsem->dep_map, 1, _RET_IP_); ++ __rt_up_read(rwsem); ++} ++EXPORT_SYMBOL(rt_up_read); ++ ++/* ++ * downgrade a write lock into a read lock ++ * - just wake up any readers at the front of the queue ++ */ ++void rt_downgrade_write(struct rw_semaphore *rwsem) ++{ ++ BUG_ON(rt_mutex_owner(&rwsem->lock) != current); ++ rwsem->read_depth = 1; ++} ++EXPORT_SYMBOL(rt_downgrade_write); ++ ++int rt_down_write_trylock(struct rw_semaphore *rwsem) ++{ ++ int ret = rt_mutex_trylock(&rwsem->lock); ++ ++ if (ret) ++ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(rt_down_write_trylock); ++ ++void rt_down_write(struct rw_semaphore *rwsem) ++{ ++ rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_); ++ rt_mutex_lock(&rwsem->lock); ++} ++EXPORT_SYMBOL(rt_down_write); ++ ++void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass) ++{ ++ rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_); ++ rt_mutex_lock(&rwsem->lock); ++} ++EXPORT_SYMBOL(rt_down_write_nested); ++ ++void rt_down_write_nested_lock(struct rw_semaphore *rwsem, ++ struct lockdep_map *nest) ++{ ++ rwsem_acquire_nest(&rwsem->dep_map, 0, 0, nest, _RET_IP_); ++ rt_mutex_lock(&rwsem->lock); ++} ++EXPORT_SYMBOL(rt_down_write_nested_lock); ++ ++int rt__down_read_trylock(struct rw_semaphore *rwsem) ++{ ++ struct rt_mutex *lock = &rwsem->lock; ++ int ret = 1; ++ ++ /* ++ * recursive read locks succeed when current owns the rwsem, ++ * but not when read_depth == 0 which means that the rwsem is ++ * write locked. ++ */ ++ if (rt_mutex_owner(lock) != current) ++ ret = rt_mutex_trylock(&rwsem->lock); ++ else if (!rwsem->read_depth) ++ ret = 0; ++ ++ if (ret) ++ rwsem->read_depth++; ++ return ret; ++ ++} ++ ++int rt_down_read_trylock(struct rw_semaphore *rwsem) ++{ ++ int ret; ++ ++ ret = rt__down_read_trylock(rwsem); ++ if (ret) ++ rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_); ++ ++ return ret; ++} ++EXPORT_SYMBOL(rt_down_read_trylock); ++ ++void rt__down_read(struct rw_semaphore *rwsem) ++{ ++ struct rt_mutex *lock = &rwsem->lock; ++ ++ if (rt_mutex_owner(lock) != current) ++ rt_mutex_lock(&rwsem->lock); ++ rwsem->read_depth++; ++} ++EXPORT_SYMBOL(rt__down_read); ++ ++static void __rt_down_read(struct rw_semaphore *rwsem, int subclass) ++{ ++ rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_); ++ rt__down_read(rwsem); ++} ++ ++void rt_down_read(struct rw_semaphore *rwsem) ++{ ++ __rt_down_read(rwsem, 0); ++} ++EXPORT_SYMBOL(rt_down_read); ++ ++void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass) ++{ ++ __rt_down_read(rwsem, subclass); ++} ++EXPORT_SYMBOL(rt_down_read_nested); ++ ++void __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name, ++ struct lock_class_key *key) ++{ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ /* ++ * Make sure we are not reinitializing a held lock: ++ */ ++ debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem)); ++ lockdep_init_map(&rwsem->dep_map, name, key, 0); ++#endif ++ rwsem->read_depth = 0; ++ rwsem->lock.save_state = 0; ++} ++EXPORT_SYMBOL(__rt_rwsem_init); ++ ++/** ++ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0 ++ * @cnt: the atomic which we are to dec ++ * @lock: the mutex to return holding if we dec to 0 ++ * ++ * return true and hold lock if we dec to 0, return false 
otherwise ++ */ ++int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock) ++{ ++ /* dec if we can't possibly hit 0 */ ++ if (atomic_add_unless(cnt, -1, 1)) ++ return 0; ++ /* we might hit 0, so take the lock */ ++ mutex_lock(lock); ++ if (!atomic_dec_and_test(cnt)) { ++ /* when we actually did the dec, we didn't hit 0 */ ++ mutex_unlock(lock); ++ return 0; ++ } ++ /* we hit 0, and we hold the lock */ ++ return 1; ++} ++EXPORT_SYMBOL(atomic_dec_and_mutex_lock); +diff -Nur linux-4.4.13.orig/kernel/locking/rtmutex.c linux-4.4.13/kernel/locking/rtmutex.c +--- linux-4.4.13.orig/kernel/locking/rtmutex.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/locking/rtmutex.c 2016-06-22 21:55:08.050523867 +0200 +@@ -7,6 +7,11 @@ + * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner + * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt + * Copyright (C) 2006 Esben Nielsen ++ * Adaptive Spinlocks: ++ * Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich, ++ * and Peter Morreale, ++ * Adaptive Spinlocks simplification: ++ * Copyright (C) 2008 Red Hat, Inc., Steven Rostedt + * + * See Documentation/locking/rt-mutex-design.txt for details. + */ +@@ -16,6 +21,7 @@ + #include + #include + #include ++#include + + #include "rtmutex_common.h" + +@@ -69,6 +75,12 @@ + clear_rt_mutex_waiters(lock); + } + ++static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter) ++{ ++ return waiter && waiter != PI_WAKEUP_INPROGRESS && ++ waiter != PI_REQUEUE_INPROGRESS; ++} ++ + /* + * We can speed up the acquire/release, if there's no debugging state to be + * set up. +@@ -99,13 +111,14 @@ + * 2) Drop lock->wait_lock + * 3) Try to unlock the lock with cmpxchg + */ +-static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock) ++static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, ++ unsigned long flags) + __releases(lock->wait_lock) + { + struct task_struct *owner = rt_mutex_owner(lock); + + clear_rt_mutex_waiters(lock); +- raw_spin_unlock(&lock->wait_lock); ++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + /* + * If a new waiter comes in between the unlock and the cmpxchg + * we have two situations: +@@ -147,11 +160,12 @@ + /* + * Simple slow path only version: lock->owner is protected by lock->wait_lock. + */ +-static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock) ++static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock, ++ unsigned long flags) + __releases(lock->wait_lock) + { + lock->owner = NULL; +- raw_spin_unlock(&lock->wait_lock); ++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + return true; + } + #endif +@@ -348,6 +362,14 @@ + return debug_rt_mutex_detect_deadlock(waiter, chwalk); + } + ++static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter) ++{ ++ if (waiter->savestate) ++ wake_up_lock_sleeper(waiter->task); ++ else ++ wake_up_process(waiter->task); ++} ++ + /* + * Max number of times we'll walk the boosting chain: + */ +@@ -355,7 +377,8 @@ + + static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p) + { +- return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL; ++ return rt_mutex_real_waiter(p->pi_blocked_on) ? ++ p->pi_blocked_on->lock : NULL; + } + + /* +@@ -433,7 +456,6 @@ + int ret = 0, depth = 0; + struct rt_mutex *lock; + bool detect_deadlock; +- unsigned long flags; + bool requeue = true; + + detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk); +@@ -476,7 +498,7 @@ + /* + * [1] Task cannot go away as we did a get_task() before ! 
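/*
 * [Editor's aside -- not part of the patch] A minimal user-space sketch of
 * the dec-and-lock idiom that atomic_dec_and_mutex_lock() above implements,
 * using C11 atomics and a pthread mutex as stand-ins for atomic_t and
 * struct mutex. All demo_* names are illustrative assumptions.
 */
#include <pthread.h>
#include <stdatomic.h>

static pthread_mutex_t demo_guard = PTHREAD_MUTEX_INITIALIZER;

/* Return 1 holding demo_guard iff this call dropped the count to zero. */
static int demo_dec_and_lock(atomic_int *cnt)
{
    int old = atomic_load(cnt);

    /* Fast path: decrement as long as we cannot possibly reach zero. */
    while (old > 1)
        if (atomic_compare_exchange_weak(cnt, &old, old - 1))
            return 0;
    /* We might hit zero, so serialize against other callers first. */
    pthread_mutex_lock(&demo_guard);
    if (atomic_fetch_sub(cnt, 1) != 1) {
        pthread_mutex_unlock(&demo_guard);
        return 0;
    }
    return 1;    /* count is now zero and demo_guard is still held */
}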
+ */ +- raw_spin_lock_irqsave(&task->pi_lock, flags); ++ raw_spin_lock_irq(&task->pi_lock); + + /* + * [2] Get the waiter on which @task is blocked on. +@@ -492,7 +514,7 @@ + * reached or the state of the chain has changed while we + * dropped the locks. + */ +- if (!waiter) ++ if (!rt_mutex_real_waiter(waiter)) + goto out_unlock_pi; + + /* +@@ -560,7 +582,7 @@ + * operations. + */ + if (!raw_spin_trylock(&lock->wait_lock)) { +- raw_spin_unlock_irqrestore(&task->pi_lock, flags); ++ raw_spin_unlock_irq(&task->pi_lock); + cpu_relax(); + goto retry; + } +@@ -591,7 +613,7 @@ + /* + * No requeue[7] here. Just release @task [8] + */ +- raw_spin_unlock_irqrestore(&task->pi_lock, flags); ++ raw_spin_unlock(&task->pi_lock); + put_task_struct(task); + + /* +@@ -599,14 +621,14 @@ + * If there is no owner of the lock, end of chain. + */ + if (!rt_mutex_owner(lock)) { +- raw_spin_unlock(&lock->wait_lock); ++ raw_spin_unlock_irq(&lock->wait_lock); + return 0; + } + + /* [10] Grab the next task, i.e. owner of @lock */ + task = rt_mutex_owner(lock); + get_task_struct(task); +- raw_spin_lock_irqsave(&task->pi_lock, flags); ++ raw_spin_lock(&task->pi_lock); + + /* + * No requeue [11] here. We just do deadlock detection. +@@ -621,8 +643,8 @@ + top_waiter = rt_mutex_top_waiter(lock); + + /* [13] Drop locks */ +- raw_spin_unlock_irqrestore(&task->pi_lock, flags); +- raw_spin_unlock(&lock->wait_lock); ++ raw_spin_unlock(&task->pi_lock); ++ raw_spin_unlock_irq(&lock->wait_lock); + + /* If owner is not blocked, end of chain. */ + if (!next_lock) +@@ -643,7 +665,7 @@ + rt_mutex_enqueue(lock, waiter); + + /* [8] Release the task */ +- raw_spin_unlock_irqrestore(&task->pi_lock, flags); ++ raw_spin_unlock(&task->pi_lock); + put_task_struct(task); + + /* +@@ -654,21 +676,24 @@ + * follow here. This is the end of the chain we are walking. + */ + if (!rt_mutex_owner(lock)) { ++ struct rt_mutex_waiter *lock_top_waiter; ++ + /* + * If the requeue [7] above changed the top waiter, + * then we need to wake the new top waiter up to try + * to get the lock. + */ +- if (prerequeue_top_waiter != rt_mutex_top_waiter(lock)) +- wake_up_process(rt_mutex_top_waiter(lock)->task); +- raw_spin_unlock(&lock->wait_lock); ++ lock_top_waiter = rt_mutex_top_waiter(lock); ++ if (prerequeue_top_waiter != lock_top_waiter) ++ rt_mutex_wake_waiter(lock_top_waiter); ++ raw_spin_unlock_irq(&lock->wait_lock); + return 0; + } + + /* [10] Grab the next task, i.e. 
the owner of @lock */ + task = rt_mutex_owner(lock); + get_task_struct(task); +- raw_spin_lock_irqsave(&task->pi_lock, flags); ++ raw_spin_lock(&task->pi_lock); + + /* [11] requeue the pi waiters if necessary */ + if (waiter == rt_mutex_top_waiter(lock)) { +@@ -722,8 +747,8 @@ + top_waiter = rt_mutex_top_waiter(lock); + + /* [13] Drop the locks */ +- raw_spin_unlock_irqrestore(&task->pi_lock, flags); +- raw_spin_unlock(&lock->wait_lock); ++ raw_spin_unlock(&task->pi_lock); ++ raw_spin_unlock_irq(&lock->wait_lock); + + /* + * Make the actual exit decisions [12], based on the stored +@@ -746,28 +771,46 @@ + goto again; + + out_unlock_pi: +- raw_spin_unlock_irqrestore(&task->pi_lock, flags); ++ raw_spin_unlock_irq(&task->pi_lock); + out_put_task: + put_task_struct(task); + + return ret; + } + ++ ++#define STEAL_NORMAL 0 ++#define STEAL_LATERAL 1 ++ ++/* ++ * Note that RT tasks are excluded from lateral-steals to prevent the ++ * introduction of an unbounded latency ++ */ ++static inline int lock_is_stealable(struct task_struct *task, ++ struct task_struct *pendowner, int mode) ++{ ++ if (mode == STEAL_NORMAL || rt_task(task)) { ++ if (task->prio >= pendowner->prio) ++ return 0; ++ } else if (task->prio > pendowner->prio) ++ return 0; ++ return 1; ++} ++ + /* + * Try to take an rt-mutex + * +- * Must be called with lock->wait_lock held. ++ * Must be called with lock->wait_lock held and interrupts disabled + * + * @lock: The lock to be acquired. + * @task: The task which wants to acquire the lock + * @waiter: The waiter that is queued to the lock's wait tree if the + * callsite called task_blocked_on_lock(), otherwise NULL + */ +-static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, +- struct rt_mutex_waiter *waiter) ++static int __try_to_take_rt_mutex(struct rt_mutex *lock, ++ struct task_struct *task, ++ struct rt_mutex_waiter *waiter, int mode) + { +- unsigned long flags; +- + /* + * Before testing whether we can acquire @lock, we set the + * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all +@@ -803,8 +846,10 @@ + * If waiter is not the highest priority waiter of + * @lock, give up. + */ +- if (waiter != rt_mutex_top_waiter(lock)) ++ if (waiter != rt_mutex_top_waiter(lock)) { ++ /* XXX lock_is_stealable() ? */ + return 0; ++ } + + /* + * We can acquire the lock. Remove the waiter from the +@@ -822,14 +867,10 @@ + * not need to be dequeued. + */ + if (rt_mutex_has_waiters(lock)) { +- /* +- * If @task->prio is greater than or equal to +- * the top waiter priority (kernel view), +- * @task lost. +- */ +- if (task->prio >= rt_mutex_top_waiter(lock)->prio) +- return 0; ++ struct task_struct *pown = rt_mutex_top_waiter(lock)->task; + ++ if (task != pown && !lock_is_stealable(task, pown, mode)) ++ return 0; + /* + * The current top waiter stays enqueued. We + * don't have to change anything in the lock +@@ -852,7 +893,7 @@ + * case, but conditionals are more expensive than a redundant + * store. + */ +- raw_spin_lock_irqsave(&task->pi_lock, flags); ++ raw_spin_lock(&task->pi_lock); + task->pi_blocked_on = NULL; + /* + * Finish the lock acquisition. @task is the new owner. If +@@ -861,7 +902,7 @@ + */ + if (rt_mutex_has_waiters(lock)) + rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock)); +- raw_spin_unlock_irqrestore(&task->pi_lock, flags); ++ raw_spin_unlock(&task->pi_lock); + + takeit: + /* We got the lock. 
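/*
 * [Editor's aside -- not part of the patch] The lateral-steal rule from
 * lock_is_stealable() above as a standalone predicate. Kernel priorities
 * compare "lower value wins", so '<' below means strictly higher priority;
 * rt_task() is modelled by the task_is_rt flag. demo_* names are
 * illustrative assumptions.
 */
enum demo_steal_mode { DEMO_STEAL_NORMAL, DEMO_STEAL_LATERAL };

static int demo_lock_is_stealable(int task_prio, int task_is_rt,
                                  int pendowner_prio,
                                  enum demo_steal_mode mode)
{
    /* RT tasks never steal laterally, to keep latencies bounded. */
    if (mode == DEMO_STEAL_NORMAL || task_is_rt)
        return task_prio < pendowner_prio;   /* strictly higher priority */
    return task_prio <= pendowner_prio;      /* lateral: equal is enough */
}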
*/ +@@ -878,12 +919,405 @@ + return 1; + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * preemptible spin_lock functions: ++ */ ++static inline void rt_spin_lock_fastlock(struct rt_mutex *lock, ++ void (*slowfn)(struct rt_mutex *lock, ++ bool mg_off), ++ bool do_mig_dis) ++{ ++ might_sleep_no_state_check(); ++ ++ if (do_mig_dis) ++ migrate_disable(); ++ ++ if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) ++ rt_mutex_deadlock_account_lock(lock, current); ++ else ++ slowfn(lock, do_mig_dis); ++} ++ ++static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock, ++ void (*slowfn)(struct rt_mutex *lock)) ++{ ++ if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) ++ rt_mutex_deadlock_account_unlock(current); ++ else ++ slowfn(lock); ++} ++#ifdef CONFIG_SMP ++/* ++ * Note that owner is a speculative pointer and dereferencing relies ++ * on rcu_read_lock() and the check against the lock owner. ++ */ ++static int adaptive_wait(struct rt_mutex *lock, ++ struct task_struct *owner) ++{ ++ int res = 0; ++ ++ rcu_read_lock(); ++ for (;;) { ++ if (owner != rt_mutex_owner(lock)) ++ break; ++ /* ++ * Ensure that owner->on_cpu is dereferenced _after_ ++ * checking the above to be valid. ++ */ ++ barrier(); ++ if (!owner->on_cpu) { ++ res = 1; ++ break; ++ } ++ cpu_relax(); ++ } ++ rcu_read_unlock(); ++ return res; ++} ++#else ++static int adaptive_wait(struct rt_mutex *lock, ++ struct task_struct *orig_owner) ++{ ++ return 1; ++} ++#endif ++ ++static int task_blocks_on_rt_mutex(struct rt_mutex *lock, ++ struct rt_mutex_waiter *waiter, ++ struct task_struct *task, ++ enum rtmutex_chainwalk chwalk); ++/* ++ * Slow path lock function spin_lock style: this variant is very ++ * careful not to miss any non-lock wakeups. ++ * ++ * We store the current state under p->pi_lock in p->saved_state and ++ * the try_to_wake_up() code handles this accordingly. ++ */ ++static void noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock, ++ bool mg_off) ++{ ++ struct task_struct *lock_owner, *self = current; ++ struct rt_mutex_waiter waiter, *top_waiter; ++ unsigned long flags; ++ int ret; ++ ++ rt_mutex_init_waiter(&waiter, true); ++ ++ raw_spin_lock_irqsave(&lock->wait_lock, flags); ++ ++ if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) { ++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); ++ return; ++ } ++ ++ BUG_ON(rt_mutex_owner(lock) == self); ++ ++ /* ++ * We save whatever state the task is in and we'll restore it ++ * after acquiring the lock taking real wakeups into account ++ * as well. We are serialized via pi_lock against wakeups. See ++ * try_to_wake_up(). ++ */ ++ raw_spin_lock(&self->pi_lock); ++ self->saved_state = self->state; ++ __set_current_state_no_track(TASK_UNINTERRUPTIBLE); ++ raw_spin_unlock(&self->pi_lock); ++ ++ ret = task_blocks_on_rt_mutex(lock, &waiter, self, RT_MUTEX_MIN_CHAINWALK); ++ BUG_ON(ret); ++ ++ for (;;) { ++ /* Try to acquire the lock again. 
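/*
 * [Editor's aside -- not part of the patch] The fastpath/slowpath split
 * used by rt_spin_lock_fastlock()/rt_spin_lock_fastunlock() above,
 * sketched with C11 atomics: the owner field goes 0 -> self with one
 * compare-and-swap, and only a failed swap falls into the slow function.
 * The spinning demo_rtlock_slow() merely stands in for the real
 * enqueue-and-sleep path; demo_* names are illustrative assumptions.
 */
#include <stdatomic.h>

struct demo_rtlock {
    _Atomic(unsigned long) owner;    /* 0 == unowned */
};

static void demo_rtlock_slow(struct demo_rtlock *l, unsigned long self)
{
    unsigned long expected = 0;

    while (!atomic_compare_exchange_weak(&l->owner, &expected, self))
        expected = 0;    /* real code would queue and sleep here */
}

static void demo_rtlock_lock(struct demo_rtlock *l, unsigned long self)
{
    unsigned long expected = 0;

    /* Uncontended case: one cmpxchg, no wait_lock, no queueing. */
    if (atomic_compare_exchange_strong(&l->owner, &expected, self))
        return;
    demo_rtlock_slow(l, self);
}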
*/ ++ if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL)) ++ break; ++ ++ top_waiter = rt_mutex_top_waiter(lock); ++ lock_owner = rt_mutex_owner(lock); ++ ++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); ++ ++ debug_rt_mutex_print_deadlock(&waiter); ++ ++ if (top_waiter != &waiter || adaptive_wait(lock, lock_owner)) { ++ if (mg_off) ++ migrate_enable(); ++ schedule(); ++ if (mg_off) ++ migrate_disable(); ++ } ++ ++ raw_spin_lock_irqsave(&lock->wait_lock, flags); ++ ++ raw_spin_lock(&self->pi_lock); ++ __set_current_state_no_track(TASK_UNINTERRUPTIBLE); ++ raw_spin_unlock(&self->pi_lock); ++ } ++ ++ /* ++ * Restore the task state to current->saved_state. We set it ++ * to the original state above and the try_to_wake_up() code ++ * has possibly updated it when a real (non-rtmutex) wakeup ++ * happened while we were blocked. Clear saved_state so ++ * try_to_wakeup() does not get confused. ++ */ ++ raw_spin_lock(&self->pi_lock); ++ __set_current_state_no_track(self->saved_state); ++ self->saved_state = TASK_RUNNING; ++ raw_spin_unlock(&self->pi_lock); ++ ++ /* ++ * try_to_take_rt_mutex() sets the waiter bit ++ * unconditionally. We might have to fix that up: ++ */ ++ fixup_rt_mutex_waiters(lock); ++ ++ BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock)); ++ BUG_ON(!RB_EMPTY_NODE(&waiter.tree_entry)); ++ ++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); ++ ++ debug_rt_mutex_free_waiter(&waiter); ++} ++ ++static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, ++ struct wake_q_head *wake_sleeper_q, ++ struct rt_mutex *lock); ++/* ++ * Slow path to release a rt_mutex spin_lock style ++ */ ++static void noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock) ++{ ++ unsigned long flags; ++ WAKE_Q(wake_q); ++ WAKE_Q(wake_sleeper_q); ++ ++ raw_spin_lock_irqsave(&lock->wait_lock, flags); ++ ++ debug_rt_mutex_unlock(lock); ++ ++ rt_mutex_deadlock_account_unlock(current); ++ ++ if (!rt_mutex_has_waiters(lock)) { ++ lock->owner = NULL; ++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); ++ return; ++ } ++ ++ mark_wakeup_next_waiter(&wake_q, &wake_sleeper_q, lock); ++ ++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); ++ wake_up_q(&wake_q); ++ wake_up_q_sleeper(&wake_sleeper_q); ++ ++ /* Undo pi boosting.when necessary */ ++ rt_mutex_adjust_prio(current); ++} ++ ++void __lockfunc rt_spin_lock__no_mg(spinlock_t *lock) ++{ ++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, false); ++ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++} ++EXPORT_SYMBOL(rt_spin_lock__no_mg); ++ ++void __lockfunc rt_spin_lock(spinlock_t *lock) ++{ ++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true); ++ spin_acquire(&lock->dep_map, 0, 0, _RET_IP_); ++} ++EXPORT_SYMBOL(rt_spin_lock); ++ ++void __lockfunc __rt_spin_lock(struct rt_mutex *lock) ++{ ++ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, true); ++} ++EXPORT_SYMBOL(__rt_spin_lock); ++ ++void __lockfunc __rt_spin_lock__no_mg(struct rt_mutex *lock) ++{ ++ rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock, false); ++} ++EXPORT_SYMBOL(__rt_spin_lock__no_mg); ++ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass) ++{ ++ spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_); ++ rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock, true); ++} ++EXPORT_SYMBOL(rt_spin_lock_nested); ++#endif ++ ++void __lockfunc rt_spin_unlock__no_mg(spinlock_t *lock) ++{ ++ /* NOTE: we always pass in '1' for nested, for simplicity */ ++ 
spin_release(&lock->dep_map, 1, _RET_IP_); ++ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock); ++} ++EXPORT_SYMBOL(rt_spin_unlock__no_mg); ++ ++void __lockfunc rt_spin_unlock(spinlock_t *lock) ++{ ++ /* NOTE: we always pass in '1' for nested, for simplicity */ ++ spin_release(&lock->dep_map, 1, _RET_IP_); ++ rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock); ++ migrate_enable(); ++} ++EXPORT_SYMBOL(rt_spin_unlock); ++ ++void __lockfunc __rt_spin_unlock(struct rt_mutex *lock) ++{ ++ rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock); ++} ++EXPORT_SYMBOL(__rt_spin_unlock); ++ ++/* ++ * Wait for the lock to get unlocked: instead of polling for an unlock ++ * (like raw spinlocks do), we lock and unlock, to force the kernel to ++ * schedule if there's contention: ++ */ ++void __lockfunc rt_spin_unlock_wait(spinlock_t *lock) ++{ ++ spin_lock(lock); ++ spin_unlock(lock); ++} ++EXPORT_SYMBOL(rt_spin_unlock_wait); ++ ++int __lockfunc __rt_spin_trylock(struct rt_mutex *lock) ++{ ++ return rt_mutex_trylock(lock); ++} ++ ++int __lockfunc rt_spin_trylock__no_mg(spinlock_t *lock) ++{ ++ int ret; ++ ++ ret = rt_mutex_trylock(&lock->lock); ++ if (ret) ++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ return ret; ++} ++EXPORT_SYMBOL(rt_spin_trylock__no_mg); ++ ++int __lockfunc rt_spin_trylock(spinlock_t *lock) ++{ ++ int ret; ++ ++ migrate_disable(); ++ ret = rt_mutex_trylock(&lock->lock); ++ if (ret) ++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ else ++ migrate_enable(); ++ return ret; ++} ++EXPORT_SYMBOL(rt_spin_trylock); ++ ++int __lockfunc rt_spin_trylock_bh(spinlock_t *lock) ++{ ++ int ret; ++ ++ local_bh_disable(); ++ ret = rt_mutex_trylock(&lock->lock); ++ if (ret) { ++ migrate_disable(); ++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ } else ++ local_bh_enable(); ++ return ret; ++} ++EXPORT_SYMBOL(rt_spin_trylock_bh); ++ ++int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags) ++{ ++ int ret; ++ ++ *flags = 0; ++ ret = rt_mutex_trylock(&lock->lock); ++ if (ret) { ++ migrate_disable(); ++ spin_acquire(&lock->dep_map, 0, 1, _RET_IP_); ++ } ++ return ret; ++} ++EXPORT_SYMBOL(rt_spin_trylock_irqsave); ++ ++int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock) ++{ ++ /* Subtract 1 from counter unless that drops it to 0 (ie. 
it was 1) */ ++ if (atomic_add_unless(atomic, -1, 1)) ++ return 0; ++ rt_spin_lock(lock); ++ if (atomic_dec_and_test(atomic)) ++ return 1; ++ rt_spin_unlock(lock); ++ return 0; ++} ++EXPORT_SYMBOL(atomic_dec_and_spin_lock); ++ ++ void ++__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key) ++{ ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++ /* ++ * Make sure we are not reinitializing a held lock: ++ */ ++ debug_check_no_locks_freed((void *)lock, sizeof(*lock)); ++ lockdep_init_map(&lock->dep_map, name, key, 0); ++#endif ++} ++EXPORT_SYMBOL(__rt_spin_lock_init); ++ ++#endif /* PREEMPT_RT_FULL */ ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ static inline int __sched ++__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) ++{ ++ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock); ++ struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx); ++ ++ if (!hold_ctx) ++ return 0; ++ ++ if (unlikely(ctx == hold_ctx)) ++ return -EALREADY; ++ ++ if (ctx->stamp - hold_ctx->stamp <= LONG_MAX && ++ (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) { ++#ifdef CONFIG_DEBUG_MUTEXES ++ DEBUG_LOCKS_WARN_ON(ctx->contending_lock); ++ ctx->contending_lock = ww; ++#endif ++ return -EDEADLK; ++ } ++ ++ return 0; ++} ++#else ++ static inline int __sched ++__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx) ++{ ++ BUG(); ++ return 0; ++} ++ ++#endif ++ ++static inline int ++try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task, ++ struct rt_mutex_waiter *waiter) ++{ ++ return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL); ++} ++ + /* + * Task blocks on lock. + * + * Prepare waiter and propagate pi chain + * +- * This must be called with lock->wait_lock held. ++ * This must be called with lock->wait_lock held and interrupts disabled + */ + static int task_blocks_on_rt_mutex(struct rt_mutex *lock, + struct rt_mutex_waiter *waiter, +@@ -894,7 +1328,6 @@ + struct rt_mutex_waiter *top_waiter = waiter; + struct rt_mutex *next_lock; + int chain_walk = 0, res; +- unsigned long flags; + + /* + * Early deadlock detection. We really don't want the task to +@@ -908,7 +1341,24 @@ + if (owner == task) + return -EDEADLK; + +- raw_spin_lock_irqsave(&task->pi_lock, flags); ++ raw_spin_lock(&task->pi_lock); ++ ++ /* ++ * In the case of futex requeue PI, this will be a proxy ++ * lock. The task will wake unaware that it is enqueueed on ++ * this lock. Avoid blocking on two locks and corrupting ++ * pi_blocked_on via the PI_WAKEUP_INPROGRESS ++ * flag. futex_wait_requeue_pi() sets this when it wakes up ++ * before requeue (due to a signal or timeout). Do not enqueue ++ * the task if PI_WAKEUP_INPROGRESS is set. 
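/*
 * [Editor's aside -- not part of the patch] The wrap-safe age comparison
 * at the heart of __mutex_lock_check_stamp() above: ww_acquire stamps come
 * from a free-running counter, so "A is younger than B" is decided from
 * the unsigned difference. The pointer tie-break for equal stamps is
 * elided; demo_* names are illustrative assumptions.
 */
#include <limits.h>

/* True iff stamp a was handed out after stamp b (modulo wrap-around). */
static int demo_stamp_is_younger(unsigned long a, unsigned long b)
{
    return a != b && a - b <= LONG_MAX;
}
/* A younger contender backs off with -EDEADLK; an older one may wait. */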
++ */ ++ if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) { ++ raw_spin_unlock(&task->pi_lock); ++ return -EAGAIN; ++ } ++ ++ BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on)); ++ + __rt_mutex_adjust_prio(task); + waiter->task = task; + waiter->lock = lock; +@@ -921,18 +1371,18 @@ + + task->pi_blocked_on = waiter; + +- raw_spin_unlock_irqrestore(&task->pi_lock, flags); ++ raw_spin_unlock(&task->pi_lock); + + if (!owner) + return 0; + +- raw_spin_lock_irqsave(&owner->pi_lock, flags); ++ raw_spin_lock(&owner->pi_lock); + if (waiter == rt_mutex_top_waiter(lock)) { + rt_mutex_dequeue_pi(owner, top_waiter); + rt_mutex_enqueue_pi(owner, waiter); + + __rt_mutex_adjust_prio(owner); +- if (owner->pi_blocked_on) ++ if (rt_mutex_real_waiter(owner->pi_blocked_on)) + chain_walk = 1; + } else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) { + chain_walk = 1; +@@ -941,7 +1391,7 @@ + /* Store the lock on which owner is blocked or NULL */ + next_lock = task_blocked_on_lock(owner); + +- raw_spin_unlock_irqrestore(&owner->pi_lock, flags); ++ raw_spin_unlock(&owner->pi_lock); + /* + * Even if full deadlock detection is on, if the owner is not + * blocked itself, we can avoid finding this out in the chain +@@ -957,12 +1407,12 @@ + */ + get_task_struct(owner); + +- raw_spin_unlock(&lock->wait_lock); ++ raw_spin_unlock_irq(&lock->wait_lock); + + res = rt_mutex_adjust_prio_chain(owner, chwalk, lock, + next_lock, waiter, task); + +- raw_spin_lock(&lock->wait_lock); ++ raw_spin_lock_irq(&lock->wait_lock); + + return res; + } +@@ -971,15 +1421,15 @@ + * Remove the top waiter from the current tasks pi waiter tree and + * queue it up. + * +- * Called with lock->wait_lock held. ++ * Called with lock->wait_lock held and interrupts disabled. + */ + static void mark_wakeup_next_waiter(struct wake_q_head *wake_q, ++ struct wake_q_head *wake_sleeper_q, + struct rt_mutex *lock) + { + struct rt_mutex_waiter *waiter; +- unsigned long flags; + +- raw_spin_lock_irqsave(¤t->pi_lock, flags); ++ raw_spin_lock(¤t->pi_lock); + + waiter = rt_mutex_top_waiter(lock); + +@@ -1001,15 +1451,18 @@ + */ + lock->owner = (void *) RT_MUTEX_HAS_WAITERS; + +- raw_spin_unlock_irqrestore(¤t->pi_lock, flags); ++ raw_spin_unlock(¤t->pi_lock); + +- wake_q_add(wake_q, waiter->task); ++ if (waiter->savestate) ++ wake_q_add(wake_sleeper_q, waiter->task); ++ else ++ wake_q_add(wake_q, waiter->task); + } + + /* + * Remove a waiter from a lock and give up + * +- * Must be called with lock->wait_lock held and ++ * Must be called with lock->wait_lock held and interrupts disabled. I must + * have just failed to try_to_take_rt_mutex(). 
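/*
 * [Editor's aside -- not part of the patch] The wake-queue split made in
 * mark_wakeup_next_waiter() above, reduced to a toy: spinlock-style
 * waiters (savestate set) are collected on a separate sleeper queue so
 * they can later receive the saved_state-aware wakeup, while mutex-style
 * waiters take the regular queue. demo_* names are illustrative
 * assumptions.
 */
#include <stddef.h>

struct demo_waiter {
    struct demo_waiter *next;
    int savestate;    /* nonzero for sleeping-spinlock waiters */
};

struct demo_wake_q {
    struct demo_waiter *head;
};

static void demo_queue_wakeup(struct demo_wake_q *wake_q,
                              struct demo_wake_q *wake_sleeper_q,
                              struct demo_waiter *w)
{
    struct demo_wake_q *q = w->savestate ? wake_sleeper_q : wake_q;

    /* Defer the actual wakeup until after the wait_lock is dropped. */
    w->next = q->head;
    q->head = w;
}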
+ */ + static void remove_waiter(struct rt_mutex *lock, +@@ -1017,13 +1470,12 @@ + { + bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock)); + struct task_struct *owner = rt_mutex_owner(lock); +- struct rt_mutex *next_lock; +- unsigned long flags; ++ struct rt_mutex *next_lock = NULL; + +- raw_spin_lock_irqsave(¤t->pi_lock, flags); ++ raw_spin_lock(¤t->pi_lock); + rt_mutex_dequeue(lock, waiter); + current->pi_blocked_on = NULL; +- raw_spin_unlock_irqrestore(¤t->pi_lock, flags); ++ raw_spin_unlock(¤t->pi_lock); + + /* + * Only update priority if the waiter was the highest priority +@@ -1032,7 +1484,7 @@ + if (!owner || !is_top_waiter) + return; + +- raw_spin_lock_irqsave(&owner->pi_lock, flags); ++ raw_spin_lock(&owner->pi_lock); + + rt_mutex_dequeue_pi(owner, waiter); + +@@ -1042,9 +1494,10 @@ + __rt_mutex_adjust_prio(owner); + + /* Store the lock on which owner is blocked or NULL */ +- next_lock = task_blocked_on_lock(owner); ++ if (rt_mutex_real_waiter(owner->pi_blocked_on)) ++ next_lock = task_blocked_on_lock(owner); + +- raw_spin_unlock_irqrestore(&owner->pi_lock, flags); ++ raw_spin_unlock(&owner->pi_lock); + + /* + * Don't walk the chain, if the owner task is not blocked +@@ -1056,12 +1509,12 @@ + /* gets dropped in rt_mutex_adjust_prio_chain()! */ + get_task_struct(owner); + +- raw_spin_unlock(&lock->wait_lock); ++ raw_spin_unlock_irq(&lock->wait_lock); + + rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock, + next_lock, NULL, current); + +- raw_spin_lock(&lock->wait_lock); ++ raw_spin_lock_irq(&lock->wait_lock); + } + + /* +@@ -1078,17 +1531,17 @@ + raw_spin_lock_irqsave(&task->pi_lock, flags); + + waiter = task->pi_blocked_on; +- if (!waiter || (waiter->prio == task->prio && ++ if (!rt_mutex_real_waiter(waiter) || (waiter->prio == task->prio && + !dl_prio(task->prio))) { + raw_spin_unlock_irqrestore(&task->pi_lock, flags); + return; + } + next_lock = waiter->lock; +- raw_spin_unlock_irqrestore(&task->pi_lock, flags); + + /* gets dropped in rt_mutex_adjust_prio_chain()! */ + get_task_struct(task); + ++ raw_spin_unlock_irqrestore(&task->pi_lock, flags); + rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL, + next_lock, NULL, task); + } +@@ -1097,16 +1550,17 @@ + * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop + * @lock: the rt_mutex to take + * @state: the state the task should block in (TASK_INTERRUPTIBLE +- * or TASK_UNINTERRUPTIBLE) ++ * or TASK_UNINTERRUPTIBLE) + * @timeout: the pre-initialized and started timer, or NULL for none + * @waiter: the pre-initialized rt_mutex_waiter + * +- * lock->wait_lock must be held by the caller. 
++ * Must be called with lock->wait_lock held and interrupts disabled + */ + static int __sched + __rt_mutex_slowlock(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, +- struct rt_mutex_waiter *waiter) ++ struct rt_mutex_waiter *waiter, ++ struct ww_acquire_ctx *ww_ctx) + { + int ret = 0; + +@@ -1129,13 +1583,19 @@ + break; + } + +- raw_spin_unlock(&lock->wait_lock); ++ if (ww_ctx && ww_ctx->acquired > 0) { ++ ret = __mutex_lock_check_stamp(lock, ww_ctx); ++ if (ret) ++ break; ++ } ++ ++ raw_spin_unlock_irq(&lock->wait_lock); + + debug_rt_mutex_print_deadlock(waiter); + + schedule(); + +- raw_spin_lock(&lock->wait_lock); ++ raw_spin_lock_irq(&lock->wait_lock); + set_current_state(state); + } + +@@ -1163,26 +1623,112 @@ + } + } + ++static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww, ++ struct ww_acquire_ctx *ww_ctx) ++{ ++#ifdef CONFIG_DEBUG_MUTEXES ++ /* ++ * If this WARN_ON triggers, you used ww_mutex_lock to acquire, ++ * but released with a normal mutex_unlock in this call. ++ * ++ * This should never happen, always use ww_mutex_unlock. ++ */ ++ DEBUG_LOCKS_WARN_ON(ww->ctx); ++ ++ /* ++ * Not quite done after calling ww_acquire_done() ? ++ */ ++ DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire); ++ ++ if (ww_ctx->contending_lock) { ++ /* ++ * After -EDEADLK you tried to ++ * acquire a different ww_mutex? Bad! ++ */ ++ DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww); ++ ++ /* ++ * You called ww_mutex_lock after receiving -EDEADLK, ++ * but 'forgot' to unlock everything else first? ++ */ ++ DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0); ++ ww_ctx->contending_lock = NULL; ++ } ++ ++ /* ++ * Naughty, using a different class will lead to undefined behavior! ++ */ ++ DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class); ++#endif ++ ww_ctx->acquired++; ++} ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++static void ww_mutex_account_lock(struct rt_mutex *lock, ++ struct ww_acquire_ctx *ww_ctx) ++{ ++ struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock); ++ struct rt_mutex_waiter *waiter, *n; ++ ++ /* ++ * This branch gets optimized out for the common case, ++ * and is only important for ww_mutex_lock. ++ */ ++ ww_mutex_lock_acquired(ww, ww_ctx); ++ ww->ctx = ww_ctx; ++ ++ /* ++ * Give any possible sleeping processes the chance to wake up, ++ * so they can recheck if they have to back off. ++ */ ++ rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters, ++ tree_entry) { ++ /* XXX debug rt mutex waiter wakeup */ ++ ++ BUG_ON(waiter->lock != lock); ++ rt_mutex_wake_waiter(waiter); ++ } ++} ++ ++#else ++ ++static void ww_mutex_account_lock(struct rt_mutex *lock, ++ struct ww_acquire_ctx *ww_ctx) ++{ ++ BUG(); ++} ++#endif ++ + /* + * Slow path lock function: + */ + static int __sched + rt_mutex_slowlock(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, +- enum rtmutex_chainwalk chwalk) ++ enum rtmutex_chainwalk chwalk, ++ struct ww_acquire_ctx *ww_ctx) + { + struct rt_mutex_waiter waiter; ++ unsigned long flags; + int ret = 0; + +- debug_rt_mutex_init_waiter(&waiter); +- RB_CLEAR_NODE(&waiter.pi_tree_entry); +- RB_CLEAR_NODE(&waiter.tree_entry); ++ rt_mutex_init_waiter(&waiter, false); + +- raw_spin_lock(&lock->wait_lock); ++ /* ++ * Technically we could use raw_spin_[un]lock_irq() here, but this can ++ * be called in early boot if the cmpxchg() fast path is disabled ++ * (debug, no architecture support). In this case we will acquire the ++ * rtmutex with lock->wait_lock held. 
But we cannot unconditionally ++ * enable interrupts in that early boot case. So we need to use the ++ * irqsave/restore variants. ++ */ ++ raw_spin_lock_irqsave(&lock->wait_lock, flags); + + /* Try to acquire the lock again: */ + if (try_to_take_rt_mutex(lock, current, NULL)) { +- raw_spin_unlock(&lock->wait_lock); ++ if (ww_ctx) ++ ww_mutex_account_lock(lock, ww_ctx); ++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + return 0; + } + +@@ -1196,13 +1742,23 @@ + + if (likely(!ret)) + /* sleep on the mutex */ +- ret = __rt_mutex_slowlock(lock, state, timeout, &waiter); ++ ret = __rt_mutex_slowlock(lock, state, timeout, &waiter, ++ ww_ctx); ++ else if (ww_ctx) { ++ /* ww_mutex received EDEADLK, let it become EALREADY */ ++ ret = __mutex_lock_check_stamp(lock, ww_ctx); ++ BUG_ON(!ret); ++ } + + if (unlikely(ret)) { + __set_current_state(TASK_RUNNING); + if (rt_mutex_has_waiters(lock)) + remove_waiter(lock, &waiter); +- rt_mutex_handle_deadlock(ret, chwalk, &waiter); ++ /* ww_mutex want to report EDEADLK/EALREADY, let them */ ++ if (!ww_ctx) ++ rt_mutex_handle_deadlock(ret, chwalk, &waiter); ++ } else if (ww_ctx) { ++ ww_mutex_account_lock(lock, ww_ctx); + } + + /* +@@ -1211,7 +1767,7 @@ + */ + fixup_rt_mutex_waiters(lock); + +- raw_spin_unlock(&lock->wait_lock); ++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + + /* Remove pending timer: */ + if (unlikely(timeout)) +@@ -1227,6 +1783,7 @@ + */ + static inline int rt_mutex_slowtrylock(struct rt_mutex *lock) + { ++ unsigned long flags; + int ret; + + /* +@@ -1238,10 +1795,10 @@ + return 0; + + /* +- * The mutex has currently no owner. Lock the wait lock and +- * try to acquire the lock. ++ * The mutex has currently no owner. Lock the wait lock and try to ++ * acquire the lock. We use irqsave here to support early boot calls. + */ +- raw_spin_lock(&lock->wait_lock); ++ raw_spin_lock_irqsave(&lock->wait_lock, flags); + + ret = try_to_take_rt_mutex(lock, current, NULL); + +@@ -1251,7 +1808,7 @@ + */ + fixup_rt_mutex_waiters(lock); + +- raw_spin_unlock(&lock->wait_lock); ++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + + return ret; + } +@@ -1261,9 +1818,13 @@ + * Return whether the current task needs to undo a potential priority boosting. + */ + static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock, +- struct wake_q_head *wake_q) ++ struct wake_q_head *wake_q, ++ struct wake_q_head *wake_sleeper_q) + { +- raw_spin_lock(&lock->wait_lock); ++ unsigned long flags; ++ ++ /* irqsave required to support early boot calls */ ++ raw_spin_lock_irqsave(&lock->wait_lock, flags); + + debug_rt_mutex_unlock(lock); + +@@ -1302,10 +1863,10 @@ + */ + while (!rt_mutex_has_waiters(lock)) { + /* Drops lock->wait_lock ! */ +- if (unlock_rt_mutex_safe(lock) == true) ++ if (unlock_rt_mutex_safe(lock, flags) == true) + return false; + /* Relock the rtmutex and try again */ +- raw_spin_lock(&lock->wait_lock); ++ raw_spin_lock_irqsave(&lock->wait_lock, flags); + } + + /* +@@ -1314,9 +1875,9 @@ + * + * Queue the next waiter for wakeup once we release the wait_lock. 
+ */ +- mark_wakeup_next_waiter(wake_q, lock); ++ mark_wakeup_next_waiter(wake_q, wake_sleeper_q, lock); + +- raw_spin_unlock(&lock->wait_lock); ++ raw_spin_unlock_irqrestore(&lock->wait_lock, flags); + + /* check PI boosting */ + return true; +@@ -1330,31 +1891,36 @@ + */ + static inline int + rt_mutex_fastlock(struct rt_mutex *lock, int state, ++ struct ww_acquire_ctx *ww_ctx, + int (*slowfn)(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, +- enum rtmutex_chainwalk chwalk)) ++ enum rtmutex_chainwalk chwalk, ++ struct ww_acquire_ctx *ww_ctx)) + { + if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { + rt_mutex_deadlock_account_lock(lock, current); + return 0; + } else +- return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK); ++ return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK, ++ ww_ctx); + } + + static inline int + rt_mutex_timed_fastlock(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, + enum rtmutex_chainwalk chwalk, ++ struct ww_acquire_ctx *ww_ctx, + int (*slowfn)(struct rt_mutex *lock, int state, + struct hrtimer_sleeper *timeout, +- enum rtmutex_chainwalk chwalk)) ++ enum rtmutex_chainwalk chwalk, ++ struct ww_acquire_ctx *ww_ctx)) + { + if (chwalk == RT_MUTEX_MIN_CHAINWALK && + likely(rt_mutex_cmpxchg_acquire(lock, NULL, current))) { + rt_mutex_deadlock_account_lock(lock, current); + return 0; + } else +- return slowfn(lock, state, timeout, chwalk); ++ return slowfn(lock, state, timeout, chwalk, ww_ctx); + } + + static inline int +@@ -1371,17 +1937,20 @@ + static inline void + rt_mutex_fastunlock(struct rt_mutex *lock, + bool (*slowfn)(struct rt_mutex *lock, +- struct wake_q_head *wqh)) ++ struct wake_q_head *wqh, ++ struct wake_q_head *wq_sleeper)) + { + WAKE_Q(wake_q); ++ WAKE_Q(wake_sleeper_q); + + if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) { + rt_mutex_deadlock_account_unlock(current); + + } else { +- bool deboost = slowfn(lock, &wake_q); ++ bool deboost = slowfn(lock, &wake_q, &wake_sleeper_q); + + wake_up_q(&wake_q); ++ wake_up_q_sleeper(&wake_sleeper_q); + + /* Undo pi boosting if necessary: */ + if (deboost) +@@ -1398,7 +1967,7 @@ + { + might_sleep(); + +- rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock); ++ rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, NULL, rt_mutex_slowlock); + } + EXPORT_SYMBOL_GPL(rt_mutex_lock); + +@@ -1415,7 +1984,7 @@ + { + might_sleep(); + +- return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock); ++ return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, NULL, rt_mutex_slowlock); + } + EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible); + +@@ -1428,11 +1997,30 @@ + might_sleep(); + + return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout, +- RT_MUTEX_FULL_CHAINWALK, ++ RT_MUTEX_FULL_CHAINWALK, NULL, + rt_mutex_slowlock); + } + + /** ++ * rt_mutex_lock_killable - lock a rt_mutex killable ++ * ++ * @lock: the rt_mutex to be locked ++ * @detect_deadlock: deadlock detection on/off ++ * ++ * Returns: ++ * 0 on success ++ * -EINTR when interrupted by a signal ++ * -EDEADLK when the lock would deadlock (when deadlock detection is on) ++ */ ++int __sched rt_mutex_lock_killable(struct rt_mutex *lock) ++{ ++ might_sleep(); ++ ++ return rt_mutex_fastlock(lock, TASK_KILLABLE, NULL, rt_mutex_slowlock); ++} ++EXPORT_SYMBOL_GPL(rt_mutex_lock_killable); ++ ++/** + * rt_mutex_timed_lock - lock a rt_mutex interruptible + * the timeout structure is provided + * by the caller +@@ -1452,6 +2040,7 @@ + + return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, 
timeout, + RT_MUTEX_MIN_CHAINWALK, ++ NULL, + rt_mutex_slowlock); + } + EXPORT_SYMBOL_GPL(rt_mutex_timed_lock); +@@ -1469,7 +2058,11 @@ + */ + int __sched rt_mutex_trylock(struct rt_mutex *lock) + { ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (WARN_ON_ONCE(in_irq() || in_nmi())) ++#else + if (WARN_ON(in_irq() || in_nmi() || in_serving_softirq())) ++#endif + return 0; + + return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock); +@@ -1495,13 +2088,14 @@ + * required or not. + */ + bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock, +- struct wake_q_head *wqh) ++ struct wake_q_head *wqh, ++ struct wake_q_head *wq_sleeper) + { + if (likely(rt_mutex_cmpxchg_release(lock, current, NULL))) { + rt_mutex_deadlock_account_unlock(current); + return false; + } +- return rt_mutex_slowunlock(lock, wqh); ++ return rt_mutex_slowunlock(lock, wqh, wq_sleeper); + } + + /** +@@ -1534,13 +2128,12 @@ + void __rt_mutex_init(struct rt_mutex *lock, const char *name) + { + lock->owner = NULL; +- raw_spin_lock_init(&lock->wait_lock); + lock->waiters = RB_ROOT; + lock->waiters_leftmost = NULL; + + debug_rt_mutex_init(lock, name); + } +-EXPORT_SYMBOL_GPL(__rt_mutex_init); ++EXPORT_SYMBOL(__rt_mutex_init); + + /** + * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a +@@ -1555,7 +2148,7 @@ + void rt_mutex_init_proxy_locked(struct rt_mutex *lock, + struct task_struct *proxy_owner) + { +- __rt_mutex_init(lock, NULL); ++ rt_mutex_init(lock); + debug_rt_mutex_proxy_lock(lock, proxy_owner); + rt_mutex_set_owner(lock, proxy_owner); + rt_mutex_deadlock_account_lock(lock, proxy_owner); +@@ -1596,13 +2189,42 @@ + { + int ret; + +- raw_spin_lock(&lock->wait_lock); ++ raw_spin_lock_irq(&lock->wait_lock); + + if (try_to_take_rt_mutex(lock, task, NULL)) { +- raw_spin_unlock(&lock->wait_lock); ++ raw_spin_unlock_irq(&lock->wait_lock); + return 1; + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* ++ * In PREEMPT_RT there's an added race. ++ * If the task, that we are about to requeue, times out, ++ * it can set the PI_WAKEUP_INPROGRESS. This tells the requeue ++ * to skip this task. But right after the task sets ++ * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then ++ * block on the spin_lock(&hb->lock), which in RT is an rtmutex. ++ * This will replace the PI_WAKEUP_INPROGRESS with the actual ++ * lock that it blocks on. We *must not* place this task ++ * on this proxy lock in that case. ++ * ++ * To prevent this race, we first take the task's pi_lock ++ * and check if it has updated its pi_blocked_on. If it has, ++ * we assume that it woke up and we return -EAGAIN. ++ * Otherwise, we set the task's pi_blocked_on to ++ * PI_REQUEUE_INPROGRESS, so that if the task is waking up ++ * it will know that we are in the process of requeuing it. 
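/*
 * [Editor's aside -- not part of the patch] The sentinel-pointer trick
 * behind PI_WAKEUP_INPROGRESS / PI_REQUEUE_INPROGRESS in the requeue race
 * above: small non-NULL integer values are stored in the pi_blocked_on
 * pointer so "blocked on a real waiter" can be told apart from "wakeup or
 * requeue in flight" without an extra flag word. demo_* names are
 * illustrative assumptions.
 */
#include <stddef.h>

struct demo_pi_waiter;    /* opaque: only the pointer identity matters */

#define DEMO_WAKEUP_INPROGRESS   ((struct demo_pi_waiter *)1)
#define DEMO_REQUEUE_INPROGRESS  ((struct demo_pi_waiter *)2)

/* Mirrors rt_mutex_real_waiter(): filters out both sentinel values. */
static int demo_real_waiter(struct demo_pi_waiter *w)
{
    return w != NULL && w != DEMO_WAKEUP_INPROGRESS &&
           w != DEMO_REQUEUE_INPROGRESS;
}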
++ */ ++ raw_spin_lock(&task->pi_lock); ++ if (task->pi_blocked_on) { ++ raw_spin_unlock(&task->pi_lock); ++ raw_spin_unlock_irq(&lock->wait_lock); ++ return -EAGAIN; ++ } ++ task->pi_blocked_on = PI_REQUEUE_INPROGRESS; ++ raw_spin_unlock(&task->pi_lock); ++#endif ++ + /* We enforce deadlock detection for futexes */ + ret = task_blocks_on_rt_mutex(lock, waiter, task, + RT_MUTEX_FULL_CHAINWALK); +@@ -1617,10 +2239,10 @@ + ret = 0; + } + +- if (unlikely(ret)) ++ if (ret && rt_mutex_has_waiters(lock)) + remove_waiter(lock, waiter); + +- raw_spin_unlock(&lock->wait_lock); ++ raw_spin_unlock_irq(&lock->wait_lock); + + debug_rt_mutex_print_deadlock(waiter); + +@@ -1668,12 +2290,12 @@ + { + int ret; + +- raw_spin_lock(&lock->wait_lock); ++ raw_spin_lock_irq(&lock->wait_lock); + + set_current_state(TASK_INTERRUPTIBLE); + + /* sleep on the mutex */ +- ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter); ++ ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL); + + if (unlikely(ret)) + remove_waiter(lock, waiter); +@@ -1684,7 +2306,93 @@ + */ + fixup_rt_mutex_waiters(lock); + +- raw_spin_unlock(&lock->wait_lock); ++ raw_spin_unlock_irq(&lock->wait_lock); + + return ret; + } ++ ++static inline int ++ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx) ++{ ++#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH ++ unsigned tmp; ++ ++ if (ctx->deadlock_inject_countdown-- == 0) { ++ tmp = ctx->deadlock_inject_interval; ++ if (tmp > UINT_MAX/4) ++ tmp = UINT_MAX; ++ else ++ tmp = tmp*2 + tmp + tmp/2; ++ ++ ctx->deadlock_inject_interval = tmp; ++ ctx->deadlock_inject_countdown = tmp; ++ ctx->contending_lock = lock; ++ ++ ww_mutex_unlock(lock); ++ ++ return -EDEADLK; ++ } ++#endif ++ ++ return 0; ++} ++ ++#ifdef CONFIG_PREEMPT_RT_FULL ++int __sched ++__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx) ++{ ++ int ret; ++ ++ might_sleep(); ++ ++ mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_); ++ ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx); ++ if (ret) ++ mutex_release(&lock->base.dep_map, 1, _RET_IP_); ++ else if (!ret && ww_ctx->acquired > 1) ++ return ww_mutex_deadlock_injection(lock, ww_ctx); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible); ++ ++int __sched ++__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx) ++{ ++ int ret; ++ ++ might_sleep(); ++ ++ mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_); ++ ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx); ++ if (ret) ++ mutex_release(&lock->base.dep_map, 1, _RET_IP_); ++ else if (!ret && ww_ctx->acquired > 1) ++ return ww_mutex_deadlock_injection(lock, ww_ctx); ++ ++ return ret; ++} ++EXPORT_SYMBOL_GPL(__ww_mutex_lock); ++ ++void __sched ww_mutex_unlock(struct ww_mutex *lock) ++{ ++ int nest = !!lock->ctx; ++ ++ /* ++ * The unlocking fastpath is the 0->1 transition from 'locked' ++ * into 'unlocked' state: ++ */ ++ if (nest) { ++#ifdef CONFIG_DEBUG_MUTEXES ++ DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired); ++#endif ++ if (lock->ctx->acquired > 0) ++ lock->ctx->acquired--; ++ lock->ctx = NULL; ++ } ++ ++ mutex_release(&lock->base.dep_map, nest, _RET_IP_); ++ rt_mutex_unlock(&lock->base.lock); ++} ++EXPORT_SYMBOL(ww_mutex_unlock); ++#endif +diff -Nur linux-4.4.13.orig/kernel/locking/rtmutex_common.h linux-4.4.13/kernel/locking/rtmutex_common.h +--- linux-4.4.13.orig/kernel/locking/rtmutex_common.h 2016-06-08 03:14:51.000000000 +0200 
++++ linux-4.4.13/kernel/locking/rtmutex_common.h 2016-06-22 21:55:08.050523867 +0200 +@@ -27,6 +27,7 @@ + struct rb_node pi_tree_entry; + struct task_struct *task; + struct rt_mutex *lock; ++ bool savestate; + #ifdef CONFIG_DEBUG_RT_MUTEXES + unsigned long ip; + struct pid *deadlock_task_pid; +@@ -97,6 +98,9 @@ + /* + * PI-futex support (proxy locking functions, etc.): + */ ++#define PI_WAKEUP_INPROGRESS ((struct rt_mutex_waiter *) 1) ++#define PI_REQUEUE_INPROGRESS ((struct rt_mutex_waiter *) 2) ++ + extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock); + extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, + struct task_struct *proxy_owner); +@@ -110,7 +114,8 @@ + struct rt_mutex_waiter *waiter); + extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to); + extern bool rt_mutex_futex_unlock(struct rt_mutex *lock, +- struct wake_q_head *wqh); ++ struct wake_q_head *wqh, ++ struct wake_q_head *wq_sleeper); + extern void rt_mutex_adjust_prio(struct task_struct *task); + + #ifdef CONFIG_DEBUG_RT_MUTEXES +@@ -119,4 +124,14 @@ + # include "rtmutex.h" + #endif + ++static inline void ++rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate) ++{ ++ debug_rt_mutex_init_waiter(waiter); ++ waiter->task = NULL; ++ waiter->savestate = savestate; ++ RB_CLEAR_NODE(&waiter->pi_tree_entry); ++ RB_CLEAR_NODE(&waiter->tree_entry); ++} ++ + #endif +diff -Nur linux-4.4.13.orig/kernel/locking/spinlock.c linux-4.4.13/kernel/locking/spinlock.c +--- linux-4.4.13.orig/kernel/locking/spinlock.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/locking/spinlock.c 2016-06-22 21:55:08.050523867 +0200 +@@ -124,8 +124,11 @@ + * __[spin|read|write]_lock_bh() + */ + BUILD_LOCK_OPS(spin, raw_spinlock); ++ ++#ifndef CONFIG_PREEMPT_RT_FULL + BUILD_LOCK_OPS(read, rwlock); + BUILD_LOCK_OPS(write, rwlock); ++#endif + + #endif + +@@ -209,6 +212,8 @@ + EXPORT_SYMBOL(_raw_spin_unlock_bh); + #endif + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + #ifndef CONFIG_INLINE_READ_TRYLOCK + int __lockfunc _raw_read_trylock(rwlock_t *lock) + { +@@ -353,6 +358,8 @@ + EXPORT_SYMBOL(_raw_write_unlock_bh); + #endif + ++#endif /* !PREEMPT_RT_FULL */ ++ + #ifdef CONFIG_DEBUG_LOCK_ALLOC + + void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass) +diff -Nur linux-4.4.13.orig/kernel/locking/spinlock_debug.c linux-4.4.13/kernel/locking/spinlock_debug.c +--- linux-4.4.13.orig/kernel/locking/spinlock_debug.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/locking/spinlock_debug.c 2016-06-22 21:55:08.050523867 +0200 +@@ -31,6 +31,7 @@ + + EXPORT_SYMBOL(__raw_spin_lock_init); + ++#ifndef CONFIG_PREEMPT_RT_FULL + void __rwlock_init(rwlock_t *lock, const char *name, + struct lock_class_key *key) + { +@@ -48,6 +49,7 @@ + } + + EXPORT_SYMBOL(__rwlock_init); ++#endif + + static void spin_dump(raw_spinlock_t *lock, const char *msg) + { +@@ -159,6 +161,7 @@ + arch_spin_unlock(&lock->raw_lock); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + static void rwlock_bug(rwlock_t *lock, const char *msg) + { + if (!debug_locks_off()) +@@ -300,3 +303,5 @@ + debug_write_unlock(lock); + arch_write_unlock(&lock->raw_lock); + } ++ ++#endif +diff -Nur linux-4.4.13.orig/kernel/panic.c linux-4.4.13/kernel/panic.c +--- linux-4.4.13.orig/kernel/panic.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/panic.c 2016-06-22 21:55:08.050523867 +0200 +@@ -61,6 +61,37 @@ + cpu_relax(); + } + ++/* ++ * Stop ourselves in NMI context if another CPU has already panicked. 
Arch code ++ * may override this to prepare for crash dumping, e.g. save regs info. ++ */ ++void __weak nmi_panic_self_stop(struct pt_regs *regs) ++{ ++ panic_smp_self_stop(); ++} ++ ++atomic_t panic_cpu = ATOMIC_INIT(PANIC_CPU_INVALID); ++ ++/* ++ * A variant of panic() called from NMI context. We return if we've already ++ * panicked on this CPU. If another CPU already panicked, loop in ++ * nmi_panic_self_stop() which can provide architecture dependent code such ++ * as saving register state for crash dump. ++ */ ++void nmi_panic(struct pt_regs *regs, const char *msg) ++{ ++ int old_cpu, cpu; ++ ++ cpu = raw_smp_processor_id(); ++ old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, cpu); ++ ++ if (old_cpu == PANIC_CPU_INVALID) ++ panic("%s", msg); ++ else if (old_cpu != cpu) ++ nmi_panic_self_stop(regs); ++} ++EXPORT_SYMBOL(nmi_panic); ++ + /** + * panic - halt the system + * @fmt: The text string to print +@@ -71,17 +102,17 @@ + */ + void panic(const char *fmt, ...) + { +- static DEFINE_SPINLOCK(panic_lock); + static char buf[1024]; + va_list args; + long i, i_next = 0; + int state = 0; ++ int old_cpu, this_cpu; + + /* + * Disable local interrupts. This will prevent panic_smp_self_stop + * from deadlocking the first cpu that invokes the panic, since + * there is nothing to prevent an interrupt handler (that runs +- * after the panic_lock is acquired) from invoking panic again. ++ * after setting panic_cpu) from invoking panic() again. + */ + local_irq_disable(); + +@@ -94,8 +125,16 @@ + * multiple parallel invocations of panic, all other CPUs either + * stop themself or will wait until they are stopped by the 1st CPU + * with smp_send_stop(). ++ * ++ * `old_cpu == PANIC_CPU_INVALID' means this is the 1st CPU which ++ * comes here, so go ahead. ++ * `old_cpu == this_cpu' means we came from nmi_panic() which sets ++ * panic_cpu to this CPU. In this case, this is also the 1st CPU. 
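/*
 * [Editor's aside -- not part of the patch] The panic_cpu election that
 * nmi_panic() above performs, modelled with C11 atomics: the first CPU to
 * swap the invalid id for its own owns the panic, re-entry on the same CPU
 * simply returns, and every other CPU parks itself. demo_* names are
 * illustrative assumptions.
 */
#include <stdatomic.h>

#define DEMO_CPU_INVALID  (-1)

static atomic_int demo_panic_cpu = DEMO_CPU_INVALID;

enum demo_panic_role { DEMO_FIRST, DEMO_REENTRY, DEMO_BYSTANDER };

static enum demo_panic_role demo_panic_enter(int this_cpu)
{
    int old = DEMO_CPU_INVALID;

    if (atomic_compare_exchange_strong(&demo_panic_cpu, &old, this_cpu))
        return DEMO_FIRST;       /* we won: actually run panic() */
    if (old == this_cpu)
        return DEMO_REENTRY;     /* nmi_panic() on the panicking CPU */
    return DEMO_BYSTANDER;       /* spin in (nmi_)panic_self_stop() */
}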
+ */ +- if (!spin_trylock(&panic_lock)) ++ this_cpu = raw_smp_processor_id(); ++ old_cpu = atomic_cmpxchg(&panic_cpu, PANIC_CPU_INVALID, this_cpu); ++ ++ if (old_cpu != PANIC_CPU_INVALID && old_cpu != this_cpu) + panic_smp_self_stop(); + + console_verbose(); +@@ -400,9 +439,11 @@ + + static int init_oops_id(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + if (!oops_id) + get_random_bytes(&oops_id, sizeof(oops_id)); + else ++#endif + oops_id++; + + return 0; +diff -Nur linux-4.4.13.orig/kernel/power/hibernate.c linux-4.4.13/kernel/power/hibernate.c +--- linux-4.4.13.orig/kernel/power/hibernate.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/power/hibernate.c 2016-06-22 21:55:08.054523867 +0200 +@@ -285,6 +285,8 @@ + + local_irq_disable(); + ++ system_state = SYSTEM_SUSPEND; ++ + error = syscore_suspend(); + if (error) { + printk(KERN_ERR "PM: Some system devices failed to power down, " +@@ -314,6 +316,7 @@ + syscore_resume(); + + Enable_irqs: ++ system_state = SYSTEM_RUNNING; + local_irq_enable(); + + Enable_cpus: +@@ -438,6 +441,7 @@ + goto Enable_cpus; + + local_irq_disable(); ++ system_state = SYSTEM_SUSPEND; + + error = syscore_suspend(); + if (error) +@@ -471,6 +475,7 @@ + syscore_resume(); + + Enable_irqs: ++ system_state = SYSTEM_RUNNING; + local_irq_enable(); + + Enable_cpus: +@@ -556,6 +561,7 @@ + goto Enable_cpus; + + local_irq_disable(); ++ system_state = SYSTEM_SUSPEND; + syscore_suspend(); + if (pm_wakeup_pending()) { + error = -EAGAIN; +@@ -568,6 +574,7 @@ + + Power_up: + syscore_resume(); ++ system_state = SYSTEM_RUNNING; + local_irq_enable(); + + Enable_cpus: +@@ -642,6 +649,10 @@ + cpu_relax(); + } + ++#ifndef CONFIG_SUSPEND ++bool pm_in_action; ++#endif ++ + /** + * hibernate - Carry out system hibernation, including saving the image. + */ +@@ -654,6 +665,8 @@ + return -EPERM; + } + ++ pm_in_action = true; ++ + lock_system_sleep(); + /* The snapshot device should not be opened while we're running */ + if (!atomic_add_unless(&snapshot_device_available, -1, 0)) { +@@ -719,6 +732,7 @@ + atomic_inc(&snapshot_device_available); + Unlock: + unlock_system_sleep(); ++ pm_in_action = false; + return error; + } + +diff -Nur linux-4.4.13.orig/kernel/power/suspend.c linux-4.4.13/kernel/power/suspend.c +--- linux-4.4.13.orig/kernel/power/suspend.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/power/suspend.c 2016-06-22 21:55:08.054523867 +0200 +@@ -359,6 +359,8 @@ + arch_suspend_disable_irqs(); + BUG_ON(!irqs_disabled()); + ++ system_state = SYSTEM_SUSPEND; ++ + error = syscore_suspend(); + if (!error) { + *wakeup = pm_wakeup_pending(); +@@ -375,6 +377,8 @@ + syscore_resume(); + } + ++ system_state = SYSTEM_RUNNING; ++ + arch_suspend_enable_irqs(); + BUG_ON(irqs_disabled()); + +@@ -518,6 +522,8 @@ + return error; + } + ++bool pm_in_action; ++ + /** + * pm_suspend - Externally visible function for suspending the system. + * @state: System sleep state to enter. 
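/*
 * [Editor's aside -- not part of the patch] The bracketing pattern the
 * hibernate/suspend hunks above repeat: flag the global system state as
 * "suspending" around the syscore calls, so code such as RT's
 * sleeping-lock checks can recognize this phase, and restore it on every
 * exit path. demo_* names and the stub syscore functions are illustrative
 * assumptions.
 */
enum demo_sys_state { DEMO_RUNNING, DEMO_SUSPEND };

static enum demo_sys_state demo_system_state = DEMO_RUNNING;

static int  demo_syscore_suspend(void) { return 0; }    /* stand-in */
static void demo_syscore_resume(void)  { }              /* stand-in */

static int demo_enter_sleep(void)
{
    int error;

    demo_system_state = DEMO_SUSPEND;    /* entered with irqs disabled */
    error = demo_syscore_suspend();
    if (!error) {
        /* ... the platform enters the sleep state here ... */
        demo_syscore_resume();
    }
    demo_system_state = DEMO_RUNNING;    /* restored on all paths */
    return error;
}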
+@@ -532,6 +538,8 @@ + if (state <= PM_SUSPEND_ON || state >= PM_SUSPEND_MAX) + return -EINVAL; + ++ pm_in_action = true; ++ + error = enter_state(state); + if (error) { + suspend_stats.fail++; +@@ -539,6 +547,7 @@ + } else { + suspend_stats.success++; + } ++ pm_in_action = false; + return error; + } + EXPORT_SYMBOL(pm_suspend); +diff -Nur linux-4.4.13.orig/kernel/printk/printk.c linux-4.4.13/kernel/printk/printk.c +--- linux-4.4.13.orig/kernel/printk/printk.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/printk/printk.c 2016-06-22 21:55:08.054523867 +0200 +@@ -241,6 +241,65 @@ + */ + static DEFINE_RAW_SPINLOCK(logbuf_lock); + ++#ifdef CONFIG_EARLY_PRINTK ++struct console *early_console; ++ ++static void early_vprintk(const char *fmt, va_list ap) ++{ ++ if (early_console) { ++ char buf[512]; ++ int n = vscnprintf(buf, sizeof(buf), fmt, ap); ++ ++ early_console->write(early_console, buf, n); ++ } ++} ++ ++asmlinkage void early_printk(const char *fmt, ...) ++{ ++ va_list ap; ++ ++ va_start(ap, fmt); ++ early_vprintk(fmt, ap); ++ va_end(ap); ++} ++ ++/* ++ * This is independent of any log levels - a global ++ * kill switch that turns off all of printk. ++ * ++ * Used by the NMI watchdog if early-printk is enabled. ++ */ ++static bool __read_mostly printk_killswitch; ++ ++static int __init force_early_printk_setup(char *str) ++{ ++ printk_killswitch = true; ++ return 0; ++} ++early_param("force_early_printk", force_early_printk_setup); ++ ++void printk_kill(void) ++{ ++ printk_killswitch = true; ++} ++ ++#ifdef CONFIG_PRINTK ++static int forced_early_printk(const char *fmt, va_list ap) ++{ ++ if (!printk_killswitch) ++ return 0; ++ early_vprintk(fmt, ap); ++ return 1; ++} ++#endif ++ ++#else ++static inline int forced_early_printk(const char *fmt, va_list ap) ++{ ++ return 0; ++} ++#endif ++ + #ifdef CONFIG_PRINTK + DECLARE_WAIT_QUEUE_HEAD(log_wait); + /* the next printk record to read by syslog(READ) or /proc/kmsg */ +@@ -1203,6 +1262,7 @@ + { + char *text; + int len = 0; ++ int attempts = 0; + + text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL); + if (!text) +@@ -1214,7 +1274,14 @@ + u64 seq; + u32 idx; + enum log_flags prev; +- ++ int num_msg; ++try_again: ++ attempts++; ++ if (attempts > 10) { ++ len = -EBUSY; ++ goto out; ++ } ++ num_msg = 0; + if (clear_seq < log_first_seq) { + /* messages are gone, move to first available one */ + clear_seq = log_first_seq; +@@ -1235,6 +1302,14 @@ + prev = msg->flags; + idx = log_next(idx); + seq++; ++ num_msg++; ++ if (num_msg > 5) { ++ num_msg = 0; ++ raw_spin_unlock_irq(&logbuf_lock); ++ raw_spin_lock_irq(&logbuf_lock); ++ if (clear_seq < log_first_seq) ++ goto try_again; ++ } + } + + /* move first record forward until length fits into the buffer */ +@@ -1248,6 +1323,14 @@ + prev = msg->flags; + idx = log_next(idx); + seq++; ++ num_msg++; ++ if (num_msg > 5) { ++ num_msg = 0; ++ raw_spin_unlock_irq(&logbuf_lock); ++ raw_spin_lock_irq(&logbuf_lock); ++ if (clear_seq < log_first_seq) ++ goto try_again; ++ } + } + + /* last message fitting into this dump */ +@@ -1288,6 +1371,7 @@ + clear_seq = log_next_seq; + clear_idx = log_next_idx; + } ++out: + raw_spin_unlock_irq(&logbuf_lock); + + kfree(text); +@@ -1443,6 +1527,12 @@ + if (!console_drivers) + return; + ++ if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) { ++ if (in_irq() || in_nmi()) ++ return; ++ } ++ ++ migrate_disable(); + for_each_console(con) { + if (exclusive_console && con != exclusive_console) + continue; +@@ -1458,6 +1548,7 @@ + else + con->write(con, text, len); + } ++ 
migrate_enable(); + } + + /* +@@ -1518,6 +1609,15 @@ + static int console_trylock_for_printk(void) + { + unsigned int cpu = smp_processor_id(); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ int lock = !early_boot_irqs_disabled && (preempt_count() == 0) && ++ !irqs_disabled(); ++#else ++ int lock = 1; ++#endif ++ ++ if (!lock) ++ return 0; + + if (!console_trylock()) + return 0; +@@ -1672,6 +1772,13 @@ + /* cpu currently holding logbuf_lock in this function */ + static unsigned int logbuf_cpu = UINT_MAX; + ++ /* ++ * Fall back to early_printk if a debugging subsystem has ++ * killed printk output ++ */ ++ if (unlikely(forced_early_printk(fmt, args))) ++ return 1; ++ + if (level == LOGLEVEL_SCHED) { + level = LOGLEVEL_DEFAULT; + in_sched = true; +@@ -1813,8 +1920,7 @@ + * console_sem which would prevent anyone from printing to + * console + */ +- preempt_disable(); +- ++ migrate_disable(); + /* + * Try to acquire and then immediately release the console + * semaphore. The release will print out buffers and wake up +@@ -1822,7 +1928,7 @@ + */ + if (console_trylock_for_printk()) + console_unlock(); +- preempt_enable(); ++ migrate_enable(); + lockdep_on(); + } + +@@ -1961,26 +2067,6 @@ + + #endif /* CONFIG_PRINTK */ + +-#ifdef CONFIG_EARLY_PRINTK +-struct console *early_console; +- +-asmlinkage __visible void early_printk(const char *fmt, ...) +-{ +- va_list ap; +- char buf[512]; +- int n; +- +- if (!early_console) +- return; +- +- va_start(ap, fmt); +- n = vscnprintf(buf, sizeof(buf), fmt, ap); +- va_end(ap); +- +- early_console->write(early_console, buf, n); +-} +-#endif +- + static int __add_preferred_console(char *name, int idx, char *options, + char *brl_options) + { +@@ -2202,11 +2288,16 @@ + goto out; + + len = cont_print_text(text, size); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ raw_spin_unlock_irqrestore(&logbuf_lock, flags); ++ call_console_drivers(cont.level, NULL, 0, text, len); ++#else + raw_spin_unlock(&logbuf_lock); + stop_critical_timings(); + call_console_drivers(cont.level, NULL, 0, text, len); + start_critical_timings(); + local_irq_restore(flags); ++#endif + return; + out: + raw_spin_unlock_irqrestore(&logbuf_lock, flags); +@@ -2316,13 +2407,17 @@ + console_idx = log_next(console_idx); + console_seq++; + console_prev = msg->flags; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ raw_spin_unlock_irqrestore(&logbuf_lock, flags); ++ call_console_drivers(level, ext_text, ext_len, text, len); ++#else + raw_spin_unlock(&logbuf_lock); + + stop_critical_timings(); /* don't trace print latency */ + call_console_drivers(level, ext_text, ext_len, text, len); + start_critical_timings(); + local_irq_restore(flags); +- ++#endif + if (do_cond_resched) + cond_resched(); + } +@@ -2374,6 +2469,11 @@ + { + struct console *c; + ++ if (IS_ENABLED(CONFIG_PREEMPT_RT_BASE)) { ++ if (in_irq() || in_nmi()) ++ return; ++ } ++ + /* + * console_unblank can no longer be called in interrupt context unless + * oops_in_progress is set to 1.. 
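The printk() rework above follows one pattern: on PREEMPT_RT the console drivers are skipped from hard-IRQ and NMI context, and once printk_kill() (or the force_early_printk boot parameter) has set the killswitch, every message bypasses the logbuf_lock-protected buffer and goes straight to the early console. The standalone C sketch below models only that routing decision; it is illustrative userspace code under stated assumptions (names mirror the patch, but locking and the real log buffer are omitted), not part of the patch itself.

#include <stdarg.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for printk_killswitch; set by printk_kill() or the
 * force_early_printk= boot parameter in the patch above. */
static bool printk_killswitch;

/* Stand-in for early_console->write(): emit bytes synchronously. */
static void early_console_write(const char *buf, int n)
{
	fwrite(buf, 1, (size_t)n, stderr);
}

/* Mirrors forced_early_printk(): returns 1 when the message was
 * diverted to the early console and the buffered path is skipped. */
static int forced_early_printk(const char *fmt, va_list ap)
{
	char buf[512];
	int n;

	if (!printk_killswitch)
		return 0;
	n = vsnprintf(buf, sizeof(buf), fmt, ap);
	if (n > (int)sizeof(buf) - 1)
		n = (int)sizeof(buf) - 1;	/* vscnprintf-like clamp */
	early_console_write(buf, n);
	return 1;
}

/* Simplified vprintk_emit(): try the forced path first, otherwise
 * fall through to the normal buffered path (stdout here). */
static void printk_like(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	if (!forced_early_printk(fmt, ap))
		vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	printk_like("buffered path: %d\n", 1);
	printk_killswitch = true;	/* what printk_kill() does */
	printk_like("early-console path: %d\n", 2);
	return 0;
}

The point of keeping the fallback ahead of the buffered path is, as the patch's own comment suggests, that a watchdog can still produce output when the normal printk machinery can no longer be trusted.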
+diff -Nur linux-4.4.13.orig/kernel/ptrace.c linux-4.4.13/kernel/ptrace.c +--- linux-4.4.13.orig/kernel/ptrace.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/ptrace.c 2016-06-22 21:55:08.054523867 +0200 +@@ -129,7 +129,14 @@ + + spin_lock_irq(&task->sighand->siglock); + if (task_is_traced(task) && !__fatal_signal_pending(task)) { +- task->state = __TASK_TRACED; ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&task->pi_lock, flags); ++ if (task->state & __TASK_TRACED) ++ task->state = __TASK_TRACED; ++ else ++ task->saved_state = __TASK_TRACED; ++ raw_spin_unlock_irqrestore(&task->pi_lock, flags); + ret = true; + } + spin_unlock_irq(&task->sighand->siglock); +diff -Nur linux-4.4.13.orig/kernel/rcu/rcutorture.c linux-4.4.13/kernel/rcu/rcutorture.c +--- linux-4.4.13.orig/kernel/rcu/rcutorture.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/rcu/rcutorture.c 2016-06-22 21:55:08.054523867 +0200 +@@ -390,6 +390,7 @@ + .name = "rcu" + }; + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Definitions for rcu_bh torture testing. + */ +@@ -429,6 +430,12 @@ + .name = "rcu_bh" + }; + ++#else ++static struct rcu_torture_ops rcu_bh_ops = { ++ .ttype = INVALID_RCU_FLAVOR, ++}; ++#endif ++ + /* + * Don't even think about trying any of these in real life!!! + * The names includes "busted", and they really means it! +diff -Nur linux-4.4.13.orig/kernel/rcu/tree.c linux-4.4.13/kernel/rcu/tree.c +--- linux-4.4.13.orig/kernel/rcu/tree.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/rcu/tree.c 2016-06-22 21:55:08.054523867 +0200 +@@ -56,6 +56,11 @@ + #include + #include + #include ++#include ++#include ++#include ++#include ++#include "../time/tick-internal.h" + + #include "tree.h" + #include "rcu.h" +@@ -266,6 +271,19 @@ + } + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static void rcu_preempt_qs(void); ++ ++void rcu_bh_qs(void) ++{ ++ unsigned long flags; ++ ++ /* Callers to this function, rcu_preempt_qs(), must disable irqs. */ ++ local_irq_save(flags); ++ rcu_preempt_qs(); ++ local_irq_restore(flags); ++} ++#else + void rcu_bh_qs(void) + { + if (__this_cpu_read(rcu_bh_data.cpu_no_qs.s)) { +@@ -275,6 +293,7 @@ + __this_cpu_write(rcu_bh_data.cpu_no_qs.b.norm, false); + } + } ++#endif + + static DEFINE_PER_CPU(int, rcu_sched_qs_mask); + +@@ -435,11 +454,13 @@ + /* + * Return the number of RCU BH batches started thus far for debug & stats. + */ ++#ifndef CONFIG_PREEMPT_RT_FULL + unsigned long rcu_batches_started_bh(void) + { + return rcu_bh_state.gpnum; + } + EXPORT_SYMBOL_GPL(rcu_batches_started_bh); ++#endif + + /* + * Return the number of RCU batches completed thus far for debug & stats. +@@ -459,6 +480,7 @@ + } + EXPORT_SYMBOL_GPL(rcu_batches_completed_sched); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Return the number of RCU BH batches completed thus far for debug & stats. + */ +@@ -486,6 +508,13 @@ + } + EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state); + ++#else ++void rcu_force_quiescent_state(void) ++{ ++} ++EXPORT_SYMBOL_GPL(rcu_force_quiescent_state); ++#endif ++ + /* + * Force a quiescent state for RCU-sched. 
+ */ +@@ -536,9 +565,11 @@ + case RCU_FLAVOR: + rsp = rcu_state_p; + break; ++#ifndef CONFIG_PREEMPT_RT_FULL + case RCU_BH_FLAVOR: + rsp = &rcu_bh_state; + break; ++#endif + case RCU_SCHED_FLAVOR: + rsp = &rcu_sched_state; + break; +@@ -1590,7 +1621,6 @@ + int needmore; + struct rcu_data *rdp = this_cpu_ptr(rsp->rda); + +- rcu_nocb_gp_cleanup(rsp, rnp); + rnp->need_future_gp[c & 0x1] = 0; + needmore = rnp->need_future_gp[(c + 1) & 0x1]; + trace_rcu_future_gp(rnp, rdp, c, +@@ -1611,7 +1641,7 @@ + !READ_ONCE(rsp->gp_flags) || + !rsp->gp_kthread) + return; +- wake_up(&rsp->gp_wq); ++ swake_up(&rsp->gp_wq); + } + + /* +@@ -1991,6 +2021,7 @@ + int nocb = 0; + struct rcu_data *rdp; + struct rcu_node *rnp = rcu_get_root(rsp); ++ struct swait_queue_head *sq; + + WRITE_ONCE(rsp->gp_activity, jiffies); + raw_spin_lock_irq(&rnp->lock); +@@ -2029,7 +2060,9 @@ + needgp = __note_gp_changes(rsp, rnp, rdp) || needgp; + /* smp_mb() provided by prior unlock-lock pair. */ + nocb += rcu_future_gp_cleanup(rsp, rnp); ++ sq = rcu_nocb_gp_get(rnp); + raw_spin_unlock_irq(&rnp->lock); ++ rcu_nocb_gp_cleanup(sq); + cond_resched_rcu_qs(); + WRITE_ONCE(rsp->gp_activity, jiffies); + rcu_gp_slow(rsp, gp_cleanup_delay); +@@ -2076,7 +2109,7 @@ + READ_ONCE(rsp->gpnum), + TPS("reqwait")); + rsp->gp_state = RCU_GP_WAIT_GPS; +- wait_event_interruptible(rsp->gp_wq, ++ swait_event_interruptible(rsp->gp_wq, + READ_ONCE(rsp->gp_flags) & + RCU_GP_FLAG_INIT); + rsp->gp_state = RCU_GP_DONE_GPS; +@@ -2106,7 +2139,7 @@ + READ_ONCE(rsp->gpnum), + TPS("fqswait")); + rsp->gp_state = RCU_GP_WAIT_FQS; +- ret = wait_event_interruptible_timeout(rsp->gp_wq, ++ ret = swait_event_interruptible_timeout(rsp->gp_wq, + rcu_gp_fqs_check_wake(rsp, &gf), j); + rsp->gp_state = RCU_GP_DOING_FQS; + /* Locking provides needed memory barriers. */ +@@ -2230,7 +2263,7 @@ + WARN_ON_ONCE(!rcu_gp_in_progress(rsp)); + WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); + raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags); +- rcu_gp_kthread_wake(rsp); ++ swake_up(&rsp->gp_wq); /* Memory barrier implied by swake_up() path. */ + } + + /* +@@ -2891,7 +2924,7 @@ + } + WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS); + raw_spin_unlock_irqrestore(&rnp_old->lock, flags); +- rcu_gp_kthread_wake(rsp); ++ swake_up(&rsp->gp_wq); /* Memory barrier implied by swake_up() path. */ + } + + /* +@@ -2934,18 +2967,17 @@ + /* + * Do RCU core processing for the current CPU. + */ +-static void rcu_process_callbacks(struct softirq_action *unused) ++static void rcu_process_callbacks(void) + { + struct rcu_state *rsp; + + if (cpu_is_offline(smp_processor_id())) + return; +- trace_rcu_utilization(TPS("Start RCU core")); + for_each_rcu_flavor(rsp) + __rcu_process_callbacks(rsp); +- trace_rcu_utilization(TPS("End RCU core")); + } + ++static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); + /* + * Schedule RCU callback invocation. 
If the specified type of RCU + * does not support RCU priority boosting, just do a direct call, +@@ -2957,18 +2989,105 @@ + { + if (unlikely(!READ_ONCE(rcu_scheduler_fully_active))) + return; +- if (likely(!rsp->boost)) { +- rcu_do_batch(rsp, rdp); ++ rcu_do_batch(rsp, rdp); ++} ++ ++static void rcu_wake_cond(struct task_struct *t, int status) ++{ ++ /* ++ * If the thread is yielding, only wake it when this ++ * is invoked from idle ++ */ ++ if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current))) ++ wake_up_process(t); ++} ++ ++/* ++ * Wake up this CPU's rcuc kthread to do RCU core processing. ++ */ ++static void invoke_rcu_core(void) ++{ ++ unsigned long flags; ++ struct task_struct *t; ++ ++ if (!cpu_online(smp_processor_id())) + return; ++ local_irq_save(flags); ++ __this_cpu_write(rcu_cpu_has_work, 1); ++ t = __this_cpu_read(rcu_cpu_kthread_task); ++ if (t != NULL && current != t) ++ rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status)); ++ local_irq_restore(flags); ++} ++ ++static void rcu_cpu_kthread_park(unsigned int cpu) ++{ ++ per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; ++} ++ ++static int rcu_cpu_kthread_should_run(unsigned int cpu) ++{ ++ return __this_cpu_read(rcu_cpu_has_work); ++} ++ ++/* ++ * Per-CPU kernel thread that invokes RCU callbacks. This replaces the ++ * RCU softirq used in flavors and configurations of RCU that do not ++ * support RCU priority boosting. ++ */ ++static void rcu_cpu_kthread(unsigned int cpu) ++{ ++ unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); ++ char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); ++ int spincnt; ++ ++ for (spincnt = 0; spincnt < 10; spincnt++) { ++ trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); ++ local_bh_disable(); ++ *statusp = RCU_KTHREAD_RUNNING; ++ this_cpu_inc(rcu_cpu_kthread_loops); ++ local_irq_disable(); ++ work = *workp; ++ *workp = 0; ++ local_irq_enable(); ++ if (work) ++ rcu_process_callbacks(); ++ local_bh_enable(); ++ if (*workp == 0) { ++ trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); ++ *statusp = RCU_KTHREAD_WAITING; ++ return; ++ } + } +- invoke_rcu_callbacks_kthread(); ++ *statusp = RCU_KTHREAD_YIELDING; ++ trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); ++ schedule_timeout_interruptible(2); ++ trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); ++ *statusp = RCU_KTHREAD_WAITING; + } + +-static void invoke_rcu_core(void) ++static struct smp_hotplug_thread rcu_cpu_thread_spec = { ++ .store = &rcu_cpu_kthread_task, ++ .thread_should_run = rcu_cpu_kthread_should_run, ++ .thread_fn = rcu_cpu_kthread, ++ .thread_comm = "rcuc/%u", ++ .setup = rcu_cpu_kthread_setup, ++ .park = rcu_cpu_kthread_park, ++}; ++ ++/* ++ * Spawn per-CPU RCU core processing kthreads. ++ */ ++static int __init rcu_spawn_core_kthreads(void) + { +- if (cpu_online(smp_processor_id())) +- raise_softirq(RCU_SOFTIRQ); ++ int cpu; ++ ++ for_each_possible_cpu(cpu) ++ per_cpu(rcu_cpu_has_work, cpu) = 0; ++ BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); ++ return 0; + } ++early_initcall(rcu_spawn_core_kthreads); + + /* + * Handle any core-RCU processing required by a call_rcu() invocation. +@@ -3114,6 +3233,7 @@ + } + EXPORT_SYMBOL_GPL(call_rcu_sched); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Queue an RCU callback for invocation after a quicker grace period. + */ +@@ -3122,6 +3242,7 @@ + __call_rcu(head, func, &rcu_bh_state, -1, 0); + } + EXPORT_SYMBOL_GPL(call_rcu_bh); ++#endif + + /* + * Queue an RCU callback for lazy invocation after a grace period. 
+@@ -3213,6 +3334,7 @@ + } + EXPORT_SYMBOL_GPL(synchronize_sched); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /** + * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed. + * +@@ -3239,6 +3361,7 @@ + wait_rcu_gp(call_rcu_bh); + } + EXPORT_SYMBOL_GPL(synchronize_rcu_bh); ++#endif + + /** + * get_state_synchronize_rcu - Snapshot current RCU state +@@ -3524,7 +3647,7 @@ + raw_spin_unlock_irqrestore(&rnp->lock, flags); + if (wake) { + smp_mb(); /* EGP done before wake_up(). */ +- wake_up(&rsp->expedited_wq); ++ swake_up(&rsp->expedited_wq); + } + break; + } +@@ -3781,7 +3904,7 @@ + jiffies_start = jiffies; + + for (;;) { +- ret = wait_event_interruptible_timeout( ++ ret = swait_event_timeout( + rsp->expedited_wq, + sync_rcu_preempt_exp_done(rnp_root), + jiffies_stall); +@@ -3789,7 +3912,7 @@ + return; + if (ret < 0) { + /* Hit a signal, disable CPU stall warnings. */ +- wait_event(rsp->expedited_wq, ++ swait_event(rsp->expedited_wq, + sync_rcu_preempt_exp_done(rnp_root)); + return; + } +@@ -4101,6 +4224,7 @@ + mutex_unlock(&rsp->barrier_mutex); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + /** + * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete. + */ +@@ -4109,6 +4233,7 @@ + _rcu_barrier(&rcu_bh_state); + } + EXPORT_SYMBOL_GPL(rcu_barrier_bh); ++#endif + + /** + * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks. +@@ -4455,8 +4580,8 @@ + } + } + +- init_waitqueue_head(&rsp->gp_wq); +- init_waitqueue_head(&rsp->expedited_wq); ++ init_swait_queue_head(&rsp->gp_wq); ++ init_swait_queue_head(&rsp->expedited_wq); + rnp = rsp->level[rcu_num_lvls - 1]; + for_each_possible_cpu(i) { + while (i > rnp->grphi) +@@ -4576,12 +4701,13 @@ + + rcu_bootup_announce(); + rcu_init_geometry(); ++#ifndef CONFIG_PREEMPT_RT_FULL + rcu_init_one(&rcu_bh_state, &rcu_bh_data); ++#endif + rcu_init_one(&rcu_sched_state, &rcu_sched_data); + if (dump_tree) + rcu_dump_rcu_node_tree(&rcu_sched_state); + __rcu_init_preempt(); +- open_softirq(RCU_SOFTIRQ, rcu_process_callbacks); + + /* + * We don't need protection against CPU-hotplug here because +diff -Nur linux-4.4.13.orig/kernel/rcu/tree.h linux-4.4.13/kernel/rcu/tree.h +--- linux-4.4.13.orig/kernel/rcu/tree.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/rcu/tree.h 2016-06-22 21:55:08.054523867 +0200 +@@ -27,6 +27,7 @@ + #include + #include + #include ++#include + #include + + /* +@@ -241,7 +242,7 @@ + /* Refused to boost: not sure why, though. */ + /* This can happen due to race conditions. */ + #ifdef CONFIG_RCU_NOCB_CPU +- wait_queue_head_t nocb_gp_wq[2]; ++ struct swait_queue_head nocb_gp_wq[2]; + /* Place for rcu_nocb_kthread() to wait GP. */ + #endif /* #ifdef CONFIG_RCU_NOCB_CPU */ + int need_future_gp[2]; +@@ -393,7 +394,7 @@ + atomic_long_t nocb_q_count_lazy; /* invocation (all stages). */ + struct rcu_head *nocb_follower_head; /* CBs ready to invoke. */ + struct rcu_head **nocb_follower_tail; +- wait_queue_head_t nocb_wq; /* For nocb kthreads to sleep on. */ ++ struct swait_queue_head nocb_wq; /* For nocb kthreads to sleep on. */ + struct task_struct *nocb_kthread; + int nocb_defer_wakeup; /* Defer wakeup of nocb_kthread. */ + +@@ -472,7 +473,7 @@ + unsigned long gpnum; /* Current gp number. */ + unsigned long completed; /* # of last completed gp. */ + struct task_struct *gp_kthread; /* Task for grace periods. */ +- wait_queue_head_t gp_wq; /* Where GP task waits. */ ++ struct swait_queue_head gp_wq; /* Where GP task waits. */ + short gp_flags; /* Commands for GP task. 
*/ + short gp_state; /* GP kthread sleep state. */ + +@@ -504,7 +505,7 @@ + atomic_long_t expedited_workdone3; /* # done by others #3. */ + atomic_long_t expedited_normal; /* # fallbacks to normal. */ + atomic_t expedited_need_qs; /* # CPUs left to check in. */ +- wait_queue_head_t expedited_wq; /* Wait for check-ins. */ ++ struct swait_queue_head expedited_wq; /* Wait for check-ins. */ + int ncpus_snap; /* # CPUs seen last time. */ + + unsigned long jiffies_force_qs; /* Time at which to invoke */ +@@ -556,18 +557,18 @@ + */ + extern struct rcu_state rcu_sched_state; + ++#ifndef CONFIG_PREEMPT_RT_FULL + extern struct rcu_state rcu_bh_state; ++#endif + + #ifdef CONFIG_PREEMPT_RCU + extern struct rcu_state rcu_preempt_state; + #endif /* #ifdef CONFIG_PREEMPT_RCU */ + +-#ifdef CONFIG_RCU_BOOST + DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status); + DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu); + DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); + DECLARE_PER_CPU(char, rcu_cpu_has_work); +-#endif /* #ifdef CONFIG_RCU_BOOST */ + + #ifndef RCU_TREE_NONCORE + +@@ -587,10 +588,9 @@ + static void __init __rcu_init_preempt(void); + static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); + static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); +-static void invoke_rcu_callbacks_kthread(void); + static bool rcu_is_callbacks_kthread(void); ++static void rcu_cpu_kthread_setup(unsigned int cpu); + #ifdef CONFIG_RCU_BOOST +-static void rcu_preempt_do_callbacks(void); + static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp, + struct rcu_node *rnp); + #endif /* #ifdef CONFIG_RCU_BOOST */ +@@ -607,7 +607,8 @@ + static void increment_cpu_stall_ticks(void); + static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu); + static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq); +-static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp); ++static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp); ++static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq); + static void rcu_init_one_nocb(struct rcu_node *rnp); + static bool __call_rcu_nocb(struct rcu_data *rdp, struct rcu_head *rhp, + bool lazy, unsigned long flags); +diff -Nur linux-4.4.13.orig/kernel/rcu/tree_plugin.h linux-4.4.13/kernel/rcu/tree_plugin.h +--- linux-4.4.13.orig/kernel/rcu/tree_plugin.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/rcu/tree_plugin.h 2016-06-22 21:55:08.054523867 +0200 +@@ -24,25 +24,10 @@ + * Paul E. McKenney + */ + +-#include +-#include +-#include +-#include +-#include "../time/tick-internal.h" +- + #ifdef CONFIG_RCU_BOOST + + #include "../locking/rtmutex_common.h" + +-/* +- * Control variables for per-CPU and per-rcu_node kthreads. These +- * handle all flavors of RCU. +- */ +-static DEFINE_PER_CPU(struct task_struct *, rcu_cpu_kthread_task); +-DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); +-DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); +-DEFINE_PER_CPU(char, rcu_cpu_has_work); +- + #else /* #ifdef CONFIG_RCU_BOOST */ + + /* +@@ -55,6 +40,14 @@ + + #endif /* #else #ifdef CONFIG_RCU_BOOST */ + ++/* ++ * Control variables for per-CPU and per-rcu_node kthreads. These ++ * handle all flavors of RCU. ++ */ ++DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status); ++DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops); ++DEFINE_PER_CPU(char, rcu_cpu_has_work); ++ + #ifdef CONFIG_RCU_NOCB_CPU + static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. 
*/ + static bool have_rcu_nocb_mask; /* Was rcu_nocb_mask allocated? */ +@@ -432,7 +425,7 @@ + } + + /* Hardware IRQ handlers cannot block, complain if they get here. */ +- if (in_irq() || in_serving_softirq()) { ++ if (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET)) { + lockdep_rcu_suspicious(__FILE__, __LINE__, + "rcu_read_unlock() from irq or softirq with blocking in critical section!!!\n"); + pr_alert("->rcu_read_unlock_special: %#x (b: %d, enq: %d nq: %d)\n", +@@ -645,15 +638,6 @@ + t->rcu_read_unlock_special.b.need_qs = true; + } + +-#ifdef CONFIG_RCU_BOOST +- +-static void rcu_preempt_do_callbacks(void) +-{ +- rcu_do_batch(rcu_state_p, this_cpu_ptr(rcu_data_p)); +-} +- +-#endif /* #ifdef CONFIG_RCU_BOOST */ +- + /* + * Queue a preemptible-RCU callback for invocation after a grace period. + */ +@@ -930,6 +914,19 @@ + + #endif /* #else #ifdef CONFIG_PREEMPT_RCU */ + ++/* ++ * If boosting, set rcuc kthreads to realtime priority. ++ */ ++static void rcu_cpu_kthread_setup(unsigned int cpu) ++{ ++#ifdef CONFIG_RCU_BOOST ++ struct sched_param sp; ++ ++ sp.sched_priority = kthread_prio; ++ sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); ++#endif /* #ifdef CONFIG_RCU_BOOST */ ++} ++ + #ifdef CONFIG_RCU_BOOST + + #include "../locking/rtmutex_common.h" +@@ -961,16 +958,6 @@ + + #endif /* #else #ifdef CONFIG_RCU_TRACE */ + +-static void rcu_wake_cond(struct task_struct *t, int status) +-{ +- /* +- * If the thread is yielding, only wake it when this +- * is invoked from idle +- */ +- if (status != RCU_KTHREAD_YIELDING || is_idle_task(current)) +- wake_up_process(t); +-} +- + /* + * Carry out RCU priority boosting on the task indicated by ->exp_tasks + * or ->boost_tasks, advancing the pointer to the next task in the +@@ -1115,23 +1102,6 @@ + } + + /* +- * Wake up the per-CPU kthread to invoke RCU callbacks. +- */ +-static void invoke_rcu_callbacks_kthread(void) +-{ +- unsigned long flags; +- +- local_irq_save(flags); +- __this_cpu_write(rcu_cpu_has_work, 1); +- if (__this_cpu_read(rcu_cpu_kthread_task) != NULL && +- current != __this_cpu_read(rcu_cpu_kthread_task)) { +- rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task), +- __this_cpu_read(rcu_cpu_kthread_status)); +- } +- local_irq_restore(flags); +-} +- +-/* + * Is the current CPU running the RCU-callbacks kthread? + * Caller must have preemption disabled. + */ +@@ -1186,67 +1156,6 @@ + return 0; + } + +-static void rcu_kthread_do_work(void) +-{ +- rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data)); +- rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data)); +- rcu_preempt_do_callbacks(); +-} +- +-static void rcu_cpu_kthread_setup(unsigned int cpu) +-{ +- struct sched_param sp; +- +- sp.sched_priority = kthread_prio; +- sched_setscheduler_nocheck(current, SCHED_FIFO, &sp); +-} +- +-static void rcu_cpu_kthread_park(unsigned int cpu) +-{ +- per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU; +-} +- +-static int rcu_cpu_kthread_should_run(unsigned int cpu) +-{ +- return __this_cpu_read(rcu_cpu_has_work); +-} +- +-/* +- * Per-CPU kernel thread that invokes RCU callbacks. This replaces the +- * RCU softirq used in flavors and configurations of RCU that do not +- * support RCU priority boosting. 
+- */ +-static void rcu_cpu_kthread(unsigned int cpu) +-{ +- unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status); +- char work, *workp = this_cpu_ptr(&rcu_cpu_has_work); +- int spincnt; +- +- for (spincnt = 0; spincnt < 10; spincnt++) { +- trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait")); +- local_bh_disable(); +- *statusp = RCU_KTHREAD_RUNNING; +- this_cpu_inc(rcu_cpu_kthread_loops); +- local_irq_disable(); +- work = *workp; +- *workp = 0; +- local_irq_enable(); +- if (work) +- rcu_kthread_do_work(); +- local_bh_enable(); +- if (*workp == 0) { +- trace_rcu_utilization(TPS("End CPU kthread@rcu_wait")); +- *statusp = RCU_KTHREAD_WAITING; +- return; +- } +- } +- *statusp = RCU_KTHREAD_YIELDING; +- trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield")); +- schedule_timeout_interruptible(2); +- trace_rcu_utilization(TPS("End CPU kthread@rcu_yield")); +- *statusp = RCU_KTHREAD_WAITING; +-} +- + /* + * Set the per-rcu_node kthread's affinity to cover all CPUs that are + * served by the rcu_node in question. The CPU hotplug lock is still +@@ -1276,26 +1185,12 @@ + free_cpumask_var(cm); + } + +-static struct smp_hotplug_thread rcu_cpu_thread_spec = { +- .store = &rcu_cpu_kthread_task, +- .thread_should_run = rcu_cpu_kthread_should_run, +- .thread_fn = rcu_cpu_kthread, +- .thread_comm = "rcuc/%u", +- .setup = rcu_cpu_kthread_setup, +- .park = rcu_cpu_kthread_park, +-}; +- + /* + * Spawn boost kthreads -- called as soon as the scheduler is running. + */ + static void __init rcu_spawn_boost_kthreads(void) + { + struct rcu_node *rnp; +- int cpu; +- +- for_each_possible_cpu(cpu) +- per_cpu(rcu_cpu_has_work, cpu) = 0; +- BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec)); + rcu_for_each_leaf_node(rcu_state_p, rnp) + (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp); + } +@@ -1318,11 +1213,6 @@ + raw_spin_unlock_irqrestore(&rnp->lock, flags); + } + +-static void invoke_rcu_callbacks_kthread(void) +-{ +- WARN_ON_ONCE(1); +-} +- + static bool rcu_is_callbacks_kthread(void) + { + return false; +@@ -1346,7 +1236,7 @@ + + #endif /* #else #ifdef CONFIG_RCU_BOOST */ + +-#if !defined(CONFIG_RCU_FAST_NO_HZ) ++#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) + + /* + * Check to see if any future RCU-related work will need to be done +@@ -1363,7 +1253,9 @@ + return IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL) + ? 0 : rcu_cpu_has_callbacks(NULL); + } ++#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */ + ++#if !defined(CONFIG_RCU_FAST_NO_HZ) + /* + * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up + * after it. +@@ -1459,6 +1351,8 @@ + return cbs_ready; + } + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + /* + * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready + * to invoke. If the CPU has callbacks, try to advance them. Tell the +@@ -1504,6 +1398,7 @@ + *nextevt = basemono + dj * TICK_NSEC; + return 0; + } ++#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */ + + /* + * Prepare a CPU for idle from an RCU perspective. The first major task +@@ -1822,9 +1717,9 @@ + * Wake up any no-CBs CPUs' kthreads that were waiting on the just-ended + * grace period. 
+ */ +-static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) ++static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq) + { +- wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]); ++ swake_up_all(sq); + } + + /* +@@ -1840,10 +1735,15 @@ + rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq; + } + ++static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp) ++{ ++ return &rnp->nocb_gp_wq[rnp->completed & 0x1]; ++} ++ + static void rcu_init_one_nocb(struct rcu_node *rnp) + { +- init_waitqueue_head(&rnp->nocb_gp_wq[0]); +- init_waitqueue_head(&rnp->nocb_gp_wq[1]); ++ init_swait_queue_head(&rnp->nocb_gp_wq[0]); ++ init_swait_queue_head(&rnp->nocb_gp_wq[1]); + } + + #ifndef CONFIG_RCU_NOCB_CPU_ALL +@@ -1868,7 +1768,7 @@ + if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) { + /* Prior smp_mb__after_atomic() orders against prior enqueue. */ + WRITE_ONCE(rdp_leader->nocb_leader_sleep, false); +- wake_up(&rdp_leader->nocb_wq); ++ swake_up(&rdp_leader->nocb_wq); + } + } + +@@ -2081,7 +1981,7 @@ + */ + trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait")); + for (;;) { +- wait_event_interruptible( ++ swait_event_interruptible( + rnp->nocb_gp_wq[c & 0x1], + (d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c))); + if (likely(d)) +@@ -2109,7 +2009,7 @@ + /* Wait for callbacks to appear. */ + if (!rcu_nocb_poll) { + trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep"); +- wait_event_interruptible(my_rdp->nocb_wq, ++ swait_event_interruptible(my_rdp->nocb_wq, + !READ_ONCE(my_rdp->nocb_leader_sleep)); + /* Memory barrier handled by smp_mb() calls below and repoll. */ + } else if (firsttime) { +@@ -2184,7 +2084,7 @@ + * List was empty, wake up the follower. + * Memory barriers supplied by atomic_long_add(). + */ +- wake_up(&rdp->nocb_wq); ++ swake_up(&rdp->nocb_wq); + } + } + +@@ -2205,7 +2105,7 @@ + if (!rcu_nocb_poll) { + trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, + "FollowerSleep"); +- wait_event_interruptible(rdp->nocb_wq, ++ swait_event_interruptible(rdp->nocb_wq, + READ_ONCE(rdp->nocb_follower_head)); + } else if (firsttime) { + /* Don't drown trace log with "Poll"! */ +@@ -2364,7 +2264,7 @@ + static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp) + { + rdp->nocb_tail = &rdp->nocb_head; +- init_waitqueue_head(&rdp->nocb_wq); ++ init_swait_queue_head(&rdp->nocb_wq); + rdp->nocb_follower_tail = &rdp->nocb_follower_head; + } + +@@ -2514,7 +2414,7 @@ + return false; + } + +-static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) ++static void rcu_nocb_gp_cleanup(struct swait_queue_head *sq) + { + } + +@@ -2522,6 +2422,11 @@ + { + } + ++static struct swait_queue_head *rcu_nocb_gp_get(struct rcu_node *rnp) ++{ ++ return NULL; ++} ++ + static void rcu_init_one_nocb(struct rcu_node *rnp) + { + } +diff -Nur linux-4.4.13.orig/kernel/rcu/update.c linux-4.4.13/kernel/rcu/update.c +--- linux-4.4.13.orig/kernel/rcu/update.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/rcu/update.c 2016-06-22 21:55:08.054523867 +0200 +@@ -276,6 +276,7 @@ + } + EXPORT_SYMBOL_GPL(rcu_read_lock_held); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /** + * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section? 
+ * +@@ -302,6 +303,7 @@ + return in_softirq() || irqs_disabled(); + } + EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held); ++#endif + + #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */ + +diff -Nur linux-4.4.13.orig/kernel/relay.c linux-4.4.13/kernel/relay.c +--- linux-4.4.13.orig/kernel/relay.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/relay.c 2016-06-22 21:55:08.058523867 +0200 +@@ -336,6 +336,10 @@ + { + struct rchan_buf *buf = (struct rchan_buf *)data; + wake_up_interruptible(&buf->read_wait); ++ /* ++ * Stupid polling for now: ++ */ ++ mod_timer(&buf->timer, jiffies + 1); + } + + /** +@@ -353,6 +357,7 @@ + init_waitqueue_head(&buf->read_wait); + kref_init(&buf->kref); + setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf); ++ mod_timer(&buf->timer, jiffies + 1); + } else + del_timer_sync(&buf->timer); + +@@ -736,15 +741,6 @@ + else + buf->early_bytes += buf->chan->subbuf_size - + buf->padding[old_subbuf]; +- smp_mb(); +- if (waitqueue_active(&buf->read_wait)) +- /* +- * Calling wake_up_interruptible() from here +- * will deadlock if we happen to be logging +- * from the scheduler (trying to re-grab +- * rq->lock), so defer it. +- */ +- mod_timer(&buf->timer, jiffies + 1); + } + + old = buf->data; +diff -Nur linux-4.4.13.orig/kernel/sched/completion.c linux-4.4.13/kernel/sched/completion.c +--- linux-4.4.13.orig/kernel/sched/completion.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/sched/completion.c 2016-06-22 21:55:08.058523867 +0200 +@@ -30,10 +30,10 @@ + { + unsigned long flags; + +- spin_lock_irqsave(&x->wait.lock, flags); ++ raw_spin_lock_irqsave(&x->wait.lock, flags); + x->done++; +- __wake_up_locked(&x->wait, TASK_NORMAL, 1); +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ swake_up_locked(&x->wait); ++ raw_spin_unlock_irqrestore(&x->wait.lock, flags); + } + EXPORT_SYMBOL(complete); + +@@ -50,10 +50,10 @@ + { + unsigned long flags; + +- spin_lock_irqsave(&x->wait.lock, flags); ++ raw_spin_lock_irqsave(&x->wait.lock, flags); + x->done += UINT_MAX/2; +- __wake_up_locked(&x->wait, TASK_NORMAL, 0); +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ swake_up_all_locked(&x->wait); ++ raw_spin_unlock_irqrestore(&x->wait.lock, flags); + } + EXPORT_SYMBOL(complete_all); + +@@ -62,20 +62,20 @@ + long (*action)(long), long timeout, int state) + { + if (!x->done) { +- DECLARE_WAITQUEUE(wait, current); ++ DECLARE_SWAITQUEUE(wait); + +- __add_wait_queue_tail_exclusive(&x->wait, &wait); ++ __prepare_to_swait(&x->wait, &wait); + do { + if (signal_pending_state(state, current)) { + timeout = -ERESTARTSYS; + break; + } + __set_current_state(state); +- spin_unlock_irq(&x->wait.lock); ++ raw_spin_unlock_irq(&x->wait.lock); + timeout = action(timeout); +- spin_lock_irq(&x->wait.lock); ++ raw_spin_lock_irq(&x->wait.lock); + } while (!x->done && timeout); +- __remove_wait_queue(&x->wait, &wait); ++ __finish_swait(&x->wait, &wait); + if (!x->done) + return timeout; + } +@@ -89,9 +89,9 @@ + { + might_sleep(); + +- spin_lock_irq(&x->wait.lock); ++ raw_spin_lock_irq(&x->wait.lock); + timeout = do_wait_for_common(x, action, timeout, state); +- spin_unlock_irq(&x->wait.lock); ++ raw_spin_unlock_irq(&x->wait.lock); + return timeout; + } + +@@ -277,12 +277,12 @@ + if (!READ_ONCE(x->done)) + return 0; + +- spin_lock_irqsave(&x->wait.lock, flags); ++ raw_spin_lock_irqsave(&x->wait.lock, flags); + if (!x->done) + ret = 0; + else + x->done--; +- spin_unlock_irqrestore(&x->wait.lock, flags); ++ raw_spin_unlock_irqrestore(&x->wait.lock, flags); + return ret; + } + 
EXPORT_SYMBOL(try_wait_for_completion); +@@ -311,7 +311,7 @@ + * after it's acquired the lock. + */ + smp_rmb(); +- spin_unlock_wait(&x->wait.lock); ++ raw_spin_unlock_wait(&x->wait.lock); + return true; + } + EXPORT_SYMBOL(completion_done); +diff -Nur linux-4.4.13.orig/kernel/sched/core.c linux-4.4.13/kernel/sched/core.c +--- linux-4.4.13.orig/kernel/sched/core.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/sched/core.c 2016-06-22 21:55:08.058523867 +0200 +@@ -260,7 +260,11 @@ + * Number of tasks to iterate in a single balance run. + * Limited because this is done with IRQs disabled. + */ ++#ifndef CONFIG_PREEMPT_RT_FULL + const_debug unsigned int sysctl_sched_nr_migrate = 32; ++#else ++const_debug unsigned int sysctl_sched_nr_migrate = 8; ++#endif + + /* + * period over which we average the RT time consumption, measured +@@ -438,6 +442,7 @@ + + hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + rq->hrtick_timer.function = hrtick; ++ rq->hrtick_timer.irqsafe = 1; + } + #else /* CONFIG_SCHED_HRTICK */ + static inline void hrtick_clear(struct rq *rq) +@@ -542,7 +547,7 @@ + head->lastp = &node->next; + } + +-void wake_up_q(struct wake_q_head *head) ++void __wake_up_q(struct wake_q_head *head, bool sleeper) + { + struct wake_q_node *node = head->first; + +@@ -559,7 +564,10 @@ + * wake_up_process() implies a wmb() to pair with the queueing + * in wake_q_add() so as not to miss wakeups. + */ +- wake_up_process(task); ++ if (sleeper) ++ wake_up_lock_sleeper(task); ++ else ++ wake_up_process(task); + put_task_struct(task); + } + } +@@ -595,6 +603,38 @@ + trace_sched_wake_idle_without_ipi(cpu); + } + ++#ifdef CONFIG_PREEMPT_LAZY ++void resched_curr_lazy(struct rq *rq) ++{ ++ struct task_struct *curr = rq->curr; ++ int cpu; ++ ++ if (!sched_feat(PREEMPT_LAZY)) { ++ resched_curr(rq); ++ return; ++ } ++ ++ lockdep_assert_held(&rq->lock); ++ ++ if (test_tsk_need_resched(curr)) ++ return; ++ ++ if (test_tsk_need_resched_lazy(curr)) ++ return; ++ ++ set_tsk_need_resched_lazy(curr); ++ ++ cpu = cpu_of(rq); ++ if (cpu == smp_processor_id()) ++ return; ++ ++ /* NEED_RESCHED_LAZY must be visible before we test polling */ ++ smp_mb(); ++ if (!tsk_is_polling(curr)) ++ smp_send_reschedule(cpu); ++} ++#endif ++ + void resched_cpu(int cpu) + { + struct rq *rq = cpu_rq(cpu); +@@ -618,11 +658,14 @@ + */ + int get_nohz_timer_target(void) + { +- int i, cpu = smp_processor_id(); ++ int i, cpu; + struct sched_domain *sd; + ++ preempt_disable_rt(); ++ cpu = smp_processor_id(); ++ + if (!idle_cpu(cpu) && is_housekeeping_cpu(cpu)) +- return cpu; ++ goto preempt_en_rt; + + rcu_read_lock(); + for_each_domain(cpu, sd) { +@@ -638,6 +681,8 @@ + cpu = housekeeping_any_cpu(); + unlock: + rcu_read_unlock(); ++preempt_en_rt: ++ preempt_enable_rt(); + return cpu; + } + /* +@@ -1171,6 +1216,11 @@ + + lockdep_assert_held(&p->pi_lock); + ++ if (__migrate_disabled(p)) { ++ cpumask_copy(&p->cpus_allowed, new_mask); ++ return; ++ } ++ + queued = task_on_rq_queued(p); + running = task_current(rq, p); + +@@ -1193,6 +1243,84 @@ + enqueue_task(rq, p, ENQUEUE_RESTORE); + } + ++static DEFINE_PER_CPU(struct cpumask, sched_cpumasks); ++static DEFINE_MUTEX(sched_down_mutex); ++static cpumask_t sched_down_cpumask; ++ ++void tell_sched_cpu_down_begin(int cpu) ++{ ++ mutex_lock(&sched_down_mutex); ++ cpumask_set_cpu(cpu, &sched_down_cpumask); ++ mutex_unlock(&sched_down_mutex); ++} ++ ++void tell_sched_cpu_down_done(int cpu) ++{ ++ mutex_lock(&sched_down_mutex); ++ cpumask_clear_cpu(cpu, &sched_down_cpumask); ++ 
mutex_unlock(&sched_down_mutex); ++} ++ ++/** ++ * migrate_me - try to move the current task off this cpu ++ * ++ * Used by the pin_current_cpu() code to try to get tasks ++ * to move off the current CPU as it is going down. ++ * It will only move the task if the task isn't pinned to ++ * the CPU (with migrate_disable, affinity or NO_SETAFFINITY) ++ * and the task is in a RUNNING state. Otherwise the ++ * movement of the task would wake it up (change its state ++ * to running) when the task does not expect it. ++ * ++ * Returns 1 if it succeeded in moving the current task ++ * 0 otherwise. ++ */ ++int migrate_me(void) ++{ ++ struct task_struct *p = current; ++ struct migration_arg arg; ++ struct cpumask *cpumask; ++ struct cpumask *mask; ++ unsigned long flags; ++ unsigned int dest_cpu; ++ struct rq *rq; ++ ++ /* ++ * We cannot migrate tasks bound to a CPU or tasks that are ++ * not running. The movement of the task would wake it up. ++ */ ++ if (p->flags & PF_NO_SETAFFINITY || p->state) ++ return 0; ++ ++ mutex_lock(&sched_down_mutex); ++ rq = task_rq_lock(p, &flags); ++ ++ cpumask = this_cpu_ptr(&sched_cpumasks); ++ mask = &p->cpus_allowed; ++ ++ cpumask_andnot(cpumask, mask, &sched_down_cpumask); ++ ++ if (!cpumask_weight(cpumask)) { ++ /* It's only on this CPU? */ ++ task_rq_unlock(rq, p, &flags); ++ mutex_unlock(&sched_down_mutex); ++ return 0; ++ } ++ ++ dest_cpu = cpumask_any_and(cpu_active_mask, cpumask); ++ ++ arg.task = p; ++ arg.dest_cpu = dest_cpu; ++ ++ task_rq_unlock(rq, p, &flags); ++ ++ stop_one_cpu(cpu_of(rq), migration_cpu_stop, &arg); ++ tlb_migrate_finish(p->mm); ++ mutex_unlock(&sched_down_mutex); ++ ++ return 1; ++} ++ + /* + * Change a given task's CPU affinity. Migrate the thread to a + * proper CPU and schedule it away if the CPU it's executing on +@@ -1232,7 +1360,7 @@ + do_set_cpus_allowed(p, new_mask); + + /* Can the task run on the task's current CPU? If so, we're done */ +- if (cpumask_test_cpu(task_cpu(p), new_mask)) ++ if (cpumask_test_cpu(task_cpu(p), new_mask) || __migrate_disabled(p)) + goto out; + + dest_cpu = cpumask_any_and(cpu_active_mask, new_mask); +@@ -1408,6 +1536,18 @@ + return ret; + } + ++static bool check_task_state(struct task_struct *p, long match_state) ++{ ++ bool match = false; ++ ++ raw_spin_lock_irq(&p->pi_lock); ++ if (p->state == match_state || p->saved_state == match_state) ++ match = true; ++ raw_spin_unlock_irq(&p->pi_lock); ++ ++ return match; ++} ++ + /* + * wait_task_inactive - wait for a thread to unschedule. + * +@@ -1452,7 +1592,7 @@ + * is actually now running somewhere else!
+ */ + while (task_running(rq, p)) { +- if (match_state && unlikely(p->state != match_state)) ++ if (match_state && !check_task_state(p, match_state)) + return 0; + cpu_relax(); + } +@@ -1467,7 +1607,8 @@ + running = task_running(rq, p); + queued = task_on_rq_queued(p); + ncsw = 0; +- if (!match_state || p->state == match_state) ++ if (!match_state || p->state == match_state || ++ p->saved_state == match_state) + ncsw = p->nvcsw | LONG_MIN; /* sets MSB */ + task_rq_unlock(rq, p, &flags); + +@@ -1624,7 +1765,7 @@ + { + lockdep_assert_held(&p->pi_lock); + +- if (p->nr_cpus_allowed > 1) ++ if (tsk_nr_cpus_allowed(p) > 1) + cpu = p->sched_class->select_task_rq(p, cpu, sd_flags, wake_flags); + + /* +@@ -1704,10 +1845,6 @@ + { + activate_task(rq, p, en_flags); + p->on_rq = TASK_ON_RQ_QUEUED; +- +- /* if a worker is waking up, notify workqueue */ +- if (p->flags & PF_WQ_WORKER) +- wq_worker_waking_up(p, cpu_of(rq)); + } + + /* +@@ -1934,8 +2071,27 @@ + */ + smp_mb__before_spinlock(); + raw_spin_lock_irqsave(&p->pi_lock, flags); +- if (!(p->state & state)) ++ if (!(p->state & state)) { ++ /* ++ * The task might be running due to a spinlock sleeper ++ * wakeup. Check the saved state and set it to running ++ * if the wakeup condition is true. ++ */ ++ if (!(wake_flags & WF_LOCK_SLEEPER)) { ++ if (p->saved_state & state) { ++ p->saved_state = TASK_RUNNING; ++ success = 1; ++ } ++ } + goto out; ++ } ++ ++ /* ++ * If this is a regular wakeup, then we can unconditionally ++ * clear the saved state of a "lock sleeper". ++ */ ++ if (!(wake_flags & WF_LOCK_SLEEPER)) ++ p->saved_state = TASK_RUNNING; + + trace_sched_waking(p); + +@@ -2005,52 +2161,6 @@ + } + + /** +- * try_to_wake_up_local - try to wake up a local task with rq lock held +- * @p: the thread to be awakened +- * +- * Put @p on the run-queue if it's not already there. The caller must +- * ensure that this_rq() is locked, @p is bound to this_rq() and not +- * the current task. +- */ +-static void try_to_wake_up_local(struct task_struct *p) +-{ +- struct rq *rq = task_rq(p); +- +- if (WARN_ON_ONCE(rq != this_rq()) || +- WARN_ON_ONCE(p == current)) +- return; +- +- lockdep_assert_held(&rq->lock); +- +- if (!raw_spin_trylock(&p->pi_lock)) { +- /* +- * This is OK, because current is on_cpu, which avoids it being +- * picked for load-balance and preemption/IRQs are still +- * disabled avoiding further scheduler activity on it and we've +- * not yet picked a replacement task. +- */ +- lockdep_unpin_lock(&rq->lock); +- raw_spin_unlock(&rq->lock); +- raw_spin_lock(&p->pi_lock); +- raw_spin_lock(&rq->lock); +- lockdep_pin_lock(&rq->lock); +- } +- +- if (!(p->state & TASK_NORMAL)) +- goto out; +- +- trace_sched_waking(p); +- +- if (!task_on_rq_queued(p)) +- ttwu_activate(rq, p, ENQUEUE_WAKEUP); +- +- ttwu_do_wakeup(rq, p, 0); +- ttwu_stat(p, smp_processor_id(), 0); +-out: +- raw_spin_unlock(&p->pi_lock); +-} +- +-/** + * wake_up_process - Wake up a specific process + * @p: The process to be woken up. + * +@@ -2068,6 +2178,18 @@ + } + EXPORT_SYMBOL(wake_up_process); + ++/** ++ * wake_up_lock_sleeper - Wake up a specific process blocked on a "sleeping lock" ++ * @p: The process to be woken up. ++ * ++ * Same as wake_up_process() above, but wake_flags=WF_LOCK_SLEEPER to indicate ++ * the nature of the wakeup. 
++ */ ++int wake_up_lock_sleeper(struct task_struct *p) ++{ ++ return try_to_wake_up(p, TASK_ALL, WF_LOCK_SLEEPER); ++} ++ + int wake_up_state(struct task_struct *p, unsigned int state) + { + return try_to_wake_up(p, state, 0); +@@ -2254,6 +2376,9 @@ + p->on_cpu = 0; + #endif + init_task_preempt_count(p); ++#ifdef CONFIG_HAVE_PREEMPT_LAZY ++ task_thread_info(p)->preempt_lazy_count = 0; ++#endif + #ifdef CONFIG_SMP + plist_node_init(&p->pushable_tasks, MAX_PRIO); + RB_CLEAR_NODE(&p->pushable_dl_tasks); +@@ -2578,8 +2703,12 @@ + finish_arch_post_lock_switch(); + + fire_sched_in_preempt_notifiers(current); ++ /* ++ * We use mmdrop_delayed() here so we don't have to do the ++ * full __mmdrop() when we are the last user. ++ */ + if (mm) +- mmdrop(mm); ++ mmdrop_delayed(mm); + if (unlikely(prev_state == TASK_DEAD)) { + if (prev->sched_class->task_dead) + prev->sched_class->task_dead(prev); +@@ -2910,16 +3039,6 @@ + } + #endif + +-notrace unsigned long get_parent_ip(unsigned long addr) +-{ +- if (in_lock_functions(addr)) { +- addr = CALLER_ADDR2; +- if (in_lock_functions(addr)) +- addr = CALLER_ADDR3; +- } +- return addr; +-} +- + #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \ + defined(CONFIG_PREEMPT_TRACER)) + +@@ -2941,7 +3060,7 @@ + PREEMPT_MASK - 10); + #endif + if (preempt_count() == val) { +- unsigned long ip = get_parent_ip(CALLER_ADDR1); ++ unsigned long ip = get_lock_parent_ip(); + #ifdef CONFIG_DEBUG_PREEMPT + current->preempt_disable_ip = ip; + #endif +@@ -2968,7 +3087,7 @@ + #endif + + if (preempt_count() == val) +- trace_preempt_on(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); ++ trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip()); + __preempt_count_sub(val); + } + EXPORT_SYMBOL(preempt_count_sub); +@@ -3022,6 +3141,77 @@ + schedstat_inc(this_rq(), sched_count); + } + ++#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_SMP) ++ ++void migrate_disable(void) ++{ ++ struct task_struct *p = current; ++ ++ if (in_atomic() || irqs_disabled()) { ++#ifdef CONFIG_SCHED_DEBUG ++ p->migrate_disable_atomic++; ++#endif ++ return; ++ } ++ ++#ifdef CONFIG_SCHED_DEBUG ++ if (unlikely(p->migrate_disable_atomic)) { ++ tracing_off(); ++ WARN_ON_ONCE(1); ++ } ++#endif ++ ++ if (p->migrate_disable) { ++ p->migrate_disable++; ++ return; ++ } ++ ++ preempt_disable(); ++ preempt_lazy_disable(); ++ pin_current_cpu(); ++ p->migrate_disable = 1; ++ preempt_enable(); ++} ++EXPORT_SYMBOL(migrate_disable); ++ ++void migrate_enable(void) ++{ ++ struct task_struct *p = current; ++ ++ if (in_atomic() || irqs_disabled()) { ++#ifdef CONFIG_SCHED_DEBUG ++ p->migrate_disable_atomic--; ++#endif ++ return; ++ } ++ ++#ifdef CONFIG_SCHED_DEBUG ++ if (unlikely(p->migrate_disable_atomic)) { ++ tracing_off(); ++ WARN_ON_ONCE(1); ++ } ++#endif ++ WARN_ON_ONCE(p->migrate_disable <= 0); ++ ++ if (p->migrate_disable > 1) { ++ p->migrate_disable--; ++ return; ++ } ++ ++ preempt_disable(); ++ /* ++ * Clearing migrate_disable causes tsk_cpus_allowed to ++ * show the task's original cpu affinity. ++ */ ++ p->migrate_disable = 0; ++ ++ unpin_current_cpu(); ++ preempt_enable(); ++ preempt_lazy_enable(); ++} ++EXPORT_SYMBOL(migrate_enable); ++#endif ++ + /* + * Pick up the highest-prio task: + */ +@@ -3146,19 +3336,6 @@ + } else { + deactivate_task(rq, prev, DEQUEUE_SLEEP); + prev->on_rq = 0; +- +- /* +- * If a worker went to sleep, notify and ask workqueue +- * whether it wants to wake up a task to maintain +- * concurrency.
+- */ +- if (prev->flags & PF_WQ_WORKER) { +- struct task_struct *to_wakeup; +- +- to_wakeup = wq_worker_sleeping(prev, cpu); +- if (to_wakeup) +- try_to_wake_up_local(to_wakeup); +- } + } + switch_count = &prev->nvcsw; + } +@@ -3168,6 +3345,7 @@ + + next = pick_next_task(rq, prev); + clear_tsk_need_resched(prev); ++ clear_tsk_need_resched_lazy(prev); + clear_preempt_need_resched(); + rq->clock_skip_update = 0; + +@@ -3189,9 +3367,20 @@ + + static inline void sched_submit_work(struct task_struct *tsk) + { +- if (!tsk->state || tsk_is_pi_blocked(tsk)) ++ if (!tsk->state) + return; + /* ++ * If a worker went to sleep, notify and ask workqueue whether ++ * it wants to wake up a task to maintain concurrency. ++ */ ++ if (tsk->flags & PF_WQ_WORKER) ++ wq_worker_sleeping(tsk); ++ ++ ++ if (tsk_is_pi_blocked(tsk)) ++ return; ++ ++ /* + * If we are going to sleep and we have plugged IO queued, + * make sure to submit it to avoid deadlocks. + */ +@@ -3199,6 +3388,12 @@ + blk_schedule_flush_plug(tsk); + } + ++static void sched_update_worker(struct task_struct *tsk) ++{ ++ if (tsk->flags & PF_WQ_WORKER) ++ wq_worker_running(tsk); ++} ++ + asmlinkage __visible void __sched schedule(void) + { + struct task_struct *tsk = current; +@@ -3209,6 +3404,7 @@ + __schedule(false); + sched_preempt_enable_no_resched(); + } while (need_resched()); ++ sched_update_worker(tsk); + } + EXPORT_SYMBOL(schedule); + +@@ -3257,6 +3453,30 @@ + } while (need_resched()); + } + ++#ifdef CONFIG_PREEMPT_LAZY ++/* ++ * If TIF_NEED_RESCHED is set then we allow being scheduled away, since it is ++ * set by an RT task. Otherwise we try to avoid being scheduled out as long as ++ * the preempt_lazy_count counter is >0. ++ */ ++static __always_inline int preemptible_lazy(void) ++{ ++ if (test_thread_flag(TIF_NEED_RESCHED)) ++ return 1; ++ if (current_thread_info()->preempt_lazy_count) ++ return 0; ++ return 1; ++} ++ ++#else ++ ++static int preemptible_lazy(void) ++{ ++ return 1; ++} ++ ++#endif ++ + #ifdef CONFIG_PREEMPT + /* + * this is the entry point to schedule() from in-kernel preemption +@@ -3271,6 +3491,8 @@ + */ + if (likely(!preemptible())) + return; ++ if (!preemptible_lazy()) ++ return; + + preempt_schedule_common(); + } +@@ -3297,6 +3519,8 @@ + + if (likely(!preemptible())) + return; ++ if (!preemptible_lazy()) ++ return; + + do { + preempt_disable_notrace(); +@@ -3306,7 +3530,16 @@ + * an infinite recursion. + */ + prev_ctx = exception_enter(); ++ /* ++ * The add/subtract must not be traced by the function ++ * tracer. But we still want to account for the ++ * preempt off latency tracer. Since the _notrace versions ++ * of add/subtract skip the accounting for latency tracer ++ * we must force it manually. ++ */ ++ start_critical_timings(); + __schedule(true); ++ stop_critical_timings(); + exception_exit(prev_ctx); + + preempt_enable_no_resched_notrace(); +@@ -4650,6 +4883,7 @@ + } + EXPORT_SYMBOL(__cond_resched_lock); + ++#ifndef CONFIG_PREEMPT_RT_FULL + int __sched __cond_resched_softirq(void) + { + BUG_ON(!in_softirq()); +@@ -4663,6 +4897,7 @@ + return 0; + } + EXPORT_SYMBOL(__cond_resched_softirq); ++#endif + + /** + * yield - yield the current processor to other threads. +@@ -5027,7 +5262,9 @@ + + /* Set the preempt count _outside_ the spinlocks!
*/ + init_idle_preempt_count(idle, cpu); +- ++#ifdef CONFIG_HAVE_PREEMPT_LAZY ++ task_thread_info(idle)->preempt_lazy_count = 0; ++#endif + /* + * The idle tasks have their own, simple scheduling class: + */ +@@ -5168,6 +5405,8 @@ + #endif /* CONFIG_NUMA_BALANCING */ + + #ifdef CONFIG_HOTPLUG_CPU ++static DEFINE_PER_CPU(struct mm_struct *, idle_last_mm); ++ + /* + * Ensures that the idle task is using init_mm right before its cpu goes + * offline. +@@ -5182,7 +5421,11 @@ + switch_mm(mm, &init_mm, current); + finish_arch_post_lock_switch(); + } +- mmdrop(mm); ++ /* ++ * Defer the cleanup to an alive cpu. On RT we can neither ++ * call mmdrop() nor mmdrop_delayed() from here. ++ */ ++ per_cpu(idle_last_mm, smp_processor_id()) = mm; + } + + /* +@@ -5555,6 +5798,10 @@ + + case CPU_DEAD: + calc_load_migrate(rq); ++ if (per_cpu(idle_last_mm, cpu)) { ++ mmdrop(per_cpu(idle_last_mm, cpu)); ++ per_cpu(idle_last_mm, cpu) = NULL; ++ } + break; + #endif + } +@@ -7538,7 +7785,7 @@ + #ifdef CONFIG_DEBUG_ATOMIC_SLEEP + static inline int preempt_count_equals(int preempt_offset) + { +- int nested = preempt_count() + rcu_preempt_depth(); ++ int nested = preempt_count() + sched_rcu_preempt_depth(); + + return (nested == preempt_offset); + } +diff -Nur linux-4.4.13.orig/kernel/sched/cpudeadline.c linux-4.4.13/kernel/sched/cpudeadline.c +--- linux-4.4.13.orig/kernel/sched/cpudeadline.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/sched/cpudeadline.c 2016-06-22 21:55:08.058523867 +0200 +@@ -103,10 +103,10 @@ + const struct sched_dl_entity *dl_se = &p->dl; + + if (later_mask && +- cpumask_and(later_mask, cp->free_cpus, &p->cpus_allowed)) { ++ cpumask_and(later_mask, cp->free_cpus, tsk_cpus_allowed(p))) { + best_cpu = cpumask_any(later_mask); + goto out; +- } else if (cpumask_test_cpu(cpudl_maximum(cp), &p->cpus_allowed) && ++ } else if (cpumask_test_cpu(cpudl_maximum(cp), tsk_cpus_allowed(p)) && + dl_time_before(dl_se->deadline, cp->elements[0].dl)) { + best_cpu = cpudl_maximum(cp); + if (later_mask) +diff -Nur linux-4.4.13.orig/kernel/sched/cpupri.c linux-4.4.13/kernel/sched/cpupri.c +--- linux-4.4.13.orig/kernel/sched/cpupri.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/sched/cpupri.c 2016-06-22 21:55:08.058523867 +0200 +@@ -103,11 +103,11 @@ + if (skip) + continue; + +- if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) ++ if (cpumask_any_and(tsk_cpus_allowed(p), vec->mask) >= nr_cpu_ids) + continue; + + if (lowest_mask) { +- cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask); ++ cpumask_and(lowest_mask, tsk_cpus_allowed(p), vec->mask); + + /* + * We have to ensure that we have at least one bit +diff -Nur linux-4.4.13.orig/kernel/sched/cputime.c linux-4.4.13/kernel/sched/cputime.c +--- linux-4.4.13.orig/kernel/sched/cputime.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/sched/cputime.c 2016-06-22 21:55:08.058523867 +0200 +@@ -680,7 +680,7 @@ + { + unsigned long long delta = vtime_delta(tsk); + +- WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_SLEEPING); ++ WARN_ON_ONCE(tsk->vtime_snap_whence == VTIME_INACTIVE); + tsk->vtime_snap += delta; + + /* CHECKME: always safe to convert nsecs to cputime? 
*/ +@@ -696,37 +696,37 @@ + + void vtime_account_system(struct task_struct *tsk) + { +- write_seqlock(&tsk->vtime_seqlock); ++ write_seqcount_begin(&tsk->vtime_seqcount); + __vtime_account_system(tsk); +- write_sequnlock(&tsk->vtime_seqlock); ++ write_seqcount_end(&tsk->vtime_seqcount); + } + + void vtime_gen_account_irq_exit(struct task_struct *tsk) + { +- write_seqlock(&tsk->vtime_seqlock); ++ write_seqcount_begin(&tsk->vtime_seqcount); + __vtime_account_system(tsk); + if (context_tracking_in_user()) + tsk->vtime_snap_whence = VTIME_USER; +- write_sequnlock(&tsk->vtime_seqlock); ++ write_seqcount_end(&tsk->vtime_seqcount); + } + + void vtime_account_user(struct task_struct *tsk) + { + cputime_t delta_cpu; + +- write_seqlock(&tsk->vtime_seqlock); ++ write_seqcount_begin(&tsk->vtime_seqcount); + delta_cpu = get_vtime_delta(tsk); + tsk->vtime_snap_whence = VTIME_SYS; + account_user_time(tsk, delta_cpu, cputime_to_scaled(delta_cpu)); +- write_sequnlock(&tsk->vtime_seqlock); ++ write_seqcount_end(&tsk->vtime_seqcount); + } + + void vtime_user_enter(struct task_struct *tsk) + { +- write_seqlock(&tsk->vtime_seqlock); ++ write_seqcount_begin(&tsk->vtime_seqcount); + __vtime_account_system(tsk); + tsk->vtime_snap_whence = VTIME_USER; +- write_sequnlock(&tsk->vtime_seqlock); ++ write_seqcount_end(&tsk->vtime_seqcount); + } + + void vtime_guest_enter(struct task_struct *tsk) +@@ -738,19 +738,19 @@ + * synchronization against the reader (task_gtime()) + * that can thus safely catch up with a tickless delta. + */ +- write_seqlock(&tsk->vtime_seqlock); ++ write_seqcount_begin(&tsk->vtime_seqcount); + __vtime_account_system(tsk); + current->flags |= PF_VCPU; +- write_sequnlock(&tsk->vtime_seqlock); ++ write_seqcount_end(&tsk->vtime_seqcount); + } + EXPORT_SYMBOL_GPL(vtime_guest_enter); + + void vtime_guest_exit(struct task_struct *tsk) + { +- write_seqlock(&tsk->vtime_seqlock); ++ write_seqcount_begin(&tsk->vtime_seqcount); + __vtime_account_system(tsk); + current->flags &= ~PF_VCPU; +- write_sequnlock(&tsk->vtime_seqlock); ++ write_seqcount_end(&tsk->vtime_seqcount); + } + EXPORT_SYMBOL_GPL(vtime_guest_exit); + +@@ -763,24 +763,26 @@ + + void arch_vtime_task_switch(struct task_struct *prev) + { +- write_seqlock(&prev->vtime_seqlock); +- prev->vtime_snap_whence = VTIME_SLEEPING; +- write_sequnlock(&prev->vtime_seqlock); ++ write_seqcount_begin(&prev->vtime_seqcount); ++ prev->vtime_snap_whence = VTIME_INACTIVE; ++ write_seqcount_end(&prev->vtime_seqcount); + +- write_seqlock(¤t->vtime_seqlock); ++ write_seqcount_begin(¤t->vtime_seqcount); + current->vtime_snap_whence = VTIME_SYS; + current->vtime_snap = sched_clock_cpu(smp_processor_id()); +- write_sequnlock(¤t->vtime_seqlock); ++ write_seqcount_end(¤t->vtime_seqcount); + } + + void vtime_init_idle(struct task_struct *t, int cpu) + { + unsigned long flags; + +- write_seqlock_irqsave(&t->vtime_seqlock, flags); ++ local_irq_save(flags); ++ write_seqcount_begin(&t->vtime_seqcount); + t->vtime_snap_whence = VTIME_SYS; + t->vtime_snap = sched_clock_cpu(cpu); +- write_sequnlock_irqrestore(&t->vtime_seqlock, flags); ++ write_seqcount_end(&t->vtime_seqcount); ++ local_irq_restore(flags); + } + + cputime_t task_gtime(struct task_struct *t) +@@ -792,13 +794,13 @@ + return t->gtime; + + do { +- seq = read_seqbegin(&t->vtime_seqlock); ++ seq = read_seqcount_begin(&t->vtime_seqcount); + + gtime = t->gtime; + if (t->flags & PF_VCPU) + gtime += vtime_delta(t); + +- } while (read_seqretry(&t->vtime_seqlock, seq)); ++ } while (read_seqcount_retry(&t->vtime_seqcount, 
seq)); + + return gtime; + } +@@ -821,7 +823,7 @@ + *udelta = 0; + *sdelta = 0; + +- seq = read_seqbegin(&t->vtime_seqlock); ++ seq = read_seqcount_begin(&t->vtime_seqcount); + + if (u_dst) + *u_dst = *u_src; +@@ -829,7 +831,7 @@ + *s_dst = *s_src; + + /* Task is sleeping, nothing to add */ +- if (t->vtime_snap_whence == VTIME_SLEEPING || ++ if (t->vtime_snap_whence == VTIME_INACTIVE || + is_idle_task(t)) + continue; + +@@ -845,7 +847,7 @@ + if (t->vtime_snap_whence == VTIME_SYS) + *sdelta = delta; + } +- } while (read_seqretry(&t->vtime_seqlock, seq)); ++ } while (read_seqcount_retry(&t->vtime_seqcount, seq)); + } + + +diff -Nur linux-4.4.13.orig/kernel/sched/deadline.c linux-4.4.13/kernel/sched/deadline.c +--- linux-4.4.13.orig/kernel/sched/deadline.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/sched/deadline.c 2016-06-22 21:55:08.058523867 +0200 +@@ -134,7 +134,7 @@ + { + struct task_struct *p = dl_task_of(dl_se); + +- if (p->nr_cpus_allowed > 1) ++ if (tsk_nr_cpus_allowed(p) > 1) + dl_rq->dl_nr_migratory++; + + update_dl_migration(dl_rq); +@@ -144,7 +144,7 @@ + { + struct task_struct *p = dl_task_of(dl_se); + +- if (p->nr_cpus_allowed > 1) ++ if (tsk_nr_cpus_allowed(p) > 1) + dl_rq->dl_nr_migratory--; + + update_dl_migration(dl_rq); +@@ -697,6 +697,7 @@ + + hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + timer->function = dl_task_timer; ++ timer->irqsafe = 1; + } + + static +@@ -989,7 +990,7 @@ + + enqueue_dl_entity(&p->dl, pi_se, flags); + +- if (!task_current(rq, p) && p->nr_cpus_allowed > 1) ++ if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1) + enqueue_pushable_dl_task(rq, p); + } + +@@ -1067,9 +1068,9 @@ + * try to make it stay here, it might be important. + */ + if (unlikely(dl_task(curr)) && +- (curr->nr_cpus_allowed < 2 || ++ (tsk_nr_cpus_allowed(curr) < 2 || + !dl_entity_preempt(&p->dl, &curr->dl)) && +- (p->nr_cpus_allowed > 1)) { ++ (tsk_nr_cpus_allowed(p) > 1)) { + int target = find_later_rq(p); + + if (target != -1 && +@@ -1090,7 +1091,7 @@ + * Current can't be migrated, useless to reschedule, + * let's hope p can move out. + */ +- if (rq->curr->nr_cpus_allowed == 1 || ++ if (tsk_nr_cpus_allowed(rq->curr) == 1 || + cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1) + return; + +@@ -1098,7 +1099,7 @@ + * p is migratable, so let's not schedule it and + * see if it is pushed or pulled somewhere else. 
+ */ +- if (p->nr_cpus_allowed != 1 && ++ if (tsk_nr_cpus_allowed(p) != 1 && + cpudl_find(&rq->rd->cpudl, p, NULL) != -1) + return; + +@@ -1212,7 +1213,7 @@ + { + update_curr_dl(rq); + +- if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1) ++ if (on_dl_rq(&p->dl) && tsk_nr_cpus_allowed(p) > 1) + enqueue_pushable_dl_task(rq, p); + } + +@@ -1335,7 +1336,7 @@ + if (unlikely(!later_mask)) + return -1; + +- if (task->nr_cpus_allowed == 1) ++ if (tsk_nr_cpus_allowed(task) == 1) + return -1; + + /* +@@ -1441,7 +1442,7 @@ + if (double_lock_balance(rq, later_rq)) { + if (unlikely(task_rq(task) != rq || + !cpumask_test_cpu(later_rq->cpu, +- &task->cpus_allowed) || ++ tsk_cpus_allowed(task)) || + task_running(rq, task) || + !task_on_rq_queued(task))) { + double_unlock_balance(rq, later_rq); +@@ -1480,7 +1481,7 @@ + + BUG_ON(rq->cpu != task_cpu(p)); + BUG_ON(task_current(rq, p)); +- BUG_ON(p->nr_cpus_allowed <= 1); ++ BUG_ON(tsk_nr_cpus_allowed(p) <= 1); + + BUG_ON(!task_on_rq_queued(p)); + BUG_ON(!dl_task(p)); +@@ -1519,7 +1520,7 @@ + */ + if (dl_task(rq->curr) && + dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) && +- rq->curr->nr_cpus_allowed > 1) { ++ tsk_nr_cpus_allowed(rq->curr) > 1) { + resched_curr(rq); + return 0; + } +@@ -1666,9 +1667,9 @@ + { + if (!task_running(rq, p) && + !test_tsk_need_resched(rq->curr) && +- p->nr_cpus_allowed > 1 && ++ tsk_nr_cpus_allowed(p) > 1 && + dl_task(rq->curr) && +- (rq->curr->nr_cpus_allowed < 2 || ++ (tsk_nr_cpus_allowed(rq->curr) < 2 || + !dl_entity_preempt(&p->dl, &rq->curr->dl))) { + push_dl_tasks(rq); + } +@@ -1769,7 +1770,7 @@ + { + if (task_on_rq_queued(p) && rq->curr != p) { + #ifdef CONFIG_SMP +- if (p->nr_cpus_allowed > 1 && rq->dl.overloaded) ++ if (tsk_nr_cpus_allowed(p) > 1 && rq->dl.overloaded) + queue_push_tasks(rq); + #else + if (dl_task(rq->curr)) +diff -Nur linux-4.4.13.orig/kernel/sched/debug.c linux-4.4.13/kernel/sched/debug.c +--- linux-4.4.13.orig/kernel/sched/debug.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/sched/debug.c 2016-06-22 21:55:08.058523867 +0200 +@@ -251,6 +251,9 @@ + P(rt_throttled); + PN(rt_time); + PN(rt_runtime); ++#ifdef CONFIG_SMP ++ P(rt_nr_migratory); ++#endif + + #undef PN + #undef P +@@ -635,6 +638,10 @@ + #endif + P(policy); + P(prio); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ P(migrate_disable); ++#endif ++ P(nr_cpus_allowed); + #undef PN + #undef __PN + #undef P +diff -Nur linux-4.4.13.orig/kernel/sched/fair.c linux-4.4.13/kernel/sched/fair.c +--- linux-4.4.13.orig/kernel/sched/fair.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/sched/fair.c 2016-06-22 21:55:08.058523867 +0200 +@@ -3135,7 +3135,7 @@ + ideal_runtime = sched_slice(cfs_rq, curr); + delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; + if (delta_exec > ideal_runtime) { +- resched_curr(rq_of(cfs_rq)); ++ resched_curr_lazy(rq_of(cfs_rq)); + /* + * The current task ran long enough, ensure it doesn't get + * re-elected due to buddy favours. +@@ -3159,7 +3159,7 @@ + return; + + if (delta > ideal_runtime) +- resched_curr(rq_of(cfs_rq)); ++ resched_curr_lazy(rq_of(cfs_rq)); + } + + static void +@@ -3299,7 +3299,7 @@ + * validating it and just reschedule. 
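// Editor's note: the deadline.c and rt.c hunks in this patch mechanically
// replace direct reads of p->nr_cpus_allowed with a helper, so that a
// migrate-disabled task on RT reports exactly one usable CPU. The helper's
// definition is not part of this excerpt; its assumed shape is roughly:
static inline int tsk_nr_cpus_allowed(struct task_struct *p)
{
#ifdef CONFIG_PREEMPT_RT_FULL
	if (p->migrate_disable)         /* task is pinned to this CPU right now */
		return 1;
#endif
	return p->nr_cpus_allowed;
}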
+ */ + if (queued) { +- resched_curr(rq_of(cfs_rq)); ++ resched_curr_lazy(rq_of(cfs_rq)); + return; + } + /* +@@ -3481,7 +3481,7 @@ + * hierarchy can be throttled + */ + if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) +- resched_curr(rq_of(cfs_rq)); ++ resched_curr_lazy(rq_of(cfs_rq)); + } + + static __always_inline +@@ -4093,7 +4093,7 @@ + + if (delta < 0) { + if (rq->curr == p) +- resched_curr(rq); ++ resched_curr_lazy(rq); + return; + } + hrtick_start(rq, delta); +@@ -5177,7 +5177,7 @@ + return; + + preempt: +- resched_curr(rq); ++ resched_curr_lazy(rq); + /* + * Only set the backward buddy when the current task is still + * on the rq. This can happen when a wakeup gets interleaved +@@ -7928,7 +7928,7 @@ + * 'current' within the tree based on its new key value. + */ + swap(curr->vruntime, se->vruntime); +- resched_curr(rq); ++ resched_curr_lazy(rq); + } + + se->vruntime -= cfs_rq->min_vruntime; +@@ -7953,7 +7953,7 @@ + */ + if (rq->curr == p) { + if (p->prio > oldprio) +- resched_curr(rq); ++ resched_curr_lazy(rq); + } else + check_preempt_curr(rq, p, 0); + } +diff -Nur linux-4.4.13.orig/kernel/sched/features.h linux-4.4.13/kernel/sched/features.h +--- linux-4.4.13.orig/kernel/sched/features.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/sched/features.h 2016-06-22 21:55:08.062523867 +0200 +@@ -45,11 +45,19 @@ + */ + SCHED_FEAT(NONTASK_CAPACITY, true) + ++#ifdef CONFIG_PREEMPT_RT_FULL ++SCHED_FEAT(TTWU_QUEUE, false) ++# ifdef CONFIG_PREEMPT_LAZY ++SCHED_FEAT(PREEMPT_LAZY, true) ++# endif ++#else ++ + /* + * Queue remote wakeups on the target CPU and process them + * using the scheduler IPI. Reduces rq->lock contention/bounces. + */ + SCHED_FEAT(TTWU_QUEUE, true) ++#endif + + #ifdef HAVE_RT_PUSH_IPI + /* +diff -Nur linux-4.4.13.orig/kernel/sched/Makefile linux-4.4.13/kernel/sched/Makefile +--- linux-4.4.13.orig/kernel/sched/Makefile 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/sched/Makefile 2016-06-22 21:55:08.062523867 +0200 +@@ -13,7 +13,7 @@ + + obj-y += core.o loadavg.o clock.o cputime.o + obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o +-obj-y += wait.o completion.o idle.o ++obj-y += wait.o swait.o work-simple.o completion.o idle.o + obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o + obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o + obj-$(CONFIG_SCHEDSTATS) += stats.o +diff -Nur linux-4.4.13.orig/kernel/sched/rt.c linux-4.4.13/kernel/sched/rt.c +--- linux-4.4.13.orig/kernel/sched/rt.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/sched/rt.c 2016-06-22 21:55:08.062523867 +0200 +@@ -47,6 +47,7 @@ + + hrtimer_init(&rt_b->rt_period_timer, + CLOCK_MONOTONIC, HRTIMER_MODE_REL); ++ rt_b->rt_period_timer.irqsafe = 1; + rt_b->rt_period_timer.function = sched_rt_period_timer; + } + +@@ -93,6 +94,7 @@ + rt_rq->push_cpu = nr_cpu_ids; + raw_spin_lock_init(&rt_rq->push_lock); + init_irq_work(&rt_rq->push_work, push_irq_work_func); ++ rt_rq->push_work.flags |= IRQ_WORK_HARD_IRQ; + #endif + #endif /* CONFIG_SMP */ + /* We start is dequeued state, because no RT tasks are queued */ +@@ -326,7 +328,7 @@ + rt_rq = &rq_of_rt_rq(rt_rq)->rt; + + rt_rq->rt_nr_total++; +- if (p->nr_cpus_allowed > 1) ++ if (tsk_nr_cpus_allowed(p) > 1) + rt_rq->rt_nr_migratory++; + + update_rt_migration(rt_rq); +@@ -343,7 +345,7 @@ + rt_rq = &rq_of_rt_rq(rt_rq)->rt; + + rt_rq->rt_nr_total--; +- if (p->nr_cpus_allowed > 1) ++ if (tsk_nr_cpus_allowed(p) > 1) + rt_rq->rt_nr_migratory--; + + update_rt_migration(rt_rq); +@@ -1262,7 +1264,7 @@ + + enqueue_rt_entity(rt_se, 
flags & ENQUEUE_HEAD); + +- if (!task_current(rq, p) && p->nr_cpus_allowed > 1) ++ if (!task_current(rq, p) && tsk_nr_cpus_allowed(p) > 1) + enqueue_pushable_task(rq, p); + } + +@@ -1351,7 +1353,7 @@ + * will have to sort it out. + */ + if (curr && unlikely(rt_task(curr)) && +- (curr->nr_cpus_allowed < 2 || ++ (tsk_nr_cpus_allowed(curr) < 2 || + curr->prio <= p->prio)) { + int target = find_lowest_rq(p); + +@@ -1375,7 +1377,7 @@ + * Current can't be migrated, useless to reschedule, + * let's hope p can move out. + */ +- if (rq->curr->nr_cpus_allowed == 1 || ++ if (tsk_nr_cpus_allowed(rq->curr) == 1 || + !cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) + return; + +@@ -1383,7 +1385,7 @@ + * p is migratable, so let's not schedule it and + * see if it is pushed or pulled somewhere else. + */ +- if (p->nr_cpus_allowed != 1 ++ if (tsk_nr_cpus_allowed(p) != 1 + && cpupri_find(&rq->rd->cpupri, p, NULL)) + return; + +@@ -1517,7 +1519,7 @@ + * The previous task needs to be made eligible for pushing + * if it is still active + */ +- if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1) ++ if (on_rt_rq(&p->rt) && tsk_nr_cpus_allowed(p) > 1) + enqueue_pushable_task(rq, p); + } + +@@ -1567,7 +1569,7 @@ + if (unlikely(!lowest_mask)) + return -1; + +- if (task->nr_cpus_allowed == 1) ++ if (tsk_nr_cpus_allowed(task) == 1) + return -1; /* No other targets possible */ + + if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask)) +@@ -1699,7 +1701,7 @@ + + BUG_ON(rq->cpu != task_cpu(p)); + BUG_ON(task_current(rq, p)); +- BUG_ON(p->nr_cpus_allowed <= 1); ++ BUG_ON(tsk_nr_cpus_allowed(p) <= 1); + + BUG_ON(!task_on_rq_queued(p)); + BUG_ON(!rt_task(p)); +@@ -2059,9 +2061,9 @@ + { + if (!task_running(rq, p) && + !test_tsk_need_resched(rq->curr) && +- p->nr_cpus_allowed > 1 && ++ tsk_nr_cpus_allowed(p) > 1 && + (dl_task(rq->curr) || rt_task(rq->curr)) && +- (rq->curr->nr_cpus_allowed < 2 || ++ (tsk_nr_cpus_allowed(rq->curr) < 2 || + rq->curr->prio <= p->prio)) + push_rt_tasks(rq); + } +@@ -2134,7 +2136,7 @@ + */ + if (task_on_rq_queued(p) && rq->curr != p) { + #ifdef CONFIG_SMP +- if (p->nr_cpus_allowed > 1 && rq->rt.overloaded) ++ if (tsk_nr_cpus_allowed(p) > 1 && rq->rt.overloaded) + queue_push_tasks(rq); + #else + if (p->prio < rq->curr->prio) +diff -Nur linux-4.4.13.orig/kernel/sched/sched.h linux-4.4.13/kernel/sched/sched.h +--- linux-4.4.13.orig/kernel/sched/sched.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/sched/sched.h 2016-06-22 21:55:08.062523867 +0200 +@@ -1100,6 +1100,7 @@ + #define WF_SYNC 0x01 /* waker goes to sleep after wakeup */ + #define WF_FORK 0x02 /* child wakeup after fork */ + #define WF_MIGRATED 0x4 /* internal use, task got migrated */ ++#define WF_LOCK_SLEEPER 0x08 /* wakeup spinlock "sleeper" */ + + /* + * To aid in avoiding the subversion of "niceness" due to uneven distribution +@@ -1299,6 +1300,15 @@ + extern void resched_curr(struct rq *rq); + extern void resched_cpu(int cpu); + ++#ifdef CONFIG_PREEMPT_LAZY ++extern void resched_curr_lazy(struct rq *rq); ++#else ++static inline void resched_curr_lazy(struct rq *rq) ++{ ++ resched_curr(rq); ++} ++#endif ++ + extern struct rt_bandwidth def_rt_bandwidth; + extern void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime); + +diff -Nur linux-4.4.13.orig/kernel/sched/swait.c linux-4.4.13/kernel/sched/swait.c +--- linux-4.4.13.orig/kernel/sched/swait.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.4.13/kernel/sched/swait.c 2016-06-22 21:55:08.062523867 +0200 +@@ -0,0 +1,143 @@ ++#include ++#include 
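// Editor's sketch of the caller-side pattern for the simple wait queues
// implemented below ('ready' stands in for whatever condition the waker
// establishes; locking of the condition itself is elided):
static struct swait_queue_head wq;	/* set up once via init_swait_queue_head(&wq) */
static bool ready;

static int waiter(void)
{
	/* sleeps in TASK_INTERRUPTIBLE until 'ready' is observed true */
	return swait_event_interruptible(wq, ready);
}

static void waker(void)
{
	ready = true;
	swake_up(&wq);			/* wakes one waiter, as implemented below */
}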
++#include ++ ++void __init_swait_queue_head(struct swait_queue_head *q, const char *name, ++ struct lock_class_key *key) ++{ ++ raw_spin_lock_init(&q->lock); ++ lockdep_set_class_and_name(&q->lock, key, name); ++ INIT_LIST_HEAD(&q->task_list); ++} ++EXPORT_SYMBOL(__init_swait_queue_head); ++ ++/* ++ * The thing about the wake_up_state() return value; I think we can ignore it. ++ * ++ * If for some reason it would return 0, that means the previously waiting ++ * task is already running, so it will observe condition true (or has already). ++ */ ++void swake_up_locked(struct swait_queue_head *q) ++{ ++ struct swait_queue *curr; ++ ++ if (list_empty(&q->task_list)) ++ return; ++ ++ curr = list_first_entry(&q->task_list, typeof(*curr), task_list); ++ wake_up_process(curr->task); ++ list_del_init(&curr->task_list); ++} ++EXPORT_SYMBOL(swake_up_locked); ++ ++void swake_up_all_locked(struct swait_queue_head *q) ++{ ++ struct swait_queue *curr; ++ int wakes = 0; ++ ++ while (!list_empty(&q->task_list)) { ++ ++ curr = list_first_entry(&q->task_list, typeof(*curr), ++ task_list); ++ wake_up_process(curr->task); ++ list_del_init(&curr->task_list); ++ wakes++; ++ } ++ if (pm_in_action) ++ return; ++ WARN(wakes > 2, "complate_all() with %d waiters\n", wakes); ++} ++EXPORT_SYMBOL(swake_up_all_locked); ++ ++void swake_up(struct swait_queue_head *q) ++{ ++ unsigned long flags; ++ ++ if (!swait_active(q)) ++ return; ++ ++ raw_spin_lock_irqsave(&q->lock, flags); ++ swake_up_locked(q); ++ raw_spin_unlock_irqrestore(&q->lock, flags); ++} ++EXPORT_SYMBOL(swake_up); ++ ++/* ++ * Does not allow usage from IRQ disabled, since we must be able to ++ * release IRQs to guarantee bounded hold time. ++ */ ++void swake_up_all(struct swait_queue_head *q) ++{ ++ struct swait_queue *curr; ++ LIST_HEAD(tmp); ++ ++ if (!swait_active(q)) ++ return; ++ ++ raw_spin_lock_irq(&q->lock); ++ list_splice_init(&q->task_list, &tmp); ++ while (!list_empty(&tmp)) { ++ curr = list_first_entry(&tmp, typeof(*curr), task_list); ++ ++ wake_up_state(curr->task, TASK_NORMAL); ++ list_del_init(&curr->task_list); ++ ++ if (list_empty(&tmp)) ++ break; ++ ++ raw_spin_unlock_irq(&q->lock); ++ raw_spin_lock_irq(&q->lock); ++ } ++ raw_spin_unlock_irq(&q->lock); ++} ++EXPORT_SYMBOL(swake_up_all); ++ ++void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait) ++{ ++ wait->task = current; ++ if (list_empty(&wait->task_list)) ++ list_add(&wait->task_list, &q->task_list); ++} ++ ++void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state) ++{ ++ unsigned long flags; ++ ++ raw_spin_lock_irqsave(&q->lock, flags); ++ __prepare_to_swait(q, wait); ++ set_current_state(state); ++ raw_spin_unlock_irqrestore(&q->lock, flags); ++} ++EXPORT_SYMBOL(prepare_to_swait); ++ ++long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state) ++{ ++ if (signal_pending_state(state, current)) ++ return -ERESTARTSYS; ++ ++ prepare_to_swait(q, wait, state); ++ ++ return 0; ++} ++EXPORT_SYMBOL(prepare_to_swait_event); ++ ++void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait) ++{ ++ __set_current_state(TASK_RUNNING); ++ if (!list_empty(&wait->task_list)) ++ list_del_init(&wait->task_list); ++} ++ ++void finish_swait(struct swait_queue_head *q, struct swait_queue *wait) ++{ ++ unsigned long flags; ++ ++ __set_current_state(TASK_RUNNING); ++ ++ if (!list_empty_careful(&wait->task_list)) { ++ raw_spin_lock_irqsave(&q->lock, flags); ++ list_del_init(&wait->task_list); ++ 
raw_spin_unlock_irqrestore(&q->lock, flags); ++ } ++} ++EXPORT_SYMBOL(finish_swait); +diff -Nur linux-4.4.13.orig/kernel/sched/work-simple.c linux-4.4.13/kernel/sched/work-simple.c +--- linux-4.4.13.orig/kernel/sched/work-simple.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.4.13/kernel/sched/work-simple.c 2016-06-22 21:55:08.062523867 +0200 +@@ -0,0 +1,173 @@ ++/* ++ * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de ++ * ++ * Provides a framework for enqueuing callbacks from irq context ++ * PREEMPT_RT_FULL safe. The callbacks are executed in kthread context. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define SWORK_EVENT_PENDING (1 << 0) ++ ++static DEFINE_MUTEX(worker_mutex); ++static struct sworker *glob_worker; ++ ++struct sworker { ++ struct list_head events; ++ struct swait_queue_head wq; ++ ++ raw_spinlock_t lock; ++ ++ struct task_struct *task; ++ int refs; ++}; ++ ++static bool swork_readable(struct sworker *worker) ++{ ++ bool r; ++ ++ if (kthread_should_stop()) ++ return true; ++ ++ raw_spin_lock_irq(&worker->lock); ++ r = !list_empty(&worker->events); ++ raw_spin_unlock_irq(&worker->lock); ++ ++ return r; ++} ++ ++static int swork_kthread(void *arg) ++{ ++ struct sworker *worker = arg; ++ ++ for (;;) { ++ swait_event_interruptible(worker->wq, ++ swork_readable(worker)); ++ if (kthread_should_stop()) ++ break; ++ ++ raw_spin_lock_irq(&worker->lock); ++ while (!list_empty(&worker->events)) { ++ struct swork_event *sev; ++ ++ sev = list_first_entry(&worker->events, ++ struct swork_event, item); ++ list_del(&sev->item); ++ raw_spin_unlock_irq(&worker->lock); ++ ++ WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING, ++ &sev->flags)); ++ sev->func(sev); ++ raw_spin_lock_irq(&worker->lock); ++ } ++ raw_spin_unlock_irq(&worker->lock); ++ } ++ return 0; ++} ++ ++static struct sworker *swork_create(void) ++{ ++ struct sworker *worker; ++ ++ worker = kzalloc(sizeof(*worker), GFP_KERNEL); ++ if (!worker) ++ return ERR_PTR(-ENOMEM); ++ ++ INIT_LIST_HEAD(&worker->events); ++ raw_spin_lock_init(&worker->lock); ++ init_swait_queue_head(&worker->wq); ++ ++ worker->task = kthread_run(swork_kthread, worker, "kswork"); ++ if (IS_ERR(worker->task)) { ++ kfree(worker); ++ return ERR_PTR(-ENOMEM); ++ } ++ ++ return worker; ++} ++ ++static void swork_destroy(struct sworker *worker) ++{ ++ kthread_stop(worker->task); ++ ++ WARN_ON(!list_empty(&worker->events)); ++ kfree(worker); ++} ++ ++/** ++ * swork_queue - queue swork ++ * ++ * Returns %false if @work was already on a queue, %true otherwise. ++ * ++ * The work is queued and processed on a random CPU ++ */ ++bool swork_queue(struct swork_event *sev) ++{ ++ unsigned long flags; ++ ++ if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags)) ++ return false; ++ ++ raw_spin_lock_irqsave(&glob_worker->lock, flags); ++ list_add_tail(&sev->item, &glob_worker->events); ++ raw_spin_unlock_irqrestore(&glob_worker->lock, flags); ++ ++ swake_up(&glob_worker->wq); ++ return true; ++} ++EXPORT_SYMBOL_GPL(swork_queue); ++ ++/** ++ * swork_get - get an instance of the sworker ++ * ++ * Returns an negative error code if the initialization if the worker did not ++ * work, %0 otherwise. 
++ * ++ */ ++int swork_get(void) ++{ ++ struct sworker *worker; ++ ++ mutex_lock(&worker_mutex); ++ if (!glob_worker) { ++ worker = swork_create(); ++ if (IS_ERR(worker)) { ++ mutex_unlock(&worker_mutex); ++ return -ENOMEM; ++ } ++ ++ glob_worker = worker; ++ } ++ ++ glob_worker->refs++; ++ mutex_unlock(&worker_mutex); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(swork_get); ++ ++/** ++ * swork_put - puts an instance of the sworker ++ * ++ * Will destroy the sworker thread. This function must not be called until all ++ * queued events have been completed. ++ */ ++void swork_put(void) ++{ ++ mutex_lock(&worker_mutex); ++ ++ glob_worker->refs--; ++ if (glob_worker->refs > 0) ++ goto out; ++ ++ swork_destroy(glob_worker); ++ glob_worker = NULL; ++out: ++ mutex_unlock(&worker_mutex); ++} ++EXPORT_SYMBOL_GPL(swork_put); +diff -Nur linux-4.4.13.orig/kernel/signal.c linux-4.4.13/kernel/signal.c +--- linux-4.4.13.orig/kernel/signal.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/signal.c 2016-06-22 21:55:08.062523867 +0200 +@@ -14,6 +14,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -352,13 +353,30 @@ + return false; + } + ++static inline struct sigqueue *get_task_cache(struct task_struct *t) ++{ ++ struct sigqueue *q = t->sigqueue_cache; ++ ++ if (cmpxchg(&t->sigqueue_cache, q, NULL) != q) ++ return NULL; ++ return q; ++} ++ ++static inline int put_task_cache(struct task_struct *t, struct sigqueue *q) ++{ ++ if (cmpxchg(&t->sigqueue_cache, NULL, q) == NULL) ++ return 0; ++ return 1; ++} ++ + /* + * allocate a new signal queue record + * - this may be called without locks if and only if t == current, otherwise an + * appropriate lock must be held to stop the target task from exiting + */ + static struct sigqueue * +-__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit) ++__sigqueue_do_alloc(int sig, struct task_struct *t, gfp_t flags, ++ int override_rlimit, int fromslab) + { + struct sigqueue *q = NULL; + struct user_struct *user; +@@ -375,7 +393,10 @@ + if (override_rlimit || + atomic_read(&user->sigpending) <= + task_rlimit(t, RLIMIT_SIGPENDING)) { +- q = kmem_cache_alloc(sigqueue_cachep, flags); ++ if (!fromslab) ++ q = get_task_cache(t); ++ if (!q) ++ q = kmem_cache_alloc(sigqueue_cachep, flags); + } else { + print_dropped_signal(sig); + } +@@ -392,6 +413,13 @@ + return q; + } + ++static struct sigqueue * ++__sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, ++ int override_rlimit) ++{ ++ return __sigqueue_do_alloc(sig, t, flags, override_rlimit, 0); ++} ++ + static void __sigqueue_free(struct sigqueue *q) + { + if (q->flags & SIGQUEUE_PREALLOC) +@@ -401,6 +429,21 @@ + kmem_cache_free(sigqueue_cachep, q); + } + ++static void sigqueue_free_current(struct sigqueue *q) ++{ ++ struct user_struct *up; ++ ++ if (q->flags & SIGQUEUE_PREALLOC) ++ return; ++ ++ up = q->user; ++ if (rt_prio(current->normal_prio) && !put_task_cache(current, q)) { ++ atomic_dec(&up->sigpending); ++ free_uid(up); ++ } else ++ __sigqueue_free(q); ++} ++ + void flush_sigqueue(struct sigpending *queue) + { + struct sigqueue *q; +@@ -414,6 +457,21 @@ + } + + /* ++ * Called from __exit_signal. Flush tsk->pending and ++ * tsk->sigqueue_cache ++ */ ++void flush_task_sigqueue(struct task_struct *tsk) ++{ ++ struct sigqueue *q; ++ ++ flush_sigqueue(&tsk->pending); ++ ++ q = get_task_cache(tsk); ++ if (q) ++ kmem_cache_free(sigqueue_cachep, q); ++} ++ ++/* + * Flush all pending signals for this kthread. 
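// Editor's illustration: get_task_cache()/put_task_cache() above form a
// one-slot, lock-free stash built on cmpxchg. The same protocol in portable
// C11 (illustrative, not kernel code; return conventions match the kernel
// helpers):
#include <stdatomic.h>
#include <stddef.h>

static void *_Atomic slot;

static void *cache_get(void)
{
	void *q = atomic_load(&slot);

	/* claim the cached object only if nobody raced us for it */
	if (q && !atomic_compare_exchange_strong(&slot, &q, NULL))
		return NULL;
	return q;
}

static int cache_put(void *q)
{
	void *expected = NULL;

	/* 0 on success; on failure the caller frees q normally */
	return atomic_compare_exchange_strong(&slot, &expected, q) ? 0 : 1;
}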
+ */ + void flush_signals(struct task_struct *t) +@@ -525,7 +583,7 @@ + still_pending: + list_del_init(&first->list); + copy_siginfo(info, &first->info); +- __sigqueue_free(first); ++ sigqueue_free_current(first); + } else { + /* + * Ok, it wasn't in the queue. This must be +@@ -560,6 +618,8 @@ + { + int signr; + ++ WARN_ON_ONCE(tsk != current); ++ + /* We only dequeue private signals from ourselves, we don't let + * signalfd steal them + */ +@@ -1156,8 +1216,8 @@ + * We don't want to have recursive SIGSEGV's etc, for example, + * that is why we also clear SIGNAL_UNKILLABLE. + */ +-int +-force_sig_info(int sig, struct siginfo *info, struct task_struct *t) ++static int ++do_force_sig_info(int sig, struct siginfo *info, struct task_struct *t) + { + unsigned long int flags; + int ret, blocked, ignored; +@@ -1182,6 +1242,39 @@ + return ret; + } + ++int force_sig_info(int sig, struct siginfo *info, struct task_struct *t) ++{ ++/* ++ * On some archs, PREEMPT_RT has to delay sending a signal from a trap ++ * since it can not enable preemption, and the signal code's spin_locks ++ * turn into mutexes. Instead, it must set TIF_NOTIFY_RESUME which will ++ * send the signal on exit of the trap. ++ */ ++#ifdef ARCH_RT_DELAYS_SIGNAL_SEND ++ if (in_atomic()) { ++ if (WARN_ON_ONCE(t != current)) ++ return 0; ++ if (WARN_ON_ONCE(t->forced_info.si_signo)) ++ return 0; ++ ++ if (is_si_special(info)) { ++ WARN_ON_ONCE(info != SEND_SIG_PRIV); ++ t->forced_info.si_signo = sig; ++ t->forced_info.si_errno = 0; ++ t->forced_info.si_code = SI_KERNEL; ++ t->forced_info.si_pid = 0; ++ t->forced_info.si_uid = 0; ++ } else { ++ t->forced_info = *info; ++ } ++ ++ set_tsk_thread_flag(t, TIF_NOTIFY_RESUME); ++ return 0; ++ } ++#endif ++ return do_force_sig_info(sig, info, t); ++} ++ + /* + * Nuke all other threads in the group. + */ +@@ -1216,12 +1309,12 @@ + * Disable interrupts early to avoid deadlocks. + * See rcu_read_unlock() comment header for details. + */ +- local_irq_save(*flags); ++ local_irq_save_nort(*flags); + rcu_read_lock(); + sighand = rcu_dereference(tsk->sighand); + if (unlikely(sighand == NULL)) { + rcu_read_unlock(); +- local_irq_restore(*flags); ++ local_irq_restore_nort(*flags); + break; + } + /* +@@ -1242,7 +1335,7 @@ + } + spin_unlock(&sighand->siglock); + rcu_read_unlock(); +- local_irq_restore(*flags); ++ local_irq_restore_nort(*flags); + } + + return sighand; +@@ -1485,7 +1578,8 @@ + */ + struct sigqueue *sigqueue_alloc(void) + { +- struct sigqueue *q = __sigqueue_alloc(-1, current, GFP_KERNEL, 0); ++ /* Preallocated sigqueue objects always from the slabcache ! */ ++ struct sigqueue *q = __sigqueue_do_alloc(-1, current, GFP_KERNEL, 0, 1); + + if (q) + q->flags |= SIGQUEUE_PREALLOC; +@@ -1846,15 +1940,7 @@ + if (gstop_done && ptrace_reparented(current)) + do_notify_parent_cldstop(current, false, why); + +- /* +- * Don't want to allow preemption here, because +- * sys_ptrace() needs this task to be inactive. +- * +- * XXX: implement read_unlock_no_resched(). 
+- */ +- preempt_disable(); + read_unlock(&tasklist_lock); +- preempt_enable_no_resched(); + freezable_schedule(); + } else { + /* +diff -Nur linux-4.4.13.orig/kernel/softirq.c linux-4.4.13/kernel/softirq.c +--- linux-4.4.13.orig/kernel/softirq.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/softirq.c 2016-06-22 21:55:08.062523867 +0200 +@@ -21,10 +21,12 @@ + #include + #include + #include ++#include + #include + #include + #include + #include ++#include + #include + + #define CREATE_TRACE_POINTS +@@ -56,12 +58,108 @@ + static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp; + + DEFINE_PER_CPU(struct task_struct *, ksoftirqd); ++#ifdef CONFIG_PREEMPT_RT_FULL ++#define TIMER_SOFTIRQS ((1 << TIMER_SOFTIRQ) | (1 << HRTIMER_SOFTIRQ)) ++DEFINE_PER_CPU(struct task_struct *, ktimer_softirqd); ++#endif + + const char * const softirq_to_name[NR_SOFTIRQS] = { + "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", + "TASKLET", "SCHED", "HRTIMER", "RCU" + }; + ++#ifdef CONFIG_NO_HZ_COMMON ++# ifdef CONFIG_PREEMPT_RT_FULL ++ ++struct softirq_runner { ++ struct task_struct *runner[NR_SOFTIRQS]; ++}; ++ ++static DEFINE_PER_CPU(struct softirq_runner, softirq_runners); ++ ++static inline void softirq_set_runner(unsigned int sirq) ++{ ++ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); ++ ++ sr->runner[sirq] = current; ++} ++ ++static inline void softirq_clr_runner(unsigned int sirq) ++{ ++ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); ++ ++ sr->runner[sirq] = NULL; ++} ++ ++/* ++ * On preempt-rt a softirq running context might be blocked on a ++ * lock. There might be no other runnable task on this CPU because the ++ * lock owner runs on some other CPU. So we have to go into idle with ++ * the pending bit set. Therefor we need to check this otherwise we ++ * warn about false positives which confuses users and defeats the ++ * whole purpose of this test. ++ * ++ * This code is called with interrupts disabled. ++ */ ++void softirq_check_pending_idle(void) ++{ ++ static int rate_limit; ++ struct softirq_runner *sr = this_cpu_ptr(&softirq_runners); ++ u32 warnpending; ++ int i; ++ ++ if (rate_limit >= 10) ++ return; ++ ++ warnpending = local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK; ++ for (i = 0; i < NR_SOFTIRQS; i++) { ++ struct task_struct *tsk = sr->runner[i]; ++ ++ /* ++ * The wakeup code in rtmutex.c wakes up the task ++ * _before_ it sets pi_blocked_on to NULL under ++ * tsk->pi_lock. So we need to check for both: state ++ * and pi_blocked_on. 
++ */ ++ if (tsk) { ++ raw_spin_lock(&tsk->pi_lock); ++ if (tsk->pi_blocked_on || tsk->state == TASK_RUNNING) { ++ /* Clear all bits pending in that task */ ++ warnpending &= ~(tsk->softirqs_raised); ++ warnpending &= ~(1 << i); ++ } ++ raw_spin_unlock(&tsk->pi_lock); ++ } ++ } ++ ++ if (warnpending) { ++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", ++ warnpending); ++ rate_limit++; ++ } ++} ++# else ++/* ++ * On !PREEMPT_RT we just printk rate limited: ++ */ ++void softirq_check_pending_idle(void) ++{ ++ static int rate_limit; ++ ++ if (rate_limit < 10 && ++ (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { ++ printk(KERN_ERR "NOHZ: local_softirq_pending %02x\n", ++ local_softirq_pending()); ++ rate_limit++; ++ } ++} ++# endif ++ ++#else /* !CONFIG_NO_HZ_COMMON */ ++static inline void softirq_set_runner(unsigned int sirq) { } ++static inline void softirq_clr_runner(unsigned int sirq) { } ++#endif ++ + /* + * we cannot loop indefinitely here to avoid userspace starvation, + * but we also don't want to introduce a worst case 1/HZ latency +@@ -77,6 +175,79 @@ + wake_up_process(tsk); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static void wakeup_timer_softirqd(void) ++{ ++ /* Interrupts are disabled: no need to stop preemption */ ++ struct task_struct *tsk = __this_cpu_read(ktimer_softirqd); ++ ++ if (tsk && tsk->state != TASK_RUNNING) ++ wake_up_process(tsk); ++} ++#endif ++ ++static void handle_softirq(unsigned int vec_nr) ++{ ++ struct softirq_action *h = softirq_vec + vec_nr; ++ int prev_count; ++ ++ prev_count = preempt_count(); ++ ++ kstat_incr_softirqs_this_cpu(vec_nr); ++ ++ trace_softirq_entry(vec_nr); ++ h->action(h); ++ trace_softirq_exit(vec_nr); ++ if (unlikely(prev_count != preempt_count())) { ++ pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", ++ vec_nr, softirq_to_name[vec_nr], h->action, ++ prev_count, preempt_count()); ++ preempt_count_set(prev_count); ++ } ++} ++ ++#ifndef CONFIG_PREEMPT_RT_FULL ++static inline int ksoftirqd_softirq_pending(void) ++{ ++ return local_softirq_pending(); ++} ++ ++static void handle_pending_softirqs(u32 pending) ++{ ++ struct softirq_action *h = softirq_vec; ++ int softirq_bit; ++ ++ local_irq_enable(); ++ ++ h = softirq_vec; ++ ++ while ((softirq_bit = ffs(pending))) { ++ unsigned int vec_nr; ++ ++ h += softirq_bit - 1; ++ vec_nr = h - softirq_vec; ++ handle_softirq(vec_nr); ++ ++ h++; ++ pending >>= softirq_bit; ++ } ++ ++ rcu_bh_qs(); ++ local_irq_disable(); ++} ++ ++static void run_ksoftirqd(unsigned int cpu) ++{ ++ local_irq_disable(); ++ if (ksoftirqd_softirq_pending()) { ++ __do_softirq(); ++ local_irq_enable(); ++ cond_resched_rcu_qs(); ++ return; ++ } ++ local_irq_enable(); ++} ++ + /* + * preempt_count and SOFTIRQ_OFFSET usage: + * - preempt_count is changed by SOFTIRQ_OFFSET on entering or leaving +@@ -116,9 +287,9 @@ + + if (preempt_count() == cnt) { + #ifdef CONFIG_DEBUG_PREEMPT +- current->preempt_disable_ip = get_parent_ip(CALLER_ADDR1); ++ current->preempt_disable_ip = get_lock_parent_ip(); + #endif +- trace_preempt_off(CALLER_ADDR0, get_parent_ip(CALLER_ADDR1)); ++ trace_preempt_off(CALLER_ADDR0, get_lock_parent_ip()); + } + } + EXPORT_SYMBOL(__local_bh_disable_ip); +@@ -232,10 +403,8 @@ + unsigned long end = jiffies + MAX_SOFTIRQ_TIME; + unsigned long old_flags = current->flags; + int max_restart = MAX_SOFTIRQ_RESTART; +- struct softirq_action *h; + bool in_hardirq; + __u32 pending; +- int softirq_bit; + + /* + * Mask out PF_MEMALLOC s current task context is borrowed for the +@@ 
-254,36 +423,7 @@ + /* Reset the pending bitmask before enabling irqs */ + set_softirq_pending(0); + +- local_irq_enable(); +- +- h = softirq_vec; +- +- while ((softirq_bit = ffs(pending))) { +- unsigned int vec_nr; +- int prev_count; +- +- h += softirq_bit - 1; +- +- vec_nr = h - softirq_vec; +- prev_count = preempt_count(); +- +- kstat_incr_softirqs_this_cpu(vec_nr); +- +- trace_softirq_entry(vec_nr); +- h->action(h); +- trace_softirq_exit(vec_nr); +- if (unlikely(prev_count != preempt_count())) { +- pr_err("huh, entered softirq %u %s %p with preempt_count %08x, exited with %08x?\n", +- vec_nr, softirq_to_name[vec_nr], h->action, +- prev_count, preempt_count()); +- preempt_count_set(prev_count); +- } +- h++; +- pending >>= softirq_bit; +- } +- +- rcu_bh_qs(); +- local_irq_disable(); ++ handle_pending_softirqs(pending); + + pending = local_softirq_pending(); + if (pending) { +@@ -320,6 +460,310 @@ + } + + /* ++ * This function must run with irqs disabled! ++ */ ++void raise_softirq_irqoff(unsigned int nr) ++{ ++ __raise_softirq_irqoff(nr); ++ ++ /* ++ * If we're in an interrupt or softirq, we're done ++ * (this also catches softirq-disabled code). We will ++ * actually run the softirq once we return from ++ * the irq or softirq. ++ * ++ * Otherwise we wake up ksoftirqd to make sure we ++ * schedule the softirq soon. ++ */ ++ if (!in_interrupt()) ++ wakeup_softirqd(); ++} ++ ++void __raise_softirq_irqoff(unsigned int nr) ++{ ++ trace_softirq_raise(nr); ++ or_softirq_pending(1UL << nr); ++} ++ ++static inline void local_bh_disable_nort(void) { local_bh_disable(); } ++static inline void _local_bh_enable_nort(void) { _local_bh_enable(); } ++static void ksoftirqd_set_sched_params(unsigned int cpu) { } ++ ++#else /* !PREEMPT_RT_FULL */ ++ ++/* ++ * On RT we serialize softirq execution with a cpu local lock per softirq ++ */ ++static DEFINE_PER_CPU(struct local_irq_lock [NR_SOFTIRQS], local_softirq_locks); ++ ++void __init softirq_early_init(void) ++{ ++ int i; ++ ++ for (i = 0; i < NR_SOFTIRQS; i++) ++ local_irq_lock_init(local_softirq_locks[i]); ++} ++ ++static void lock_softirq(int which) ++{ ++ local_lock(local_softirq_locks[which]); ++} ++ ++static void unlock_softirq(int which) ++{ ++ local_unlock(local_softirq_locks[which]); ++} ++ ++static void do_single_softirq(int which) ++{ ++ unsigned long old_flags = current->flags; ++ ++ current->flags &= ~PF_MEMALLOC; ++ vtime_account_irq_enter(current); ++ current->flags |= PF_IN_SOFTIRQ; ++ lockdep_softirq_enter(); ++ local_irq_enable(); ++ handle_softirq(which); ++ local_irq_disable(); ++ lockdep_softirq_exit(); ++ current->flags &= ~PF_IN_SOFTIRQ; ++ vtime_account_irq_enter(current); ++ tsk_restore_flags(current, old_flags, PF_MEMALLOC); ++} ++ ++/* ++ * Called with interrupts disabled. Process softirqs which were raised ++ * in current context (or on behalf of ksoftirqd). ++ */ ++static void do_current_softirqs(void) ++{ ++ while (current->softirqs_raised) { ++ int i = __ffs(current->softirqs_raised); ++ unsigned int pending, mask = (1U << i); ++ ++ current->softirqs_raised &= ~mask; ++ local_irq_enable(); ++ ++ /* ++ * If the lock is contended, we boost the owner to ++ * process the softirq or leave the critical section ++ * now. ++ */ ++ lock_softirq(i); ++ local_irq_disable(); ++ softirq_set_runner(i); ++ /* ++ * Check with the local_softirq_pending() bits, ++ * whether we need to process this still or if someone ++ * else took care of it. 
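// Editor's illustration of the ffs()-driven dispatch that the factored-out
// handle_pending_softirqs() above performs: walk set bits lowest-first,
// shifting the pending mask as it goes (plain C, POSIX ffs()):
#include <strings.h>

static void dispatch_pending(unsigned int pending, void (*handle)(unsigned int))
{
	unsigned int base = 0;
	int bit;

	while ((bit = ffs(pending))) {
		handle(base + bit - 1);	/* vec_nr of the lowest pending bit */
		base += bit;		/* mirrors h += softirq_bit - 1; ... h++ */
		pending >>= bit;
	}
}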
++ */ ++ pending = local_softirq_pending(); ++ if (pending & mask) { ++ set_softirq_pending(pending & ~mask); ++ do_single_softirq(i); ++ } ++ softirq_clr_runner(i); ++ WARN_ON(current->softirq_nestcnt != 1); ++ local_irq_enable(); ++ unlock_softirq(i); ++ local_irq_disable(); ++ } ++} ++ ++void __local_bh_disable(void) ++{ ++ if (++current->softirq_nestcnt == 1) ++ migrate_disable(); ++} ++EXPORT_SYMBOL(__local_bh_disable); ++ ++void __local_bh_enable(void) ++{ ++ if (WARN_ON(current->softirq_nestcnt == 0)) ++ return; ++ ++ local_irq_disable(); ++ if (current->softirq_nestcnt == 1 && current->softirqs_raised) ++ do_current_softirqs(); ++ local_irq_enable(); ++ ++ if (--current->softirq_nestcnt == 0) ++ migrate_enable(); ++} ++EXPORT_SYMBOL(__local_bh_enable); ++ ++void _local_bh_enable(void) ++{ ++ if (WARN_ON(current->softirq_nestcnt == 0)) ++ return; ++ if (--current->softirq_nestcnt == 0) ++ migrate_enable(); ++} ++EXPORT_SYMBOL(_local_bh_enable); ++ ++int in_serving_softirq(void) ++{ ++ return current->flags & PF_IN_SOFTIRQ; ++} ++EXPORT_SYMBOL(in_serving_softirq); ++ ++/* Called with preemption disabled */ ++static void run_ksoftirqd(unsigned int cpu) ++{ ++ local_irq_disable(); ++ current->softirq_nestcnt++; ++ ++ do_current_softirqs(); ++ current->softirq_nestcnt--; ++ local_irq_enable(); ++ cond_resched_rcu_qs(); ++} ++ ++/* ++ * Called from netif_rx_ni(). Preemption enabled, but migration ++ * disabled. So the cpu can't go away under us. ++ */ ++void thread_do_softirq(void) ++{ ++ if (!in_serving_softirq() && current->softirqs_raised) { ++ current->softirq_nestcnt++; ++ do_current_softirqs(); ++ current->softirq_nestcnt--; ++ } ++} ++ ++static void do_raise_softirq_irqoff(unsigned int nr) ++{ ++ unsigned int mask; ++ ++ mask = 1UL << nr; ++ ++ trace_softirq_raise(nr); ++ or_softirq_pending(mask); ++ ++ /* ++ * If we are not in a hard interrupt and inside a bh disabled ++ * region, we simply raise the flag on current. local_bh_enable() ++ * will make sure that the softirq is executed. Otherwise we ++ * delegate it to ksoftirqd. ++ */ ++ if (!in_irq() && current->softirq_nestcnt) ++ current->softirqs_raised |= mask; ++ else if (!__this_cpu_read(ksoftirqd) || !__this_cpu_read(ktimer_softirqd)) ++ return; ++ ++ if (mask & TIMER_SOFTIRQS) ++ __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask; ++ else ++ __this_cpu_read(ksoftirqd)->softirqs_raised |= mask; ++} ++ ++static void wakeup_proper_softirq(unsigned int nr) ++{ ++ if ((1UL << nr) & TIMER_SOFTIRQS) ++ wakeup_timer_softirqd(); ++ else ++ wakeup_softirqd(); ++} ++ ++ ++void __raise_softirq_irqoff(unsigned int nr) ++{ ++ do_raise_softirq_irqoff(nr); ++ if (!in_irq() && !current->softirq_nestcnt) ++ wakeup_proper_softirq(nr); ++} ++ ++/* ++ * Same as __raise_softirq_irqoff() but will process them in ksoftirqd ++ */ ++void __raise_softirq_irqoff_ksoft(unsigned int nr) ++{ ++ unsigned int mask; ++ ++ if (WARN_ON_ONCE(!__this_cpu_read(ksoftirqd) || ++ !__this_cpu_read(ktimer_softirqd))) ++ return; ++ mask = 1UL << nr; ++ ++ trace_softirq_raise(nr); ++ or_softirq_pending(mask); ++ if (mask & TIMER_SOFTIRQS) ++ __this_cpu_read(ktimer_softirqd)->softirqs_raised |= mask; ++ else ++ __this_cpu_read(ksoftirqd)->softirqs_raised |= mask; ++ wakeup_proper_softirq(nr); ++} ++ ++/* ++ * This function must run with irqs disabled! ++ */ ++void raise_softirq_irqoff(unsigned int nr) ++{ ++ do_raise_softirq_irqoff(nr); ++ ++ /* ++ * If we're in an hard interrupt we let irq return code deal ++ * with the wakeup of ksoftirqd. 
++ */ ++ if (in_irq()) ++ return; ++ /* ++ * If we are in thread context but outside of a bh disabled ++ * region, we need to wake ksoftirqd as well. ++ * ++ * CHECKME: Some of the places which do that could be wrapped ++ * into local_bh_disable/enable pairs. Though it's unclear ++ * whether this is worth the effort. To find those places just ++ * raise a WARN() if the condition is met. ++ */ ++ if (!current->softirq_nestcnt) ++ wakeup_proper_softirq(nr); ++} ++ ++static inline int ksoftirqd_softirq_pending(void) ++{ ++ return current->softirqs_raised; ++} ++ ++static inline void local_bh_disable_nort(void) { } ++static inline void _local_bh_enable_nort(void) { } ++ ++static inline void ksoftirqd_set_sched_params(unsigned int cpu) ++{ ++ /* Take over all but timer pending softirqs when starting */ ++ local_irq_disable(); ++ current->softirqs_raised = local_softirq_pending() & ~TIMER_SOFTIRQS; ++ local_irq_enable(); ++} ++ ++static inline void ktimer_softirqd_set_sched_params(unsigned int cpu) ++{ ++ struct sched_param param = { .sched_priority = 1 }; ++ ++ sched_setscheduler(current, SCHED_FIFO, ¶m); ++ ++ /* Take over timer pending softirqs when starting */ ++ local_irq_disable(); ++ current->softirqs_raised = local_softirq_pending() & TIMER_SOFTIRQS; ++ local_irq_enable(); ++} ++ ++static inline void ktimer_softirqd_clr_sched_params(unsigned int cpu, ++ bool online) ++{ ++ struct sched_param param = { .sched_priority = 0 }; ++ ++ sched_setscheduler(current, SCHED_NORMAL, ¶m); ++} ++ ++static int ktimer_softirqd_should_run(unsigned int cpu) ++{ ++ return current->softirqs_raised; ++} ++ ++#endif /* PREEMPT_RT_FULL */ ++/* + * Enter an interrupt context. + */ + void irq_enter(void) +@@ -330,9 +774,9 @@ + * Prevent raise_softirq from needlessly waking up ksoftirqd + * here, as softirq will be serviced on return from interrupt. + */ +- local_bh_disable(); ++ local_bh_disable_nort(); + tick_irq_enter(); +- _local_bh_enable(); ++ _local_bh_enable_nort(); + } + + __irq_enter(); +@@ -340,6 +784,7 @@ + + static inline void invoke_softirq(void) + { ++#ifndef CONFIG_PREEMPT_RT_FULL + if (!force_irqthreads) { + #ifdef CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK + /* +@@ -359,6 +804,18 @@ + } else { + wakeup_softirqd(); + } ++#else /* PREEMPT_RT_FULL */ ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ if (__this_cpu_read(ksoftirqd) && ++ __this_cpu_read(ksoftirqd)->softirqs_raised) ++ wakeup_softirqd(); ++ if (__this_cpu_read(ktimer_softirqd) && ++ __this_cpu_read(ktimer_softirqd)->softirqs_raised) ++ wakeup_timer_softirqd(); ++ local_irq_restore(flags); ++#endif + } + + static inline void tick_irq_exit(void) +@@ -395,26 +852,6 @@ + trace_hardirq_exit(); /* must be last! */ + } + +-/* +- * This function must run with irqs disabled! +- */ +-inline void raise_softirq_irqoff(unsigned int nr) +-{ +- __raise_softirq_irqoff(nr); +- +- /* +- * If we're in an interrupt or softirq, we're done +- * (this also catches softirq-disabled code). We will +- * actually run the softirq once we return from +- * the irq or softirq. +- * +- * Otherwise we wake up ksoftirqd to make sure we +- * schedule the softirq soon. 
+- */ +- if (!in_interrupt()) +- wakeup_softirqd(); +-} +- + void raise_softirq(unsigned int nr) + { + unsigned long flags; +@@ -424,12 +861,6 @@ + local_irq_restore(flags); + } + +-void __raise_softirq_irqoff(unsigned int nr) +-{ +- trace_softirq_raise(nr); +- or_softirq_pending(1UL << nr); +-} +- + void open_softirq(int nr, void (*action)(struct softirq_action *)) + { + softirq_vec[nr].action = action; +@@ -446,15 +877,45 @@ + static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec); + static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec); + ++static void inline ++__tasklet_common_schedule(struct tasklet_struct *t, struct tasklet_head *head, unsigned int nr) ++{ ++ if (tasklet_trylock(t)) { ++again: ++ /* We may have been preempted before tasklet_trylock ++ * and __tasklet_action may have already run. ++ * So double check the sched bit while the takslet ++ * is locked before adding it to the list. ++ */ ++ if (test_bit(TASKLET_STATE_SCHED, &t->state)) { ++ t->next = NULL; ++ *head->tail = t; ++ head->tail = &(t->next); ++ raise_softirq_irqoff(nr); ++ tasklet_unlock(t); ++ } else { ++ /* This is subtle. If we hit the corner case above ++ * It is possible that we get preempted right here, ++ * and another task has successfully called ++ * tasklet_schedule(), then this function, and ++ * failed on the trylock. Thus we must be sure ++ * before releasing the tasklet lock, that the ++ * SCHED_BIT is clear. Otherwise the tasklet ++ * may get its SCHED_BIT set, but not added to the ++ * list ++ */ ++ if (!tasklet_tryunlock(t)) ++ goto again; ++ } ++ } ++} ++ + void __tasklet_schedule(struct tasklet_struct *t) + { + unsigned long flags; + + local_irq_save(flags); +- t->next = NULL; +- *__this_cpu_read(tasklet_vec.tail) = t; +- __this_cpu_write(tasklet_vec.tail, &(t->next)); +- raise_softirq_irqoff(TASKLET_SOFTIRQ); ++ __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_vec), TASKLET_SOFTIRQ); + local_irq_restore(flags); + } + EXPORT_SYMBOL(__tasklet_schedule); +@@ -464,10 +925,7 @@ + unsigned long flags; + + local_irq_save(flags); +- t->next = NULL; +- *__this_cpu_read(tasklet_hi_vec.tail) = t; +- __this_cpu_write(tasklet_hi_vec.tail, &(t->next)); +- raise_softirq_irqoff(HI_SOFTIRQ); ++ __tasklet_common_schedule(t, this_cpu_ptr(&tasklet_hi_vec), HI_SOFTIRQ); + local_irq_restore(flags); + } + EXPORT_SYMBOL(__tasklet_hi_schedule); +@@ -476,82 +934,122 @@ + { + BUG_ON(!irqs_disabled()); + +- t->next = __this_cpu_read(tasklet_hi_vec.head); +- __this_cpu_write(tasklet_hi_vec.head, t); +- __raise_softirq_irqoff(HI_SOFTIRQ); ++ __tasklet_hi_schedule(t); + } + EXPORT_SYMBOL(__tasklet_hi_schedule_first); + +-static void tasklet_action(struct softirq_action *a) ++void tasklet_enable(struct tasklet_struct *t) + { +- struct tasklet_struct *list; ++ if (!atomic_dec_and_test(&t->count)) ++ return; ++ if (test_and_clear_bit(TASKLET_STATE_PENDING, &t->state)) ++ tasklet_schedule(t); ++} ++EXPORT_SYMBOL(tasklet_enable); + +- local_irq_disable(); +- list = __this_cpu_read(tasklet_vec.head); +- __this_cpu_write(tasklet_vec.head, NULL); +- __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head)); +- local_irq_enable(); ++static void __tasklet_action(struct softirq_action *a, ++ struct tasklet_struct *list) ++{ ++ int loops = 1000000; + + while (list) { + struct tasklet_struct *t = list; + + list = list->next; + +- if (tasklet_trylock(t)) { +- if (!atomic_read(&t->count)) { +- if (!test_and_clear_bit(TASKLET_STATE_SCHED, +- &t->state)) +- BUG(); +- t->func(t->data); +- tasklet_unlock(t); +- continue; +- 
} +- tasklet_unlock(t); ++ /* ++ * Should always succeed - after a tasklist got on the ++ * list (after getting the SCHED bit set from 0 to 1), ++ * nothing but the tasklet softirq it got queued to can ++ * lock it: ++ */ ++ if (!tasklet_trylock(t)) { ++ WARN_ON(1); ++ continue; + } + +- local_irq_disable(); + t->next = NULL; +- *__this_cpu_read(tasklet_vec.tail) = t; +- __this_cpu_write(tasklet_vec.tail, &(t->next)); +- __raise_softirq_irqoff(TASKLET_SOFTIRQ); +- local_irq_enable(); ++ ++ /* ++ * If we cannot handle the tasklet because it's disabled, ++ * mark it as pending. tasklet_enable() will later ++ * re-schedule the tasklet. ++ */ ++ if (unlikely(atomic_read(&t->count))) { ++out_disabled: ++ /* implicit unlock: */ ++ wmb(); ++ t->state = TASKLET_STATEF_PENDING; ++ continue; ++ } ++ ++ /* ++ * After this point on the tasklet might be rescheduled ++ * on another CPU, but it can only be added to another ++ * CPU's tasklet list if we unlock the tasklet (which we ++ * dont do yet). ++ */ ++ if (!test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) ++ WARN_ON(1); ++ ++again: ++ t->func(t->data); ++ ++ /* ++ * Try to unlock the tasklet. We must use cmpxchg, because ++ * another CPU might have scheduled or disabled the tasklet. ++ * We only allow the STATE_RUN -> 0 transition here. ++ */ ++ while (!tasklet_tryunlock(t)) { ++ /* ++ * If it got disabled meanwhile, bail out: ++ */ ++ if (atomic_read(&t->count)) ++ goto out_disabled; ++ /* ++ * If it got scheduled meanwhile, re-execute ++ * the tasklet function: ++ */ ++ if (test_and_clear_bit(TASKLET_STATE_SCHED, &t->state)) ++ goto again; ++ if (!--loops) { ++ printk("hm, tasklet state: %08lx\n", t->state); ++ WARN_ON(1); ++ tasklet_unlock(t); ++ break; ++ } ++ } + } + } + ++static void tasklet_action(struct softirq_action *a) ++{ ++ struct tasklet_struct *list; ++ ++ local_irq_disable(); ++ ++ list = __this_cpu_read(tasklet_vec.head); ++ __this_cpu_write(tasklet_vec.head, NULL); ++ __this_cpu_write(tasklet_vec.tail, this_cpu_ptr(&tasklet_vec.head)); ++ ++ local_irq_enable(); ++ ++ __tasklet_action(a, list); ++} ++ + static void tasklet_hi_action(struct softirq_action *a) + { + struct tasklet_struct *list; + + local_irq_disable(); ++ + list = __this_cpu_read(tasklet_hi_vec.head); + __this_cpu_write(tasklet_hi_vec.head, NULL); + __this_cpu_write(tasklet_hi_vec.tail, this_cpu_ptr(&tasklet_hi_vec.head)); +- local_irq_enable(); + +- while (list) { +- struct tasklet_struct *t = list; +- +- list = list->next; +- +- if (tasklet_trylock(t)) { +- if (!atomic_read(&t->count)) { +- if (!test_and_clear_bit(TASKLET_STATE_SCHED, +- &t->state)) +- BUG(); +- t->func(t->data); +- tasklet_unlock(t); +- continue; +- } +- tasklet_unlock(t); +- } ++ local_irq_enable(); + +- local_irq_disable(); +- t->next = NULL; +- *__this_cpu_read(tasklet_hi_vec.tail) = t; +- __this_cpu_write(tasklet_hi_vec.tail, &(t->next)); +- __raise_softirq_irqoff(HI_SOFTIRQ); +- local_irq_enable(); +- } ++ __tasklet_action(a, list); + } + + void tasklet_init(struct tasklet_struct *t, +@@ -572,7 +1070,7 @@ + + while (test_and_set_bit(TASKLET_STATE_SCHED, &t->state)) { + do { +- yield(); ++ msleep(1); + } while (test_bit(TASKLET_STATE_SCHED, &t->state)); + } + tasklet_unlock_wait(t); +@@ -646,25 +1144,26 @@ + open_softirq(HI_SOFTIRQ, tasklet_hi_action); + } + +-static int ksoftirqd_should_run(unsigned int cpu) +-{ +- return local_softirq_pending(); +-} +- +-static void run_ksoftirqd(unsigned int cpu) ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) ++void 
tasklet_unlock_wait(struct tasklet_struct *t) + { +- local_irq_disable(); +- if (local_softirq_pending()) { ++ while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { + /* +- * We can safely run softirq on inline stack, as we are not deep +- * in the task stack here. ++ * Hack for now to avoid this busy-loop: + */ +- __do_softirq(); +- local_irq_enable(); +- cond_resched_rcu_qs(); +- return; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ msleep(1); ++#else ++ barrier(); ++#endif + } +- local_irq_enable(); ++} ++EXPORT_SYMBOL(tasklet_unlock_wait); ++#endif ++ ++static int ksoftirqd_should_run(unsigned int cpu) ++{ ++ return ksoftirqd_softirq_pending(); + } + + #ifdef CONFIG_HOTPLUG_CPU +@@ -746,16 +1245,31 @@ + + static struct smp_hotplug_thread softirq_threads = { + .store = &ksoftirqd, ++ .setup = ksoftirqd_set_sched_params, + .thread_should_run = ksoftirqd_should_run, + .thread_fn = run_ksoftirqd, + .thread_comm = "ksoftirqd/%u", + }; + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static struct smp_hotplug_thread softirq_timer_threads = { ++ .store = &ktimer_softirqd, ++ .setup = ktimer_softirqd_set_sched_params, ++ .cleanup = ktimer_softirqd_clr_sched_params, ++ .thread_should_run = ktimer_softirqd_should_run, ++ .thread_fn = run_ksoftirqd, ++ .thread_comm = "ktimersoftd/%u", ++}; ++#endif ++ + static __init int spawn_ksoftirqd(void) + { + register_cpu_notifier(&cpu_nfb); + + BUG_ON(smpboot_register_percpu_thread(&softirq_threads)); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ BUG_ON(smpboot_register_percpu_thread(&softirq_timer_threads)); ++#endif + + return 0; + } +diff -Nur linux-4.4.13.orig/kernel/stop_machine.c linux-4.4.13/kernel/stop_machine.c +--- linux-4.4.13.orig/kernel/stop_machine.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/stop_machine.c 2016-06-22 21:55:08.062523867 +0200 +@@ -37,7 +37,7 @@ + struct cpu_stopper { + struct task_struct *thread; + +- spinlock_t lock; ++ raw_spinlock_t lock; + bool enabled; /* is this stopper enabled? 
*/ + struct list_head works; /* list of pending works */ + +@@ -86,12 +86,12 @@ + struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); + unsigned long flags; + +- spin_lock_irqsave(&stopper->lock, flags); ++ raw_spin_lock_irqsave(&stopper->lock, flags); + if (stopper->enabled) + __cpu_stop_queue_work(stopper, work); + else + cpu_stop_signal_done(work->done, false); +- spin_unlock_irqrestore(&stopper->lock, flags); ++ raw_spin_unlock_irqrestore(&stopper->lock, flags); + } + + /** +@@ -224,8 +224,8 @@ + int err; + + lg_double_lock(&stop_cpus_lock, cpu1, cpu2); +- spin_lock_irq(&stopper1->lock); +- spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING); ++ raw_spin_lock_irq(&stopper1->lock); ++ raw_spin_lock_nested(&stopper2->lock, SINGLE_DEPTH_NESTING); + + err = -ENOENT; + if (!stopper1->enabled || !stopper2->enabled) +@@ -235,8 +235,8 @@ + __cpu_stop_queue_work(stopper1, work1); + __cpu_stop_queue_work(stopper2, work2); + unlock: +- spin_unlock(&stopper2->lock); +- spin_unlock_irq(&stopper1->lock); ++ raw_spin_unlock(&stopper2->lock); ++ raw_spin_unlock_irq(&stopper1->lock); + lg_double_unlock(&stop_cpus_lock, cpu1, cpu2); + + return err; +@@ -258,7 +258,7 @@ + struct cpu_stop_work work1, work2; + struct multi_stop_data msdata; + +- preempt_disable(); ++ preempt_disable_nort(); + msdata = (struct multi_stop_data){ + .fn = fn, + .data = arg, +@@ -278,11 +278,11 @@ + if (cpu1 > cpu2) + swap(cpu1, cpu2); + if (cpu_stop_queue_two_works(cpu1, &work1, cpu2, &work2)) { +- preempt_enable(); ++ preempt_enable_nort(); + return -ENOENT; + } + +- preempt_enable(); ++ preempt_enable_nort(); + + wait_for_completion(&done.completion); + +@@ -315,17 +315,20 @@ + + static void queue_stop_cpus_work(const struct cpumask *cpumask, + cpu_stop_fn_t fn, void *arg, +- struct cpu_stop_done *done) ++ struct cpu_stop_done *done, bool inactive) + { + struct cpu_stop_work *work; + unsigned int cpu; + + /* +- * Disable preemption while queueing to avoid getting +- * preempted by a stopper which might wait for other stoppers +- * to enter @fn which can lead to deadlock. ++ * Make sure that all work is queued on all cpus before ++ * any of the cpus can execute it. + */ +- lg_global_lock(&stop_cpus_lock); ++ if (!inactive) ++ lg_global_lock(&stop_cpus_lock); ++ else ++ lg_global_trylock_relax(&stop_cpus_lock); ++ + for_each_cpu(cpu, cpumask) { + work = &per_cpu(cpu_stopper.stop_work, cpu); + work->fn = fn; +@@ -342,7 +345,7 @@ + struct cpu_stop_done done; + + cpu_stop_init_done(&done, cpumask_weight(cpumask)); +- queue_stop_cpus_work(cpumask, fn, arg, &done); ++ queue_stop_cpus_work(cpumask, fn, arg, &done, false); + wait_for_completion(&done.completion); + return done.executed ? 
done.ret : -ENOENT; + } +@@ -422,9 +425,9 @@ + unsigned long flags; + int run; + +- spin_lock_irqsave(&stopper->lock, flags); ++ raw_spin_lock_irqsave(&stopper->lock, flags); + run = !list_empty(&stopper->works); +- spin_unlock_irqrestore(&stopper->lock, flags); ++ raw_spin_unlock_irqrestore(&stopper->lock, flags); + return run; + } + +@@ -436,13 +439,13 @@ + + repeat: + work = NULL; +- spin_lock_irq(&stopper->lock); ++ raw_spin_lock_irq(&stopper->lock); + if (!list_empty(&stopper->works)) { + work = list_first_entry(&stopper->works, + struct cpu_stop_work, list); + list_del_init(&work->list); + } +- spin_unlock_irq(&stopper->lock); ++ raw_spin_unlock_irq(&stopper->lock); + + if (work) { + cpu_stop_fn_t fn = work->fn; +@@ -450,6 +453,16 @@ + struct cpu_stop_done *done = work->done; + char ksym_buf[KSYM_NAME_LEN] __maybe_unused; + ++ /* ++ * Wait until the stopper finished scheduling on all ++ * cpus ++ */ ++ lg_global_lock(&stop_cpus_lock); ++ /* ++ * Let other cpu threads continue as well ++ */ ++ lg_global_unlock(&stop_cpus_lock); ++ + /* cpu stop callbacks are not allowed to sleep */ + preempt_disable(); + +@@ -520,10 +533,12 @@ + for_each_possible_cpu(cpu) { + struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); + +- spin_lock_init(&stopper->lock); ++ raw_spin_lock_init(&stopper->lock); + INIT_LIST_HEAD(&stopper->works); + } + ++ lg_lock_init(&stop_cpus_lock, "stop_cpus_lock"); ++ + BUG_ON(smpboot_register_percpu_thread(&cpu_stop_threads)); + stop_machine_unpark(raw_smp_processor_id()); + stop_machine_initialized = true; +@@ -620,7 +635,7 @@ + set_state(&msdata, MULTI_STOP_PREPARE); + cpu_stop_init_done(&done, num_active_cpus()); + queue_stop_cpus_work(cpu_active_mask, multi_cpu_stop, &msdata, +- &done); ++ &done, true); + ret = multi_cpu_stop(&msdata); + + /* Busy wait for completion. */ +diff -Nur linux-4.4.13.orig/kernel/time/hrtimer.c linux-4.4.13/kernel/time/hrtimer.c +--- linux-4.4.13.orig/kernel/time/hrtimer.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/time/hrtimer.c 2016-06-22 21:55:08.062523867 +0200 +@@ -48,11 +48,13 @@ + #include + #include + #include ++#include + #include + + #include + + #include ++#include + + #include "tick-internal.h" + +@@ -712,6 +714,44 @@ + + static DECLARE_WORK(hrtimer_work, clock_was_set_work); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * RT can not call schedule_work from real interrupt context. ++ * Need to make a thread to do the real work. ++ */ ++static struct task_struct *clock_set_delay_thread; ++static bool do_clock_set_delay; ++ ++static int run_clock_set_delay(void *ignore) ++{ ++ while (!kthread_should_stop()) { ++ set_current_state(TASK_INTERRUPTIBLE); ++ if (do_clock_set_delay) { ++ do_clock_set_delay = false; ++ schedule_work(&hrtimer_work); ++ } ++ schedule(); ++ } ++ __set_current_state(TASK_RUNNING); ++ return 0; ++} ++ ++void clock_was_set_delayed(void) ++{ ++ do_clock_set_delay = true; ++ /* Make visible before waking up process */ ++ smp_wmb(); ++ wake_up_process(clock_set_delay_thread); ++} ++ ++static __init int create_clock_set_delay_thread(void) ++{ ++ clock_set_delay_thread = kthread_run(run_clock_set_delay, NULL, "kclksetdelayd"); ++ BUG_ON(!clock_set_delay_thread); ++ return 0; ++} ++early_initcall(create_clock_set_delay_thread); ++#else /* PREEMPT_RT_FULL */ + /* + * Called from timekeeping and resume code to reprogramm the hrtimer + * interrupt device on all cpus. 
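// Editor's sketch of the "flag + dedicated kthread" deferral implemented by
// run_clock_set_delay() above, for contexts where schedule_work() must not be
// called from a real interrupt on RT (do_deferred_work() is a stand-in):
static struct task_struct *worker;
static bool deferred;

static int worker_fn(void *unused)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (deferred) {
			deferred = false;
			do_deferred_work();
		}
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	return 0;
}

static void kick_worker(void)	/* safe to call from hard-irq context */
{
	deferred = true;
	smp_wmb();		/* flag must be visible before the wakeup */
	wake_up_process(worker);
}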
+@@ -720,6 +760,7 @@ + { + schedule_work(&hrtimer_work); + } ++#endif + + #else + +@@ -729,11 +770,8 @@ + static inline void hrtimer_switch_to_hres(void) { } + static inline void + hrtimer_force_reprogram(struct hrtimer_cpu_base *base, int skip_equal) { } +-static inline int hrtimer_reprogram(struct hrtimer *timer, +- struct hrtimer_clock_base *base) +-{ +- return 0; +-} ++static inline void hrtimer_reprogram(struct hrtimer *timer, ++ struct hrtimer_clock_base *base) { } + static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base) { } + static inline void retrigger_next_event(void *arg) { } + +@@ -865,6 +903,32 @@ + } + EXPORT_SYMBOL_GPL(hrtimer_forward); + ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define wake_up_timer_waiters(b) wake_up(&(b)->wait) ++ ++/** ++ * hrtimer_wait_for_timer - Wait for a running timer ++ * ++ * @timer: timer to wait for ++ * ++ * The function waits in case the timers callback function is ++ * currently executed on the waitqueue of the timer base. The ++ * waitqueue is woken up after the timer callback function has ++ * finished execution. ++ */ ++void hrtimer_wait_for_timer(const struct hrtimer *timer) ++{ ++ struct hrtimer_clock_base *base = timer->base; ++ ++ if (base && base->cpu_base && !timer->irqsafe) ++ wait_event(base->cpu_base->wait, ++ !(hrtimer_callback_running(timer))); ++} ++ ++#else ++# define wake_up_timer_waiters(b) do { } while (0) ++#endif ++ + /* + * enqueue_hrtimer - internal function to (re)start a timer + * +@@ -906,6 +970,11 @@ + if (!(state & HRTIMER_STATE_ENQUEUED)) + return; + ++ if (unlikely(!list_empty(&timer->cb_entry))) { ++ list_del_init(&timer->cb_entry); ++ return; ++ } ++ + if (!timerqueue_del(&base->active, &timer->node)) + cpu_base->active_bases &= ~(1 << base->index); + +@@ -1001,7 +1070,16 @@ + new_base = switch_hrtimer_base(timer, base, mode & HRTIMER_MODE_PINNED); + + timer_stats_hrtimer_set_start_info(timer); ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ { ++ ktime_t now = new_base->get_time(); + ++ if (ktime_to_ns(tim) < ktime_to_ns(now)) ++ timer->praecox = now; ++ else ++ timer->praecox = ktime_set(0, 0); ++ } ++#endif + leftmost = enqueue_hrtimer(timer, new_base); + if (!leftmost) + goto unlock; +@@ -1073,7 +1151,7 @@ + + if (ret >= 0) + return ret; +- cpu_relax(); ++ hrtimer_wait_for_timer(timer); + } + } + EXPORT_SYMBOL_GPL(hrtimer_cancel); +@@ -1137,6 +1215,7 @@ + + base = hrtimer_clockid_to_base(clock_id); + timer->base = &cpu_base->clock_base[base]; ++ INIT_LIST_HEAD(&timer->cb_entry); + timerqueue_init(&timer->node); + + #ifdef CONFIG_TIMER_STATS +@@ -1177,6 +1256,7 @@ + seq = raw_read_seqcount_begin(&cpu_base->seq); + + if (timer->state != HRTIMER_STATE_INACTIVE || ++ cpu_base->running_soft == timer || + cpu_base->running == timer) + return true; + +@@ -1275,10 +1355,112 @@ + cpu_base->running = NULL; + } + ++#ifdef CONFIG_PREEMPT_RT_BASE ++static void hrtimer_rt_reprogram(int restart, struct hrtimer *timer, ++ struct hrtimer_clock_base *base) ++{ ++ int leftmost; ++ ++ if (restart != HRTIMER_NORESTART && ++ !(timer->state & HRTIMER_STATE_ENQUEUED)) { ++ ++ leftmost = enqueue_hrtimer(timer, base); ++ if (!leftmost) ++ return; ++#ifdef CONFIG_HIGH_RES_TIMERS ++ if (!hrtimer_is_hres_active(timer)) { ++ /* ++ * Kick to reschedule the next tick to handle the new timer ++ * on dynticks target. 
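// Editor's note: on RT only timers explicitly marked irqsafe keep expiring in
// hard-irq context; everything else is parked on base->expired and handled
// from HRTIMER_SOFTIRQ (see hrtimer_rt_defer() below). The marking pattern
// used throughout this series, e.g. for dl_task_timer and rt_period_timer
// earlier ('my_handler' is illustrative):
static struct hrtimer my_timer;

static void my_timer_setup(void)
{
	hrtimer_init(&my_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	my_timer.function = my_handler;
	my_timer.irqsafe = 1;	/* handler may run with irqs disabled on RT */
}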
++ */ ++ if (base->cpu_base->nohz_active) ++ wake_up_nohz_cpu(base->cpu_base->cpu); ++ } else { ++ ++ hrtimer_reprogram(timer, base); ++ } ++#endif ++ } ++} ++ ++/* ++ * The changes in mainline which removed the callback modes from ++ * hrtimer are not yet working with -rt. The non wakeup_process() ++ * based callbacks which involve sleeping locks need to be treated ++ * seperately. ++ */ ++static void hrtimer_rt_run_pending(void) ++{ ++ enum hrtimer_restart (*fn)(struct hrtimer *); ++ struct hrtimer_cpu_base *cpu_base; ++ struct hrtimer_clock_base *base; ++ struct hrtimer *timer; ++ int index, restart; ++ ++ local_irq_disable(); ++ cpu_base = &per_cpu(hrtimer_bases, smp_processor_id()); ++ ++ raw_spin_lock(&cpu_base->lock); ++ ++ for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) { ++ base = &cpu_base->clock_base[index]; ++ ++ while (!list_empty(&base->expired)) { ++ timer = list_first_entry(&base->expired, ++ struct hrtimer, cb_entry); ++ ++ /* ++ * Same as the above __run_hrtimer function ++ * just we run with interrupts enabled. ++ */ ++ debug_deactivate(timer); ++ cpu_base->running_soft = timer; ++ raw_write_seqcount_barrier(&cpu_base->seq); ++ ++ __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0); ++ timer_stats_account_hrtimer(timer); ++ fn = timer->function; ++ ++ raw_spin_unlock_irq(&cpu_base->lock); ++ restart = fn(timer); ++ raw_spin_lock_irq(&cpu_base->lock); ++ ++ hrtimer_rt_reprogram(restart, timer, base); ++ raw_write_seqcount_barrier(&cpu_base->seq); ++ ++ WARN_ON_ONCE(cpu_base->running_soft != timer); ++ cpu_base->running_soft = NULL; ++ } ++ } ++ ++ raw_spin_unlock_irq(&cpu_base->lock); ++ ++ wake_up_timer_waiters(cpu_base); ++} ++ ++static int hrtimer_rt_defer(struct hrtimer *timer) ++{ ++ if (timer->irqsafe) ++ return 0; ++ ++ __remove_hrtimer(timer, timer->base, timer->state, 0); ++ list_add_tail(&timer->cb_entry, &timer->base->expired); ++ return 1; ++} ++ ++#else ++ ++static inline int hrtimer_rt_defer(struct hrtimer *timer) { return 0; } ++ ++#endif ++ ++static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer); ++ + static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now) + { + struct hrtimer_clock_base *base = cpu_base->clock_base; + unsigned int active = cpu_base->active_bases; ++ int raise = 0; + + for (; active; base++, active >>= 1) { + struct timerqueue_node *node; +@@ -1294,6 +1476,15 @@ + + timer = container_of(node, struct hrtimer, node); + ++ trace_hrtimer_interrupt(raw_smp_processor_id(), ++ ktime_to_ns(ktime_sub(ktime_to_ns(timer->praecox) ? ++ timer->praecox : hrtimer_get_expires(timer), ++ basenow)), ++ current, ++ timer->function == hrtimer_wakeup ? 
++ container_of(timer, struct hrtimer_sleeper, ++ timer)->task : NULL); ++ + /* + * The immediate goal for using the softexpires is + * minimizing wakeups, not running timers at the +@@ -1309,9 +1500,14 @@ + if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer)) + break; + +- __run_hrtimer(cpu_base, base, timer, &basenow); ++ if (!hrtimer_rt_defer(timer)) ++ __run_hrtimer(cpu_base, base, timer, &basenow); ++ else ++ raise = 1; + } + } ++ if (raise) ++ raise_softirq_irqoff(HRTIMER_SOFTIRQ); + } + + #ifdef CONFIG_HIGH_RES_TIMERS +@@ -1474,16 +1670,18 @@ + void hrtimer_init_sleeper(struct hrtimer_sleeper *sl, struct task_struct *task) + { + sl->timer.function = hrtimer_wakeup; ++ sl->timer.irqsafe = 1; + sl->task = task; + } + EXPORT_SYMBOL_GPL(hrtimer_init_sleeper); + +-static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode) ++static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mode, ++ unsigned long state) + { + hrtimer_init_sleeper(t, current); + + do { +- set_current_state(TASK_INTERRUPTIBLE); ++ set_current_state(state); + hrtimer_start_expires(&t->timer, mode); + + if (likely(t->task)) +@@ -1525,7 +1723,8 @@ + HRTIMER_MODE_ABS); + hrtimer_set_expires_tv64(&t.timer, restart->nanosleep.expires); + +- if (do_nanosleep(&t, HRTIMER_MODE_ABS)) ++ /* cpu_chill() does not care about restart state. */ ++ if (do_nanosleep(&t, HRTIMER_MODE_ABS, TASK_INTERRUPTIBLE)) + goto out; + + rmtp = restart->nanosleep.rmtp; +@@ -1542,8 +1741,10 @@ + return ret; + } + +-long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, +- const enum hrtimer_mode mode, const clockid_t clockid) ++static long ++__hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, ++ const enum hrtimer_mode mode, const clockid_t clockid, ++ unsigned long state) + { + struct restart_block *restart; + struct hrtimer_sleeper t; +@@ -1556,7 +1757,7 @@ + + hrtimer_init_on_stack(&t.timer, clockid, mode); + hrtimer_set_expires_range_ns(&t.timer, timespec_to_ktime(*rqtp), slack); +- if (do_nanosleep(&t, mode)) ++ if (do_nanosleep(&t, mode, state)) + goto out; + + /* Absolute timers do not update the rmtp value and restart: */ +@@ -1583,6 +1784,12 @@ + return ret; + } + ++long hrtimer_nanosleep(struct timespec *rqtp, struct timespec __user *rmtp, ++ const enum hrtimer_mode mode, const clockid_t clockid) ++{ ++ return __hrtimer_nanosleep(rqtp, rmtp, mode, clockid, TASK_INTERRUPTIBLE); ++} ++ + SYSCALL_DEFINE2(nanosleep, struct timespec __user *, rqtp, + struct timespec __user *, rmtp) + { +@@ -1597,6 +1804,26 @@ + return hrtimer_nanosleep(&tu, rmtp, HRTIMER_MODE_REL, CLOCK_MONOTONIC); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * Sleep for 1 ms in hope whoever holds what we want will let it go. 
++ */ ++void cpu_chill(void) ++{ ++ struct timespec tu = { ++ .tv_nsec = NSEC_PER_MSEC, ++ }; ++ unsigned int freeze_flag = current->flags & PF_NOFREEZE; ++ ++ current->flags |= PF_NOFREEZE; ++ __hrtimer_nanosleep(&tu, NULL, HRTIMER_MODE_REL, CLOCK_MONOTONIC, ++ TASK_UNINTERRUPTIBLE); ++ if (!freeze_flag) ++ current->flags &= ~PF_NOFREEZE; ++} ++EXPORT_SYMBOL(cpu_chill); ++#endif ++ + /* + * Functions related to boot-time initialization: + */ +@@ -1608,10 +1835,14 @@ + for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { + cpu_base->clock_base[i].cpu_base = cpu_base; + timerqueue_init_head(&cpu_base->clock_base[i].active); ++ INIT_LIST_HEAD(&cpu_base->clock_base[i].expired); + } + + cpu_base->cpu = cpu; + hrtimer_init_hres(cpu_base); ++#ifdef CONFIG_PREEMPT_RT_BASE ++ init_waitqueue_head(&cpu_base->wait); ++#endif + } + + #ifdef CONFIG_HOTPLUG_CPU +@@ -1709,11 +1940,21 @@ + .notifier_call = hrtimer_cpu_notify, + }; + ++#ifdef CONFIG_PREEMPT_RT_BASE ++static void run_hrtimer_softirq(struct softirq_action *h) ++{ ++ hrtimer_rt_run_pending(); ++} ++#endif ++ + void __init hrtimers_init(void) + { + hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE, + (void *)(long)smp_processor_id()); + register_cpu_notifier(&hrtimers_nb); ++#ifdef CONFIG_PREEMPT_RT_BASE ++ open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq); ++#endif + } + + /** +diff -Nur linux-4.4.13.orig/kernel/time/itimer.c linux-4.4.13/kernel/time/itimer.c +--- linux-4.4.13.orig/kernel/time/itimer.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/time/itimer.c 2016-06-22 21:55:08.062523867 +0200 +@@ -213,6 +213,7 @@ + /* We are sharing ->siglock with it_real_fn() */ + if (hrtimer_try_to_cancel(timer) < 0) { + spin_unlock_irq(&tsk->sighand->siglock); ++ hrtimer_wait_for_timer(&tsk->signal->real_timer); + goto again; + } + expires = timeval_to_ktime(value->it_value); +diff -Nur linux-4.4.13.orig/kernel/time/jiffies.c linux-4.4.13/kernel/time/jiffies.c +--- linux-4.4.13.orig/kernel/time/jiffies.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/time/jiffies.c 2016-06-22 21:55:08.062523867 +0200 +@@ -74,7 +74,8 @@ + .max_cycles = 10, + }; + +-__cacheline_aligned_in_smp DEFINE_SEQLOCK(jiffies_lock); ++__cacheline_aligned_in_smp DEFINE_RAW_SPINLOCK(jiffies_lock); ++__cacheline_aligned_in_smp seqcount_t jiffies_seq; + + #if (BITS_PER_LONG < 64) + u64 get_jiffies_64(void) +@@ -83,9 +84,9 @@ + u64 ret; + + do { +- seq = read_seqbegin(&jiffies_lock); ++ seq = read_seqcount_begin(&jiffies_seq); + ret = jiffies_64; +- } while (read_seqretry(&jiffies_lock, seq)); ++ } while (read_seqcount_retry(&jiffies_seq, seq)); + return ret; + } + EXPORT_SYMBOL(get_jiffies_64); +diff -Nur linux-4.4.13.orig/kernel/time/ntp.c linux-4.4.13/kernel/time/ntp.c +--- linux-4.4.13.orig/kernel/time/ntp.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/time/ntp.c 2016-06-22 21:55:08.062523867 +0200 +@@ -10,6 +10,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -562,10 +563,52 @@ + &sync_cmos_work, timespec64_to_jiffies(&next)); + } + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * RT can not call schedule_delayed_work from real interrupt context. ++ * Need to make a thread to do the real work. 
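
A few hunks above, jiffies_lock is split from a seqlock_t into a raw_spinlock_t plus a bare seqcount_t: the writer-side lock must stay non-sleeping on RT, while readers remain lockless. The general shape of that split, sketched with demo_* placeholders:

        #include <linux/seqlock.h>
        #include <linux/spinlock.h>
        #include <linux/types.h>

        static DEFINE_RAW_SPINLOCK(demo_lock);
        static seqcount_t demo_seq = SEQCNT_ZERO(demo_seq);
        static u64 demo_value;

        static void demo_write(u64 v)
        {
                raw_spin_lock(&demo_lock);      /* serializes writers */
                write_seqcount_begin(&demo_seq);
                demo_value = v;
                write_seqcount_end(&demo_seq);
                raw_spin_unlock(&demo_lock);
        }

        static u64 demo_read(void)              /* lockless, may retry */
        {
                unsigned int seq;
                u64 v;

                do {
                        seq = read_seqcount_begin(&demo_seq);
                        v = demo_value;
                } while (read_seqcount_retry(&demo_seq, seq));

                return v;
        }
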
++ */ ++static struct task_struct *cmos_delay_thread; ++static bool do_cmos_delay; ++ ++static int run_cmos_delay(void *ignore) ++{ ++ while (!kthread_should_stop()) { ++ set_current_state(TASK_INTERRUPTIBLE); ++ if (do_cmos_delay) { ++ do_cmos_delay = false; ++ queue_delayed_work(system_power_efficient_wq, ++ &sync_cmos_work, 0); ++ } ++ schedule(); ++ } ++ __set_current_state(TASK_RUNNING); ++ return 0; ++} ++ ++void ntp_notify_cmos_timer(void) ++{ ++ do_cmos_delay = true; ++ /* Make visible before waking up process */ ++ smp_wmb(); ++ wake_up_process(cmos_delay_thread); ++} ++ ++static __init int create_cmos_delay_thread(void) ++{ ++ cmos_delay_thread = kthread_run(run_cmos_delay, NULL, "kcmosdelayd"); ++ BUG_ON(!cmos_delay_thread); ++ return 0; ++} ++early_initcall(create_cmos_delay_thread); ++ ++#else ++ + void ntp_notify_cmos_timer(void) + { + queue_delayed_work(system_power_efficient_wq, &sync_cmos_work, 0); + } ++#endif /* CONFIG_PREEMPT_RT_FULL */ + + #else + void ntp_notify_cmos_timer(void) { } +diff -Nur linux-4.4.13.orig/kernel/time/posix-cpu-timers.c linux-4.4.13/kernel/time/posix-cpu-timers.c +--- linux-4.4.13.orig/kernel/time/posix-cpu-timers.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/time/posix-cpu-timers.c 2016-06-22 21:55:08.066523867 +0200 +@@ -3,6 +3,7 @@ + */ + + #include ++#include + #include + #include + #include +@@ -650,7 +651,7 @@ + /* + * Disarm any old timer after extracting its expiry time. + */ +- WARN_ON_ONCE(!irqs_disabled()); ++ WARN_ON_ONCE_NONRT(!irqs_disabled()); + + ret = 0; + old_incr = timer->it.cpu.incr; +@@ -1091,7 +1092,7 @@ + /* + * Now re-arm for the new expiry time. + */ +- WARN_ON_ONCE(!irqs_disabled()); ++ WARN_ON_ONCE_NONRT(!irqs_disabled()); + arm_timer(timer); + unlock_task_sighand(p, &flags); + +@@ -1182,13 +1183,13 @@ + * already updated our counts. We need to check if any timers fire now. + * Interrupts are disabled. 
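
On RT the hunks below stop running these timers from interrupt context altogether: run_posix_cpu_timers() only queues the task on a per-CPU list and wakes a high-priority posixcputmr/N kthread, and the list is terminated by a self-pointing element so that NULL can keep meaning "not queued". The queue/drain logic, reduced to a sketch with placeholder types:

        #include <stddef.h>

        struct demo_task {
                struct demo_task *next; /* NULL: not queued; == self: list tail */
        };

        /* producer (irqs off): prepend; on an empty list, point at ourselves */
        static void demo_queue(struct demo_task **head, struct demo_task *t)
        {
                t->next = *head ? *head : t;
                *head = t;
        }

        /* consumer (kthread): detach the chain, walk to the self-pointer */
        static void demo_drain(struct demo_task **head)
        {
                struct demo_task *t = *head;

                *head = NULL;
                while (t) {
                        struct demo_task *next = t->next;

                        /* ... run this task's expired timers here ... */
                        t->next = NULL;
                        if (next == t)
                                break;
                        t = next;
                }
        }
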
+ */ +-void run_posix_cpu_timers(struct task_struct *tsk) ++static void __run_posix_cpu_timers(struct task_struct *tsk) + { + LIST_HEAD(firing); + struct k_itimer *timer, *next; + unsigned long flags; + +- WARN_ON_ONCE(!irqs_disabled()); ++ WARN_ON_ONCE_NONRT(!irqs_disabled()); + + /* + * The fast path checks that there are no expired thread or thread +@@ -1242,6 +1243,190 @@ + } + } + ++#ifdef CONFIG_PREEMPT_RT_BASE ++#include ++#include ++DEFINE_PER_CPU(struct task_struct *, posix_timer_task); ++DEFINE_PER_CPU(struct task_struct *, posix_timer_tasklist); ++ ++static int posix_cpu_timers_thread(void *data) ++{ ++ int cpu = (long)data; ++ ++ BUG_ON(per_cpu(posix_timer_task,cpu) != current); ++ ++ while (!kthread_should_stop()) { ++ struct task_struct *tsk = NULL; ++ struct task_struct *next = NULL; ++ ++ if (cpu_is_offline(cpu)) ++ goto wait_to_die; ++ ++ /* grab task list */ ++ raw_local_irq_disable(); ++ tsk = per_cpu(posix_timer_tasklist, cpu); ++ per_cpu(posix_timer_tasklist, cpu) = NULL; ++ raw_local_irq_enable(); ++ ++ /* its possible the list is empty, just return */ ++ if (!tsk) { ++ set_current_state(TASK_INTERRUPTIBLE); ++ schedule(); ++ __set_current_state(TASK_RUNNING); ++ continue; ++ } ++ ++ /* Process task list */ ++ while (1) { ++ /* save next */ ++ next = tsk->posix_timer_list; ++ ++ /* run the task timers, clear its ptr and ++ * unreference it ++ */ ++ __run_posix_cpu_timers(tsk); ++ tsk->posix_timer_list = NULL; ++ put_task_struct(tsk); ++ ++ /* check if this is the last on the list */ ++ if (next == tsk) ++ break; ++ tsk = next; ++ } ++ } ++ return 0; ++ ++wait_to_die: ++ /* Wait for kthread_stop */ ++ set_current_state(TASK_INTERRUPTIBLE); ++ while (!kthread_should_stop()) { ++ schedule(); ++ set_current_state(TASK_INTERRUPTIBLE); ++ } ++ __set_current_state(TASK_RUNNING); ++ return 0; ++} ++ ++static inline int __fastpath_timer_check(struct task_struct *tsk) ++{ ++ /* tsk == current, ensure it is safe to use ->signal/sighand */ ++ if (unlikely(tsk->exit_state)) ++ return 0; ++ ++ if (!task_cputime_zero(&tsk->cputime_expires)) ++ return 1; ++ ++ if (!task_cputime_zero(&tsk->signal->cputime_expires)) ++ return 1; ++ ++ return 0; ++} ++ ++void run_posix_cpu_timers(struct task_struct *tsk) ++{ ++ unsigned long cpu = smp_processor_id(); ++ struct task_struct *tasklist; ++ ++ BUG_ON(!irqs_disabled()); ++ if(!per_cpu(posix_timer_task, cpu)) ++ return; ++ /* get per-cpu references */ ++ tasklist = per_cpu(posix_timer_tasklist, cpu); ++ ++ /* check to see if we're already queued */ ++ if (!tsk->posix_timer_list && __fastpath_timer_check(tsk)) { ++ get_task_struct(tsk); ++ if (tasklist) { ++ tsk->posix_timer_list = tasklist; ++ } else { ++ /* ++ * The list is terminated by a self-pointing ++ * task_struct ++ */ ++ tsk->posix_timer_list = tsk; ++ } ++ per_cpu(posix_timer_tasklist, cpu) = tsk; ++ ++ wake_up_process(per_cpu(posix_timer_task, cpu)); ++ } ++} ++ ++/* ++ * posix_cpu_thread_call - callback that gets triggered when a CPU is added. ++ * Here we can start up the necessary migration thread for the new CPU. 
++ */ ++static int posix_cpu_thread_call(struct notifier_block *nfb, ++ unsigned long action, void *hcpu) ++{ ++ int cpu = (long)hcpu; ++ struct task_struct *p; ++ struct sched_param param; ++ ++ switch (action) { ++ case CPU_UP_PREPARE: ++ p = kthread_create(posix_cpu_timers_thread, hcpu, ++ "posixcputmr/%d",cpu); ++ if (IS_ERR(p)) ++ return NOTIFY_BAD; ++ p->flags |= PF_NOFREEZE; ++ kthread_bind(p, cpu); ++ /* Must be high prio to avoid getting starved */ ++ param.sched_priority = MAX_RT_PRIO-1; ++ sched_setscheduler(p, SCHED_FIFO, ¶m); ++ per_cpu(posix_timer_task,cpu) = p; ++ break; ++ case CPU_ONLINE: ++ /* Strictly unneccessary, as first user will wake it. */ ++ wake_up_process(per_cpu(posix_timer_task,cpu)); ++ break; ++#ifdef CONFIG_HOTPLUG_CPU ++ case CPU_UP_CANCELED: ++ /* Unbind it from offline cpu so it can run. Fall thru. */ ++ kthread_bind(per_cpu(posix_timer_task, cpu), ++ cpumask_any(cpu_online_mask)); ++ kthread_stop(per_cpu(posix_timer_task,cpu)); ++ per_cpu(posix_timer_task,cpu) = NULL; ++ break; ++ case CPU_DEAD: ++ kthread_stop(per_cpu(posix_timer_task,cpu)); ++ per_cpu(posix_timer_task,cpu) = NULL; ++ break; ++#endif ++ } ++ return NOTIFY_OK; ++} ++ ++/* Register at highest priority so that task migration (migrate_all_tasks) ++ * happens before everything else. ++ */ ++static struct notifier_block posix_cpu_thread_notifier = { ++ .notifier_call = posix_cpu_thread_call, ++ .priority = 10 ++}; ++ ++static int __init posix_cpu_thread_init(void) ++{ ++ void *hcpu = (void *)(long)smp_processor_id(); ++ /* Start one for boot CPU. */ ++ unsigned long cpu; ++ ++ /* init the per-cpu posix_timer_tasklets */ ++ for_each_possible_cpu(cpu) ++ per_cpu(posix_timer_tasklist, cpu) = NULL; ++ ++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_UP_PREPARE, hcpu); ++ posix_cpu_thread_call(&posix_cpu_thread_notifier, CPU_ONLINE, hcpu); ++ register_cpu_notifier(&posix_cpu_thread_notifier); ++ return 0; ++} ++early_initcall(posix_cpu_thread_init); ++#else /* CONFIG_PREEMPT_RT_BASE */ ++void run_posix_cpu_timers(struct task_struct *tsk) ++{ ++ __run_posix_cpu_timers(tsk); ++} ++#endif /* CONFIG_PREEMPT_RT_BASE */ ++ + /* + * Set one of the process-wide special case CPU timers or RLIMIT_CPU. + * The tsk->sighand->siglock must be held by the caller. +diff -Nur linux-4.4.13.orig/kernel/time/posix-timers.c linux-4.4.13/kernel/time/posix-timers.c +--- linux-4.4.13.orig/kernel/time/posix-timers.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/time/posix-timers.c 2016-06-22 21:55:08.066523867 +0200 +@@ -506,6 +506,7 @@ + static struct pid *good_sigevent(sigevent_t * event) + { + struct task_struct *rtn = current->group_leader; ++ int sig = event->sigev_signo; + + if ((event->sigev_notify & SIGEV_THREAD_ID ) && + (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) || +@@ -514,7 +515,8 @@ + return NULL; + + if (((event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_NONE) && +- ((event->sigev_signo <= 0) || (event->sigev_signo > SIGRTMAX))) ++ (sig <= 0 || sig > SIGRTMAX || sig_kernel_only(sig) || ++ sig_kernel_coredump(sig))) + return NULL; + + return task_pid(rtn); +@@ -826,6 +828,20 @@ + return overrun; + } + ++/* ++ * Protected by RCU! ++ */ ++static void timer_wait_for_callback(struct k_clock *kc, struct k_itimer *timr) ++{ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (kc->timer_set == common_timer_set) ++ hrtimer_wait_for_timer(&timr->it.real.timer); ++ else ++ /* FIXME: Whacky hack for posix-cpu-timers */ ++ schedule_timeout(1); ++#endif ++} ++ + /* Set a POSIX.1b interval timer. 
*/ + /* timr->it_lock is taken. */ + static int +@@ -903,6 +919,7 @@ + if (!timr) + return -EINVAL; + ++ rcu_read_lock(); + kc = clockid_to_kclock(timr->it_clock); + if (WARN_ON_ONCE(!kc || !kc->timer_set)) + error = -EINVAL; +@@ -911,9 +928,12 @@ + + unlock_timer(timr, flag); + if (error == TIMER_RETRY) { ++ timer_wait_for_callback(kc, timr); + rtn = NULL; // We already got the old time... ++ rcu_read_unlock(); + goto retry; + } ++ rcu_read_unlock(); + + if (old_setting && !error && + copy_to_user(old_setting, &old_spec, sizeof (old_spec))) +@@ -951,10 +971,15 @@ + if (!timer) + return -EINVAL; + ++ rcu_read_lock(); + if (timer_delete_hook(timer) == TIMER_RETRY) { + unlock_timer(timer, flags); ++ timer_wait_for_callback(clockid_to_kclock(timer->it_clock), ++ timer); ++ rcu_read_unlock(); + goto retry_delete; + } ++ rcu_read_unlock(); + + spin_lock(¤t->sighand->siglock); + list_del(&timer->list); +@@ -980,8 +1005,18 @@ + retry_delete: + spin_lock_irqsave(&timer->it_lock, flags); + ++ /* On RT we can race with a deletion */ ++ if (!timer->it_signal) { ++ unlock_timer(timer, flags); ++ return; ++ } ++ + if (timer_delete_hook(timer) == TIMER_RETRY) { ++ rcu_read_lock(); + unlock_timer(timer, flags); ++ timer_wait_for_callback(clockid_to_kclock(timer->it_clock), ++ timer); ++ rcu_read_unlock(); + goto retry_delete; + } + list_del(&timer->list); +diff -Nur linux-4.4.13.orig/kernel/time/tick-broadcast-hrtimer.c linux-4.4.13/kernel/time/tick-broadcast-hrtimer.c +--- linux-4.4.13.orig/kernel/time/tick-broadcast-hrtimer.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/time/tick-broadcast-hrtimer.c 2016-06-22 21:55:08.066523867 +0200 +@@ -106,5 +106,6 @@ + { + hrtimer_init(&bctimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); + bctimer.function = bc_handler; ++ bctimer.irqsafe = true; + clockevents_register_device(&ce_broadcast_hrtimer); + } +diff -Nur linux-4.4.13.orig/kernel/time/tick-common.c linux-4.4.13/kernel/time/tick-common.c +--- linux-4.4.13.orig/kernel/time/tick-common.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/time/tick-common.c 2016-06-22 21:55:08.066523867 +0200 +@@ -79,13 +79,15 @@ + static void tick_periodic(int cpu) + { + if (tick_do_timer_cpu == cpu) { +- write_seqlock(&jiffies_lock); ++ raw_spin_lock(&jiffies_lock); ++ write_seqcount_begin(&jiffies_seq); + + /* Keep track of the next tick event */ + tick_next_period = ktime_add(tick_next_period, tick_period); + + do_timer(1); +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + update_wall_time(); + } + +@@ -157,9 +159,9 @@ + ktime_t next; + + do { +- seq = read_seqbegin(&jiffies_lock); ++ seq = read_seqcount_begin(&jiffies_seq); + next = tick_next_period; +- } while (read_seqretry(&jiffies_lock, seq)); ++ } while (read_seqcount_retry(&jiffies_seq, seq)); + + clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT); + +diff -Nur linux-4.4.13.orig/kernel/time/tick-sched.c linux-4.4.13/kernel/time/tick-sched.c +--- linux-4.4.13.orig/kernel/time/tick-sched.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/time/tick-sched.c 2016-06-22 21:55:08.066523867 +0200 +@@ -62,7 +62,8 @@ + return; + + /* Reevalute with jiffies_lock held */ +- write_seqlock(&jiffies_lock); ++ raw_spin_lock(&jiffies_lock); ++ write_seqcount_begin(&jiffies_seq); + + delta = ktime_sub(now, last_jiffies_update); + if (delta.tv64 >= tick_period.tv64) { +@@ -85,10 +86,12 @@ + /* Keep the tick_next_period variable up to date */ + tick_next_period = 
ktime_add(last_jiffies_update, tick_period); + } else { +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + return; + } +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + update_wall_time(); + } + +@@ -99,12 +102,14 @@ + { + ktime_t period; + +- write_seqlock(&jiffies_lock); ++ raw_spin_lock(&jiffies_lock); ++ write_seqcount_begin(&jiffies_seq); + /* Did we start the jiffies update yet ? */ + if (last_jiffies_update.tv64 == 0) + last_jiffies_update = tick_next_period; + period = last_jiffies_update; +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + return period; + } + +@@ -176,6 +181,11 @@ + return false; + } + ++ if (!arch_irq_work_has_interrupt()) { ++ trace_tick_stop(0, "missing irq work interrupt\n"); ++ return false; ++ } ++ + /* sched_clock_tick() needs us? */ + #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK + /* +@@ -204,6 +214,7 @@ + + static DEFINE_PER_CPU(struct irq_work, nohz_full_kick_work) = { + .func = nohz_full_kick_work_func, ++ .flags = IRQ_WORK_HARD_IRQ, + }; + + /* +@@ -578,10 +589,10 @@ + + /* Read jiffies and the time when jiffies were updated last */ + do { +- seq = read_seqbegin(&jiffies_lock); ++ seq = read_seqcount_begin(&jiffies_seq); + basemono = last_jiffies_update.tv64; + basejiff = jiffies; +- } while (read_seqretry(&jiffies_lock, seq)); ++ } while (read_seqcount_retry(&jiffies_seq, seq)); + ts->last_jiffies = basejiff; + + if (rcu_needs_cpu(basemono, &next_rcu) || +@@ -753,14 +764,7 @@ + return false; + + if (unlikely(local_softirq_pending() && cpu_online(cpu))) { +- static int ratelimit; +- +- if (ratelimit < 10 && +- (local_softirq_pending() & SOFTIRQ_STOP_IDLE_MASK)) { +- pr_warn("NOHZ: local_softirq_pending %02x\n", +- (unsigned int) local_softirq_pending()); +- ratelimit++; +- } ++ softirq_check_pending_idle(); + return false; + } + +@@ -1100,6 +1104,7 @@ + * Emulate tick processing via per-CPU hrtimers: + */ + hrtimer_init(&ts->sched_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); ++ ts->sched_timer.irqsafe = 1; + ts->sched_timer.function = tick_sched_timer; + + /* Get the next period (per cpu) */ +diff -Nur linux-4.4.13.orig/kernel/time/timekeeping.c linux-4.4.13/kernel/time/timekeeping.c +--- linux-4.4.13.orig/kernel/time/timekeeping.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/time/timekeeping.c 2016-06-22 21:55:08.066523867 +0200 +@@ -2047,8 +2047,10 @@ + */ + void xtime_update(unsigned long ticks) + { +- write_seqlock(&jiffies_lock); ++ raw_spin_lock(&jiffies_lock); ++ write_seqcount_begin(&jiffies_seq); + do_timer(ticks); +- write_sequnlock(&jiffies_lock); ++ write_seqcount_end(&jiffies_seq); ++ raw_spin_unlock(&jiffies_lock); + update_wall_time(); + } +diff -Nur linux-4.4.13.orig/kernel/time/timekeeping.h linux-4.4.13/kernel/time/timekeeping.h +--- linux-4.4.13.orig/kernel/time/timekeeping.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/time/timekeeping.h 2016-06-22 21:55:08.066523867 +0200 +@@ -19,7 +19,8 @@ + extern void do_timer(unsigned long ticks); + extern void update_wall_time(void); + +-extern seqlock_t jiffies_lock; ++extern raw_spinlock_t jiffies_lock; ++extern seqcount_t jiffies_seq; + + #define CS_NAME_LEN 32 + +diff -Nur linux-4.4.13.orig/kernel/time/timer.c linux-4.4.13/kernel/time/timer.c +--- linux-4.4.13.orig/kernel/time/timer.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/time/timer.c 2016-06-22 21:55:08.066523867 +0200 
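
xtime_update() above is the writer-side counterpart of the jiffies_lock split. In the timer.c diff that follows, the subtle RT change is switch_timer_base(): mainline marks the timer TIMER_MIGRATING and drops the old base lock, but on RT a preempted lock_timer_base() spinner could then loop forever, so the patch migrates only if the new base's lock can be taken without waiting. The essential move, sketched with demo_* placeholders:

        #include <linux/spinlock.h>

        struct demo_base {
                spinlock_t lock;
        };

        struct demo_timer {
                struct demo_base *base;
        };

        static struct demo_base *demo_switch_base(struct demo_timer *t,
                                                  struct demo_base *old,
                                                  struct demo_base *new)
        {
                /* never spin on the new lock while still holding the old one */
                if (spin_trylock(&new->lock)) {
                        t->base = new;
                        spin_unlock(&old->lock);
                        return new;     /* caller now holds new->lock */
                }
                return old;     /* contended: leave the timer on its old base */
        }
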
+@@ -80,6 +80,9 @@ + struct tvec_base { + spinlock_t lock; + struct timer_list *running_timer; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ wait_queue_head_t wait_for_running_timer; ++#endif + unsigned long timer_jiffies; + unsigned long next_timer; + unsigned long active_timers; +@@ -777,6 +780,39 @@ + cpu_relax(); + } + } ++#ifdef CONFIG_PREEMPT_RT_FULL ++static inline struct tvec_base *switch_timer_base(struct timer_list *timer, ++ struct tvec_base *old, ++ struct tvec_base *new) ++{ ++ /* ++ * We cannot do the below because we might be preempted and ++ * then the preempter would see NULL and loop forever. ++ */ ++ if (spin_trylock(&new->lock)) { ++ WRITE_ONCE(timer->flags, ++ (timer->flags & ~TIMER_BASEMASK) | new->cpu); ++ spin_unlock(&old->lock); ++ return new; ++ } ++ return old; ++} ++ ++#else ++static inline struct tvec_base *switch_timer_base(struct timer_list *timer, ++ struct tvec_base *old, ++ struct tvec_base *new) ++{ ++ /* See the comment in lock_timer_base() */ ++ timer->flags |= TIMER_MIGRATING; ++ ++ spin_unlock(&old->lock); ++ spin_lock(&new->lock); ++ WRITE_ONCE(timer->flags, ++ (timer->flags & ~TIMER_BASEMASK) | new->cpu); ++ return new; ++} ++#endif + + static inline int + __mod_timer(struct timer_list *timer, unsigned long expires, +@@ -807,16 +843,8 @@ + * handler yet has not finished. This also guarantees that + * the timer is serialized wrt itself. + */ +- if (likely(base->running_timer != timer)) { +- /* See the comment in lock_timer_base() */ +- timer->flags |= TIMER_MIGRATING; +- +- spin_unlock(&base->lock); +- base = new_base; +- spin_lock(&base->lock); +- WRITE_ONCE(timer->flags, +- (timer->flags & ~TIMER_BASEMASK) | base->cpu); +- } ++ if (likely(base->running_timer != timer)) ++ base = switch_timer_base(timer, base, new_base); + } + + timer->expires = expires; +@@ -1006,6 +1034,33 @@ + } + EXPORT_SYMBOL_GPL(add_timer_on); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * Wait for a running timer ++ */ ++static void wait_for_running_timer(struct timer_list *timer) ++{ ++ struct tvec_base *base; ++ u32 tf = timer->flags; ++ ++ if (tf & TIMER_MIGRATING) ++ return; ++ ++ base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK); ++ wait_event(base->wait_for_running_timer, ++ base->running_timer != timer); ++} ++ ++# define wakeup_timer_waiters(b) wake_up(&(b)->wait_for_running_timer) ++#else ++static inline void wait_for_running_timer(struct timer_list *timer) ++{ ++ cpu_relax(); ++} ++ ++# define wakeup_timer_waiters(b) do { } while (0) ++#endif ++ + /** + * del_timer - deactive a timer. + * @timer: the timer to be deactivated +@@ -1063,7 +1118,7 @@ + } + EXPORT_SYMBOL(try_to_del_timer_sync); + +-#ifdef CONFIG_SMP ++#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL) + /** + * del_timer_sync - deactivate a timer and wait for the handler to finish. 
+ * @timer: the timer to be deactivated +@@ -1123,7 +1178,7 @@ + int ret = try_to_del_timer_sync(timer); + if (ret >= 0) + return ret; +- cpu_relax(); ++ wait_for_running_timer(timer); + } + } + EXPORT_SYMBOL(del_timer_sync); +@@ -1248,15 +1303,17 @@ + if (irqsafe) { + spin_unlock(&base->lock); + call_timer_fn(timer, fn, data); ++ base->running_timer = NULL; + spin_lock(&base->lock); + } else { + spin_unlock_irq(&base->lock); + call_timer_fn(timer, fn, data); ++ base->running_timer = NULL; + spin_lock_irq(&base->lock); + } + } + } +- base->running_timer = NULL; ++ wakeup_timer_waiters(base); + spin_unlock_irq(&base->lock); + } + +@@ -1390,6 +1447,14 @@ + if (cpu_is_offline(smp_processor_id())) + return expires; + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ /* ++ * On PREEMPT_RT we cannot sleep here. As a result we can't take ++ * the base lock to check when the next timer is pending and so ++ * we assume the next jiffy. ++ */ ++ return basem + TICK_NSEC; ++#endif + spin_lock(&base->lock); + if (base->active_timers) { + if (time_before_eq(base->next_timer, base->timer_jiffies)) +@@ -1416,13 +1481,13 @@ + + /* Note: this timer irq context must be accounted for as well. */ + account_process_tick(p, user_tick); ++ scheduler_tick(); + run_local_timers(); + rcu_check_callbacks(user_tick); +-#ifdef CONFIG_IRQ_WORK ++#if defined(CONFIG_IRQ_WORK) + if (in_irq()) + irq_work_tick(); + #endif +- scheduler_tick(); + run_posix_cpu_timers(p); + } + +@@ -1433,6 +1498,8 @@ + { + struct tvec_base *base = this_cpu_ptr(&tvec_bases); + ++ irq_work_tick_soft(); ++ + if (time_after_eq(jiffies, base->timer_jiffies)) + __run_timers(base); + } +@@ -1589,7 +1656,7 @@ + + BUG_ON(cpu_online(cpu)); + old_base = per_cpu_ptr(&tvec_bases, cpu); +- new_base = get_cpu_ptr(&tvec_bases); ++ new_base = get_local_ptr(&tvec_bases); + /* + * The caller is globally serialized and nobody else + * takes two locks at once, deadlock is not possible. +@@ -1613,7 +1680,7 @@ + + spin_unlock(&old_base->lock); + spin_unlock_irq(&new_base->lock); +- put_cpu_ptr(&tvec_bases); ++ put_local_ptr(&tvec_bases); + } + + static int timer_cpu_notify(struct notifier_block *self, +@@ -1645,6 +1712,9 @@ + + base->cpu = cpu; + spin_lock_init(&base->lock); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ init_waitqueue_head(&base->wait_for_running_timer); ++#endif + + base->timer_jiffies = jiffies; + base->next_timer = base->timer_jiffies; +diff -Nur linux-4.4.13.orig/kernel/trace/Kconfig linux-4.4.13/kernel/trace/Kconfig +--- linux-4.4.13.orig/kernel/trace/Kconfig 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/trace/Kconfig 2016-06-22 21:55:08.066523867 +0200 +@@ -187,6 +187,24 @@ + enabled. This option and the preempt-off timing option can be + used together or separately.) + ++config INTERRUPT_OFF_HIST ++ bool "Interrupts-off Latency Histogram" ++ depends on IRQSOFF_TRACER ++ help ++ This option generates continuously updated histograms (one per cpu) ++ of the duration of time periods with interrupts disabled. The ++ histograms are disabled by default. To enable them, write a non-zero ++ number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff ++ ++ If PREEMPT_OFF_HIST is also selected, additional histograms (one ++ per cpu) are generated that accumulate the duration of time periods ++ when both interrupts and preemption are disabled. 
The histogram data ++ will be located in the debug file system at ++ ++ /sys/kernel/debug/tracing/latency_hist/irqsoff ++ + config PREEMPT_TRACER + bool "Preemption-off Latency Tracer" + default n +@@ -211,6 +229,24 @@ + enabled. This option and the irqs-off timing option can be + used together or separately.) + ++config PREEMPT_OFF_HIST ++ bool "Preemption-off Latency Histogram" ++ depends on PREEMPT_TRACER ++ help ++ This option generates continuously updated histograms (one per cpu) ++ of the duration of time periods with preemption disabled. The ++ histograms are disabled by default. To enable them, write a non-zero ++ number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff ++ ++ If INTERRUPT_OFF_HIST is also selected, additional histograms (one ++ per cpu) are generated that accumulate the duration of time periods ++ when both interrupts and preemption are disabled. The histogram data ++ will be located in the debug file system at ++ ++ /sys/kernel/debug/tracing/latency_hist/preemptoff ++ + config SCHED_TRACER + bool "Scheduling Latency Tracer" + select GENERIC_TRACER +@@ -221,6 +257,74 @@ + This tracer tracks the latency of the highest priority task + to be scheduled in, starting from the point it has woken up. + ++config WAKEUP_LATENCY_HIST ++ bool "Scheduling Latency Histogram" ++ depends on SCHED_TRACER ++ help ++ This option generates continuously updated histograms (one per cpu) ++ of the scheduling latency of the highest priority task. ++ The histograms are disabled by default. To enable them, write a ++ non-zero number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/wakeup ++ ++ Two different algorithms are used, one to determine the latency of ++ processes that exclusively use the highest priority of the system and ++ another one to determine the latency of processes that share the ++ highest system priority with other processes. The former is used to ++ improve hardware and system software, the latter to optimize the ++ priority design of a given system. The histogram data will be ++ located in the debug file system at ++ ++ /sys/kernel/debug/tracing/latency_hist/wakeup ++ ++ and ++ ++ /sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio ++ ++ If both Scheduling Latency Histogram and Missed Timer Offsets ++ Histogram are selected, additional histogram data will be collected ++ that contain, in addition to the wakeup latency, the timer latency, in ++ case the wakeup was triggered by an expired timer. These histograms ++ are available in the ++ ++ /sys/kernel/debug/tracing/latency_hist/timerandwakeup ++ ++ directory. They reflect the apparent interrupt and scheduling latency ++ and are best suitable to determine the worst-case latency of a given ++ system. To enable these histograms, write a non-zero number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup ++ ++config MISSED_TIMER_OFFSETS_HIST ++ depends on HIGH_RES_TIMERS ++ select GENERIC_TRACER ++ bool "Missed Timer Offsets Histogram" ++ help ++ Generate a histogram of missed timer offsets in microseconds. The ++ histograms are disabled by default. 
To enable them, write a non-zero ++ number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/missed_timer_offsets ++ ++ The histogram data will be located in the debug file system at ++ ++ /sys/kernel/debug/tracing/latency_hist/missed_timer_offsets ++ ++ If both Scheduling Latency Histogram and Missed Timer Offsets ++ Histogram are selected, additional histogram data will be collected ++ that contain, in addition to the wakeup latency, the timer latency, in ++ case the wakeup was triggered by an expired timer. These histograms ++ are available in the ++ ++ /sys/kernel/debug/tracing/latency_hist/timerandwakeup ++ ++ directory. They reflect the apparent interrupt and scheduling latency ++ and are best suitable to determine the worst-case latency of a given ++ system. To enable these histograms, write a non-zero number to ++ ++ /sys/kernel/debug/tracing/latency_hist/enable/timerandwakeup ++ + config ENABLE_DEFAULT_TRACERS + bool "Trace process context switches and events" + depends on !GENERIC_TRACER +diff -Nur linux-4.4.13.orig/kernel/trace/latency_hist.c linux-4.4.13/kernel/trace/latency_hist.c +--- linux-4.4.13.orig/kernel/trace/latency_hist.c 1970-01-01 01:00:00.000000000 +0100 ++++ linux-4.4.13/kernel/trace/latency_hist.c 2016-06-22 21:55:08.066523867 +0200 +@@ -0,0 +1,1178 @@ ++/* ++ * kernel/trace/latency_hist.c ++ * ++ * Add support for histograms of preemption-off latency and ++ * interrupt-off latency and wakeup latency, it depends on ++ * Real-Time Preemption Support. ++ * ++ * Copyright (C) 2005 MontaVista Software, Inc. ++ * Yi Yang ++ * ++ * Converted to work with the new latency tracer. ++ * Copyright (C) 2008 Red Hat, Inc. ++ * Steven Rostedt ++ * ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "trace.h" ++#include ++ ++#define NSECS_PER_USECS 1000L ++ ++#define CREATE_TRACE_POINTS ++#include ++ ++enum { ++ IRQSOFF_LATENCY = 0, ++ PREEMPTOFF_LATENCY, ++ PREEMPTIRQSOFF_LATENCY, ++ WAKEUP_LATENCY, ++ WAKEUP_LATENCY_SHAREDPRIO, ++ MISSED_TIMER_OFFSETS, ++ TIMERANDWAKEUP_LATENCY, ++ MAX_LATENCY_TYPE, ++}; ++ ++#define MAX_ENTRY_NUM 10240 ++ ++struct hist_data { ++ atomic_t hist_mode; /* 0 log, 1 don't log */ ++ long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */ ++ long min_lat; ++ long max_lat; ++ unsigned long long below_hist_bound_samples; ++ unsigned long long above_hist_bound_samples; ++ long long accumulate_lat; ++ unsigned long long total_samples; ++ unsigned long long hist_array[MAX_ENTRY_NUM]; ++}; ++ ++struct enable_data { ++ int latency_type; ++ int enabled; ++}; ++ ++static char *latency_hist_dir_root = "latency_hist"; ++ ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++static DEFINE_PER_CPU(struct hist_data, irqsoff_hist); ++static char *irqsoff_hist_dir = "irqsoff"; ++static DEFINE_PER_CPU(cycles_t, hist_irqsoff_start); ++static DEFINE_PER_CPU(int, hist_irqsoff_counting); ++#endif ++ ++#ifdef CONFIG_PREEMPT_OFF_HIST ++static DEFINE_PER_CPU(struct hist_data, preemptoff_hist); ++static char *preemptoff_hist_dir = "preemptoff"; ++static DEFINE_PER_CPU(cycles_t, hist_preemptoff_start); ++static DEFINE_PER_CPU(int, hist_preemptoff_counting); ++#endif ++ ++#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST) ++static DEFINE_PER_CPU(struct hist_data, preemptirqsoff_hist); ++static char *preemptirqsoff_hist_dir = "preemptirqsoff"; ++static DEFINE_PER_CPU(cycles_t, hist_preemptirqsoff_start); ++static DEFINE_PER_CPU(int, hist_preemptirqsoff_counting); 
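
These per-CPU counters feed latency_hist() just below. Ignoring the offset handling and the max-latency process record, the per-sample accounting is a fixed-size array with one slot per microsecond plus exact under-/overflow, min, max and average tracking; roughly (placeholder names; min starts at LONG_MAX as in hist_reset()):

        #include <limits.h>

        #define DEMO_ENTRIES 10240      /* mirrors MAX_ENTRY_NUM */

        struct demo_hist {
                unsigned long long slot[DEMO_ENTRIES];
                unsigned long long below, above, samples;
                long long sum;
                long min, max;
        };

        static void demo_account(struct demo_hist *h, long lat)
        {
                if (lat < 0)
                        h->below++;             /* below the histogram range */
                else if (lat >= DEMO_ENTRIES)
                        h->above++;             /* above the histogram range */
                else
                        h->slot[lat]++;

                if (lat > h->max)
                        h->max = lat;
                if (lat < h->min)
                        h->min = lat;
                h->samples++;
                h->sum += lat;          /* average = sum / samples on read-out */
        }
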
++#endif ++ ++#if defined(CONFIG_PREEMPT_OFF_HIST) || defined(CONFIG_INTERRUPT_OFF_HIST) ++static notrace void probe_preemptirqsoff_hist(void *v, int reason, int start); ++static struct enable_data preemptirqsoff_enabled_data = { ++ .latency_type = PREEMPTIRQSOFF_LATENCY, ++ .enabled = 0, ++}; ++#endif ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++struct maxlatproc_data { ++ char comm[FIELD_SIZEOF(struct task_struct, comm)]; ++ char current_comm[FIELD_SIZEOF(struct task_struct, comm)]; ++ int pid; ++ int current_pid; ++ int prio; ++ int current_prio; ++ long latency; ++ long timeroffset; ++ cycle_t timestamp; ++}; ++#endif ++ ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist); ++static DEFINE_PER_CPU(struct hist_data, wakeup_latency_hist_sharedprio); ++static char *wakeup_latency_hist_dir = "wakeup"; ++static char *wakeup_latency_hist_dir_sharedprio = "sharedprio"; ++static notrace void probe_wakeup_latency_hist_start(void *v, ++ struct task_struct *p); ++static notrace void probe_wakeup_latency_hist_stop(void *v, ++ bool preempt, struct task_struct *prev, struct task_struct *next); ++static notrace void probe_sched_migrate_task(void *, ++ struct task_struct *task, int cpu); ++static struct enable_data wakeup_latency_enabled_data = { ++ .latency_type = WAKEUP_LATENCY, ++ .enabled = 0, ++}; ++static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc); ++static DEFINE_PER_CPU(struct maxlatproc_data, wakeup_maxlatproc_sharedprio); ++static DEFINE_PER_CPU(struct task_struct *, wakeup_task); ++static DEFINE_PER_CPU(int, wakeup_sharedprio); ++static unsigned long wakeup_pid; ++#endif ++ ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++static DEFINE_PER_CPU(struct hist_data, missed_timer_offsets); ++static char *missed_timer_offsets_dir = "missed_timer_offsets"; ++static notrace void probe_hrtimer_interrupt(void *v, int cpu, ++ long long offset, struct task_struct *curr, struct task_struct *task); ++static struct enable_data missed_timer_offsets_enabled_data = { ++ .latency_type = MISSED_TIMER_OFFSETS, ++ .enabled = 0, ++}; ++static DEFINE_PER_CPU(struct maxlatproc_data, missed_timer_offsets_maxlatproc); ++static unsigned long missed_timer_offsets_pid; ++#endif ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static DEFINE_PER_CPU(struct hist_data, timerandwakeup_latency_hist); ++static char *timerandwakeup_latency_hist_dir = "timerandwakeup"; ++static struct enable_data timerandwakeup_enabled_data = { ++ .latency_type = TIMERANDWAKEUP_LATENCY, ++ .enabled = 0, ++}; ++static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc); ++#endif ++ ++void notrace latency_hist(int latency_type, int cpu, long latency, ++ long timeroffset, cycle_t stop, ++ struct task_struct *p) ++{ ++ struct hist_data *my_hist; ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ struct maxlatproc_data *mp = NULL; ++#endif ++ ++ if (!cpu_possible(cpu) || latency_type < 0 || ++ latency_type >= MAX_LATENCY_TYPE) ++ return; ++ ++ switch (latency_type) { ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ case IRQSOFF_LATENCY: ++ my_hist = &per_cpu(irqsoff_hist, cpu); ++ break; ++#endif ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ case PREEMPTOFF_LATENCY: ++ my_hist = &per_cpu(preemptoff_hist, cpu); ++ break; ++#endif ++#if defined(CONFIG_PREEMPT_OFF_HIST) && defined(CONFIG_INTERRUPT_OFF_HIST) ++ case PREEMPTIRQSOFF_LATENCY: ++ my_hist = 
&per_cpu(preemptirqsoff_hist, cpu); ++ break; ++#endif ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ case WAKEUP_LATENCY: ++ my_hist = &per_cpu(wakeup_latency_hist, cpu); ++ mp = &per_cpu(wakeup_maxlatproc, cpu); ++ break; ++ case WAKEUP_LATENCY_SHAREDPRIO: ++ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu); ++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu); ++ break; ++#endif ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ case MISSED_TIMER_OFFSETS: ++ my_hist = &per_cpu(missed_timer_offsets, cpu); ++ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu); ++ break; ++#endif ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ case TIMERANDWAKEUP_LATENCY: ++ my_hist = &per_cpu(timerandwakeup_latency_hist, cpu); ++ mp = &per_cpu(timerandwakeup_maxlatproc, cpu); ++ break; ++#endif ++ ++ default: ++ return; ++ } ++ ++ latency += my_hist->offset; ++ ++ if (atomic_read(&my_hist->hist_mode) == 0) ++ return; ++ ++ if (latency < 0 || latency >= MAX_ENTRY_NUM) { ++ if (latency < 0) ++ my_hist->below_hist_bound_samples++; ++ else ++ my_hist->above_hist_bound_samples++; ++ } else ++ my_hist->hist_array[latency]++; ++ ++ if (unlikely(latency > my_hist->max_lat || ++ my_hist->min_lat == LONG_MAX)) { ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ if (latency_type == WAKEUP_LATENCY || ++ latency_type == WAKEUP_LATENCY_SHAREDPRIO || ++ latency_type == MISSED_TIMER_OFFSETS || ++ latency_type == TIMERANDWAKEUP_LATENCY) { ++ strncpy(mp->comm, p->comm, sizeof(mp->comm)); ++ strncpy(mp->current_comm, current->comm, ++ sizeof(mp->current_comm)); ++ mp->pid = task_pid_nr(p); ++ mp->current_pid = task_pid_nr(current); ++ mp->prio = p->prio; ++ mp->current_prio = current->prio; ++ mp->latency = latency; ++ mp->timeroffset = timeroffset; ++ mp->timestamp = stop; ++ } ++#endif ++ my_hist->max_lat = latency; ++ } ++ if (unlikely(latency < my_hist->min_lat)) ++ my_hist->min_lat = latency; ++ my_hist->total_samples++; ++ my_hist->accumulate_lat += latency; ++} ++ ++static void *l_start(struct seq_file *m, loff_t *pos) ++{ ++ loff_t *index_ptr = NULL; ++ loff_t index = *pos; ++ struct hist_data *my_hist = m->private; ++ ++ if (index == 0) { ++ char minstr[32], avgstr[32], maxstr[32]; ++ ++ atomic_dec(&my_hist->hist_mode); ++ ++ if (likely(my_hist->total_samples)) { ++ long avg = (long) div64_s64(my_hist->accumulate_lat, ++ my_hist->total_samples); ++ snprintf(minstr, sizeof(minstr), "%ld", ++ my_hist->min_lat - my_hist->offset); ++ snprintf(avgstr, sizeof(avgstr), "%ld", ++ avg - my_hist->offset); ++ snprintf(maxstr, sizeof(maxstr), "%ld", ++ my_hist->max_lat - my_hist->offset); ++ } else { ++ strcpy(minstr, ""); ++ strcpy(avgstr, minstr); ++ strcpy(maxstr, minstr); ++ } ++ ++ seq_printf(m, "#Minimum latency: %s microseconds\n" ++ "#Average latency: %s microseconds\n" ++ "#Maximum latency: %s microseconds\n" ++ "#Total samples: %llu\n" ++ "#There are %llu samples lower than %ld" ++ " microseconds.\n" ++ "#There are %llu samples greater or equal" ++ " than %ld microseconds.\n" ++ "#usecs\t%16s\n", ++ minstr, avgstr, maxstr, ++ my_hist->total_samples, ++ my_hist->below_hist_bound_samples, ++ -my_hist->offset, ++ my_hist->above_hist_bound_samples, ++ MAX_ENTRY_NUM - my_hist->offset, ++ "samples"); ++ } ++ if (index < MAX_ENTRY_NUM) { ++ index_ptr = kmalloc(sizeof(loff_t), GFP_KERNEL); ++ if (index_ptr) ++ *index_ptr = index; ++ } ++ ++ return index_ptr; ++} ++ ++static void *l_next(struct seq_file *m, void *p, loff_t *pos) ++{ ++ loff_t 
*index_ptr = p; ++ struct hist_data *my_hist = m->private; ++ ++ if (++*pos >= MAX_ENTRY_NUM) { ++ atomic_inc(&my_hist->hist_mode); ++ return NULL; ++ } ++ *index_ptr = *pos; ++ return index_ptr; ++} ++ ++static void l_stop(struct seq_file *m, void *p) ++{ ++ kfree(p); ++} ++ ++static int l_show(struct seq_file *m, void *p) ++{ ++ int index = *(loff_t *) p; ++ struct hist_data *my_hist = m->private; ++ ++ seq_printf(m, "%6ld\t%16llu\n", index - my_hist->offset, ++ my_hist->hist_array[index]); ++ return 0; ++} ++ ++static const struct seq_operations latency_hist_seq_op = { ++ .start = l_start, ++ .next = l_next, ++ .stop = l_stop, ++ .show = l_show ++}; ++ ++static int latency_hist_open(struct inode *inode, struct file *file) ++{ ++ int ret; ++ ++ ret = seq_open(file, &latency_hist_seq_op); ++ if (!ret) { ++ struct seq_file *seq = file->private_data; ++ seq->private = inode->i_private; ++ } ++ return ret; ++} ++ ++static const struct file_operations latency_hist_fops = { ++ .open = latency_hist_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = seq_release, ++}; ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static void clear_maxlatprocdata(struct maxlatproc_data *mp) ++{ ++ mp->comm[0] = mp->current_comm[0] = '\0'; ++ mp->prio = mp->current_prio = mp->pid = mp->current_pid = ++ mp->latency = mp->timeroffset = -1; ++ mp->timestamp = 0; ++} ++#endif ++ ++static void hist_reset(struct hist_data *hist) ++{ ++ atomic_dec(&hist->hist_mode); ++ ++ memset(hist->hist_array, 0, sizeof(hist->hist_array)); ++ hist->below_hist_bound_samples = 0ULL; ++ hist->above_hist_bound_samples = 0ULL; ++ hist->min_lat = LONG_MAX; ++ hist->max_lat = LONG_MIN; ++ hist->total_samples = 0ULL; ++ hist->accumulate_lat = 0LL; ++ ++ atomic_inc(&hist->hist_mode); ++} ++ ++static ssize_t ++latency_hist_reset(struct file *file, const char __user *a, ++ size_t size, loff_t *off) ++{ ++ int cpu; ++ struct hist_data *hist = NULL; ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ struct maxlatproc_data *mp = NULL; ++#endif ++ off_t latency_type = (off_t) file->private_data; ++ ++ for_each_online_cpu(cpu) { ++ ++ switch (latency_type) { ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ case PREEMPTOFF_LATENCY: ++ hist = &per_cpu(preemptoff_hist, cpu); ++ break; ++#endif ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ case IRQSOFF_LATENCY: ++ hist = &per_cpu(irqsoff_hist, cpu); ++ break; ++#endif ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ case PREEMPTIRQSOFF_LATENCY: ++ hist = &per_cpu(preemptirqsoff_hist, cpu); ++ break; ++#endif ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ case WAKEUP_LATENCY: ++ hist = &per_cpu(wakeup_latency_hist, cpu); ++ mp = &per_cpu(wakeup_maxlatproc, cpu); ++ break; ++ case WAKEUP_LATENCY_SHAREDPRIO: ++ hist = &per_cpu(wakeup_latency_hist_sharedprio, cpu); ++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, cpu); ++ break; ++#endif ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ case MISSED_TIMER_OFFSETS: ++ hist = &per_cpu(missed_timer_offsets, cpu); ++ mp = &per_cpu(missed_timer_offsets_maxlatproc, cpu); ++ break; ++#endif ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ case TIMERANDWAKEUP_LATENCY: ++ hist = &per_cpu(timerandwakeup_latency_hist, cpu); ++ mp = &per_cpu(timerandwakeup_maxlatproc, cpu); ++ break; ++#endif ++ } ++ ++ hist_reset(hist); ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ if 
(latency_type == WAKEUP_LATENCY || ++ latency_type == WAKEUP_LATENCY_SHAREDPRIO || ++ latency_type == MISSED_TIMER_OFFSETS || ++ latency_type == TIMERANDWAKEUP_LATENCY) ++ clear_maxlatprocdata(mp); ++#endif ++ } ++ ++ return size; ++} ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static ssize_t ++show_pid(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) ++{ ++ char buf[64]; ++ int r; ++ unsigned long *this_pid = file->private_data; ++ ++ r = snprintf(buf, sizeof(buf), "%lu\n", *this_pid); ++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); ++} ++ ++static ssize_t do_pid(struct file *file, const char __user *ubuf, ++ size_t cnt, loff_t *ppos) ++{ ++ char buf[64]; ++ unsigned long pid; ++ unsigned long *this_pid = file->private_data; ++ ++ if (cnt >= sizeof(buf)) ++ return -EINVAL; ++ ++ if (copy_from_user(&buf, ubuf, cnt)) ++ return -EFAULT; ++ ++ buf[cnt] = '\0'; ++ ++ if (kstrtoul(buf, 10, &pid)) ++ return -EINVAL; ++ ++ *this_pid = pid; ++ ++ return cnt; ++} ++#endif ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static ssize_t ++show_maxlatproc(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) ++{ ++ int r; ++ struct maxlatproc_data *mp = file->private_data; ++ int strmaxlen = (TASK_COMM_LEN * 2) + (8 * 8); ++ unsigned long long t; ++ unsigned long usecs, secs; ++ char *buf; ++ ++ if (mp->pid == -1 || mp->current_pid == -1) { ++ buf = "(none)\n"; ++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, ++ strlen(buf)); ++ } ++ ++ buf = kmalloc(strmaxlen, GFP_KERNEL); ++ if (buf == NULL) ++ return -ENOMEM; ++ ++ t = ns2usecs(mp->timestamp); ++ usecs = do_div(t, USEC_PER_SEC); ++ secs = (unsigned long) t; ++ r = snprintf(buf, strmaxlen, ++ "%d %d %ld (%ld) %s <- %d %d %s %lu.%06lu\n", mp->pid, ++ MAX_RT_PRIO-1 - mp->prio, mp->latency, mp->timeroffset, mp->comm, ++ mp->current_pid, MAX_RT_PRIO-1 - mp->current_prio, mp->current_comm, ++ secs, usecs); ++ r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r); ++ kfree(buf); ++ return r; ++} ++#endif ++ ++static ssize_t ++show_enable(struct file *file, char __user *ubuf, size_t cnt, loff_t *ppos) ++{ ++ char buf[64]; ++ struct enable_data *ed = file->private_data; ++ int r; ++ ++ r = snprintf(buf, sizeof(buf), "%d\n", ed->enabled); ++ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r); ++} ++ ++static ssize_t ++do_enable(struct file *file, const char __user *ubuf, size_t cnt, loff_t *ppos) ++{ ++ char buf[64]; ++ long enable; ++ struct enable_data *ed = file->private_data; ++ ++ if (cnt >= sizeof(buf)) ++ return -EINVAL; ++ ++ if (copy_from_user(&buf, ubuf, cnt)) ++ return -EFAULT; ++ ++ buf[cnt] = 0; ++ ++ if (kstrtoul(buf, 10, &enable)) ++ return -EINVAL; ++ ++ if ((enable && ed->enabled) || (!enable && !ed->enabled)) ++ return cnt; ++ ++ if (enable) { ++ int ret; ++ ++ switch (ed->latency_type) { ++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) ++ case PREEMPTIRQSOFF_LATENCY: ++ ret = register_trace_preemptirqsoff_hist( ++ probe_preemptirqsoff_hist, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_preemptirqsoff_hist " ++ "to trace_preemptirqsoff_hist\n"); ++ return ret; ++ } ++ break; ++#endif ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ case WAKEUP_LATENCY: ++ ret = register_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_wakeup_latency_hist_start " ++ "to 
trace_sched_wakeup\n"); ++ return ret; ++ } ++ ret = register_trace_sched_wakeup_new( ++ probe_wakeup_latency_hist_start, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_wakeup_latency_hist_start " ++ "to trace_sched_wakeup_new\n"); ++ unregister_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ return ret; ++ } ++ ret = register_trace_sched_switch( ++ probe_wakeup_latency_hist_stop, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_wakeup_latency_hist_stop " ++ "to trace_sched_switch\n"); ++ unregister_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_wakeup_new( ++ probe_wakeup_latency_hist_start, NULL); ++ return ret; ++ } ++ ret = register_trace_sched_migrate_task( ++ probe_sched_migrate_task, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_sched_migrate_task " ++ "to trace_sched_migrate_task\n"); ++ unregister_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_wakeup_new( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_switch( ++ probe_wakeup_latency_hist_stop, NULL); ++ return ret; ++ } ++ break; ++#endif ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ case MISSED_TIMER_OFFSETS: ++ ret = register_trace_hrtimer_interrupt( ++ probe_hrtimer_interrupt, NULL); ++ if (ret) { ++ pr_info("wakeup trace: Couldn't assign " ++ "probe_hrtimer_interrupt " ++ "to trace_hrtimer_interrupt\n"); ++ return ret; ++ } ++ break; ++#endif ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ case TIMERANDWAKEUP_LATENCY: ++ if (!wakeup_latency_enabled_data.enabled || ++ !missed_timer_offsets_enabled_data.enabled) ++ return -EINVAL; ++ break; ++#endif ++ default: ++ break; ++ } ++ } else { ++ switch (ed->latency_type) { ++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) ++ case PREEMPTIRQSOFF_LATENCY: ++ { ++ int cpu; ++ ++ unregister_trace_preemptirqsoff_hist( ++ probe_preemptirqsoff_hist, NULL); ++ for_each_online_cpu(cpu) { ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ per_cpu(hist_irqsoff_counting, ++ cpu) = 0; ++#endif ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ per_cpu(hist_preemptoff_counting, ++ cpu) = 0; ++#endif ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ per_cpu(hist_preemptirqsoff_counting, ++ cpu) = 0; ++#endif ++ } ++ } ++ break; ++#endif ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ case WAKEUP_LATENCY: ++ { ++ int cpu; ++ ++ unregister_trace_sched_wakeup( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_wakeup_new( ++ probe_wakeup_latency_hist_start, NULL); ++ unregister_trace_sched_switch( ++ probe_wakeup_latency_hist_stop, NULL); ++ unregister_trace_sched_migrate_task( ++ probe_sched_migrate_task, NULL); ++ ++ for_each_online_cpu(cpu) { ++ per_cpu(wakeup_task, cpu) = NULL; ++ per_cpu(wakeup_sharedprio, cpu) = 0; ++ } ++ } ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ timerandwakeup_enabled_data.enabled = 0; ++#endif ++ break; ++#endif ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ case MISSED_TIMER_OFFSETS: ++ unregister_trace_hrtimer_interrupt( ++ probe_hrtimer_interrupt, NULL); ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ timerandwakeup_enabled_data.enabled = 0; ++#endif ++ break; ++#endif ++ default: ++ break; ++ } ++ } ++ ed->enabled = enable; ++ return cnt; ++} ++ ++static const struct file_operations latency_hist_reset_fops = { ++ .open = tracing_open_generic, ++ .write = latency_hist_reset, ++}; ++ ++static 
const struct file_operations enable_fops = { ++ .open = tracing_open_generic, ++ .read = show_enable, ++ .write = do_enable, ++}; ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++static const struct file_operations pid_fops = { ++ .open = tracing_open_generic, ++ .read = show_pid, ++ .write = do_pid, ++}; ++ ++static const struct file_operations maxlatproc_fops = { ++ .open = tracing_open_generic, ++ .read = show_maxlatproc, ++}; ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) ++static notrace void probe_preemptirqsoff_hist(void *v, int reason, ++ int starthist) ++{ ++ int cpu = raw_smp_processor_id(); ++ int time_set = 0; ++ ++ if (starthist) { ++ cycle_t uninitialized_var(start); ++ ++ if (!preempt_count() && !irqs_disabled()) ++ return; ++ ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ if ((reason == IRQS_OFF || reason == TRACE_START) && ++ !per_cpu(hist_irqsoff_counting, cpu)) { ++ per_cpu(hist_irqsoff_counting, cpu) = 1; ++ start = ftrace_now(cpu); ++ time_set++; ++ per_cpu(hist_irqsoff_start, cpu) = start; ++ } ++#endif ++ ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ if ((reason == PREEMPT_OFF || reason == TRACE_START) && ++ !per_cpu(hist_preemptoff_counting, cpu)) { ++ per_cpu(hist_preemptoff_counting, cpu) = 1; ++ if (!(time_set++)) ++ start = ftrace_now(cpu); ++ per_cpu(hist_preemptoff_start, cpu) = start; ++ } ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ if (per_cpu(hist_irqsoff_counting, cpu) && ++ per_cpu(hist_preemptoff_counting, cpu) && ++ !per_cpu(hist_preemptirqsoff_counting, cpu)) { ++ per_cpu(hist_preemptirqsoff_counting, cpu) = 1; ++ if (!time_set) ++ start = ftrace_now(cpu); ++ per_cpu(hist_preemptirqsoff_start, cpu) = start; ++ } ++#endif ++ } else { ++ cycle_t uninitialized_var(stop); ++ ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ if ((reason == IRQS_ON || reason == TRACE_STOP) && ++ per_cpu(hist_irqsoff_counting, cpu)) { ++ cycle_t start = per_cpu(hist_irqsoff_start, cpu); ++ ++ stop = ftrace_now(cpu); ++ time_set++; ++ if (start) { ++ long latency = ((long) (stop - start)) / ++ NSECS_PER_USECS; ++ ++ latency_hist(IRQSOFF_LATENCY, cpu, latency, 0, ++ stop, NULL); ++ } ++ per_cpu(hist_irqsoff_counting, cpu) = 0; ++ } ++#endif ++ ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ if ((reason == PREEMPT_ON || reason == TRACE_STOP) && ++ per_cpu(hist_preemptoff_counting, cpu)) { ++ cycle_t start = per_cpu(hist_preemptoff_start, cpu); ++ ++ if (!(time_set++)) ++ stop = ftrace_now(cpu); ++ if (start) { ++ long latency = ((long) (stop - start)) / ++ NSECS_PER_USECS; ++ ++ latency_hist(PREEMPTOFF_LATENCY, cpu, latency, ++ 0, stop, NULL); ++ } ++ per_cpu(hist_preemptoff_counting, cpu) = 0; ++ } ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ if ((!per_cpu(hist_irqsoff_counting, cpu) || ++ !per_cpu(hist_preemptoff_counting, cpu)) && ++ per_cpu(hist_preemptirqsoff_counting, cpu)) { ++ cycle_t start = per_cpu(hist_preemptirqsoff_start, cpu); ++ ++ if (!time_set) ++ stop = ftrace_now(cpu); ++ if (start) { ++ long latency = ((long) (stop - start)) / ++ NSECS_PER_USECS; ++ ++ latency_hist(PREEMPTIRQSOFF_LATENCY, cpu, ++ latency, 0, stop, NULL); ++ } ++ per_cpu(hist_preemptirqsoff_counting, cpu) = 0; ++ } ++#endif ++ } ++} ++#endif ++ ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++static DEFINE_RAW_SPINLOCK(wakeup_lock); ++static notrace void probe_sched_migrate_task(void *v, struct task_struct *task, ++ int cpu) ++{ ++ int old_cpu = task_cpu(task); ++ 
++ if (cpu != old_cpu) { ++ unsigned long flags; ++ struct task_struct *cpu_wakeup_task; ++ ++ raw_spin_lock_irqsave(&wakeup_lock, flags); ++ ++ cpu_wakeup_task = per_cpu(wakeup_task, old_cpu); ++ if (task == cpu_wakeup_task) { ++ put_task_struct(cpu_wakeup_task); ++ per_cpu(wakeup_task, old_cpu) = NULL; ++ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = task; ++ get_task_struct(cpu_wakeup_task); ++ } ++ ++ raw_spin_unlock_irqrestore(&wakeup_lock, flags); ++ } ++} ++ ++static notrace void probe_wakeup_latency_hist_start(void *v, ++ struct task_struct *p) ++{ ++ unsigned long flags; ++ struct task_struct *curr = current; ++ int cpu = task_cpu(p); ++ struct task_struct *cpu_wakeup_task; ++ ++ raw_spin_lock_irqsave(&wakeup_lock, flags); ++ ++ cpu_wakeup_task = per_cpu(wakeup_task, cpu); ++ ++ if (wakeup_pid) { ++ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) || ++ p->prio == curr->prio) ++ per_cpu(wakeup_sharedprio, cpu) = 1; ++ if (likely(wakeup_pid != task_pid_nr(p))) ++ goto out; ++ } else { ++ if (likely(!rt_task(p)) || ++ (cpu_wakeup_task && p->prio > cpu_wakeup_task->prio) || ++ p->prio > curr->prio) ++ goto out; ++ if ((cpu_wakeup_task && p->prio == cpu_wakeup_task->prio) || ++ p->prio == curr->prio) ++ per_cpu(wakeup_sharedprio, cpu) = 1; ++ } ++ ++ if (cpu_wakeup_task) ++ put_task_struct(cpu_wakeup_task); ++ cpu_wakeup_task = per_cpu(wakeup_task, cpu) = p; ++ get_task_struct(cpu_wakeup_task); ++ cpu_wakeup_task->preempt_timestamp_hist = ++ ftrace_now(raw_smp_processor_id()); ++out: ++ raw_spin_unlock_irqrestore(&wakeup_lock, flags); ++} ++ ++static notrace void probe_wakeup_latency_hist_stop(void *v, ++ bool preempt, struct task_struct *prev, struct task_struct *next) ++{ ++ unsigned long flags; ++ int cpu = task_cpu(next); ++ long latency; ++ cycle_t stop; ++ struct task_struct *cpu_wakeup_task; ++ ++ raw_spin_lock_irqsave(&wakeup_lock, flags); ++ ++ cpu_wakeup_task = per_cpu(wakeup_task, cpu); ++ ++ if (cpu_wakeup_task == NULL) ++ goto out; ++ ++ /* Already running? */ ++ if (unlikely(current == cpu_wakeup_task)) ++ goto out_reset; ++ ++ if (next != cpu_wakeup_task) { ++ if (next->prio < cpu_wakeup_task->prio) ++ goto out_reset; ++ ++ if (next->prio == cpu_wakeup_task->prio) ++ per_cpu(wakeup_sharedprio, cpu) = 1; ++ ++ goto out; ++ } ++ ++ if (current->prio == cpu_wakeup_task->prio) ++ per_cpu(wakeup_sharedprio, cpu) = 1; ++ ++ /* ++ * The task we are waiting for is about to be switched to. ++ * Calculate latency and store it in histogram. 
++ */ ++ stop = ftrace_now(raw_smp_processor_id()); ++ ++ latency = ((long) (stop - next->preempt_timestamp_hist)) / ++ NSECS_PER_USECS; ++ ++ if (per_cpu(wakeup_sharedprio, cpu)) { ++ latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop, ++ next); ++ per_cpu(wakeup_sharedprio, cpu) = 0; ++ } else { ++ latency_hist(WAKEUP_LATENCY, cpu, latency, 0, stop, next); ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ if (timerandwakeup_enabled_data.enabled) { ++ latency_hist(TIMERANDWAKEUP_LATENCY, cpu, ++ next->timer_offset + latency, next->timer_offset, ++ stop, next); ++ } ++#endif ++ } ++ ++out_reset: ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ next->timer_offset = 0; ++#endif ++ put_task_struct(cpu_wakeup_task); ++ per_cpu(wakeup_task, cpu) = NULL; ++out: ++ raw_spin_unlock_irqrestore(&wakeup_lock, flags); ++} ++#endif ++ ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++static notrace void probe_hrtimer_interrupt(void *v, int cpu, ++ long long latency_ns, struct task_struct *curr, ++ struct task_struct *task) ++{ ++ if (latency_ns <= 0 && task != NULL && rt_task(task) && ++ (task->prio < curr->prio || ++ (task->prio == curr->prio && ++ !cpumask_test_cpu(cpu, &task->cpus_allowed)))) { ++ long latency; ++ cycle_t now; ++ ++ if (missed_timer_offsets_pid) { ++ if (likely(missed_timer_offsets_pid != ++ task_pid_nr(task))) ++ return; ++ } ++ ++ now = ftrace_now(cpu); ++ latency = (long) div_s64(-latency_ns, NSECS_PER_USECS); ++ latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now, ++ task); ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ task->timer_offset = latency; ++#endif ++ } ++} ++#endif ++ ++static __init int latency_hist_init(void) ++{ ++ struct dentry *latency_hist_root = NULL; ++ struct dentry *dentry; ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ struct dentry *dentry_sharedprio; ++#endif ++ struct dentry *entry; ++ struct dentry *enable_root; ++ int i = 0; ++ struct hist_data *my_hist; ++ char name[64]; ++ char *cpufmt = "CPU%d"; ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ char *cpufmt_maxlatproc = "max_latency-CPU%d"; ++ struct maxlatproc_data *mp = NULL; ++#endif ++ ++ dentry = tracing_init_dentry(); ++ latency_hist_root = debugfs_create_dir(latency_hist_dir_root, dentry); ++ enable_root = debugfs_create_dir("enable", latency_hist_root); ++ ++#ifdef CONFIG_INTERRUPT_OFF_HIST ++ dentry = debugfs_create_dir(irqsoff_hist_dir, latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(irqsoff_hist, i), &latency_hist_fops); ++ my_hist = &per_cpu(irqsoff_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ } ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)IRQSOFF_LATENCY, &latency_hist_reset_fops); ++#endif ++ ++#ifdef CONFIG_PREEMPT_OFF_HIST ++ dentry = debugfs_create_dir(preemptoff_hist_dir, ++ latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(preemptoff_hist, i), &latency_hist_fops); ++ my_hist = &per_cpu(preemptoff_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ } ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops); ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) && defined(CONFIG_PREEMPT_OFF_HIST) ++ dentry = debugfs_create_dir(preemptirqsoff_hist_dir, ++ latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, 
cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(preemptirqsoff_hist, i), &latency_hist_fops); ++ my_hist = &per_cpu(preemptirqsoff_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ } ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops); ++#endif ++ ++#if defined(CONFIG_INTERRUPT_OFF_HIST) || defined(CONFIG_PREEMPT_OFF_HIST) ++ entry = debugfs_create_file("preemptirqsoff", 0644, ++ enable_root, (void *)&preemptirqsoff_enabled_data, ++ &enable_fops); ++#endif ++ ++#ifdef CONFIG_WAKEUP_LATENCY_HIST ++ dentry = debugfs_create_dir(wakeup_latency_hist_dir, ++ latency_hist_root); ++ dentry_sharedprio = debugfs_create_dir( ++ wakeup_latency_hist_dir_sharedprio, dentry); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(wakeup_latency_hist, i), ++ &latency_hist_fops); ++ my_hist = &per_cpu(wakeup_latency_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ ++ entry = debugfs_create_file(name, 0444, dentry_sharedprio, ++ &per_cpu(wakeup_latency_hist_sharedprio, i), ++ &latency_hist_fops); ++ my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ ++ sprintf(name, cpufmt_maxlatproc, i); ++ ++ mp = &per_cpu(wakeup_maxlatproc, i); ++ entry = debugfs_create_file(name, 0444, dentry, mp, ++ &maxlatproc_fops); ++ clear_maxlatprocdata(mp); ++ ++ mp = &per_cpu(wakeup_maxlatproc_sharedprio, i); ++ entry = debugfs_create_file(name, 0444, dentry_sharedprio, mp, ++ &maxlatproc_fops); ++ clear_maxlatprocdata(mp); ++ } ++ entry = debugfs_create_file("pid", 0644, dentry, ++ (void *)&wakeup_pid, &pid_fops); ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)WAKEUP_LATENCY, &latency_hist_reset_fops); ++ entry = debugfs_create_file("reset", 0644, dentry_sharedprio, ++ (void *)WAKEUP_LATENCY_SHAREDPRIO, &latency_hist_reset_fops); ++ entry = debugfs_create_file("wakeup", 0644, ++ enable_root, (void *)&wakeup_latency_enabled_data, ++ &enable_fops); ++#endif ++ ++#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST ++ dentry = debugfs_create_dir(missed_timer_offsets_dir, ++ latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(missed_timer_offsets, i), &latency_hist_fops); ++ my_hist = &per_cpu(missed_timer_offsets, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ ++ sprintf(name, cpufmt_maxlatproc, i); ++ mp = &per_cpu(missed_timer_offsets_maxlatproc, i); ++ entry = debugfs_create_file(name, 0444, dentry, mp, ++ &maxlatproc_fops); ++ clear_maxlatprocdata(mp); ++ } ++ entry = debugfs_create_file("pid", 0644, dentry, ++ (void *)&missed_timer_offsets_pid, &pid_fops); ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)MISSED_TIMER_OFFSETS, &latency_hist_reset_fops); ++ entry = debugfs_create_file("missed_timer_offsets", 0644, ++ enable_root, (void *)&missed_timer_offsets_enabled_data, ++ &enable_fops); ++#endif ++ ++#if defined(CONFIG_WAKEUP_LATENCY_HIST) && \ ++ defined(CONFIG_MISSED_TIMER_OFFSETS_HIST) ++ dentry = debugfs_create_dir(timerandwakeup_latency_hist_dir, ++ latency_hist_root); ++ for_each_possible_cpu(i) { ++ sprintf(name, cpufmt, i); ++ entry = debugfs_create_file(name, 0444, dentry, ++ &per_cpu(timerandwakeup_latency_hist, i), ++ &latency_hist_fops); ++ my_hist = 
&per_cpu(timerandwakeup_latency_hist, i); ++ atomic_set(&my_hist->hist_mode, 1); ++ my_hist->min_lat = LONG_MAX; ++ ++ sprintf(name, cpufmt_maxlatproc, i); ++ mp = &per_cpu(timerandwakeup_maxlatproc, i); ++ entry = debugfs_create_file(name, 0444, dentry, mp, ++ &maxlatproc_fops); ++ clear_maxlatprocdata(mp); ++ } ++ entry = debugfs_create_file("reset", 0644, dentry, ++ (void *)TIMERANDWAKEUP_LATENCY, &latency_hist_reset_fops); ++ entry = debugfs_create_file("timerandwakeup", 0644, ++ enable_root, (void *)&timerandwakeup_enabled_data, ++ &enable_fops); ++#endif ++ return 0; ++} ++ ++device_initcall(latency_hist_init); +diff -Nur linux-4.4.13.orig/kernel/trace/Makefile linux-4.4.13/kernel/trace/Makefile +--- linux-4.4.13.orig/kernel/trace/Makefile 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/trace/Makefile 2016-06-22 21:55:08.066523867 +0200 +@@ -36,6 +36,10 @@ + obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o + obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o + obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o ++obj-$(CONFIG_INTERRUPT_OFF_HIST) += latency_hist.o ++obj-$(CONFIG_PREEMPT_OFF_HIST) += latency_hist.o ++obj-$(CONFIG_WAKEUP_LATENCY_HIST) += latency_hist.o ++obj-$(CONFIG_MISSED_TIMER_OFFSETS_HIST) += latency_hist.o + obj-$(CONFIG_NOP_TRACER) += trace_nop.o + obj-$(CONFIG_STACK_TRACER) += trace_stack.o + obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o +diff -Nur linux-4.4.13.orig/kernel/trace/trace.c linux-4.4.13/kernel/trace/trace.c +--- linux-4.4.13.orig/kernel/trace/trace.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/trace/trace.c 2016-06-22 21:55:08.066523867 +0200 +@@ -1652,6 +1652,7 @@ + struct task_struct *tsk = current; + + entry->preempt_count = pc & 0xff; ++ entry->preempt_lazy_count = preempt_lazy_count(); + entry->pid = (tsk) ? tsk->pid : 0; + entry->flags = + #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT +@@ -1661,8 +1662,11 @@ + #endif + ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | + ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | +- (tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) | ++ (tif_need_resched_now() ? TRACE_FLAG_NEED_RESCHED : 0) | ++ (need_resched_lazy() ? TRACE_FLAG_NEED_RESCHED_LAZY : 0) | + (test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0); ++ ++ entry->migrate_disable = (tsk) ? 
__migrate_disabled(tsk) & 0xFF : 0; + } + EXPORT_SYMBOL_GPL(tracing_generic_entry_update); + +@@ -2555,14 +2559,17 @@ + + static void print_lat_help_header(struct seq_file *m) + { +- seq_puts(m, "# _------=> CPU# \n" +- "# / _-----=> irqs-off \n" +- "# | / _----=> need-resched \n" +- "# || / _---=> hardirq/softirq \n" +- "# ||| / _--=> preempt-depth \n" +- "# |||| / delay \n" +- "# cmd pid ||||| time | caller \n" +- "# \\ / ||||| \\ | / \n"); ++ seq_puts(m, "# _--------=> CPU# \n" ++ "# / _-------=> irqs-off \n" ++ "# | / _------=> need-resched \n" ++ "# || / _-----=> need-resched_lazy \n" ++ "# ||| / _----=> hardirq/softirq \n" ++ "# |||| / _---=> preempt-depth \n" ++ "# ||||| / _--=> preempt-lazy-depth\n" ++ "# |||||| / _-=> migrate-disable \n" ++ "# ||||||| / delay \n" ++ "# cmd pid |||||||| time | caller \n" ++ "# \\ / |||||||| \\ | / \n"); + } + + static void print_event_info(struct trace_buffer *buf, struct seq_file *m) +@@ -2588,11 +2595,14 @@ + print_event_info(buf, m); + seq_puts(m, "# _-----=> irqs-off\n" + "# / _----=> need-resched\n" +- "# | / _---=> hardirq/softirq\n" +- "# || / _--=> preempt-depth\n" +- "# ||| / delay\n" +- "# TASK-PID CPU# |||| TIMESTAMP FUNCTION\n" +- "# | | | |||| | |\n"); ++ "# |/ _-----=> need-resched_lazy\n" ++ "# || / _---=> hardirq/softirq\n" ++ "# ||| / _--=> preempt-depth\n" ++ "# |||| /_--=> preempt-lazy-depth\n" ++ "# ||||| _-=> migrate-disable \n" ++ "# ||||| / delay\n" ++ "# TASK-PID CPU# |||||| TIMESTAMP FUNCTION\n" ++ "# | | | |||||| | |\n"); + } + + void +diff -Nur linux-4.4.13.orig/kernel/trace/trace_events.c linux-4.4.13/kernel/trace/trace_events.c +--- linux-4.4.13.orig/kernel/trace/trace_events.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/trace/trace_events.c 2016-06-22 21:55:08.070523867 +0200 +@@ -188,6 +188,8 @@ + __common_field(unsigned char, flags); + __common_field(unsigned char, preempt_count); + __common_field(int, pid); ++ __common_field(unsigned short, migrate_disable); ++ __common_field(unsigned short, padding); + + return ret; + } +diff -Nur linux-4.4.13.orig/kernel/trace/trace.h linux-4.4.13/kernel/trace/trace.h +--- linux-4.4.13.orig/kernel/trace/trace.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/trace/trace.h 2016-06-22 21:55:08.070523867 +0200 +@@ -117,6 +117,7 @@ + * NEED_RESCHED - reschedule is requested + * HARDIRQ - inside an interrupt handler + * SOFTIRQ - inside a softirq handler ++ * NEED_RESCHED_LAZY - lazy reschedule is requested + */ + enum trace_flag_type { + TRACE_FLAG_IRQS_OFF = 0x01, +@@ -125,6 +126,7 @@ + TRACE_FLAG_HARDIRQ = 0x08, + TRACE_FLAG_SOFTIRQ = 0x10, + TRACE_FLAG_PREEMPT_RESCHED = 0x20, ++ TRACE_FLAG_NEED_RESCHED_LAZY = 0x40, + }; + + #define TRACE_BUF_SIZE 1024 +diff -Nur linux-4.4.13.orig/kernel/trace/trace_irqsoff.c linux-4.4.13/kernel/trace/trace_irqsoff.c +--- linux-4.4.13.orig/kernel/trace/trace_irqsoff.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/trace/trace_irqsoff.c 2016-06-22 21:55:08.070523867 +0200 +@@ -13,6 +13,7 @@ + #include + #include + #include ++#include + + #include "trace.h" + +@@ -424,11 +425,13 @@ + { + if (preempt_trace() || irq_trace()) + start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); ++ trace_preemptirqsoff_hist_rcuidle(TRACE_START, 1); + } + EXPORT_SYMBOL_GPL(start_critical_timings); + + void stop_critical_timings(void) + { ++ trace_preemptirqsoff_hist_rcuidle(TRACE_STOP, 0); + if (preempt_trace() || irq_trace()) + stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); + } +@@ -438,6 +441,7 @@ + #ifdef 
CONFIG_PROVE_LOCKING + void time_hardirqs_on(unsigned long a0, unsigned long a1) + { ++ trace_preemptirqsoff_hist_rcuidle(IRQS_ON, 0); + if (!preempt_trace() && irq_trace()) + stop_critical_timing(a0, a1); + } +@@ -446,6 +450,7 @@ + { + if (!preempt_trace() && irq_trace()) + start_critical_timing(a0, a1); ++ trace_preemptirqsoff_hist_rcuidle(IRQS_OFF, 1); + } + + #else /* !CONFIG_PROVE_LOCKING */ +@@ -471,6 +476,7 @@ + */ + void trace_hardirqs_on(void) + { ++ trace_preemptirqsoff_hist(IRQS_ON, 0); + if (!preempt_trace() && irq_trace()) + stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1); + } +@@ -480,11 +486,13 @@ + { + if (!preempt_trace() && irq_trace()) + start_critical_timing(CALLER_ADDR0, CALLER_ADDR1); ++ trace_preemptirqsoff_hist(IRQS_OFF, 1); + } + EXPORT_SYMBOL(trace_hardirqs_off); + + __visible void trace_hardirqs_on_caller(unsigned long caller_addr) + { ++ trace_preemptirqsoff_hist(IRQS_ON, 0); + if (!preempt_trace() && irq_trace()) + stop_critical_timing(CALLER_ADDR0, caller_addr); + } +@@ -494,6 +502,7 @@ + { + if (!preempt_trace() && irq_trace()) + start_critical_timing(CALLER_ADDR0, caller_addr); ++ trace_preemptirqsoff_hist(IRQS_OFF, 1); + } + EXPORT_SYMBOL(trace_hardirqs_off_caller); + +@@ -503,12 +512,14 @@ + #ifdef CONFIG_PREEMPT_TRACER + void trace_preempt_on(unsigned long a0, unsigned long a1) + { ++ trace_preemptirqsoff_hist(PREEMPT_ON, 0); + if (preempt_trace() && !irq_trace()) + stop_critical_timing(a0, a1); + } + + void trace_preempt_off(unsigned long a0, unsigned long a1) + { ++ trace_preemptirqsoff_hist(PREEMPT_ON, 1); + if (preempt_trace() && !irq_trace()) + start_critical_timing(a0, a1); + } +diff -Nur linux-4.4.13.orig/kernel/trace/trace_output.c linux-4.4.13/kernel/trace/trace_output.c +--- linux-4.4.13.orig/kernel/trace/trace_output.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/trace/trace_output.c 2016-06-22 21:55:08.070523867 +0200 +@@ -386,6 +386,7 @@ + { + char hardsoft_irq; + char need_resched; ++ char need_resched_lazy; + char irqs_off; + int hardirq; + int softirq; +@@ -413,6 +414,8 @@ + need_resched = '.'; + break; + } ++ need_resched_lazy = ++ (entry->flags & TRACE_FLAG_NEED_RESCHED_LAZY) ? 'L' : '.'; + + hardsoft_irq = + (hardirq && softirq) ? 'H' : +@@ -420,14 +423,25 @@ + softirq ? 
's' : + '.'; + +- trace_seq_printf(s, "%c%c%c", +- irqs_off, need_resched, hardsoft_irq); ++ trace_seq_printf(s, "%c%c%c%c", ++ irqs_off, need_resched, need_resched_lazy, ++ hardsoft_irq); + + if (entry->preempt_count) + trace_seq_printf(s, "%x", entry->preempt_count); + else + trace_seq_putc(s, '.'); + ++ if (entry->preempt_lazy_count) ++ trace_seq_printf(s, "%x", entry->preempt_lazy_count); ++ else ++ trace_seq_putc(s, '.'); ++ ++ if (entry->migrate_disable) ++ trace_seq_printf(s, "%x", entry->migrate_disable); ++ else ++ trace_seq_putc(s, '.'); ++ + return !trace_seq_has_overflowed(s); + } + +diff -Nur linux-4.4.13.orig/kernel/user.c linux-4.4.13/kernel/user.c +--- linux-4.4.13.orig/kernel/user.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/user.c 2016-06-22 21:55:08.070523867 +0200 +@@ -161,11 +161,11 @@ + if (!up) + return; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + if (atomic_dec_and_lock(&up->__count, &uidhash_lock)) + free_user(up, flags); + else +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + + struct user_struct *alloc_uid(kuid_t uid) +diff -Nur linux-4.4.13.orig/kernel/watchdog.c linux-4.4.13/kernel/watchdog.c +--- linux-4.4.13.orig/kernel/watchdog.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/watchdog.c 2016-06-22 21:55:08.070523867 +0200 +@@ -299,6 +299,8 @@ + + #ifdef CONFIG_HARDLOCKUP_DETECTOR + ++static DEFINE_RAW_SPINLOCK(watchdog_output_lock); ++ + static struct perf_event_attr wd_hw_attr = { + .type = PERF_TYPE_HARDWARE, + .config = PERF_COUNT_HW_CPU_CYCLES, +@@ -333,6 +335,13 @@ + /* only print hardlockups once */ + if (__this_cpu_read(hard_watchdog_warn) == true) + return; ++ /* ++ * If early-printk is enabled then make sure we do not ++ * lock up in printk() and kill console logging: ++ */ ++ printk_kill(); ++ ++ raw_spin_lock(&watchdog_output_lock); + + pr_emerg("Watchdog detected hard LOCKUP on cpu %d", this_cpu); + print_modules(); +@@ -350,8 +359,9 @@ + !test_and_set_bit(0, &hardlockup_allcpu_dumped)) + trigger_allbutself_cpu_backtrace(); + ++ raw_spin_unlock(&watchdog_output_lock); + if (hardlockup_panic) +- panic("Hard LOCKUP"); ++ nmi_panic(regs, "Hard LOCKUP"); + + __this_cpu_write(hard_watchdog_warn, true); + return; +@@ -497,6 +507,7 @@ + /* kick off the timer for the hardlockup detector */ + hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + hrtimer->function = watchdog_timer_fn; ++ hrtimer->irqsafe = 1; + + /* Enable the perf event */ + watchdog_nmi_enable(cpu); +diff -Nur linux-4.4.13.orig/kernel/workqueue.c linux-4.4.13/kernel/workqueue.c +--- linux-4.4.13.orig/kernel/workqueue.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/workqueue.c 2016-06-22 21:55:08.070523867 +0200 +@@ -48,6 +48,8 @@ + #include + #include + #include ++#include ++#include + + #include "workqueue_internal.h" + +@@ -121,11 +123,16 @@ + * cpu or grabbing pool->lock is enough for read access. If + * POOL_DISASSOCIATED is set, it's identical to L. + * ++ * On RT we need the extra protection via rt_lock_idle_list() for ++ * the list manipulations against read access from ++ * wq_worker_sleeping(). All other places are nicely serialized via ++ * pool->lock. ++ * + * A: pool->attach_mutex protected. + * + * PL: wq_pool_mutex protected. + * +- * PR: wq_pool_mutex protected for writes. Sched-RCU protected for reads. ++ * PR: wq_pool_mutex protected for writes. RCU protected for reads. + * + * PW: wq_pool_mutex and wq->mutex protected for writes. Either for reads. 
+ * +@@ -134,7 +141,7 @@ + * + * WQ: wq->mutex protected. + * +- * WR: wq->mutex protected for writes. Sched-RCU protected for reads. ++ * WR: wq->mutex protected for writes. RCU protected for reads. + * + * MD: wq_mayday_lock protected. + */ +@@ -183,7 +190,7 @@ + atomic_t nr_running ____cacheline_aligned_in_smp; + + /* +- * Destruction of pool is sched-RCU protected to allow dereferences ++ * Destruction of pool is RCU protected to allow dereferences + * from get_work_pool(). + */ + struct rcu_head rcu; +@@ -212,7 +219,7 @@ + /* + * Release of unbound pwq is punted to system_wq. See put_pwq() + * and pwq_unbound_release_workfn() for details. pool_workqueue +- * itself is also sched-RCU protected so that the first pwq can be ++ * itself is also RCU protected so that the first pwq can be + * determined without grabbing wq->mutex. + */ + struct work_struct unbound_release_work; +@@ -331,6 +338,8 @@ + struct workqueue_struct *system_freezable_power_efficient_wq __read_mostly; + EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq); + ++static DEFINE_LOCAL_IRQ_LOCK(pendingb_lock); ++ + static int worker_thread(void *__worker); + static void workqueue_sysfs_unregister(struct workqueue_struct *wq); + +@@ -338,20 +347,20 @@ + #include + + #define assert_rcu_or_pool_mutex() \ +- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ ++ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ + !lockdep_is_held(&wq_pool_mutex), \ +- "sched RCU or wq_pool_mutex should be held") ++ "RCU or wq_pool_mutex should be held") + + #define assert_rcu_or_wq_mutex(wq) \ +- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ ++ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ + !lockdep_is_held(&wq->mutex), \ +- "sched RCU or wq->mutex should be held") ++ "RCU or wq->mutex should be held") + + #define assert_rcu_or_wq_mutex_or_pool_mutex(wq) \ +- RCU_LOCKDEP_WARN(!rcu_read_lock_sched_held() && \ ++ RCU_LOCKDEP_WARN(!rcu_read_lock_held() && \ + !lockdep_is_held(&wq->mutex) && \ + !lockdep_is_held(&wq_pool_mutex), \ +- "sched RCU, wq->mutex or wq_pool_mutex should be held") ++ "RCU, wq->mutex or wq_pool_mutex should be held") + + #define for_each_cpu_worker_pool(pool, cpu) \ + for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \ +@@ -363,7 +372,7 @@ + * @pool: iteration cursor + * @pi: integer used for iteration + * +- * This must be called either with wq_pool_mutex held or sched RCU read ++ * This must be called either with wq_pool_mutex held or RCU read + * locked. If the pool needs to be used beyond the locking in effect, the + * caller is responsible for guaranteeing that the pool stays online. + * +@@ -395,7 +404,7 @@ + * @pwq: iteration cursor + * @wq: the target workqueue + * +- * This must be called either with wq->mutex held or sched RCU read locked. ++ * This must be called either with wq->mutex held or RCU read locked. + * If the pwq needs to be used beyond the locking in effect, the caller is + * responsible for guaranteeing that the pwq stays online. 
+ * +@@ -407,6 +416,31 @@ + if (({ assert_rcu_or_wq_mutex(wq); false; })) { } \ + else + ++#ifdef CONFIG_PREEMPT_RT_BASE ++static inline void rt_lock_idle_list(struct worker_pool *pool) ++{ ++ preempt_disable(); ++} ++static inline void rt_unlock_idle_list(struct worker_pool *pool) ++{ ++ preempt_enable(); ++} ++static inline void sched_lock_idle_list(struct worker_pool *pool) { } ++static inline void sched_unlock_idle_list(struct worker_pool *pool) { } ++#else ++static inline void rt_lock_idle_list(struct worker_pool *pool) { } ++static inline void rt_unlock_idle_list(struct worker_pool *pool) { } ++static inline void sched_lock_idle_list(struct worker_pool *pool) ++{ ++ spin_lock_irq(&pool->lock); ++} ++static inline void sched_unlock_idle_list(struct worker_pool *pool) ++{ ++ spin_unlock_irq(&pool->lock); ++} ++#endif ++ ++ + #ifdef CONFIG_DEBUG_OBJECTS_WORK + + static struct debug_obj_descr work_debug_descr; +@@ -557,7 +591,7 @@ + * @wq: the target workqueue + * @node: the node ID + * +- * This must be called with any of wq_pool_mutex, wq->mutex or sched RCU ++ * This must be called with any of wq_pool_mutex, wq->mutex or RCU + * read locked. + * If the pwq needs to be used beyond the locking in effect, the caller is + * responsible for guaranteeing that the pwq stays online. +@@ -701,8 +735,8 @@ + * @work: the work item of interest + * + * Pools are created and destroyed under wq_pool_mutex, and allows read +- * access under sched-RCU read lock. As such, this function should be +- * called under wq_pool_mutex or with preemption disabled. ++ * access under RCU read lock. As such, this function should be ++ * called under wq_pool_mutex or inside of a rcu_read_lock() region. + * + * All fields of the returned pool are accessible as long as the above + * mentioned locking is in effect. If the returned pool needs to be used +@@ -839,51 +873,44 @@ + */ + static void wake_up_worker(struct worker_pool *pool) + { +- struct worker *worker = first_idle_worker(pool); ++ struct worker *worker; ++ ++ rt_lock_idle_list(pool); ++ ++ worker = first_idle_worker(pool); + + if (likely(worker)) + wake_up_process(worker->task); ++ ++ rt_unlock_idle_list(pool); + } + + /** +- * wq_worker_waking_up - a worker is waking up +- * @task: task waking up +- * @cpu: CPU @task is waking up to ++ * wq_worker_running - a worker is running again ++ * @task: task returning from sleep + * +- * This function is called during try_to_wake_up() when a worker is +- * being awoken. +- * +- * CONTEXT: +- * spin_lock_irq(rq->lock) ++ * This function is called when a worker returns from schedule() + */ +-void wq_worker_waking_up(struct task_struct *task, int cpu) ++void wq_worker_running(struct task_struct *task) + { + struct worker *worker = kthread_data(task); + +- if (!(worker->flags & WORKER_NOT_RUNNING)) { +- WARN_ON_ONCE(worker->pool->cpu != cpu); ++ if (!worker->sleeping) ++ return; ++ if (!(worker->flags & WORKER_NOT_RUNNING)) + atomic_inc(&worker->pool->nr_running); +- } ++ worker->sleeping = 0; + } + + /** + * wq_worker_sleeping - a worker is going to sleep + * @task: task going to sleep +- * @cpu: CPU in question, must be the current CPU number +- * +- * This function is called during schedule() when a busy worker is +- * going to sleep. Worker on the same cpu can be woken up by +- * returning pointer to its task. +- * +- * CONTEXT: +- * spin_lock_irq(rq->lock) +- * +- * Return: +- * Worker task on @cpu to wake up, %NULL if none. ++ * This function is called from schedule() when a busy worker is ++ * going to sleep. 
+ */ +-struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu) ++void wq_worker_sleeping(struct task_struct *task) + { +- struct worker *worker = kthread_data(task), *to_wakeup = NULL; ++ struct worker *worker = kthread_data(task); + struct worker_pool *pool; + + /* +@@ -892,29 +919,26 @@ + * checking NOT_RUNNING. + */ + if (worker->flags & WORKER_NOT_RUNNING) +- return NULL; ++ return; + + pool = worker->pool; + +- /* this can only happen on the local cpu */ +- if (WARN_ON_ONCE(cpu != raw_smp_processor_id() || pool->cpu != cpu)) +- return NULL; ++ if (WARN_ON_ONCE(worker->sleeping)) ++ return; ++ ++ worker->sleeping = 1; + + /* + * The counterpart of the following dec_and_test, implied mb, + * worklist not empty test sequence is in insert_work(). + * Please read comment there. +- * +- * NOT_RUNNING is clear. This means that we're bound to and +- * running on the local cpu w/ rq lock held and preemption +- * disabled, which in turn means that none else could be +- * manipulating idle_list, so dereferencing idle_list without pool +- * lock is safe. + */ + if (atomic_dec_and_test(&pool->nr_running) && +- !list_empty(&pool->worklist)) +- to_wakeup = first_idle_worker(pool); +- return to_wakeup ? to_wakeup->task : NULL; ++ !list_empty(&pool->worklist)) { ++ sched_lock_idle_list(pool); ++ wake_up_worker(pool); ++ sched_unlock_idle_list(pool); ++ } + } + + /** +@@ -1108,12 +1132,12 @@ + { + if (pwq) { + /* +- * As both pwqs and pools are sched-RCU protected, the ++ * As both pwqs and pools are RCU protected, the + * following lock operations are safe. + */ +- spin_lock_irq(&pwq->pool->lock); ++ local_spin_lock_irq(pendingb_lock, &pwq->pool->lock); + put_pwq(pwq); +- spin_unlock_irq(&pwq->pool->lock); ++ local_spin_unlock_irq(pendingb_lock, &pwq->pool->lock); + } + } + +@@ -1215,7 +1239,7 @@ + struct worker_pool *pool; + struct pool_workqueue *pwq; + +- local_irq_save(*flags); ++ local_lock_irqsave(pendingb_lock, *flags); + + /* try to steal the timer if it exists */ + if (is_dwork) { +@@ -1234,6 +1258,7 @@ + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) + return 0; + ++ rcu_read_lock(); + /* + * The queueing is in progress, or it is already queued. Try to + * steal it from ->worklist without clearing WORK_STRUCT_PENDING. +@@ -1272,14 +1297,16 @@ + set_work_pool_and_keep_pending(work, pool->id); + + spin_unlock(&pool->lock); ++ rcu_read_unlock(); + return 1; + } + spin_unlock(&pool->lock); + fail: +- local_irq_restore(*flags); ++ rcu_read_unlock(); ++ local_unlock_irqrestore(pendingb_lock, *flags); + if (work_is_canceling(work)) + return -ENOENT; +- cpu_relax(); ++ cpu_chill(); + return -EAGAIN; + } + +@@ -1348,7 +1375,7 @@ + * queued or lose PENDING. Grabbing PENDING and queueing should + * happen with IRQ disabled. 
+ */ +- WARN_ON_ONCE(!irqs_disabled()); ++ WARN_ON_ONCE_NONRT(!irqs_disabled()); + + debug_work_activate(work); + +@@ -1356,6 +1383,8 @@ + if (unlikely(wq->flags & __WQ_DRAINING) && + WARN_ON_ONCE(!is_chained_work(wq))) + return; ++ ++ rcu_read_lock(); + retry: + if (req_cpu == WORK_CPU_UNBOUND) + cpu = raw_smp_processor_id(); +@@ -1412,10 +1441,8 @@ + /* pwq determined, queue */ + trace_workqueue_queue_work(req_cpu, pwq, work); + +- if (WARN_ON(!list_empty(&work->entry))) { +- spin_unlock(&pwq->pool->lock); +- return; +- } ++ if (WARN_ON(!list_empty(&work->entry))) ++ goto out; + + pwq->nr_in_flight[pwq->work_color]++; + work_flags = work_color_to_flags(pwq->work_color); +@@ -1431,7 +1458,9 @@ + + insert_work(pwq, work, worklist, work_flags); + ++out: + spin_unlock(&pwq->pool->lock); ++ rcu_read_unlock(); + } + + /** +@@ -1451,14 +1480,14 @@ + bool ret = false; + unsigned long flags; + +- local_irq_save(flags); ++ local_lock_irqsave(pendingb_lock,flags); + + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { + __queue_work(cpu, wq, work); + ret = true; + } + +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + return ret; + } + EXPORT_SYMBOL(queue_work_on); +@@ -1525,14 +1554,14 @@ + unsigned long flags; + + /* read the comment in __queue_work() */ +- local_irq_save(flags); ++ local_lock_irqsave(pendingb_lock, flags); + + if (!test_and_set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(work))) { + __queue_delayed_work(cpu, wq, dwork, delay); + ret = true; + } + +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + return ret; + } + EXPORT_SYMBOL(queue_delayed_work_on); +@@ -1567,7 +1596,7 @@ + + if (likely(ret >= 0)) { + __queue_delayed_work(cpu, wq, dwork, delay); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + } + + /* -ENOENT from try_to_grab_pending() becomes %true */ +@@ -1600,7 +1629,9 @@ + worker->last_active = jiffies; + + /* idle_list is LIFO */ ++ rt_lock_idle_list(pool); + list_add(&worker->entry, &pool->idle_list); ++ rt_unlock_idle_list(pool); + + if (too_many_workers(pool) && !timer_pending(&pool->idle_timer)) + mod_timer(&pool->idle_timer, jiffies + IDLE_WORKER_TIMEOUT); +@@ -1633,7 +1664,9 @@ + return; + worker_clr_flags(worker, WORKER_IDLE); + pool->nr_idle--; ++ rt_lock_idle_list(pool); + list_del_init(&worker->entry); ++ rt_unlock_idle_list(pool); + } + + static struct worker *alloc_worker(int node) +@@ -1799,7 +1832,9 @@ + pool->nr_workers--; + pool->nr_idle--; + ++ rt_lock_idle_list(pool); + list_del_init(&worker->entry); ++ rt_unlock_idle_list(pool); + worker->flags |= WORKER_DIE; + wake_up_process(worker->task); + } +@@ -2716,14 +2751,14 @@ + + might_sleep(); + +- local_irq_disable(); ++ rcu_read_lock(); + pool = get_work_pool(work); + if (!pool) { +- local_irq_enable(); ++ rcu_read_unlock(); + return false; + } + +- spin_lock(&pool->lock); ++ spin_lock_irq(&pool->lock); + /* see the comment in try_to_grab_pending() with the same code */ + pwq = get_work_pwq(work); + if (pwq) { +@@ -2750,10 +2785,11 @@ + else + lock_map_acquire_read(&pwq->wq->lockdep_map); + lock_map_release(&pwq->wq->lockdep_map); +- ++ rcu_read_unlock(); + return true; + already_gone: + spin_unlock_irq(&pool->lock); ++ rcu_read_unlock(); + return false; + } + +@@ -2840,7 +2876,7 @@ + + /* tell other tasks trying to grab @work to back off */ + mark_work_canceling(work); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + + flush_work(work); + clear_work_data(work); 
+@@ -2895,10 +2931,10 @@ + */ + bool flush_delayed_work(struct delayed_work *dwork) + { +- local_irq_disable(); ++ local_lock_irq(pendingb_lock); + if (del_timer_sync(&dwork->timer)) + __queue_work(dwork->cpu, dwork->wq, &dwork->work); +- local_irq_enable(); ++ local_unlock_irq(pendingb_lock); + return flush_work(&dwork->work); + } + EXPORT_SYMBOL(flush_delayed_work); +@@ -2933,7 +2969,7 @@ + + set_work_pool_and_clear_pending(&dwork->work, + get_work_pool_id(&dwork->work)); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pendingb_lock, flags); + return ret; + } + EXPORT_SYMBOL(cancel_delayed_work); +@@ -3161,7 +3197,7 @@ + * put_unbound_pool - put a worker_pool + * @pool: worker_pool to put + * +- * Put @pool. If its refcnt reaches zero, it gets destroyed in sched-RCU ++ * Put @pool. If its refcnt reaches zero, it gets destroyed in RCU + * safe manner. get_unbound_pool() calls this function on its failure path + * and this function should be able to release pools which went through, + * successfully or not, init_worker_pool(). +@@ -3215,8 +3251,8 @@ + del_timer_sync(&pool->idle_timer); + del_timer_sync(&pool->mayday_timer); + +- /* sched-RCU protected to allow dereferences from get_work_pool() */ +- call_rcu_sched(&pool->rcu, rcu_free_pool); ++ /* RCU protected to allow dereferences from get_work_pool() */ ++ call_rcu(&pool->rcu, rcu_free_pool); + } + + /** +@@ -3323,14 +3359,14 @@ + put_unbound_pool(pool); + mutex_unlock(&wq_pool_mutex); + +- call_rcu_sched(&pwq->rcu, rcu_free_pwq); ++ call_rcu(&pwq->rcu, rcu_free_pwq); + + /* + * If we're the last pwq going away, @wq is already dead and no one + * is gonna access it anymore. Schedule RCU free. + */ + if (is_last) +- call_rcu_sched(&wq->rcu, rcu_free_wq); ++ call_rcu(&wq->rcu, rcu_free_wq); + } + + /** +@@ -3983,7 +4019,7 @@ + * The base ref is never dropped on per-cpu pwqs. Directly + * schedule RCU free. + */ +- call_rcu_sched(&wq->rcu, rcu_free_wq); ++ call_rcu(&wq->rcu, rcu_free_wq); + } else { + /* + * We're the sole accessor of @wq at this point. Directly +@@ -4076,7 +4112,8 @@ + struct pool_workqueue *pwq; + bool ret; + +- rcu_read_lock_sched(); ++ rcu_read_lock(); ++ preempt_disable(); + + if (cpu == WORK_CPU_UNBOUND) + cpu = smp_processor_id(); +@@ -4087,7 +4124,8 @@ + pwq = unbound_pwq_by_node(wq, cpu_to_node(cpu)); + + ret = !list_empty(&pwq->delayed_works); +- rcu_read_unlock_sched(); ++ preempt_enable(); ++ rcu_read_unlock(); + + return ret; + } +@@ -4113,15 +4151,15 @@ + if (work_pending(work)) + ret |= WORK_BUSY_PENDING; + +- local_irq_save(flags); ++ rcu_read_lock(); + pool = get_work_pool(work); + if (pool) { +- spin_lock(&pool->lock); ++ spin_lock_irqsave(&pool->lock, flags); + if (find_worker_executing_work(pool, work)) + ret |= WORK_BUSY_RUNNING; +- spin_unlock(&pool->lock); ++ spin_unlock_irqrestore(&pool->lock, flags); + } +- local_irq_restore(flags); ++ rcu_read_unlock(); + + return ret; + } +@@ -4310,7 +4348,7 @@ + unsigned long flags; + int pi; + +- rcu_read_lock_sched(); ++ rcu_read_lock(); + + pr_info("Showing busy workqueues and worker pools:\n"); + +@@ -4361,7 +4399,7 @@ + spin_unlock_irqrestore(&pool->lock, flags); + } + +- rcu_read_unlock_sched(); ++ rcu_read_unlock(); + } + + /* +@@ -4722,16 +4760,16 @@ + * nr_active is monotonically decreasing. It's safe + * to peek without lock. 
+ */ +- rcu_read_lock_sched(); ++ rcu_read_lock(); + for_each_pwq(pwq, wq) { + WARN_ON_ONCE(pwq->nr_active < 0); + if (pwq->nr_active) { + busy = true; +- rcu_read_unlock_sched(); ++ rcu_read_unlock(); + goto out_unlock; + } + } +- rcu_read_unlock_sched(); ++ rcu_read_unlock(); + } + out_unlock: + mutex_unlock(&wq_pool_mutex); +@@ -4921,7 +4959,8 @@ + const char *delim = ""; + int node, written = 0; + +- rcu_read_lock_sched(); ++ get_online_cpus(); ++ rcu_read_lock(); + for_each_node(node) { + written += scnprintf(buf + written, PAGE_SIZE - written, + "%s%d:%d", delim, node, +@@ -4929,7 +4968,8 @@ + delim = " "; + } + written += scnprintf(buf + written, PAGE_SIZE - written, "\n"); +- rcu_read_unlock_sched(); ++ rcu_read_unlock(); ++ put_online_cpus(); + + return written; + } +diff -Nur linux-4.4.13.orig/kernel/workqueue_internal.h linux-4.4.13/kernel/workqueue_internal.h +--- linux-4.4.13.orig/kernel/workqueue_internal.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/kernel/workqueue_internal.h 2016-06-22 21:55:08.070523867 +0200 +@@ -43,6 +43,7 @@ + unsigned long last_active; /* L: last active timestamp */ + unsigned int flags; /* X: flags */ + int id; /* I: worker id */ ++ int sleeping; /* None */ + + /* + * Opaque string set with work_set_desc(). Printed out with task +@@ -68,7 +69,7 @@ + * Scheduler hooks for concurrency managed workqueue. Only to be used from + * sched/core.c and workqueue.c. + */ +-void wq_worker_waking_up(struct task_struct *task, int cpu); +-struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu); ++void wq_worker_running(struct task_struct *task); ++void wq_worker_sleeping(struct task_struct *task); + + #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */ +diff -Nur linux-4.4.13.orig/lib/debugobjects.c linux-4.4.13/lib/debugobjects.c +--- linux-4.4.13.orig/lib/debugobjects.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/lib/debugobjects.c 2016-06-22 21:55:08.070523867 +0200 +@@ -309,7 +309,10 @@ + struct debug_obj *obj; + unsigned long flags; + +- fill_pool(); ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (preempt_count() == 0 && !irqs_disabled()) ++#endif ++ fill_pool(); + + db = get_bucket((unsigned long) addr); + +diff -Nur linux-4.4.13.orig/lib/idr.c linux-4.4.13/lib/idr.c +--- linux-4.4.13.orig/lib/idr.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/lib/idr.c 2016-06-22 21:55:08.070523867 +0200 +@@ -30,6 +30,7 @@ + #include + #include + #include ++#include + + #define MAX_IDR_SHIFT (sizeof(int) * 8 - 1) + #define MAX_IDR_BIT (1U << MAX_IDR_SHIFT) +@@ -45,6 +46,37 @@ + static DEFINE_PER_CPU(int, idr_preload_cnt); + static DEFINE_SPINLOCK(simple_ida_lock); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++static DEFINE_LOCAL_IRQ_LOCK(idr_lock); ++ ++static inline void idr_preload_lock(void) ++{ ++ local_lock(idr_lock); ++} ++ ++static inline void idr_preload_unlock(void) ++{ ++ local_unlock(idr_lock); ++} ++ ++void idr_preload_end(void) ++{ ++ idr_preload_unlock(); ++} ++EXPORT_SYMBOL(idr_preload_end); ++#else ++static inline void idr_preload_lock(void) ++{ ++ preempt_disable(); ++} ++ ++static inline void idr_preload_unlock(void) ++{ ++ preempt_enable(); ++} ++#endif ++ ++ + /* the maximum ID which can be allocated given idr->layers */ + static int idr_max(int layers) + { +@@ -115,14 +147,14 @@ + * context. See idr_preload() for details. 
+ */ + if (!in_interrupt()) { +- preempt_disable(); ++ idr_preload_lock(); + new = __this_cpu_read(idr_preload_head); + if (new) { + __this_cpu_write(idr_preload_head, new->ary[0]); + __this_cpu_dec(idr_preload_cnt); + new->ary[0] = NULL; + } +- preempt_enable(); ++ idr_preload_unlock(); + if (new) + return new; + } +@@ -366,7 +398,6 @@ + idr_mark_full(pa, id); + } + +- + /** + * idr_preload - preload for idr_alloc() + * @gfp_mask: allocation mask to use for preloading +@@ -401,7 +432,7 @@ + WARN_ON_ONCE(in_interrupt()); + might_sleep_if(gfpflags_allow_blocking(gfp_mask)); + +- preempt_disable(); ++ idr_preload_lock(); + + /* + * idr_alloc() is likely to succeed w/o full idr_layer buffer and +@@ -413,9 +444,9 @@ + while (__this_cpu_read(idr_preload_cnt) < MAX_IDR_FREE) { + struct idr_layer *new; + +- preempt_enable(); ++ idr_preload_unlock(); + new = kmem_cache_zalloc(idr_layer_cache, gfp_mask); +- preempt_disable(); ++ idr_preload_lock(); + if (!new) + break; + +diff -Nur linux-4.4.13.orig/lib/Kconfig linux-4.4.13/lib/Kconfig +--- linux-4.4.13.orig/lib/Kconfig 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/lib/Kconfig 2016-06-22 21:55:08.070523867 +0200 +@@ -397,6 +397,7 @@ + + config CPUMASK_OFFSTACK + bool "Force CPU masks off stack" if DEBUG_PER_CPU_MAPS ++ depends on !PREEMPT_RT_FULL + help + Use dynamic allocation for cpumask_var_t, instead of putting + them on the stack. This is a bit more expensive, but avoids +diff -Nur linux-4.4.13.orig/lib/locking-selftest.c linux-4.4.13/lib/locking-selftest.c +--- linux-4.4.13.orig/lib/locking-selftest.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/lib/locking-selftest.c 2016-06-22 21:55:08.070523867 +0200 +@@ -590,6 +590,8 @@ + #include "locking-selftest-spin-hardirq.h" + GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_spin) + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + #include "locking-selftest-rlock-hardirq.h" + GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock) + +@@ -605,9 +607,12 @@ + #include "locking-selftest-wlock-softirq.h" + GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock) + ++#endif ++ + #undef E1 + #undef E2 + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Enabling hardirqs with a softirq-safe lock held: + */ +@@ -640,6 +645,8 @@ + #undef E1 + #undef E2 + ++#endif ++ + /* + * Enabling irqs with an irq-safe lock held: + */ +@@ -663,6 +670,8 @@ + #include "locking-selftest-spin-hardirq.h" + GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_spin) + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + #include "locking-selftest-rlock-hardirq.h" + GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock) + +@@ -678,6 +687,8 @@ + #include "locking-selftest-wlock-softirq.h" + GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock) + ++#endif ++ + #undef E1 + #undef E2 + +@@ -709,6 +720,8 @@ + #include "locking-selftest-spin-hardirq.h" + GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_spin) + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + #include "locking-selftest-rlock-hardirq.h" + GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock) + +@@ -724,6 +737,8 @@ + #include "locking-selftest-wlock-softirq.h" + GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock) + ++#endif ++ + #undef E1 + #undef E2 + #undef E3 +@@ -757,6 +772,8 @@ + #include "locking-selftest-spin-hardirq.h" + GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_spin) + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + #include "locking-selftest-rlock-hardirq.h" + GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock) + +@@ -772,10 +789,14 @@ + #include "locking-selftest-wlock-softirq.h" + 
GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock) + ++#endif ++ + #undef E1 + #undef E2 + #undef E3 + ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + /* + * read-lock / write-lock irq inversion. + * +@@ -838,6 +859,10 @@ + #undef E2 + #undef E3 + ++#endif ++ ++#ifndef CONFIG_PREEMPT_RT_FULL ++ + /* + * read-lock / write-lock recursion that is actually safe. + */ +@@ -876,6 +901,8 @@ + #undef E2 + #undef E3 + ++#endif ++ + /* + * read-lock / write-lock recursion that is unsafe. + */ +@@ -1858,6 +1885,7 @@ + + printk(" --------------------------------------------------------------------------\n"); + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * irq-context testcases: + */ +@@ -1870,6 +1898,28 @@ + + DO_TESTCASE_6x2("irq read-recursion", irq_read_recursion); + // DO_TESTCASE_6x2B("irq read-recursion #2", irq_read_recursion2); ++#else ++ /* On -rt, we only do hardirq context test for raw spinlock */ ++ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 12); ++ DO_TESTCASE_1B("hard-irqs-on + irq-safe-A", irqsafe1_hard_spin, 21); ++ ++ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 12); ++ DO_TESTCASE_1B("hard-safe-A + irqs-on", irqsafe2B_hard_spin, 21); ++ ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 123); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 132); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 213); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 231); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 312); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #1", irqsafe3_hard_spin, 321); ++ ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 123); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 132); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 213); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 231); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 312); ++ DO_TESTCASE_1B("hard-safe-A + unsafe-B #2", irqsafe4_hard_spin, 321); ++#endif + + ww_tests(); + +diff -Nur linux-4.4.13.orig/lib/percpu_ida.c linux-4.4.13/lib/percpu_ida.c +--- linux-4.4.13.orig/lib/percpu_ida.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/lib/percpu_ida.c 2016-06-22 21:55:08.074523867 +0200 +@@ -26,6 +26,9 @@ + #include + #include + #include ++#include ++ ++static DEFINE_LOCAL_IRQ_LOCK(irq_off_lock); + + struct percpu_ida_cpu { + /* +@@ -148,13 +151,13 @@ + unsigned long flags; + int tag; + +- local_irq_save(flags); ++ local_lock_irqsave(irq_off_lock, flags); + tags = this_cpu_ptr(pool->tag_cpu); + + /* Fastpath */ + tag = alloc_local_tag(tags); + if (likely(tag >= 0)) { +- local_irq_restore(flags); ++ local_unlock_irqrestore(irq_off_lock, flags); + return tag; + } + +@@ -173,6 +176,7 @@ + + if (!tags->nr_free) + alloc_global_tags(pool, tags); ++ + if (!tags->nr_free) + steal_tags(pool, tags); + +@@ -184,7 +188,7 @@ + } + + spin_unlock(&pool->lock); +- local_irq_restore(flags); ++ local_unlock_irqrestore(irq_off_lock, flags); + + if (tag >= 0 || state == TASK_RUNNING) + break; +@@ -196,7 +200,7 @@ + + schedule(); + +- local_irq_save(flags); ++ local_lock_irqsave(irq_off_lock, flags); + tags = this_cpu_ptr(pool->tag_cpu); + } + if (state != TASK_RUNNING) +@@ -221,7 +225,7 @@ + + BUG_ON(tag >= pool->nr_tags); + +- local_irq_save(flags); ++ local_lock_irqsave(irq_off_lock, flags); + tags = this_cpu_ptr(pool->tag_cpu); + + spin_lock(&tags->lock); +@@ -253,7 +257,7 @@ + spin_unlock(&pool->lock); + } + +- 
local_irq_restore(flags); ++ local_unlock_irqrestore(irq_off_lock, flags); + } + EXPORT_SYMBOL_GPL(percpu_ida_free); + +@@ -345,7 +349,7 @@ + struct percpu_ida_cpu *remote; + unsigned cpu, i, err = 0; + +- local_irq_save(flags); ++ local_lock_irqsave(irq_off_lock, flags); + for_each_possible_cpu(cpu) { + remote = per_cpu_ptr(pool->tag_cpu, cpu); + spin_lock(&remote->lock); +@@ -367,7 +371,7 @@ + } + spin_unlock(&pool->lock); + out: +- local_irq_restore(flags); ++ local_unlock_irqrestore(irq_off_lock, flags); + return err; + } + EXPORT_SYMBOL_GPL(percpu_ida_for_each_free); +diff -Nur linux-4.4.13.orig/lib/radix-tree.c linux-4.4.13/lib/radix-tree.c +--- linux-4.4.13.orig/lib/radix-tree.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/lib/radix-tree.c 2016-06-22 21:55:08.074523867 +0200 +@@ -196,13 +196,14 @@ + * succeed in getting a node here (and never reach + * kmem_cache_alloc) + */ +- rtp = this_cpu_ptr(&radix_tree_preloads); ++ rtp = &get_cpu_var(radix_tree_preloads); + if (rtp->nr) { + ret = rtp->nodes; + rtp->nodes = ret->private_data; + ret->private_data = NULL; + rtp->nr--; + } ++ put_cpu_var(radix_tree_preloads); + /* + * Update the allocation stack trace as this is more useful + * for debugging. +@@ -242,6 +243,7 @@ + call_rcu(&node->rcu_head, radix_tree_node_rcu_free); + } + ++#ifndef CONFIG_PREEMPT_RT_FULL + /* + * Load up this CPU's radix_tree_node buffer with sufficient objects to + * ensure that the addition of a single element in the tree cannot fail. On +@@ -310,6 +312,7 @@ + return 0; + } + EXPORT_SYMBOL(radix_tree_maybe_preload); ++#endif + + /* + * Return the maximum key which can be store into a +diff -Nur linux-4.4.13.orig/lib/rbtree.c linux-4.4.13/lib/rbtree.c +--- linux-4.4.13.orig/lib/rbtree.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/lib/rbtree.c 2016-06-22 21:55:08.074523867 +0200 +@@ -23,6 +23,7 @@ + + #include + #include ++#include + + /* + * red-black trees properties: http://en.wikipedia.org/wiki/Rbtree +@@ -590,3 +591,13 @@ + return rb_left_deepest_node(root->rb_node); + } + EXPORT_SYMBOL(rb_first_postorder); ++ ++void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent, ++ struct rb_node **rb_link) ++{ ++ node->__rb_parent_color = (unsigned long)parent; ++ node->rb_left = node->rb_right = NULL; ++ ++ rcu_assign_pointer(*rb_link, node); ++} ++EXPORT_SYMBOL(rb_link_node_rcu); +diff -Nur linux-4.4.13.orig/lib/scatterlist.c linux-4.4.13/lib/scatterlist.c +--- linux-4.4.13.orig/lib/scatterlist.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/lib/scatterlist.c 2016-06-22 21:55:08.074523867 +0200 +@@ -620,7 +620,7 @@ + flush_kernel_dcache_page(miter->page); + + if (miter->__flags & SG_MITER_ATOMIC) { +- WARN_ON_ONCE(preemptible()); ++ WARN_ON_ONCE(!pagefault_disabled()); + kunmap_atomic(miter->addr); + } else + kunmap(miter->page); +@@ -664,7 +664,7 @@ + if (!sg_miter_skip(&miter, skip)) + return false; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + + while (sg_miter_next(&miter) && offset < buflen) { + unsigned int len; +@@ -681,7 +681,7 @@ + + sg_miter_stop(&miter); + +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + return offset; + } + EXPORT_SYMBOL(sg_copy_buffer); +diff -Nur linux-4.4.13.orig/lib/smp_processor_id.c linux-4.4.13/lib/smp_processor_id.c +--- linux-4.4.13.orig/lib/smp_processor_id.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/lib/smp_processor_id.c 2016-06-22 21:55:08.074523867 +0200 +@@ -39,8 +39,9 @@ + if (!printk_ratelimit()) + goto out_enable; + +- printk(KERN_ERR 
"BUG: using %s%s() in preemptible [%08x] code: %s/%d\n", +- what1, what2, preempt_count() - 1, current->comm, current->pid); ++ printk(KERN_ERR "BUG: using %s%s() in preemptible [%08x %08x] code: %s/%d\n", ++ what1, what2, preempt_count() - 1, __migrate_disabled(current), ++ current->comm, current->pid); + + print_symbol("caller is %s\n", (long)__builtin_return_address(0)); + dump_stack(); +diff -Nur linux-4.4.13.orig/Makefile linux-4.4.13/Makefile +--- linux-4.4.13.orig/Makefile 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/Makefile 2016-06-22 21:55:08.074523867 +0200 +@@ -768,6 +768,9 @@ + # Prohibit date/time macros, which would make the build non-deterministic + KBUILD_CFLAGS += $(call cc-option,-Werror=date-time) + ++# enforce correct pointer usage ++KBUILD_CFLAGS += $(call cc-option,-Werror=incompatible-pointer-types) ++ + # use the deterministic mode of AR if available + KBUILD_ARFLAGS := $(call ar-option,D) + +diff -Nur linux-4.4.13.orig/mm/backing-dev.c linux-4.4.13/mm/backing-dev.c +--- linux-4.4.13.orig/mm/backing-dev.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/mm/backing-dev.c 2016-06-22 21:55:08.074523867 +0200 +@@ -457,9 +457,9 @@ + { + unsigned long flags; + +- local_irq_save(flags); ++ local_irq_save_nort(flags); + if (!atomic_dec_and_lock(&congested->refcnt, &cgwb_lock)) { +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + return; + } + +diff -Nur linux-4.4.13.orig/mm/compaction.c linux-4.4.13/mm/compaction.c +--- linux-4.4.13.orig/mm/compaction.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/mm/compaction.c 2016-06-22 21:55:08.074523867 +0200 +@@ -1435,10 +1435,12 @@ + cc->migrate_pfn & ~((1UL << cc->order) - 1); + + if (cc->last_migrated_pfn < current_block_start) { +- cpu = get_cpu(); ++ cpu = get_cpu_light(); ++ local_lock_irq(swapvec_lock); + lru_add_drain_cpu(cpu); ++ local_unlock_irq(swapvec_lock); + drain_local_pages(zone); +- put_cpu(); ++ put_cpu_light(); + /* No more flushing until we migrate again */ + cc->last_migrated_pfn = 0; + } +diff -Nur linux-4.4.13.orig/mm/filemap.c linux-4.4.13/mm/filemap.c +--- linux-4.4.13.orig/mm/filemap.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/mm/filemap.c 2016-06-22 21:55:08.074523867 +0200 +@@ -168,7 +168,9 @@ + if (!workingset_node_pages(node) && + list_empty(&node->private_list)) { + node->private_data = mapping; +- list_lru_add(&workingset_shadow_nodes, &node->private_list); ++ local_lock(workingset_shadow_lock); ++ list_lru_add(&__workingset_shadow_nodes, &node->private_list); ++ local_unlock(workingset_shadow_lock); + } + } + +@@ -597,9 +599,12 @@ + * node->private_list is protected by + * mapping->tree_lock. + */ +- if (!list_empty(&node->private_list)) +- list_lru_del(&workingset_shadow_nodes, ++ if (!list_empty(&node->private_list)) { ++ local_lock(workingset_shadow_lock); ++ list_lru_del(&__workingset_shadow_nodes, + &node->private_list); ++ local_unlock(workingset_shadow_lock); ++ } + } + return 0; + } +diff -Nur linux-4.4.13.orig/mm/highmem.c linux-4.4.13/mm/highmem.c +--- linux-4.4.13.orig/mm/highmem.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/mm/highmem.c 2016-06-22 21:55:08.074523867 +0200 +@@ -29,10 +29,11 @@ + #include + #include + +- ++#ifndef CONFIG_PREEMPT_RT_FULL + #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32) + DEFINE_PER_CPU(int, __kmap_atomic_idx); + #endif ++#endif + + /* + * Virtual_count is not a pure "count". 
+@@ -107,8 +108,9 @@ + unsigned long totalhigh_pages __read_mostly; + EXPORT_SYMBOL(totalhigh_pages); + +- ++#ifndef CONFIG_PREEMPT_RT_FULL + EXPORT_PER_CPU_SYMBOL(__kmap_atomic_idx); ++#endif + + unsigned int nr_free_highpages (void) + { +diff -Nur linux-4.4.13.orig/mm/Kconfig linux-4.4.13/mm/Kconfig +--- linux-4.4.13.orig/mm/Kconfig 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/mm/Kconfig 2016-06-22 21:55:08.074523867 +0200 +@@ -392,7 +392,7 @@ + + config TRANSPARENT_HUGEPAGE + bool "Transparent Hugepage Support" +- depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE ++ depends on HAVE_ARCH_TRANSPARENT_HUGEPAGE && !PREEMPT_RT_FULL + select COMPACTION + help + Transparent Hugepages allows the kernel to use huge pages and +diff -Nur linux-4.4.13.orig/mm/memcontrol.c linux-4.4.13/mm/memcontrol.c +--- linux-4.4.13.orig/mm/memcontrol.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/mm/memcontrol.c 2016-06-22 21:55:08.074523867 +0200 +@@ -67,6 +67,8 @@ + #include + #include + #include ++#include ++ + #include "slab.h" + + #include +@@ -87,6 +89,7 @@ + #define do_swap_account 0 + #endif + ++static DEFINE_LOCAL_IRQ_LOCK(event_lock); + static const char * const mem_cgroup_stat_names[] = { + "cache", + "rss", +@@ -1936,14 +1939,17 @@ + */ + static void refill_stock(struct mem_cgroup *memcg, unsigned int nr_pages) + { +- struct memcg_stock_pcp *stock = &get_cpu_var(memcg_stock); ++ struct memcg_stock_pcp *stock; ++ int cpu = get_cpu_light(); ++ ++ stock = &per_cpu(memcg_stock, cpu); + + if (stock->cached != memcg) { /* reset if necessary */ + drain_stock(stock); + stock->cached = memcg; + } + stock->nr_pages += nr_pages; +- put_cpu_var(memcg_stock); ++ put_cpu_light(); + } + + /* +@@ -1959,7 +1965,7 @@ + return; + /* Notify other cpus that system-wide "drain" is running */ + get_online_cpus(); +- curcpu = get_cpu(); ++ curcpu = get_cpu_light(); + for_each_online_cpu(cpu) { + struct memcg_stock_pcp *stock = &per_cpu(memcg_stock, cpu); + struct mem_cgroup *memcg; +@@ -1976,7 +1982,7 @@ + schedule_work_on(cpu, &stock->work); + } + } +- put_cpu(); ++ put_cpu_light(); + put_online_cpus(); + mutex_unlock(&percpu_charge_mutex); + } +@@ -4618,12 +4624,12 @@ + + ret = 0; + +- local_irq_disable(); ++ local_lock_irq(event_lock); + mem_cgroup_charge_statistics(to, page, nr_pages); + memcg_check_events(to, page); + mem_cgroup_charge_statistics(from, page, -nr_pages); + memcg_check_events(from, page); +- local_irq_enable(); ++ local_unlock_irq(event_lock); + out_unlock: + unlock_page(page); + out: +@@ -5411,10 +5417,10 @@ + VM_BUG_ON_PAGE(!PageTransHuge(page), page); + } + +- local_irq_disable(); ++ local_lock_irq(event_lock); + mem_cgroup_charge_statistics(memcg, page, nr_pages); + memcg_check_events(memcg, page); +- local_irq_enable(); ++ local_unlock_irq(event_lock); + + if (do_swap_account && PageSwapCache(page)) { + swp_entry_t entry = { .val = page_private(page) }; +@@ -5470,14 +5476,14 @@ + memcg_oom_recover(memcg); + } + +- local_irq_save(flags); ++ local_lock_irqsave(event_lock, flags); + __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS], nr_anon); + __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_CACHE], nr_file); + __this_cpu_sub(memcg->stat->count[MEM_CGROUP_STAT_RSS_HUGE], nr_huge); + __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_PGPGOUT], pgpgout); + __this_cpu_add(memcg->stat->nr_page_events, nr_pages); + memcg_check_events(memcg, dummy_page); +- local_irq_restore(flags); ++ local_unlock_irqrestore(event_lock, flags); + + if (!mem_cgroup_is_root(memcg)) + 
css_put_many(&memcg->css, nr_pages); +@@ -5669,6 +5675,7 @@ + { + struct mem_cgroup *memcg; + unsigned short oldid; ++ unsigned long flags; + + VM_BUG_ON_PAGE(PageLRU(page), page); + VM_BUG_ON_PAGE(page_count(page), page); +@@ -5697,9 +5704,13 @@ + * important here to have the interrupts disabled because it is the + * only synchronisation we have for udpating the per-CPU variables. + */ ++ local_lock_irqsave(event_lock, flags); ++#ifndef CONFIG_PREEMPT_RT_BASE + VM_BUG_ON(!irqs_disabled()); ++#endif + mem_cgroup_charge_statistics(memcg, page, -1); + memcg_check_events(memcg, page); ++ local_unlock_irqrestore(event_lock, flags); + } + + /** +diff -Nur linux-4.4.13.orig/mm/mmu_context.c linux-4.4.13/mm/mmu_context.c +--- linux-4.4.13.orig/mm/mmu_context.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/mm/mmu_context.c 2016-06-22 21:55:08.074523867 +0200 +@@ -23,6 +23,7 @@ + struct task_struct *tsk = current; + + task_lock(tsk); ++ preempt_disable_rt(); + active_mm = tsk->active_mm; + if (active_mm != mm) { + atomic_inc(&mm->mm_count); +@@ -30,6 +31,7 @@ + } + tsk->mm = mm; + switch_mm(active_mm, mm, tsk); ++ preempt_enable_rt(); + task_unlock(tsk); + #ifdef finish_arch_post_lock_switch + finish_arch_post_lock_switch(); +diff -Nur linux-4.4.13.orig/mm/page_alloc.c linux-4.4.13/mm/page_alloc.c +--- linux-4.4.13.orig/mm/page_alloc.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/mm/page_alloc.c 2016-06-22 21:55:08.078523867 +0200 +@@ -60,6 +60,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -264,6 +265,18 @@ + EXPORT_SYMBOL(nr_online_nodes); + #endif + ++static DEFINE_LOCAL_IRQ_LOCK(pa_lock); ++ ++#ifdef CONFIG_PREEMPT_RT_BASE ++# define cpu_lock_irqsave(cpu, flags) \ ++ local_lock_irqsave_on(pa_lock, flags, cpu) ++# define cpu_unlock_irqrestore(cpu, flags) \ ++ local_unlock_irqrestore_on(pa_lock, flags, cpu) ++#else ++# define cpu_lock_irqsave(cpu, flags) local_irq_save(flags) ++# define cpu_unlock_irqrestore(cpu, flags) local_irq_restore(flags) ++#endif ++ + int page_group_by_mobility_disabled __read_mostly; + + #ifdef CONFIG_DEFERRED_STRUCT_PAGE_INIT +@@ -784,7 +797,7 @@ + } + + /* +- * Frees a number of pages from the PCP lists ++ * Frees a number of pages which have been collected from the pcp lists. + * Assumes all pages on list are in same zone, and of same order. + * count is the number of pages to free. + * +@@ -795,18 +808,53 @@ + * pinned" detection logic. 
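/*
 * For reference, not patch text: DEFINE_LOCAL_IRQ_LOCK(), local_lock_irq*()
 * and the cpu_lock_irqsave() wrappers above come from the new
 * include/linux/locallock.h added earlier in patch-realtime. A rough sketch
 * of the mechanism (the real header adds lockdep annotations and recursion
 * handling):
 */
struct local_irq_lock {
	spinlock_t		lock;		/* RT: per-CPU sleeping spinlock */
	struct task_struct	*owner;
	int			nestcnt;
	unsigned long		flags;
};
/*
 * Approximate mapping:
 *   !RT: local_lock_irqsave(l, f)   behaves as local_irq_save(f)
 *   RT:  local_lock_irqsave(l, f)   takes this CPU's l.lock instead, so the
 *                                   event_lock/pa_lock sections above stay
 *                                   preemptible yet mutually exclusive.
 */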
+ */ + static void free_pcppages_bulk(struct zone *zone, int count, +- struct per_cpu_pages *pcp) ++ struct list_head *list) + { +- int migratetype = 0; +- int batch_free = 0; + int to_free = count; + unsigned long nr_scanned; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&zone->lock, flags); + +- spin_lock(&zone->lock); + nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED); + if (nr_scanned) + __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned); + ++ while (!list_empty(list)) { ++ struct page *page = list_first_entry(list, struct page, lru); ++ int mt; /* migratetype of the to-be-freed page */ ++ ++ /* must delete as __free_one_page list manipulates */ ++ list_del(&page->lru); ++ ++ mt = get_pcppage_migratetype(page); ++ /* MIGRATE_ISOLATE page should not go to pcplists */ ++ VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); ++ /* Pageblock could have been isolated meanwhile */ ++ if (unlikely(has_isolate_pageblock(zone))) ++ mt = get_pageblock_migratetype(page); ++ ++ __free_one_page(page, page_to_pfn(page), zone, 0, mt); ++ trace_mm_page_pcpu_drain(page, 0, mt); ++ to_free--; ++ } ++ WARN_ON(to_free != 0); ++ spin_unlock_irqrestore(&zone->lock, flags); ++} ++ ++/* ++ * Moves a number of pages from the PCP lists to free list which ++ * is freed outside of the locked region. ++ * ++ * Assumes all pages on list are in same zone, and of same order. ++ * count is the number of pages to free. ++ */ ++static void isolate_pcp_pages(int to_free, struct per_cpu_pages *src, ++ struct list_head *dst) ++{ ++ int migratetype = 0; ++ int batch_free = 0; ++ + while (to_free) { + struct page *page; + struct list_head *list; +@@ -822,7 +870,7 @@ + batch_free++; + if (++migratetype == MIGRATE_PCPTYPES) + migratetype = 0; +- list = &pcp->lists[migratetype]; ++ list = &src->lists[migratetype]; + } while (list_empty(list)); + + /* This is the only non-empty list. Free them all. 
*/ +@@ -830,24 +878,12 @@ + batch_free = to_free; + + do { +- int mt; /* migratetype of the to-be-freed page */ +- +- page = list_entry(list->prev, struct page, lru); +- /* must delete as __free_one_page list manipulates */ ++ page = list_last_entry(list, struct page, lru); + list_del(&page->lru); + +- mt = get_pcppage_migratetype(page); +- /* MIGRATE_ISOLATE page should not go to pcplists */ +- VM_BUG_ON_PAGE(is_migrate_isolate(mt), page); +- /* Pageblock could have been isolated meanwhile */ +- if (unlikely(has_isolate_pageblock(zone))) +- mt = get_pageblock_migratetype(page); +- +- __free_one_page(page, page_to_pfn(page), zone, 0, mt); +- trace_mm_page_pcpu_drain(page, 0, mt); ++ list_add(&page->lru, dst); + } while (--to_free && --batch_free && !list_empty(list)); + } +- spin_unlock(&zone->lock); + } + + static void free_one_page(struct zone *zone, +@@ -856,7 +892,9 @@ + int migratetype) + { + unsigned long nr_scanned; +- spin_lock(&zone->lock); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&zone->lock, flags); + nr_scanned = zone_page_state(zone, NR_PAGES_SCANNED); + if (nr_scanned) + __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned); +@@ -866,7 +904,7 @@ + migratetype = get_pfnblock_migratetype(page, pfn); + } + __free_one_page(page, pfn, zone, order, migratetype); +- spin_unlock(&zone->lock); ++ spin_unlock_irqrestore(&zone->lock, flags); + } + + static int free_tail_pages_check(struct page *head_page, struct page *page) +@@ -1017,10 +1055,10 @@ + return; + + migratetype = get_pfnblock_migratetype(page, pfn); +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + __count_vm_events(PGFREE, 1 << order); + free_one_page(page_zone(page), page, pfn, order, migratetype); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + } + + static void __init __free_pages_boot_core(struct page *page, +@@ -1877,16 +1915,18 @@ + void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp) + { + unsigned long flags; ++ LIST_HEAD(dst); + int to_drain, batch; + +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + batch = READ_ONCE(pcp->batch); + to_drain = min(pcp->count, batch); + if (to_drain > 0) { +- free_pcppages_bulk(zone, to_drain, pcp); ++ isolate_pcp_pages(to_drain, pcp, &dst); + pcp->count -= to_drain; + } +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); ++ free_pcppages_bulk(zone, to_drain, &dst); + } + #endif + +@@ -1902,16 +1942,21 @@ + unsigned long flags; + struct per_cpu_pageset *pset; + struct per_cpu_pages *pcp; ++ LIST_HEAD(dst); ++ int count; + +- local_irq_save(flags); ++ cpu_lock_irqsave(cpu, flags); + pset = per_cpu_ptr(zone->pageset, cpu); + + pcp = &pset->pcp; +- if (pcp->count) { +- free_pcppages_bulk(zone, pcp->count, pcp); ++ count = pcp->count; ++ if (count) { ++ isolate_pcp_pages(count, pcp, &dst); + pcp->count = 0; + } +- local_irq_restore(flags); ++ cpu_unlock_irqrestore(cpu, flags); ++ if (count) ++ free_pcppages_bulk(zone, count, &dst); + } + + /* +@@ -1997,8 +2042,17 @@ + else + cpumask_clear_cpu(cpu, &cpus_with_pcps); + } ++#ifndef CONFIG_PREEMPT_RT_BASE + on_each_cpu_mask(&cpus_with_pcps, (smp_call_func_t) drain_local_pages, + zone, 1); ++#else ++ for_each_cpu(cpu, &cpus_with_pcps) { ++ if (zone) ++ drain_pages_zone(cpu, zone); ++ else ++ drain_pages(cpu); ++ } ++#endif + } + + #ifdef CONFIG_HIBERNATION +@@ -2054,7 +2108,7 @@ + + migratetype = get_pfnblock_migratetype(page, pfn); + set_pcppage_migratetype(page, migratetype); +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, 
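/*
 * The two-phase shape introduced above (isolate_pcp_pages() plus the
 * reworked free_pcppages_bulk()) condenses to the following pattern, taken
 * from the drain_zone_pages() hunk: detach pages under the per-CPU pa_lock,
 * then free them under zone->lock with the per-CPU lock already dropped,
 * keeping both hold times short on RT:
 */
LIST_HEAD(dst);

local_lock_irqsave(pa_lock, flags);
isolate_pcp_pages(to_drain, pcp, &dst);		/* detach only, no freeing */
pcp->count -= to_drain;
local_unlock_irqrestore(pa_lock, flags);
free_pcppages_bulk(zone, to_drain, &dst);	/* takes zone->lock itself */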
flags); + __count_vm_event(PGFREE); + + /* +@@ -2080,12 +2134,17 @@ + pcp->count++; + if (pcp->count >= pcp->high) { + unsigned long batch = READ_ONCE(pcp->batch); +- free_pcppages_bulk(zone, batch, pcp); ++ LIST_HEAD(dst); ++ ++ isolate_pcp_pages(batch, pcp, &dst); + pcp->count -= batch; ++ local_unlock_irqrestore(pa_lock, flags); ++ free_pcppages_bulk(zone, batch, &dst); ++ return; + } + + out: +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + } + + /* +@@ -2220,7 +2279,7 @@ + struct per_cpu_pages *pcp; + struct list_head *list; + +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + pcp = &this_cpu_ptr(zone->pageset)->pcp; + list = &pcp->lists[migratetype]; + if (list_empty(list)) { +@@ -2252,7 +2311,7 @@ + */ + WARN_ON_ONCE(order > 1); + } +- spin_lock_irqsave(&zone->lock, flags); ++ local_spin_lock_irqsave(pa_lock, &zone->lock, flags); + + page = NULL; + if (alloc_flags & ALLOC_HARDER) { +@@ -2262,11 +2321,13 @@ + } + if (!page) + page = __rmqueue(zone, order, migratetype, gfp_flags); +- spin_unlock(&zone->lock); +- if (!page) ++ if (!page) { ++ spin_unlock(&zone->lock); + goto failed; ++ } + __mod_zone_freepage_state(zone, -(1 << order), + get_pcppage_migratetype(page)); ++ spin_unlock(&zone->lock); + } + + __mod_zone_page_state(zone, NR_ALLOC_BATCH, -(1 << order)); +@@ -2276,13 +2337,13 @@ + + __count_zone_vm_events(PGALLOC, zone, 1 << order); + zone_statistics(preferred_zone, zone, gfp_flags); +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + + VM_BUG_ON_PAGE(bad_range(zone, page), page); + return page; + + failed: +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + return NULL; + } + +@@ -5948,6 +6009,7 @@ + void __init page_alloc_init(void) + { + hotcpu_notifier(page_alloc_cpu_notify, 0); ++ local_irq_lock_init(pa_lock); + } + + /* +@@ -6842,7 +6904,7 @@ + struct per_cpu_pageset *pset; + + /* avoid races with drain_pages() */ +- local_irq_save(flags); ++ local_lock_irqsave(pa_lock, flags); + if (zone->pageset != &boot_pageset) { + for_each_online_cpu(cpu) { + pset = per_cpu_ptr(zone->pageset, cpu); +@@ -6851,7 +6913,7 @@ + free_percpu(zone->pageset); + zone->pageset = &boot_pageset; + } +- local_irq_restore(flags); ++ local_unlock_irqrestore(pa_lock, flags); + } + + #ifdef CONFIG_MEMORY_HOTREMOVE +diff -Nur linux-4.4.13.orig/mm/slab.h linux-4.4.13/mm/slab.h +--- linux-4.4.13.orig/mm/slab.h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/mm/slab.h 2016-06-22 21:55:08.078523867 +0200 +@@ -324,7 +324,11 @@ + * The slab lists for all objects. 
+ */ + struct kmem_cache_node { ++#ifdef CONFIG_SLUB ++ raw_spinlock_t list_lock; ++#else + spinlock_t list_lock; ++#endif + + #ifdef CONFIG_SLAB + struct list_head slabs_partial; /* partial list first, better asm code */ +diff -Nur linux-4.4.13.orig/mm/slub.c linux-4.4.13/mm/slub.c +--- linux-4.4.13.orig/mm/slub.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/mm/slub.c 2016-06-22 21:55:08.078523867 +0200 +@@ -1075,7 +1075,7 @@ + void *object = head; + int cnt = 0; + +- spin_lock_irqsave(&n->list_lock, *flags); ++ raw_spin_lock_irqsave(&n->list_lock, *flags); + slab_lock(page); + + if (!check_slab(s, page)) +@@ -1136,7 +1136,7 @@ + + fail: + slab_unlock(page); +- spin_unlock_irqrestore(&n->list_lock, *flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, *flags); + slab_fix(s, "Object at 0x%p not freed", object); + return NULL; + } +@@ -1263,6 +1263,12 @@ + + #endif /* CONFIG_SLUB_DEBUG */ + ++struct slub_free_list { ++ raw_spinlock_t lock; ++ struct list_head list; ++}; ++static DEFINE_PER_CPU(struct slub_free_list, slub_free_list); ++ + /* + * Hooks for other subsystems that check memory allocations. In a typical + * production configuration these hooks all should produce no code at all. +@@ -1399,10 +1405,17 @@ + gfp_t alloc_gfp; + void *start, *p; + int idx, order; ++ bool enableirqs = false; + + flags &= gfp_allowed_mask; + + if (gfpflags_allow_blocking(flags)) ++ enableirqs = true; ++#ifdef CONFIG_PREEMPT_RT_FULL ++ if (system_state == SYSTEM_RUNNING) ++ enableirqs = true; ++#endif ++ if (enableirqs) + local_irq_enable(); + + flags |= s->allocflags; +@@ -1473,7 +1486,7 @@ + page->frozen = 1; + + out: +- if (gfpflags_allow_blocking(flags)) ++ if (enableirqs) + local_irq_disable(); + if (!page) + return NULL; +@@ -1529,6 +1542,16 @@ + __free_kmem_pages(page, order); + } + ++static void free_delayed(struct list_head *h) ++{ ++ while(!list_empty(h)) { ++ struct page *page = list_first_entry(h, struct page, lru); ++ ++ list_del(&page->lru); ++ __free_slab(page->slab_cache, page); ++ } ++} ++ + #define need_reserve_slab_rcu \ + (sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head)) + +@@ -1560,6 +1583,12 @@ + } + + call_rcu(head, rcu_free_slab); ++ } else if (irqs_disabled()) { ++ struct slub_free_list *f = this_cpu_ptr(&slub_free_list); ++ ++ raw_spin_lock(&f->lock); ++ list_add(&page->lru, &f->list); ++ raw_spin_unlock(&f->lock); + } else + __free_slab(s, page); + } +@@ -1673,7 +1702,7 @@ + if (!n || !n->nr_partial) + return NULL; + +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + list_for_each_entry_safe(page, page2, &n->partial, lru) { + void *t; + +@@ -1698,7 +1727,7 @@ + break; + + } +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + return object; + } + +@@ -1944,7 +1973,7 @@ + * that acquire_slab() will see a slab page that + * is frozen + */ +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + } + } else { + m = M_FULL; +@@ -1955,7 +1984,7 @@ + * slabs from diagnostic functions will not see + * any frozen slabs. 
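/*
 * The slub_free_list machinery introduced above condenses to this deferral
 * pattern (lifted from the free_slab()/free_delayed() hunks): when a slab
 * page dies with interrupts off, __free_slab() cannot run on RT because the
 * page allocator now takes sleeping locks, so the page is parked on a
 * per-CPU list and reaped later from a sleepable context:
 */
if (irqs_disabled()) {
	struct slub_free_list *f = this_cpu_ptr(&slub_free_list);

	raw_spin_lock(&f->lock);
	list_add(&page->lru, &f->list);		/* park the dead slab */
	raw_spin_unlock(&f->lock);
} else {
	__free_slab(s, page);
}
/* ...later, with IRQs enabled (see flush_all()/__slab_alloc() below): */
free_delayed(&tofree);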
+ */ +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + } + } + +@@ -1990,7 +2019,7 @@ + goto redo; + + if (lock) +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + + if (m == M_FREE) { + stat(s, DEACTIVATE_EMPTY); +@@ -2022,10 +2051,10 @@ + n2 = get_node(s, page_to_nid(page)); + if (n != n2) { + if (n) +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + + n = n2; +- spin_lock(&n->list_lock); ++ raw_spin_lock(&n->list_lock); + } + + do { +@@ -2054,7 +2083,7 @@ + } + + if (n) +- spin_unlock(&n->list_lock); ++ raw_spin_unlock(&n->list_lock); + + while (discard_page) { + page = discard_page; +@@ -2093,14 +2122,21 @@ + pobjects = oldpage->pobjects; + pages = oldpage->pages; + if (drain && pobjects > s->cpu_partial) { ++ struct slub_free_list *f; + unsigned long flags; ++ LIST_HEAD(tofree); + /* + * partial array is full. Move the existing + * set to the per node partial list. + */ + local_irq_save(flags); + unfreeze_partials(s, this_cpu_ptr(s->cpu_slab)); ++ f = this_cpu_ptr(&slub_free_list); ++ raw_spin_lock(&f->lock); ++ list_splice_init(&f->list, &tofree); ++ raw_spin_unlock(&f->lock); + local_irq_restore(flags); ++ free_delayed(&tofree); + oldpage = NULL; + pobjects = 0; + pages = 0; +@@ -2172,7 +2208,22 @@ + + static void flush_all(struct kmem_cache *s) + { ++ LIST_HEAD(tofree); ++ int cpu; ++ + on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC); ++ for_each_online_cpu(cpu) { ++ struct slub_free_list *f; ++ ++ if (!has_cpu_slab(cpu, s)) ++ continue; ++ ++ f = &per_cpu(slub_free_list, cpu); ++ raw_spin_lock_irq(&f->lock); ++ list_splice_init(&f->list, &tofree); ++ raw_spin_unlock_irq(&f->lock); ++ free_delayed(&tofree); ++ } + } + + /* +@@ -2208,10 +2259,10 @@ + unsigned long x = 0; + struct page *page; + +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + list_for_each_entry(page, &n->partial, lru) + x += get_count(page); +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + return x; + } + #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */ +@@ -2349,8 +2400,10 @@ + * already disabled (which is the case for bulk allocation). 
+ */ + static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node, +- unsigned long addr, struct kmem_cache_cpu *c) ++ unsigned long addr, struct kmem_cache_cpu *c, ++ struct list_head *to_free) + { ++ struct slub_free_list *f; + void *freelist; + struct page *page; + +@@ -2410,6 +2463,13 @@ + VM_BUG_ON(!c->page->frozen); + c->freelist = get_freepointer(s, freelist); + c->tid = next_tid(c->tid); ++ ++out: ++ f = this_cpu_ptr(&slub_free_list); ++ raw_spin_lock(&f->lock); ++ list_splice_init(&f->list, to_free); ++ raw_spin_unlock(&f->lock); ++ + return freelist; + + new_slab: +@@ -2441,7 +2501,7 @@ + deactivate_slab(s, page, get_freepointer(s, freelist)); + c->page = NULL; + c->freelist = NULL; +- return freelist; ++ goto out; + } + + /* +@@ -2453,6 +2513,7 @@ + { + void *p; + unsigned long flags; ++ LIST_HEAD(tofree); + + local_irq_save(flags); + #ifdef CONFIG_PREEMPT +@@ -2464,8 +2525,9 @@ + c = this_cpu_ptr(s->cpu_slab); + #endif + +- p = ___slab_alloc(s, gfpflags, node, addr, c); ++ p = ___slab_alloc(s, gfpflags, node, addr, c, &tofree); + local_irq_restore(flags); ++ free_delayed(&tofree); + return p; + } + +@@ -2652,7 +2714,7 @@ + + do { + if (unlikely(n)) { +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + n = NULL; + } + prior = page->freelist; +@@ -2684,7 +2746,7 @@ + * Otherwise the list_lock will synchronize with + * other processors updating the list of slabs. + */ +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + + } + } +@@ -2726,7 +2788,7 @@ + add_partial(n, page, DEACTIVATE_TO_TAIL); + stat(s, FREE_ADD_PARTIAL); + } +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + return; + + slab_empty: +@@ -2741,7 +2803,7 @@ + remove_full(s, n, page); + } + +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + stat(s, FREE_SLAB); + discard_slab(s, page); + } +@@ -2913,6 +2975,7 @@ + void **p) + { + struct kmem_cache_cpu *c; ++ LIST_HEAD(to_free); + int i; + + /* memcg and kmem_cache debug support */ +@@ -2936,7 +2999,7 @@ + * of re-populating per CPU c->freelist + */ + p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE, +- _RET_IP_, c); ++ _RET_IP_, c, &to_free); + if (unlikely(!p[i])) + goto error; + +@@ -2948,6 +3011,7 @@ + } + c->tid = next_tid(c->tid); + local_irq_enable(); ++ free_delayed(&to_free); + + /* Clear memory outside IRQ disabled fastpath loop */ + if (unlikely(flags & __GFP_ZERO)) { +@@ -3095,7 +3159,7 @@ + init_kmem_cache_node(struct kmem_cache_node *n) + { + n->nr_partial = 0; +- spin_lock_init(&n->list_lock); ++ raw_spin_lock_init(&n->list_lock); + INIT_LIST_HEAD(&n->partial); + #ifdef CONFIG_SLUB_DEBUG + atomic_long_set(&n->nr_slabs, 0); +@@ -3677,7 +3741,7 @@ + for (i = 0; i < SHRINK_PROMOTE_MAX; i++) + INIT_LIST_HEAD(promote + i); + +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + + /* + * Build lists of slabs to discard or promote. 
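/*
 * For reference, not patch text: the spin_lock -> raw_spin_lock conversions
 * running through these slub.c hunks follow the usual RT rule: spinlock_t
 * becomes a sleeping lock on PREEMPT_RT_FULL, so a lock taken from truly
 * atomic paths -- n->list_lock lives under IRQs-off slab fast paths -- must
 * be a raw_spinlock_t, which spins with IRQs off on both trees. Usage shape:
 */
static DEFINE_RAW_SPINLOCK(example_lock);	/* hypothetical, for illustration */

static void example_atomic_section(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&example_lock, flags);
	/* keep this short and bounded: raw locks really spin, even on RT */
	raw_spin_unlock_irqrestore(&example_lock, flags);
}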
+@@ -3708,7 +3772,7 @@ + for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--) + list_splice(promote + i, &n->partial); + +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + + /* Release empty slabs */ + list_for_each_entry_safe(page, t, &discard, lru) +@@ -3884,6 +3948,12 @@ + { + static __initdata struct kmem_cache boot_kmem_cache, + boot_kmem_cache_node; ++ int cpu; ++ ++ for_each_possible_cpu(cpu) { ++ raw_spin_lock_init(&per_cpu(slub_free_list, cpu).lock); ++ INIT_LIST_HEAD(&per_cpu(slub_free_list, cpu).list); ++ } + + if (debug_guardpage_minorder()) + slub_max_order = 0; +@@ -4127,7 +4197,7 @@ + struct page *page; + unsigned long flags; + +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + + list_for_each_entry(page, &n->partial, lru) { + validate_slab_slab(s, page, map); +@@ -4149,7 +4219,7 @@ + s->name, count, atomic_long_read(&n->nr_slabs)); + + out: +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + return count; + } + +@@ -4337,12 +4407,12 @@ + if (!atomic_long_read(&n->nr_slabs)) + continue; + +- spin_lock_irqsave(&n->list_lock, flags); ++ raw_spin_lock_irqsave(&n->list_lock, flags); + list_for_each_entry(page, &n->partial, lru) + process_slab(&t, s, page, alloc, map); + list_for_each_entry(page, &n->full, lru) + process_slab(&t, s, page, alloc, map); +- spin_unlock_irqrestore(&n->list_lock, flags); ++ raw_spin_unlock_irqrestore(&n->list_lock, flags); + } + + for (i = 0; i < t.count; i++) { +diff -Nur linux-4.4.13.orig/mm/swap.c linux-4.4.13/mm/swap.c +--- linux-4.4.13.orig/mm/swap.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/mm/swap.c 2016-06-22 21:55:08.078523867 +0200 +@@ -31,6 +31,7 @@ + #include + #include + #include ++#include + #include + #include + +@@ -46,6 +47,9 @@ + static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs); + static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs); + ++static DEFINE_LOCAL_IRQ_LOCK(rotate_lock); ++DEFINE_LOCAL_IRQ_LOCK(swapvec_lock); ++ + /* + * This path almost never happens for VM activity - pages are normally + * freed via pagevecs. But it gets used by networking. 
+@@ -481,11 +485,11 @@ + unsigned long flags; + + page_cache_get(page); +- local_irq_save(flags); ++ local_lock_irqsave(rotate_lock, flags); + pvec = this_cpu_ptr(&lru_rotate_pvecs); + if (!pagevec_add(pvec, page)) + pagevec_move_tail(pvec); +- local_irq_restore(flags); ++ local_unlock_irqrestore(rotate_lock, flags); + } + } + +@@ -536,12 +540,13 @@ + void activate_page(struct page *page) + { + if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { +- struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); ++ struct pagevec *pvec = &get_locked_var(swapvec_lock, ++ activate_page_pvecs); + + page_cache_get(page); + if (!pagevec_add(pvec, page)) + pagevec_lru_move_fn(pvec, __activate_page, NULL); +- put_cpu_var(activate_page_pvecs); ++ put_locked_var(swapvec_lock, activate_page_pvecs); + } + } + +@@ -567,7 +572,7 @@ + + static void __lru_cache_activate_page(struct page *page) + { +- struct pagevec *pvec = &get_cpu_var(lru_add_pvec); ++ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); + int i; + + /* +@@ -589,7 +594,7 @@ + } + } + +- put_cpu_var(lru_add_pvec); ++ put_locked_var(swapvec_lock, lru_add_pvec); + } + + /* +@@ -630,13 +635,13 @@ + + static void __lru_cache_add(struct page *page) + { +- struct pagevec *pvec = &get_cpu_var(lru_add_pvec); ++ struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec); + + page_cache_get(page); + if (!pagevec_space(pvec)) + __pagevec_lru_add(pvec); + pagevec_add(pvec, page); +- put_cpu_var(lru_add_pvec); ++ put_locked_var(swapvec_lock, lru_add_pvec); + } + + /** +@@ -816,9 +821,9 @@ + unsigned long flags; + + /* No harm done if a racing interrupt already did this */ +- local_irq_save(flags); ++ local_lock_irqsave(rotate_lock, flags); + pagevec_move_tail(pvec); +- local_irq_restore(flags); ++ local_unlock_irqrestore(rotate_lock, flags); + } + + pvec = &per_cpu(lru_deactivate_file_pvecs, cpu); +@@ -846,18 +851,19 @@ + return; + + if (likely(get_page_unless_zero(page))) { +- struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs); ++ struct pagevec *pvec = &get_locked_var(swapvec_lock, ++ lru_deactivate_file_pvecs); + + if (!pagevec_add(pvec, page)) + pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL); +- put_cpu_var(lru_deactivate_file_pvecs); ++ put_locked_var(swapvec_lock, lru_deactivate_file_pvecs); + } + } + + void lru_add_drain(void) + { +- lru_add_drain_cpu(get_cpu()); +- put_cpu(); ++ lru_add_drain_cpu(local_lock_cpu(swapvec_lock)); ++ local_unlock_cpu(swapvec_lock); + } + + static void lru_add_drain_per_cpu(struct work_struct *dummy) +diff -Nur linux-4.4.13.orig/mm/truncate.c linux-4.4.13/mm/truncate.c +--- linux-4.4.13.orig/mm/truncate.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/mm/truncate.c 2016-06-22 21:55:08.078523867 +0200 +@@ -56,8 +56,11 @@ + * protected by mapping->tree_lock. 
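/*
 * For reference, not patch text: get_locked_var()/put_locked_var() above
 * replace get_cpu_var()/put_cpu_var(). Approximate mapping from
 * locallock.h:
 *   !RT: get_locked_var(l, v)  behaves as get_cpu_var(v), preemption off
 *   RT:  get_locked_var(l, v)  takes local lock l, then returns this CPU's
 *                              v while the task remains preemptible
 * Usage shape, condensed from the mm/swap.c hunks above:
 */
struct pagevec *pvec = &get_locked_var(swapvec_lock, lru_add_pvec);
/* ... fill this CPU's pagevec; contenders on this CPU block on the lock ... */
put_locked_var(swapvec_lock, lru_add_pvec);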
+ */ + if (!workingset_node_shadows(node) && +- !list_empty(&node->private_list)) +- list_lru_del(&workingset_shadow_nodes, &node->private_list); ++ !list_empty(&node->private_list)) { ++ local_lock(workingset_shadow_lock); ++ list_lru_del(&__workingset_shadow_nodes, &node->private_list); ++ local_unlock(workingset_shadow_lock); ++ } + __radix_tree_delete_node(&mapping->page_tree, node); + unlock: + spin_unlock_irq(&mapping->tree_lock); +diff -Nur linux-4.4.13.orig/mm/vmalloc.c linux-4.4.13/mm/vmalloc.c +--- linux-4.4.13.orig/mm/vmalloc.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/mm/vmalloc.c 2016-06-22 21:55:08.078523867 +0200 +@@ -821,7 +821,7 @@ + struct vmap_block *vb; + struct vmap_area *va; + unsigned long vb_idx; +- int node, err; ++ int node, err, cpu; + void *vaddr; + + node = numa_node_id(); +@@ -864,11 +864,12 @@ + BUG_ON(err); + radix_tree_preload_end(); + +- vbq = &get_cpu_var(vmap_block_queue); ++ cpu = get_cpu_light(); ++ vbq = this_cpu_ptr(&vmap_block_queue); + spin_lock(&vbq->lock); + list_add_tail_rcu(&vb->free_list, &vbq->free); + spin_unlock(&vbq->lock); +- put_cpu_var(vmap_block_queue); ++ put_cpu_light(); + + return vaddr; + } +@@ -937,6 +938,7 @@ + struct vmap_block *vb; + void *vaddr = NULL; + unsigned int order; ++ int cpu; + + BUG_ON(offset_in_page(size)); + BUG_ON(size > PAGE_SIZE*VMAP_MAX_ALLOC); +@@ -951,7 +953,8 @@ + order = get_order(size); + + rcu_read_lock(); +- vbq = &get_cpu_var(vmap_block_queue); ++ cpu = get_cpu_light(); ++ vbq = this_cpu_ptr(&vmap_block_queue); + list_for_each_entry_rcu(vb, &vbq->free, free_list) { + unsigned long pages_off; + +@@ -974,7 +977,7 @@ + break; + } + +- put_cpu_var(vmap_block_queue); ++ put_cpu_light(); + rcu_read_unlock(); + + /* Allocate new block if nothing was found */ +diff -Nur linux-4.4.13.orig/mm/vmstat.c linux-4.4.13/mm/vmstat.c +--- linux-4.4.13.orig/mm/vmstat.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/mm/vmstat.c 2016-06-22 21:55:08.078523867 +0200 +@@ -226,6 +226,7 @@ + long x; + long t; + ++ preempt_disable_rt(); + x = delta + __this_cpu_read(*p); + + t = __this_cpu_read(pcp->stat_threshold); +@@ -235,6 +236,7 @@ + x = 0; + } + __this_cpu_write(*p, x); ++ preempt_enable_rt(); + } + EXPORT_SYMBOL(__mod_zone_page_state); + +@@ -267,6 +269,7 @@ + s8 __percpu *p = pcp->vm_stat_diff + item; + s8 v, t; + ++ preempt_disable_rt(); + v = __this_cpu_inc_return(*p); + t = __this_cpu_read(pcp->stat_threshold); + if (unlikely(v > t)) { +@@ -275,6 +278,7 @@ + zone_page_state_add(v + overstep, zone, item); + __this_cpu_write(*p, -overstep); + } ++ preempt_enable_rt(); + } + + void __inc_zone_page_state(struct page *page, enum zone_stat_item item) +@@ -289,6 +293,7 @@ + s8 __percpu *p = pcp->vm_stat_diff + item; + s8 v, t; + ++ preempt_disable_rt(); + v = __this_cpu_dec_return(*p); + t = __this_cpu_read(pcp->stat_threshold); + if (unlikely(v < - t)) { +@@ -297,6 +302,7 @@ + zone_page_state_add(v - overstep, zone, item); + __this_cpu_write(*p, overstep); + } ++ preempt_enable_rt(); + } + + void __dec_zone_page_state(struct page *page, enum zone_stat_item item) +diff -Nur linux-4.4.13.orig/mm/workingset.c linux-4.4.13/mm/workingset.c +--- linux-4.4.13.orig/mm/workingset.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/mm/workingset.c 2016-06-22 21:55:08.078523867 +0200 +@@ -264,7 +264,8 @@ + * point where they would still be useful. 
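/*
 * For reference, not patch text: preempt_disable_rt()/preempt_enable_rt()
 * in the mm/vmstat.c hunks above compile away on mainline and map to the
 * real primitives on RT (approximate definitions):
 */
#ifdef CONFIG_PREEMPT_RT_BASE
# define preempt_disable_rt()	preempt_disable()
# define preempt_enable_rt()	preempt_enable()
#else
# define preempt_disable_rt()	do { } while (0)
# define preempt_enable_rt()	do { } while (0)
#endif
/*
 * Rationale: the __this_cpu_*() read-modify-write sequences in the stat
 * updaters assume the caller already holds the CPU; on RT those call sites
 * are preemptible, so a short explicit preempt-off section keeps each
 * per-CPU delta consistent.
 */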
+ */ + +-struct list_lru workingset_shadow_nodes; ++struct list_lru __workingset_shadow_nodes; ++DEFINE_LOCAL_IRQ_LOCK(workingset_shadow_lock); + + static unsigned long count_shadow_nodes(struct shrinker *shrinker, + struct shrink_control *sc) +@@ -274,9 +275,9 @@ + unsigned long pages; + + /* list_lru lock nests inside IRQ-safe mapping->tree_lock */ +- local_irq_disable(); +- shadow_nodes = list_lru_shrink_count(&workingset_shadow_nodes, sc); +- local_irq_enable(); ++ local_lock_irq(workingset_shadow_lock); ++ shadow_nodes = list_lru_shrink_count(&__workingset_shadow_nodes, sc); ++ local_unlock_irq(workingset_shadow_lock); + + pages = node_present_pages(sc->nid); + /* +@@ -363,9 +364,9 @@ + spin_unlock(&mapping->tree_lock); + ret = LRU_REMOVED_RETRY; + out: +- local_irq_enable(); ++ local_unlock_irq(workingset_shadow_lock); + cond_resched(); +- local_irq_disable(); ++ local_lock_irq(workingset_shadow_lock); + spin_lock(lru_lock); + return ret; + } +@@ -376,10 +377,10 @@ + unsigned long ret; + + /* list_lru lock nests inside IRQ-safe mapping->tree_lock */ +- local_irq_disable(); +- ret = list_lru_shrink_walk(&workingset_shadow_nodes, sc, ++ local_lock_irq(workingset_shadow_lock); ++ ret = list_lru_shrink_walk(&__workingset_shadow_nodes, sc, + shadow_lru_isolate, NULL); +- local_irq_enable(); ++ local_unlock_irq(workingset_shadow_lock); + return ret; + } + +@@ -400,7 +401,7 @@ + { + int ret; + +- ret = list_lru_init_key(&workingset_shadow_nodes, &shadow_nodes_key); ++ ret = list_lru_init_key(&__workingset_shadow_nodes, &shadow_nodes_key); + if (ret) + goto err; + ret = register_shrinker(&workingset_shadow_shrinker); +@@ -408,7 +409,7 @@ + goto err_list_lru; + return 0; + err_list_lru: +- list_lru_destroy(&workingset_shadow_nodes); ++ list_lru_destroy(&__workingset_shadow_nodes); + err: + return ret; + } +diff -Nur linux-4.4.13.orig/mm/zsmalloc.c linux-4.4.13/mm/zsmalloc.c +--- linux-4.4.13.orig/mm/zsmalloc.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/mm/zsmalloc.c 2016-06-22 21:55:08.078523867 +0200 +@@ -1289,7 +1289,7 @@ + class = pool->size_class[class_idx]; + off = obj_idx_to_offset(page, obj_idx, class->size); + +- area = &get_cpu_var(zs_map_area); ++ area = per_cpu_ptr(&zs_map_area, get_cpu_light()); + area->vm_mm = mm; + if (off + class->size <= PAGE_SIZE) { + /* this object is contained entirely within a page */ +@@ -1342,7 +1342,7 @@ + + __zs_unmap_object(area, pages, off, class->size); + } +- put_cpu_var(zs_map_area); ++ put_cpu_light(); + unpin_tag(handle); + } + EXPORT_SYMBOL_GPL(zs_unmap_object); +diff -Nur linux-4.4.13.orig/net/core/dev.c linux-4.4.13/net/core/dev.c +--- linux-4.4.13.orig/net/core/dev.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/net/core/dev.c 2016-06-22 21:55:08.082523867 +0200 +@@ -186,6 +186,7 @@ + static DEFINE_HASHTABLE(napi_hash, 8); + + static seqcount_t devnet_rename_seq; ++static DEFINE_MUTEX(devnet_rename_mutex); + + static inline void dev_base_seq_inc(struct net *net) + { +@@ -207,14 +208,14 @@ + static inline void rps_lock(struct softnet_data *sd) + { + #ifdef CONFIG_RPS +- spin_lock(&sd->input_pkt_queue.lock); ++ raw_spin_lock(&sd->input_pkt_queue.raw_lock); + #endif + } + + static inline void rps_unlock(struct softnet_data *sd) + { + #ifdef CONFIG_RPS +- spin_unlock(&sd->input_pkt_queue.lock); ++ raw_spin_unlock(&sd->input_pkt_queue.raw_lock); + #endif + } + +@@ -884,7 +885,8 @@ + strcpy(name, dev->name); + rcu_read_unlock(); + if (read_seqcount_retry(&devnet_rename_seq, seq)) { +- cond_resched(); ++ 
mutex_lock(&devnet_rename_mutex); ++ mutex_unlock(&devnet_rename_mutex); + goto retry; + } + +@@ -1153,20 +1155,17 @@ + if (dev->flags & IFF_UP) + return -EBUSY; + +- write_seqcount_begin(&devnet_rename_seq); ++ mutex_lock(&devnet_rename_mutex); ++ __raw_write_seqcount_begin(&devnet_rename_seq); + +- if (strncmp(newname, dev->name, IFNAMSIZ) == 0) { +- write_seqcount_end(&devnet_rename_seq); +- return 0; +- } ++ if (strncmp(newname, dev->name, IFNAMSIZ) == 0) ++ goto outunlock; + + memcpy(oldname, dev->name, IFNAMSIZ); + + err = dev_get_valid_name(net, dev, newname); +- if (err < 0) { +- write_seqcount_end(&devnet_rename_seq); +- return err; +- } ++ if (err < 0) ++ goto outunlock; + + if (oldname[0] && !strchr(oldname, '%')) + netdev_info(dev, "renamed from %s\n", oldname); +@@ -1179,11 +1178,12 @@ + if (ret) { + memcpy(dev->name, oldname, IFNAMSIZ); + dev->name_assign_type = old_assign_type; +- write_seqcount_end(&devnet_rename_seq); +- return ret; ++ err = ret; ++ goto outunlock; + } + +- write_seqcount_end(&devnet_rename_seq); ++ __raw_write_seqcount_end(&devnet_rename_seq); ++ mutex_unlock(&devnet_rename_mutex); + + netdev_adjacent_rename_links(dev, oldname); + +@@ -1204,7 +1204,8 @@ + /* err >= 0 after dev_alloc_name() or stores the first errno */ + if (err >= 0) { + err = ret; +- write_seqcount_begin(&devnet_rename_seq); ++ mutex_lock(&devnet_rename_mutex); ++ __raw_write_seqcount_begin(&devnet_rename_seq); + memcpy(dev->name, oldname, IFNAMSIZ); + memcpy(oldname, newname, IFNAMSIZ); + dev->name_assign_type = old_assign_type; +@@ -1217,6 +1218,11 @@ + } + + return err; ++ ++outunlock: ++ __raw_write_seqcount_end(&devnet_rename_seq); ++ mutex_unlock(&devnet_rename_mutex); ++ return err; + } + + /** +@@ -2246,6 +2252,7 @@ + sd->output_queue_tailp = &q->next_sched; + raise_softirq_irqoff(NET_TX_SOFTIRQ); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + + void __netif_schedule(struct Qdisc *q) +@@ -2327,6 +2334,7 @@ + __this_cpu_write(softnet_data.completion_queue, skb); + raise_softirq_irqoff(NET_TX_SOFTIRQ); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + EXPORT_SYMBOL(__dev_kfree_skb_irq); + +@@ -2883,7 +2891,11 @@ + * This permits __QDISC___STATE_RUNNING owner to get the lock more + * often and dequeue packets faster. 
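/*
 * For reference, not patch text: the devnet_rename_seq rework above pairs
 * the seqcount with devnet_rename_mutex. Writers serialize on the mutex and
 * use the __raw_ seqcount helpers; a reader that hits a retry now blocks on
 * the mutex instead of spinning, so it cannot busy-loop against a preempted
 * writer on RT. Condensed shape (the reader-side begin helper is assumed
 * from the stock seqcount API, it is not shown in the hunks):
 */
static DEFINE_MUTEX(rename_mutex);
static seqcount_t rename_seq;

static void rename_writer(void)
{
	mutex_lock(&rename_mutex);
	__raw_write_seqcount_begin(&rename_seq);
	/* ... publish the new name ... */
	__raw_write_seqcount_end(&rename_seq);
	mutex_unlock(&rename_mutex);
}

static void rename_reader(void)
{
	unsigned int seq;
retry:
	seq = raw_seqcount_begin(&rename_seq);
	/* ... copy the name ... */
	if (read_seqcount_retry(&rename_seq, seq)) {
		mutex_lock(&rename_mutex);	/* wait for the writer */
		mutex_unlock(&rename_mutex);
		goto retry;
	}
}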
+ */ ++#ifdef CONFIG_PREEMPT_RT_FULL ++ contended = true; ++#else + contended = qdisc_is_running(q); ++#endif + if (unlikely(contended)) + spin_lock(&q->busylock); + +@@ -2943,9 +2955,44 @@ + #define skb_update_prio(skb) + #endif + ++#ifdef CONFIG_PREEMPT_RT_FULL ++ ++static inline int xmit_rec_read(void) ++{ ++ return current->xmit_recursion; ++} ++ ++static inline void xmit_rec_inc(void) ++{ ++ current->xmit_recursion++; ++} ++ ++static inline void xmit_rec_dec(void) ++{ ++ current->xmit_recursion--; ++} ++ ++#else ++ + DEFINE_PER_CPU(int, xmit_recursion); + EXPORT_SYMBOL(xmit_recursion); + ++static inline int xmit_rec_read(void) ++{ ++ return __this_cpu_read(xmit_recursion); ++} ++ ++static inline void xmit_rec_inc(void) ++{ ++ __this_cpu_inc(xmit_recursion); ++} ++ ++static inline void xmit_rec_dec(void) ++{ ++ __this_cpu_dec(xmit_recursion); ++} ++#endif ++ + #define RECURSION_LIMIT 10 + + /** +@@ -3138,7 +3185,7 @@ + + if (txq->xmit_lock_owner != cpu) { + +- if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT) ++ if (xmit_rec_read() > RECURSION_LIMIT) + goto recursion_alert; + + skb = validate_xmit_skb(skb, dev); +@@ -3148,9 +3195,9 @@ + HARD_TX_LOCK(dev, txq, cpu); + + if (!netif_xmit_stopped(txq)) { +- __this_cpu_inc(xmit_recursion); ++ xmit_rec_inc(); + skb = dev_hard_start_xmit(skb, dev, txq, &rc); +- __this_cpu_dec(xmit_recursion); ++ xmit_rec_dec(); + if (dev_xmit_complete(rc)) { + HARD_TX_UNLOCK(dev, txq); + goto out; +@@ -3524,6 +3571,7 @@ + rps_unlock(sd); + + local_irq_restore(flags); ++ preempt_check_resched_rt(); + + atomic_long_inc(&skb->dev->rx_dropped); + kfree_skb(skb); +@@ -3542,7 +3590,7 @@ + struct rps_dev_flow voidflow, *rflow = &voidflow; + int cpu; + +- preempt_disable(); ++ migrate_disable(); + rcu_read_lock(); + + cpu = get_rps_cpu(skb->dev, skb, &rflow); +@@ -3552,13 +3600,13 @@ + ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail); + + rcu_read_unlock(); +- preempt_enable(); ++ migrate_enable(); + } else + #endif + { + unsigned int qtail; +- ret = enqueue_to_backlog(skb, get_cpu(), &qtail); +- put_cpu(); ++ ret = enqueue_to_backlog(skb, get_cpu_light(), &qtail); ++ put_cpu_light(); + } + return ret; + } +@@ -3592,16 +3640,44 @@ + + trace_netif_rx_ni_entry(skb); + +- preempt_disable(); ++ local_bh_disable(); + err = netif_rx_internal(skb); +- if (local_softirq_pending()) +- do_softirq(); +- preempt_enable(); ++ local_bh_enable(); + + return err; + } + EXPORT_SYMBOL(netif_rx_ni); + ++#ifdef CONFIG_PREEMPT_RT_FULL ++/* ++ * RT runs ksoftirqd as a real time thread and the root_lock is a ++ * "sleeping spinlock". If the trylock fails then we can go into an ++ * infinite loop when ksoftirqd preempted the task which actually ++ * holds the lock, because we requeue q and raise NET_TX softirq ++ * causing ksoftirqd to loop forever. ++ * ++ * It's safe to use spin_lock on RT here as softirqs run in thread ++ * context and cannot deadlock against the thread which is holding ++ * root_lock. ++ * ++ * On !RT the trylock might fail, but there we bail out from the ++ * softirq loop after 10 attempts which we can't do on RT. 
And the ++ * task holding root_lock cannot be preempted, so the only downside of ++ * that trylock is that we need 10 loops to decide that we should have ++ * given up in the first one :) ++ */ ++static inline int take_root_lock(spinlock_t *lock) ++{ ++ spin_lock(lock); ++ return 1; ++} ++#else ++static inline int take_root_lock(spinlock_t *lock) ++{ ++ return spin_trylock(lock); ++} ++#endif ++ + static void net_tx_action(struct softirq_action *h) + { + struct softnet_data *sd = this_cpu_ptr(&softnet_data); +@@ -3643,7 +3719,7 @@ + head = head->next_sched; + + root_lock = qdisc_lock(q); +- if (spin_trylock(root_lock)) { ++ if (take_root_lock(root_lock)) { + smp_mb__before_atomic(); + clear_bit(__QDISC_STATE_SCHED, + &q->state); +@@ -4049,7 +4125,7 @@ + skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) { + if (skb->dev == dev) { + __skb_unlink(skb, &sd->input_pkt_queue); +- kfree_skb(skb); ++ __skb_queue_tail(&sd->tofree_queue, skb); + input_queue_head_incr(sd); + } + } +@@ -4058,10 +4134,13 @@ + skb_queue_walk_safe(&sd->process_queue, skb, tmp) { + if (skb->dev == dev) { + __skb_unlink(skb, &sd->process_queue); +- kfree_skb(skb); ++ __skb_queue_tail(&sd->tofree_queue, skb); + input_queue_head_incr(sd); + } + } ++ ++ if (!skb_queue_empty(&sd->tofree_queue)) ++ raise_softirq_irqoff(NET_RX_SOFTIRQ); + } + + static int napi_gro_complete(struct sk_buff *skb) +@@ -4515,6 +4594,7 @@ + sd->rps_ipi_list = NULL; + + local_irq_enable(); ++ preempt_check_resched_rt(); + + /* Send pending IPI's to kick RPS processing on remote cpus. */ + while (remsd) { +@@ -4528,6 +4608,7 @@ + } else + #endif + local_irq_enable(); ++ preempt_check_resched_rt(); + } + + static bool sd_has_rps_ipi_waiting(struct softnet_data *sd) +@@ -4609,6 +4690,7 @@ + local_irq_save(flags); + ____napi_schedule(this_cpu_ptr(&softnet_data), n); + local_irq_restore(flags); ++ preempt_check_resched_rt(); + } + EXPORT_SYMBOL(__napi_schedule); + +@@ -4885,7 +4967,7 @@ + list_splice_tail(&repoll, &list); + list_splice(&list, &sd->poll_list); + if (!list_empty(&sd->poll_list)) +- __raise_softirq_irqoff(NET_RX_SOFTIRQ); ++ __raise_softirq_irqoff_ksoft(NET_RX_SOFTIRQ); + + net_rps_action_and_irq_enable(sd); + } +@@ -7218,7 +7300,7 @@ + void synchronize_net(void) + { + might_sleep(); +- if (rtnl_is_locked()) ++ if (rtnl_is_locked() && !IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) + synchronize_rcu_expedited(); + else + synchronize_rcu(); +@@ -7459,16 +7541,20 @@ + + raise_softirq_irqoff(NET_TX_SOFTIRQ); + local_irq_enable(); ++ preempt_check_resched_rt(); + + /* Process offline CPU's input_pkt_queue */ + while ((skb = __skb_dequeue(&oldsd->process_queue))) { + netif_rx_ni(skb); + input_queue_head_incr(oldsd); + } +- while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { ++ while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { + netif_rx_ni(skb); + input_queue_head_incr(oldsd); + } ++ while ((skb = __skb_dequeue(&oldsd->tofree_queue))) { ++ kfree_skb(skb); ++ } + + return NOTIFY_OK; + } +@@ -7770,8 +7856,9 @@ + for_each_possible_cpu(i) { + struct softnet_data *sd = &per_cpu(softnet_data, i); + +- skb_queue_head_init(&sd->input_pkt_queue); +- skb_queue_head_init(&sd->process_queue); ++ skb_queue_head_init_raw(&sd->input_pkt_queue); ++ skb_queue_head_init_raw(&sd->process_queue); ++ skb_queue_head_init_raw(&sd->tofree_queue); + INIT_LIST_HEAD(&sd->poll_list); + sd->output_queue_tailp = &sd->output_queue; + #ifdef CONFIG_RPS +diff -Nur linux-4.4.13.orig/net/core/skbuff.c linux-4.4.13/net/core/skbuff.c +--- linux-4.4.13.orig/net/core/skbuff.c 
2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/net/core/skbuff.c 2016-06-22 21:55:08.082523867 +0200 +@@ -63,6 +63,7 @@ + #include + #include + #include ++#include + + #include + #include +@@ -351,6 +352,8 @@ + + static DEFINE_PER_CPU(struct page_frag_cache, netdev_alloc_cache); + static DEFINE_PER_CPU(struct page_frag_cache, napi_alloc_cache); ++static DEFINE_LOCAL_IRQ_LOCK(netdev_alloc_lock); ++static DEFINE_LOCAL_IRQ_LOCK(napi_alloc_cache_lock); + + static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) + { +@@ -358,10 +361,10 @@ + unsigned long flags; + void *data; + +- local_irq_save(flags); ++ local_lock_irqsave(netdev_alloc_lock, flags); + nc = this_cpu_ptr(&netdev_alloc_cache); + data = __alloc_page_frag(nc, fragsz, gfp_mask); +- local_irq_restore(flags); ++ local_unlock_irqrestore(netdev_alloc_lock, flags); + return data; + } + +@@ -380,9 +383,13 @@ + + static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask) + { +- struct page_frag_cache *nc = this_cpu_ptr(&napi_alloc_cache); ++ struct page_frag_cache *nc; ++ void *data; + +- return __alloc_page_frag(nc, fragsz, gfp_mask); ++ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache); ++ data = __alloc_page_frag(nc, fragsz, gfp_mask); ++ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache); ++ return data; + } + + void *napi_alloc_frag(unsigned int fragsz) +@@ -429,13 +436,13 @@ + if (sk_memalloc_socks()) + gfp_mask |= __GFP_MEMALLOC; + +- local_irq_save(flags); ++ local_lock_irqsave(netdev_alloc_lock, flags); + + nc = this_cpu_ptr(&netdev_alloc_cache); + data = __alloc_page_frag(nc, len, gfp_mask); + pfmemalloc = nc->pfmemalloc; + +- local_irq_restore(flags); ++ local_unlock_irqrestore(netdev_alloc_lock, flags); + + if (unlikely(!data)) + return NULL; +@@ -476,9 +483,10 @@ + struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len, + gfp_t gfp_mask) + { +- struct page_frag_cache *nc = this_cpu_ptr(&napi_alloc_cache); ++ struct page_frag_cache *nc; + struct sk_buff *skb; + void *data; ++ bool pfmemalloc; + + len += NET_SKB_PAD + NET_IP_ALIGN; + +@@ -496,7 +504,11 @@ + if (sk_memalloc_socks()) + gfp_mask |= __GFP_MEMALLOC; + ++ nc = &get_locked_var(napi_alloc_cache_lock, napi_alloc_cache); + data = __alloc_page_frag(nc, len, gfp_mask); ++ pfmemalloc = nc->pfmemalloc; ++ put_locked_var(napi_alloc_cache_lock, napi_alloc_cache); ++ + if (unlikely(!data)) + return NULL; + +@@ -507,7 +519,7 @@ + } + + /* use OR instead of assignment to avoid clearing of bits in mask */ +- if (nc->pfmemalloc) ++ if (pfmemalloc) + skb->pfmemalloc = 1; + skb->head_frag = 1; + +diff -Nur linux-4.4.13.orig/net/core/sock.c linux-4.4.13/net/core/sock.c +--- linux-4.4.13.orig/net/core/sock.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/net/core/sock.c 2016-06-22 21:55:08.082523867 +0200 +@@ -2435,12 +2435,11 @@ + if (sk->sk_lock.owned) + __lock_sock(sk); + sk->sk_lock.owned = 1; +- spin_unlock(&sk->sk_lock.slock); ++ spin_unlock_bh(&sk->sk_lock.slock); + /* + * The sk_lock has mutex_lock() semantics here: + */ + mutex_acquire(&sk->sk_lock.dep_map, subclass, 0, _RET_IP_); +- local_bh_enable(); + } + EXPORT_SYMBOL(lock_sock_nested); + +diff -Nur linux-4.4.13.orig/net/ipv4/icmp.c linux-4.4.13/net/ipv4/icmp.c +--- linux-4.4.13.orig/net/ipv4/icmp.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/net/ipv4/icmp.c 2016-06-22 21:55:08.082523867 +0200 +@@ -69,6 +69,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -891,6 +892,30 @@ + } + + /* ++ * 32bit and 64bit 
have different timestamp length, so we check for ++ * the cookie at offset 20 and verify it is repeated at offset 50 ++ */ ++#define CO_POS0 20 ++#define CO_POS1 50 ++#define CO_SIZE sizeof(int) ++#define ICMP_SYSRQ_SIZE 57 ++ ++/* ++ * We got a ICMP_SYSRQ_SIZE sized ping request. Check for the cookie ++ * pattern and if it matches send the next byte as a trigger to sysrq. ++ */ ++static void icmp_check_sysrq(struct net *net, struct sk_buff *skb) ++{ ++ int cookie = htonl(net->ipv4.sysctl_icmp_echo_sysrq); ++ char *p = skb->data; ++ ++ if (!memcmp(&cookie, p + CO_POS0, CO_SIZE) && ++ !memcmp(&cookie, p + CO_POS1, CO_SIZE) && ++ p[CO_POS0 + CO_SIZE] == p[CO_POS1 + CO_SIZE]) ++ handle_sysrq(p[CO_POS0 + CO_SIZE]); ++} ++ ++/* + * Handle ICMP_ECHO ("ping") requests. + * + * RFC 1122: 3.2.2.6 MUST have an echo server that answers ICMP echo +@@ -917,6 +942,11 @@ + icmp_param.data_len = skb->len; + icmp_param.head_len = sizeof(struct icmphdr); + icmp_reply(&icmp_param, skb); ++ ++ if (skb->len == ICMP_SYSRQ_SIZE && ++ net->ipv4.sysctl_icmp_echo_sysrq) { ++ icmp_check_sysrq(net, skb); ++ } + } + /* should there be an ICMP stat for ignored echos? */ + return true; +diff -Nur linux-4.4.13.orig/net/ipv4/sysctl_net_ipv4.c linux-4.4.13/net/ipv4/sysctl_net_ipv4.c +--- linux-4.4.13.orig/net/ipv4/sysctl_net_ipv4.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/net/ipv4/sysctl_net_ipv4.c 2016-06-22 21:55:08.082523867 +0200 +@@ -818,6 +818,13 @@ + .proc_handler = proc_dointvec + }, + { ++ .procname = "icmp_echo_sysrq", ++ .data = &init_net.ipv4.sysctl_icmp_echo_sysrq, ++ .maxlen = sizeof(int), ++ .mode = 0644, ++ .proc_handler = proc_dointvec ++ }, ++ { + .procname = "icmp_ignore_bogus_error_responses", + .data = &init_net.ipv4.sysctl_icmp_ignore_bogus_error_responses, + .maxlen = sizeof(int), +diff -Nur linux-4.4.13.orig/net/mac80211/rx.c linux-4.4.13/net/mac80211/rx.c +--- linux-4.4.13.orig/net/mac80211/rx.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/net/mac80211/rx.c 2016-06-22 21:55:08.082523867 +0200 +@@ -3574,7 +3574,7 @@ + struct ieee80211_supported_band *sband; + struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); + +- WARN_ON_ONCE(softirq_count() == 0); ++ WARN_ON_ONCE_NONRT(softirq_count() == 0); + + if (WARN_ON(status->band >= IEEE80211_NUM_BANDS)) + goto drop; +diff -Nur linux-4.4.13.orig/net/netfilter/core.c linux-4.4.13/net/netfilter/core.c +--- linux-4.4.13.orig/net/netfilter/core.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/net/netfilter/core.c 2016-06-22 21:55:08.082523867 +0200 +@@ -22,11 +22,17 @@ + #include + #include + #include ++#include + #include + #include + + #include "nf_internals.h" + ++#ifdef CONFIG_PREEMPT_RT_BASE ++DEFINE_LOCAL_IRQ_LOCK(xt_write_lock); ++EXPORT_PER_CPU_SYMBOL(xt_write_lock); ++#endif ++ + static DEFINE_MUTEX(afinfo_mutex); + + const struct nf_afinfo __rcu *nf_afinfo[NFPROTO_NUMPROTO] __read_mostly; +diff -Nur linux-4.4.13.orig/net/packet/af_packet.c linux-4.4.13/net/packet/af_packet.c +--- linux-4.4.13.orig/net/packet/af_packet.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/net/packet/af_packet.c 2016-06-22 21:55:08.086523867 +0200 +@@ -63,6 +63,7 @@ + #include + #include + #include ++#include + #include + #include + #include +@@ -695,7 +696,7 @@ + if (BLOCK_NUM_PKTS(pbd)) { + while (atomic_read(&pkc->blk_fill_in_prog)) { + /* Waiting for skb_copy_bits to finish... 
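/*
 * Illustrative only, not patch text: a payload that would satisfy
 * icmp_check_sysrq() above, assuming net.ipv4.icmp_echo_sysrq holds the
 * example value 0x01020304. The cookie must sit at offsets 20 and 50 of a
 * 57-byte echo payload, each followed by the same sysrq trigger character:
 */
static void fill_sysrq_payload(char payload[ICMP_SYSRQ_SIZE])
{
	int cookie = htonl(0x01020304);		/* example sysctl value */

	memset(payload, 0, ICMP_SYSRQ_SIZE);
	memcpy(payload + CO_POS0, &cookie, CO_SIZE);
	memcpy(payload + CO_POS1, &cookie, CO_SIZE);
	payload[CO_POS0 + CO_SIZE] = 's';	/* sysrq 's': emergency sync */
	payload[CO_POS1 + CO_SIZE] = 's';	/* trigger must match at both offsets */
}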
*/ +- cpu_relax(); ++ cpu_chill(); + } + } + +@@ -957,7 +958,7 @@ + if (!(status & TP_STATUS_BLK_TMO)) { + while (atomic_read(&pkc->blk_fill_in_prog)) { + /* Waiting for skb_copy_bits to finish... */ +- cpu_relax(); ++ cpu_chill(); + } + } + prb_close_block(pkc, pbd, po, status); +diff -Nur linux-4.4.13.orig/net/rds/ib_rdma.c linux-4.4.13/net/rds/ib_rdma.c +--- linux-4.4.13.orig/net/rds/ib_rdma.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/net/rds/ib_rdma.c 2016-06-22 21:55:08.086523867 +0200 +@@ -34,6 +34,7 @@ + #include + #include + #include ++#include + + #include "rds.h" + #include "ib.h" +@@ -313,7 +314,7 @@ + for_each_online_cpu(cpu) { + flag = &per_cpu(clean_list_grace, cpu); + while (test_bit(CLEAN_LIST_BUSY_BIT, flag)) +- cpu_relax(); ++ cpu_chill(); + } + } + +diff -Nur linux-4.4.13.orig/net/sched/sch_generic.c linux-4.4.13/net/sched/sch_generic.c +--- linux-4.4.13.orig/net/sched/sch_generic.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/net/sched/sch_generic.c 2016-06-22 21:55:08.086523867 +0200 +@@ -893,7 +893,7 @@ + /* Wait for outstanding qdisc_run calls. */ + list_for_each_entry(dev, head, close_list) + while (some_qdisc_is_busy(dev)) +- yield(); ++ msleep(1); + } + + void dev_deactivate(struct net_device *dev) +diff -Nur linux-4.4.13.orig/net/sunrpc/svc_xprt.c linux-4.4.13/net/sunrpc/svc_xprt.c +--- linux-4.4.13.orig/net/sunrpc/svc_xprt.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/net/sunrpc/svc_xprt.c 2016-06-22 21:55:08.086523867 +0200 +@@ -340,7 +340,7 @@ + goto out; + } + +- cpu = get_cpu(); ++ cpu = get_cpu_light(); + pool = svc_pool_for_cpu(xprt->xpt_server, cpu); + + atomic_long_inc(&pool->sp_stats.packets); +@@ -376,7 +376,7 @@ + + atomic_long_inc(&pool->sp_stats.threads_woken); + wake_up_process(rqstp->rq_task); +- put_cpu(); ++ put_cpu_light(); + goto out; + } + rcu_read_unlock(); +@@ -397,7 +397,7 @@ + goto redo_search; + } + rqstp = NULL; +- put_cpu(); ++ put_cpu_light(); + out: + trace_svc_xprt_do_enqueue(xprt, rqstp); + } +diff -Nur linux-4.4.13.orig/scripts/mkcompile_h linux-4.4.13/scripts/mkcompile_h +--- linux-4.4.13.orig/scripts/mkcompile_h 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/scripts/mkcompile_h 2016-06-22 21:55:08.086523867 +0200 +@@ -4,7 +4,8 @@ + ARCH=$2 + SMP=$3 + PREEMPT=$4 +-CC=$5 ++RT=$5 ++CC=$6 + + vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; } + +@@ -57,6 +58,7 @@ + CONFIG_FLAGS="" + if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi + if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi ++if [ -n "$RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS RT"; fi + UTS_VERSION="$UTS_VERSION $CONFIG_FLAGS $TIMESTAMP" + + # Truncate to maximum length +diff -Nur linux-4.4.13.orig/sound/core/pcm_native.c linux-4.4.13/sound/core/pcm_native.c +--- linux-4.4.13.orig/sound/core/pcm_native.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/sound/core/pcm_native.c 2016-06-22 21:55:08.086523867 +0200 +@@ -135,7 +135,7 @@ + void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream) + { + if (!substream->pcm->nonatomic) +- local_irq_disable(); ++ local_irq_disable_nort(); + snd_pcm_stream_lock(substream); + } + EXPORT_SYMBOL_GPL(snd_pcm_stream_lock_irq); +@@ -150,7 +150,7 @@ + { + snd_pcm_stream_unlock(substream); + if (!substream->pcm->nonatomic) +- local_irq_enable(); ++ local_irq_enable_nort(); + } + EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irq); + +@@ -158,7 +158,7 @@ + { + unsigned long flags = 0; + if (!substream->pcm->nonatomic) +- local_irq_save(flags); ++ local_irq_save_nort(flags); + 
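/*
 * For reference, not patch text: cpu_chill() above is added elsewhere in
 * patch-realtime. Spinning with cpu_relax() can livelock on RT when the
 * task that must clear the condition is preempted by the spinner;
 * cpu_chill() yields the CPU for about a tick instead. Simplest equivalent
 * sketch (the shipped version sleeps via an uninterruptible hrtimer wait,
 * so the body below is an approximation):
 */
#ifdef CONFIG_PREEMPT_RT_FULL
# define cpu_chill()	msleep(1)	/* let the blocked-on task run */
#else
# define cpu_chill()	cpu_relax()
#endif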
snd_pcm_stream_lock(substream); + return flags; + } +@@ -176,7 +176,7 @@ + { + snd_pcm_stream_unlock(substream); + if (!substream->pcm->nonatomic) +- local_irq_restore(flags); ++ local_irq_restore_nort(flags); + } + EXPORT_SYMBOL_GPL(snd_pcm_stream_unlock_irqrestore); + +diff -Nur linux-4.4.13.orig/virt/kvm/async_pf.c linux-4.4.13/virt/kvm/async_pf.c +--- linux-4.4.13.orig/virt/kvm/async_pf.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/virt/kvm/async_pf.c 2016-06-22 21:55:08.086523867 +0200 +@@ -98,8 +98,8 @@ + * This memory barrier pairs with prepare_to_wait's set_current_state() + */ + smp_mb(); +- if (waitqueue_active(&vcpu->wq)) +- wake_up_interruptible(&vcpu->wq); ++ if (swait_active(&vcpu->wq)) ++ swake_up(&vcpu->wq); + + mmput(mm); + kvm_put_kvm(vcpu->kvm); +diff -Nur linux-4.4.13.orig/virt/kvm/kvm_main.c linux-4.4.13/virt/kvm/kvm_main.c +--- linux-4.4.13.orig/virt/kvm/kvm_main.c 2016-06-08 03:14:51.000000000 +0200 ++++ linux-4.4.13/virt/kvm/kvm_main.c 2016-06-22 21:55:08.086523867 +0200 +@@ -226,8 +226,7 @@ + vcpu->kvm = kvm; + vcpu->vcpu_id = id; + vcpu->pid = NULL; +- vcpu->halt_poll_ns = 0; +- init_waitqueue_head(&vcpu->wq); ++ init_swait_queue_head(&vcpu->wq); + kvm_async_pf_vcpu_init(vcpu); + + vcpu->pre_pcpu = -1; +@@ -2003,7 +2002,7 @@ + void kvm_vcpu_block(struct kvm_vcpu *vcpu) + { + ktime_t start, cur; +- DEFINE_WAIT(wait); ++ DECLARE_SWAITQUEUE(wait); + bool waited = false; + u64 block_ns; + +@@ -2028,7 +2027,7 @@ + kvm_arch_vcpu_blocking(vcpu); + + for (;;) { +- prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); ++ prepare_to_swait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE); + + if (kvm_vcpu_check_block(vcpu) < 0) + break; +@@ -2037,7 +2036,7 @@ + schedule(); + } + +- finish_wait(&vcpu->wq, &wait); ++ finish_swait(&vcpu->wq, &wait); + cur = ktime_get(); + + kvm_arch_vcpu_unblocking(vcpu); +@@ -2069,11 +2068,11 @@ + { + int me; + int cpu = vcpu->cpu; +- wait_queue_head_t *wqp; ++ struct swait_queue_head *wqp; + + wqp = kvm_arch_vcpu_wq(vcpu); +- if (waitqueue_active(wqp)) { +- wake_up_interruptible(wqp); ++ if (swait_active(wqp)) { ++ swake_up(wqp); + ++vcpu->stat.halt_wakeup; + } + +@@ -2174,7 +2173,7 @@ + continue; + if (vcpu == me) + continue; +- if (waitqueue_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu)) ++ if (swait_active(&vcpu->wq) && !kvm_arch_vcpu_runnable(vcpu)) + continue; + if (!kvm_vcpu_eligible_for_directed_yield(vcpu)) + continue; diff --git a/target/linux/patches/4.4.19/revert-sparc.patch b/target/linux/patches/4.4.19/revert-sparc.patch new file mode 100644 index 000000000..296be0e4a --- /dev/null +++ b/target/linux/patches/4.4.19/revert-sparc.patch @@ -0,0 +1,319 @@ +From fff1ade5bd7576b053b6bbc9c9b72c2572092c06 Mon Sep 17 00:00:00 2001 +From: Waldemar Brodkorb +Date: Wed, 6 Jul 2016 08:16:59 +0200 +Subject: [PATCH] Revert "sparc: Harden signal return frame checks." + +This reverts commit 1fda90c39d8ef6acbedfd3cd9bd710a5bcc490c3. 
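The hardened checks being reverted rejected signal frames up front with a
strict alignment test; the restored code goes back to the older
access_ok()-based validation with looser masks, as the hunks below show
(32-bit example):

    if (((unsigned long) fp) & 15)   /* hardened check, removed */
    if (((unsigned long) fp) & 7)    /* restored check */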
+
+Signed-off-by: Waldemar Brodkorb
+---
+ arch/sparc/kernel/signal32.c | 46 ++++++++++++++--------------------------
+ arch/sparc/kernel/signal_32.c | 41 +++++++++++++----------------------
+ arch/sparc/kernel/signal_64.c | 31 ++++++++++-----------------
+ arch/sparc/kernel/sigutil_32.c | 9 +-------
+ arch/sparc/kernel/sigutil_64.c | 10 ++-------
+ 5 files changed, 45 insertions(+), 92 deletions(-)
+
+diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
+index 77655f0..4eed773 100644
+--- a/arch/sparc/kernel/signal32.c
++++ b/arch/sparc/kernel/signal32.c
+@@ -138,24 +138,12 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
+ return 0;
+ }
+
+-/* Checks if the fp is valid. We always build signal frames which are
+- * 16-byte aligned, therefore we can always enforce that the restore
+- * frame has that property as well.
+- */
+-static bool invalid_frame_pointer(void __user *fp, int fplen)
+-{
+- if ((((unsigned long) fp) & 15) ||
+- ((unsigned long)fp) > 0x100000000ULL - fplen)
+- return true;
+- return false;
+-}
+-
+ void do_sigreturn32(struct pt_regs *regs)
+ {
+ struct signal_frame32 __user *sf;
+ compat_uptr_t fpu_save;
+ compat_uptr_t rwin_save;
+- unsigned int psr, ufp;
++ unsigned int psr;
+ unsigned pc, npc;
+ sigset_t set;
+ compat_sigset_t seta;
+@@ -170,16 +158,11 @@ void do_sigreturn32(struct pt_regs *regs)
+ sf = (struct signal_frame32 __user *) regs->u_regs[UREG_FP];
+
+ /* 1. Make sure we are not getting garbage from the user */
+- if (invalid_frame_pointer(sf, sizeof(*sf)))
+- goto segv;
+-
+- if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
+- goto segv;
+-
+- if (ufp & 0x7)
++ if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
++ (((unsigned long) sf) & 3))
+ goto segv;
+
+- if (__get_user(pc, &sf->info.si_regs.pc) ||
++ if (get_user(pc, &sf->info.si_regs.pc) ||
+ __get_user(npc, &sf->info.si_regs.npc))
+ goto segv;
+
+@@ -244,7 +227,7 @@ segv:
+ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
+ {
+ struct rt_signal_frame32 __user *sf;
+- unsigned int psr, pc, npc, ufp;
++ unsigned int psr, pc, npc;
+ compat_uptr_t fpu_save;
+ compat_uptr_t rwin_save;
+ sigset_t set;
+@@ -259,16 +242,11 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
+ sf = (struct rt_signal_frame32 __user *) regs->u_regs[UREG_FP];
+
+ /* 1. Make sure we are not getting garbage from the user */
+- if (invalid_frame_pointer(sf, sizeof(*sf)))
++ if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
++ (((unsigned long) sf) & 3))
+ goto segv;
+
+- if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+- goto segv;
+-
+- if (ufp & 0x7)
+- goto segv;
+-
+- if (__get_user(pc, &sf->regs.pc) ||
++ if (get_user(pc, &sf->regs.pc) ||
+ __get_user(npc, &sf->regs.npc))
+ goto segv;
+
+@@ -329,6 +307,14 @@ segv:
+ force_sig(SIGSEGV, current);
+ }
+
++/* Checks if the fp is valid */
++static int invalid_frame_pointer(void __user *fp, int fplen)
++{
++ if ((((unsigned long) fp) & 7) || ((unsigned long)fp) > 0x100000000ULL - fplen)
++ return 1;
++ return 0;
++}
++
+ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
+ {
+ unsigned long sp;
+diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
+index c3c12ef..52aa5e4 100644
+--- a/arch/sparc/kernel/signal_32.c
++++ b/arch/sparc/kernel/signal_32.c
+@@ -60,22 +60,10 @@ struct rt_signal_frame {
+ #define SF_ALIGNEDSZ (((sizeof(struct signal_frame) + 7) & (~7)))
+ #define RT_ALIGNEDSZ (((sizeof(struct rt_signal_frame) + 7) & (~7)))
+
+-/* Checks if the fp is valid. We always build signal frames which are
+- * 16-byte aligned, therefore we can always enforce that the restore
+- * frame has that property as well.
+- */
+-static inline bool invalid_frame_pointer(void __user *fp, int fplen)
+-{
+- if ((((unsigned long) fp) & 15) || !__access_ok((unsigned long)fp, fplen))
+- return true;
+-
+- return false;
+-}
+-
+ asmlinkage void do_sigreturn(struct pt_regs *regs)
+ {
+- unsigned long up_psr, pc, npc, ufp;
+ struct signal_frame __user *sf;
++ unsigned long up_psr, pc, npc;
+ sigset_t set;
+ __siginfo_fpu_t __user *fpu_save;
+ __siginfo_rwin_t __user *rwin_save;
+@@ -89,13 +77,10 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
+ sf = (struct signal_frame __user *) regs->u_regs[UREG_FP];
+
+ /* 1. Make sure we are not getting garbage from the user */
+- if (!invalid_frame_pointer(sf, sizeof(*sf)))
+- goto segv_and_exit;
+-
+- if (get_user(ufp, &sf->info.si_regs.u_regs[UREG_FP]))
++ if (!access_ok(VERIFY_READ, sf, sizeof(*sf)))
+ goto segv_and_exit;
+
+- if (ufp & 0x7)
++ if (((unsigned long) sf) & 3)
+ goto segv_and_exit;
+
+ err = __get_user(pc, &sf->info.si_regs.pc);
+@@ -142,7 +127,7 @@ segv_and_exit:
+ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
+ {
+ struct rt_signal_frame __user *sf;
+- unsigned int psr, pc, npc, ufp;
++ unsigned int psr, pc, npc;
+ __siginfo_fpu_t __user *fpu_save;
+ __siginfo_rwin_t __user *rwin_save;
+ sigset_t set;
+@@ -150,13 +135,8 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
+
+ synchronize_user_stack();
+ sf = (struct rt_signal_frame __user *) regs->u_regs[UREG_FP];
+- if (!invalid_frame_pointer(sf, sizeof(*sf)))
+- goto segv;
+-
+- if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
+- goto segv;
+-
+- if (ufp & 0x7)
++ if (!access_ok(VERIFY_READ, sf, sizeof(*sf)) ||
++ (((unsigned long) sf) & 0x03))
+ goto segv;
+
+ err = __get_user(pc, &sf->regs.pc);
+@@ -198,6 +178,15 @@ segv:
+ force_sig(SIGSEGV, current);
+ }
+
++/* Checks if the fp is valid */
++static inline int invalid_frame_pointer(void __user *fp, int fplen)
++{
++ if ((((unsigned long) fp) & 7) || !__access_ok((unsigned long)fp, fplen))
++ return 1;
++
++ return 0;
++}
++
+ static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
+ {
+ unsigned long sp = regs->u_regs[UREG_FP];
+diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
+index 5ee930c..39aaec1 100644
+--- a/arch/sparc/kernel/signal_64.c
++++ b/arch/sparc/kernel/signal_64.c
+@@ -234,17 +234,6 @@ do_sigsegv:
+ goto out;
+ }
+
+-/* Checks if the fp is valid. We always build rt signal frames which
+- * are 16-byte aligned, therefore we can always enforce that the
+- * restore frame has that property as well.
+- */
+-static bool invalid_frame_pointer(void __user *fp)
+-{
+- if (((unsigned long) fp) & 15)
+- return true;
+- return false;
+-}
+-
+ struct rt_signal_frame {
+ struct sparc_stackf ss;
+ siginfo_t info;
+@@ -257,8 +246,8 @@ struct rt_signal_frame {
+
+ void do_rt_sigreturn(struct pt_regs *regs)
+ {
+- unsigned long tpc, tnpc, tstate, ufp;
+ struct rt_signal_frame __user *sf;
++ unsigned long tpc, tnpc, tstate;
+ __siginfo_fpu_t __user *fpu_save;
+ __siginfo_rwin_t __user *rwin_save;
+ sigset_t set;
+@@ -272,16 +261,10 @@ void do_rt_sigreturn(struct pt_regs *regs)
+ (regs->u_regs [UREG_FP] + STACK_BIAS);
+
+ /* 1. Make sure we are not getting garbage from the user */
+- if (invalid_frame_pointer(sf))
+- goto segv;
+-
+- if (get_user(ufp, &sf->regs.u_regs[UREG_FP]))
++ if (((unsigned long) sf) & 3)
+ goto segv;
+
+- if ((ufp + STACK_BIAS) & 0x7)
+- goto segv;
+-
+- err = __get_user(tpc, &sf->regs.tpc);
++ err = get_user(tpc, &sf->regs.tpc);
+ err |= __get_user(tnpc, &sf->regs.tnpc);
+ if (test_thread_flag(TIF_32BIT)) {
+ tpc &= 0xffffffff;
+@@ -325,6 +308,14 @@ segv:
+ force_sig(SIGSEGV, current);
+ }
+
++/* Checks if the fp is valid */
++static int invalid_frame_pointer(void __user *fp)
++{
++ if (((unsigned long) fp) & 15)
++ return 1;
++ return 0;
++}
++
+ static inline void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, unsigned long framesize)
+ {
+ unsigned long sp = regs->u_regs[UREG_FP] + STACK_BIAS;
+diff --git a/arch/sparc/kernel/sigutil_32.c b/arch/sparc/kernel/sigutil_32.c
+index e5fe8ce..0f6eebe 100644
+--- a/arch/sparc/kernel/sigutil_32.c
++++ b/arch/sparc/kernel/sigutil_32.c
+@@ -48,10 +48,6 @@ int save_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
+ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
+ {
+ int err;
+-
+- if (((unsigned long) fpu) & 3)
+- return -EFAULT;
+-
+ #ifdef CONFIG_SMP
+ if (test_tsk_thread_flag(current, TIF_USEDFPU))
+ regs->psr &= ~PSR_EF;
+@@ -101,10 +97,7 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
+ struct thread_info *t = current_thread_info();
+ int i, wsaved, err;
+
+- if (((unsigned long) rp) & 3)
+- return -EFAULT;
+-
+- get_user(wsaved, &rp->wsaved);
++ __get_user(wsaved, &rp->wsaved);
+ if (wsaved > NSWINS)
+ return -EFAULT;
+
+diff --git a/arch/sparc/kernel/sigutil_64.c b/arch/sparc/kernel/sigutil_64.c
+index 36aadcb..387834a 100644
+--- a/arch/sparc/kernel/sigutil_64.c
++++ b/arch/sparc/kernel/sigutil_64.c
+@@ -37,10 +37,7 @@ int restore_fpu_state(struct pt_regs *regs, __siginfo_fpu_t __user *fpu)
+ unsigned long fprs;
+ int err;
+
+- if (((unsigned long) fpu) & 7)
+- return -EFAULT;
+-
+- err = get_user(fprs, &fpu->si_fprs);
++ err = __get_user(fprs, &fpu->si_fprs);
+ fprs_write(0);
+ regs->tstate &= ~TSTATE_PEF;
+ if (fprs & FPRS_DL)
+@@ -75,10 +72,7 @@ int restore_rwin_state(__siginfo_rwin_t __user *rp)
+ struct thread_info *t = current_thread_info();
+ int i, wsaved, err;
+
+- if (((unsigned long) rp) & 7)
+- return -EFAULT;
+-
+- get_user(wsaved, &rp->wsaved);
++ __get_user(wsaved, &rp->wsaved);
+ if (wsaved > NSWINS)
+ return -EFAULT;
+
+--
+1.7.10.4
+
diff --git a/target/linux/patches/4.4.19/startup.patch b/target/linux/patches/4.4.19/startup.patch
new file mode 100644
index 000000000..e54ac19a6
--- /dev/null
+++ b/target/linux/patches/4.4.19/startup.patch
@@ -0,0 +1,34 @@
+diff -Nur linux-4.4.6.orig/init/initramfs.c linux-4.4.6/init/initramfs.c
+--- linux-4.4.6.orig/init/initramfs.c 2016-03-16 16:43:17.000000000 +0100
++++ linux-4.4.6/init/initramfs.c 2016-03-27 14:15:43.931044532 +0200
+@@ -653,6 +653,9 @@
+ */
+ load_default_modules();
+ }
++#ifdef CONFIG_DEVTMPFS_MOUNT
++ devtmpfs_mount("dev");
++#endif
+ return 0;
+ }
+ rootfs_initcall(populate_rootfs);
+diff -Nur linux-4.4.6.orig/init/main.c linux-4.4.6/init/main.c
+--- linux-4.4.6.orig/init/main.c 2016-03-16 16:43:17.000000000 +0100
++++ linux-4.4.6/init/main.c 2016-03-27 14:15:43.935044783 +0200
+@@ -1011,6 +1011,8 @@
+ if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
+ pr_err("Warning: unable to open an initial console.\n");
+
++ printk(KERN_WARNING "Starting Linux (built with OpenADK).\n");
++
+ (void) sys_dup(0);
+ (void) sys_dup(0);
+ /*
+@@ -1019,7 +1021,7 @@
+ */
+
+ if (!ramdisk_execute_command)
+- ramdisk_execute_command = "/init";
++ ramdisk_execute_command = "/sbin/init";
+
+ if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
+ ramdisk_execute_command = NULL;
diff --git a/target/linux/patches/4.4.19/use-libgcc-for-sh.patch b/target/linux/patches/4.4.19/use-libgcc-for-sh.patch
new file mode 100644
index 000000000..6420219b0
--- /dev/null
+++ b/target/linux/patches/4.4.19/use-libgcc-for-sh.patch
@@ -0,0 +1,29 @@
+diff -Nur linux-4.1.13.orig/arch/sh/Makefile linux-4.1.13/arch/sh/Makefile
+--- linux-4.1.13.orig/arch/sh/Makefile 2015-11-09 23:34:10.000000000 +0100
++++ linux-4.1.13/arch/sh/Makefile 2015-12-06 19:59:31.000000000 +0100
+@@ -200,7 +206,9 @@
+ KBUILD_CFLAGS += -fasynchronous-unwind-tables
+ endif
+
+-libs-$(CONFIG_SUPERH32) := arch/sh/lib/ $(libs-y)
++LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-file-name=libgcc.a)
++
++libs-$(CONFIG_SUPERH32) := arch/sh/lib/ $(libs-y) $(LIBGCC)
+ libs-$(CONFIG_SUPERH64) := arch/sh/lib64/ $(libs-y)
+
+ BOOT_TARGETS = uImage uImage.bz2 uImage.gz uImage.lzma uImage.xz uImage.lzo \
+diff -Nur linux-4.1.13.orig/arch/sh/lib/Makefile linux-4.1.13/arch/sh/lib/Makefile
+--- linux-4.1.13.orig/arch/sh/lib/Makefile 2015-11-09 23:34:10.000000000 +0100
++++ linux-4.1.13/arch/sh/lib/Makefile 2015-12-06 19:59:14.000000000 +0100
+@@ -5,11 +5,6 @@
+ lib-y = delay.o memmove.o memchr.o \
+ checksum.o strlen.o div64.o div64-generic.o
+
+-# Extracted from libgcc
+-obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
+- ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \
+- udiv_qrnnd.o
+-
+ udivsi3-y := udivsi3_i4i-Os.o
+
+ ifneq ($(CONFIG_CC_OPTIMIZE_FOR_SIZE),y)
diff --git a/target/linux/patches/4.4.19/versatile-nommu.patch b/target/linux/patches/4.4.19/versatile-nommu.patch
new file mode 100644
index 000000000..96140b30c
--- /dev/null
+++ b/target/linux/patches/4.4.19/versatile-nommu.patch
@@ -0,0 +1,16 @@
+diff -Nur linux-4.4.17.orig/arch/arm/mach-versatile/include/mach/hardware.h linux-4.4.17/arch/arm/mach-versatile/include/mach/hardware.h
+--- linux-4.4.17.orig/arch/arm/mach-versatile/include/mach/hardware.h 2016-08-10 11:49:43.000000000 +0200
++++ linux-4.4.17/arch/arm/mach-versatile/include/mach/hardware.h 2016-08-25 23:19:03.691716292 +0200
+@@ -30,8 +30,12 @@
+ #define VERSATILE_PCI_VIRT_BASE (void __iomem *)0xe8000000ul
+ #define VERSATILE_PCI_CFG_VIRT_BASE (void __iomem *)0xe9000000ul
+
++#ifdef CONFIG_MMU
+ /* macro to get at MMIO space when running virtually */
+ #define IO_ADDRESS(x) (((x) & 0x0fffffff) + (((x) >> 4) & 0x0f000000) + 0xf0000000)
++#else
++#define IO_ADDRESS(x) (x)
++#endif
+
+ #define __io_address(n) ((void __iomem __force *)IO_ADDRESS(n))
+
--
cgit v1.2.3