---
 Documentation/hwlat_detector.txt                     |   64 
 Documentation/sysrq.txt                              |   11 
 Documentation/trace/histograms.txt                   |  186 ++
 arch/Kconfig                                         |    1 
 arch/alpha/mm/fault.c                                |    2 
 arch/arm/Kconfig                                     |    1 
 arch/arm/include/asm/cmpxchg.h                       |    2 
 arch/arm/include/asm/futex.h                         |    4 
 arch/arm/include/asm/switch_to.h                     |    8 
 arch/arm/include/asm/thread_info.h                   |    3 
 arch/arm/kernel/asm-offsets.c                        |    1 
 arch/arm/kernel/entry-armv.S                         |   13 
 arch/arm/kernel/process.c                            |   24 
 arch/arm/kernel/signal.c                             |    3 
 arch/arm/kernel/unwind.c                             |   14 
 arch/arm/kvm/arm.c                                   |    4 
 arch/arm/kvm/psci.c                                  |    4 
 arch/arm/mach-at91/at91rm9200_time.c                 |    1 
 arch/arm/mach-exynos/platsmp.c                       |   12 
 arch/arm/mach-hisi/platmcpm.c                        |   26 
 arch/arm/mach-omap2/omap-smp.c                       |   10 
 arch/arm/mach-prima2/platsmp.c                       |   10 
 arch/arm/mach-qcom/platsmp.c                         |   10 
 arch/arm/mach-spear/platsmp.c                        |   10 
 arch/arm/mach-sti/platsmp.c                          |   10 
 arch/arm/mach-ux500/platsmp.c                        |   10 
 arch/arm/mm/fault.c                                  |    8 
 arch/arm/mm/highmem.c                                |   42 
 arch/arm/plat-versatile/platsmp.c                    |   10 
 arch/arm64/Kconfig                                   |    2 
 arch/arm64/include/asm/thread_info.h                 |    3 
 arch/arm64/kernel/asm-offsets.c                      |    1 
 arch/arm64/kernel/entry.S                            |   13 
 arch/arm64/kernel/perf_event.c                       |    2 
 arch/avr32/mm/fault.c                                |    2 
 arch/cris/mm/fault.c                                 |    2 
 arch/frv/mm/fault.c                                  |    2 
 arch/ia64/mm/fault.c                                 |    2 
 arch/m32r/mm/fault.c                                 |    2 
 arch/m68k/mm/fault.c                                 |    2 
 arch/microblaze/mm/fault.c                           |    2 
 arch/mips/Kconfig                                    |    2 
 arch/mips/kernel/signal.c                            |    1 
 arch/mips/mm/fault.c                                 |    2 
 arch/mips/mm/init.c                                  |    4 
 arch/mn10300/mm/fault.c                              |    2 
 arch/parisc/mm/fault.c                               |    2 
 arch/powerpc/Kconfig                                 |    6 
 arch/powerpc/include/asm/kvm_host.h                  |    4 
 arch/powerpc/include/asm/thread_info.h               |   11 
 arch/powerpc/kernel/asm-offsets.c                    |    1 
 arch/powerpc/kernel/entry_32.S                       |   17 
 arch/powerpc/kernel/entry_64.S                       |   14 
 arch/powerpc/kernel/irq.c                            |    2 
 arch/powerpc/kernel/misc_32.S                        |    2 
 arch/powerpc/kernel/misc_64.S                        |    2 
 arch/powerpc/kernel/time.c                           |    2 
 arch/powerpc/kvm/Kconfig                             |    1 
 arch/powerpc/kvm/book3s_hv.c                         |   20 
 arch/powerpc/mm/fault.c                              |    2 
 arch/s390/include/asm/kvm_host.h                     |    2 
 arch/s390/kvm/interrupt.c                            |    8 
 arch/s390/mm/fault.c                                 |    3 
 arch/score/mm/fault.c                                |    2 
 arch/sh/kernel/irq.c                                 |    2 
 arch/sh/mm/fault.c                                   |    2 
 arch/sparc/Kconfig                                   |   10 
 arch/sparc/kernel/irq_64.c                           |    2 
 arch/sparc/kernel/setup_32.c                         |    1 
 arch/sparc/kernel/setup_64.c                         |    8 
 arch/sparc/mm/fault_32.c                             |    2 
 arch/sparc/mm/fault_64.c                             |    2 
 arch/tile/mm/fault.c                                 |    2 
 arch/um/kernel/trap.c                                |    2 
 arch/x86/Kconfig                                     |    8 
 arch/x86/crypto/aesni-intel_glue.c                   |   24 
 arch/x86/crypto/cast5_avx_glue.c                     |   21 
 arch/x86/crypto/glue_helper.c                        |   31 
 arch/x86/include/asm/preempt.h                       |   18 
 arch/x86/include/asm/signal.h                        |   13 
 arch/x86/include/asm/stackprotector.h                |   10 
 arch/x86/include/asm/thread_info.h                   |    6 
 arch/x86/include/asm/uv/uv_bau.h                     |   14 
 arch/x86/include/asm/uv/uv_hub.h                     |    2 
 arch/x86/kernel/apic/io_apic.c                       |    3 
 arch/x86/kernel/apic/x2apic_uv_x.c                   |    2 
 arch/x86/kernel/asm-offsets.c                        |    2 
 arch/x86/kernel/cpu/mcheck/mce.c                     |  126 +
 arch/x86/kernel/entry_32.S                           |   20 
 arch/x86/kernel/entry_64.S                           |   31 
 arch/x86/kernel/irq_32.c                             |    2 
 arch/x86/kernel/process_32.c                         |   32 
 arch/x86/kernel/signal.c                             |    8 
 arch/x86/kernel/traps.c                              |   28 
 arch/x86/kvm/lapic.c                                 |   48 
 arch/x86/kvm/x86.c                                   |    7 
 arch/x86/mm/fault.c                                  |    2 
 arch/x86/mm/highmem_32.c                             |    9 
 arch/x86/mm/iomap_32.c                               |   11 
 arch/x86/platform/uv/tlb_uv.c                        |   26 
 arch/x86/platform/uv/uv_time.c                       |   21 
 arch/xtensa/mm/fault.c                               |    2 
 block/blk-core.c                                     |   19 
 block/blk-ioc.c                                      |    5 
 block/blk-iopoll.c                                   |    3 
 block/blk-mq-cpu.c                                   |   17 
 block/blk-mq.c                                       |   46 
 block/blk-mq.h                                       |    9 
 block/blk-softirq.c                                  |    3 
 block/bounce.c                                       |    4 
 crypto/algapi.c                                      |    4 
 crypto/api.c                                         |    6 
 crypto/internal.h                                    |    4 
 drivers/acpi/acpica/acglobal.h                       |    2 
 drivers/acpi/acpica/hwregs.c                         |    4 
 drivers/acpi/acpica/hwxface.c                        |    4 
 drivers/acpi/acpica/utmutex.c                        |    4 
 drivers/ata/libata-sff.c                             |   12 
 drivers/char/random.c                                |   14 
 drivers/clocksource/tcb_clksrc.c                     |   37 
 drivers/clocksource/timer-atmel-pit.c                |    4 
 drivers/cpufreq/Kconfig.x86                          |    2 
 drivers/gpio/gpio-omap.c                             |   74 -
 drivers/gpu/drm/i915/i915_gem.c                      |    2 
 drivers/gpu/drm/i915/i915_gem_execbuffer.c           |    2 
 drivers/i2c/busses/i2c-omap.c                        |    5 
 drivers/ide/alim15x3.c                               |    4 
 drivers/ide/hpt366.c                                 |    4 
 drivers/ide/ide-io-std.c                             |    8 
 drivers/ide/ide-io.c                                 |    2 
 drivers/ide/ide-iops.c                               |    4 
 drivers/ide/ide-probe.c                              |    4 
 drivers/ide/ide-taskfile.c                           |    6 
 drivers/infiniband/ulp/ipoib/ipoib_multicast.c       |    4 
 drivers/input/gameport/gameport.c                    |    8 
 drivers/leds/trigger/Kconfig                         |    2 
 drivers/md/bcache/Kconfig                            |    1 
 drivers/md/dm.c                                      |    4 
 drivers/md/raid5.c                                   |    7 
 drivers/md/raid5.h                                   |    1 
 drivers/misc/Kconfig                                 |   42 
 drivers/misc/Makefile                                |    1 
 drivers/misc/hwlat_detector.c                        | 1240 +++++++++++++++++++
 drivers/mmc/host/mmci.c                              |    5 
 drivers/mmc/host/sdhci.c                             |   32 
 drivers/net/ethernet/3com/3c59x.c                    |    8 
 drivers/net/ethernet/atheros/atl1c/atl1c_main.c      |    6 
 drivers/net/ethernet/atheros/atl1e/atl1e_main.c      |    3 
 drivers/net/ethernet/chelsio/cxgb/sge.c              |    3 
 drivers/net/ethernet/freescale/gianfar.c             |   12 
 drivers/net/ethernet/neterion/s2io.c                 |    7 
 drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c |    6 
 drivers/net/ethernet/realtek/8139too.c               |    2 
 drivers/net/ethernet/tehuti/tehuti.c                 |    9 
 drivers/net/rionet.c                                 |    6 
 drivers/net/wireless/orinoco/orinoco_usb.c           |    2 
 drivers/pci/access.c                                 |    2 
 drivers/scsi/fcoe/fcoe.c                             |   18 
 drivers/scsi/fcoe/fcoe_ctlr.c                        |    4 
 drivers/scsi/libfc/fc_exch.c                         |    4 
 drivers/scsi/libsas/sas_ata.c                        |    4 
 drivers/scsi/qla2xxx/qla_inline.h                    |    4 
 drivers/thermal/x86_pkg_temp_thermal.c               |   50 
 drivers/tty/serial/8250/8250_core.c                  |   14 
 drivers/tty/serial/amba-pl011.c                      |   15 
 drivers/tty/serial/omap-serial.c                     |   12 
 drivers/usb/core/hcd.c                               |    4 
 drivers/usb/gadget/function/f_fs.c                   |    2 
 drivers/usb/gadget/legacy/inode.c                    |    4 
 fs/aio.c                                             |   24 
 fs/autofs4/autofs_i.h                                |    1 
 fs/autofs4/expire.c                                  |    2 
 fs/buffer.c                                          |   21 
 fs/dcache.c                                          |    5 
 fs/eventpoll.c                                       |    4 
 fs/exec.c                                            |    2 
 fs/jbd/checkpoint.c                                  |    2 
 fs/jbd2/checkpoint.c                                 |    2 
 fs/namespace.c                                       |    8 
 fs/ntfs/aops.c                                       |   14 
 fs/timerfd.c                                         |    5 
 fs/xfs/xfs_linux.h                                   |    2 
 include/acpi/platform/aclinux.h                      |   15 
 include/asm-generic/bug.h                            |   14 
 include/linux/blk-mq.h                               |    1 
 include/linux/blkdev.h                               |    3 
 include/linux/bottom_half.h                          |   12 
 include/linux/buffer_head.h                          |   44 
 include/linux/cgroup.h                               |    2 
 include/linux/completion.h                           |    9 
 include/linux/cpu.h                                  |    4 
 include/linux/delay.h                                |    6 
 include/linux/ftrace_event.h                         |    3 
 include/linux/highmem.h                              |   28 
 include/linux/hrtimer.h                              |   16 
 include/linux/idr.h                                  |    4 
 include/linux/init_task.h                            |   10 
 include/linux/interrupt.h                            |   63 
 include/linux/irq.h                                  |    4 
 include/linux/irq_work.h                             |    1 
 include/linux/irqdesc.h                              |    1 
 include/linux/irqflags.h                             |   29 
 include/linux/jbd_common.h                           |   24 
 include/linux/jump_label.h                           |    3 
 include/linux/kdb.h                                  |    3 
 include/linux/kernel.h                               |    1 
 include/linux/kvm_host.h                             |    4 
 include/linux/lglock.h                               |   27 
 include/linux/list_bl.h                              |   28 
 include/linux/locallock.h                            |  270 ++++
 include/linux/mm_types.h                             |    4 
 include/linux/mutex.h                                |   20 
 include/linux/mutex_rt.h                             |   84 +
 include/linux/netdevice.h                            |    1 
 include/linux/netfilter/x_tables.h                   |    7 
 include/linux/notifier.h                             |   34 
 include/linux/percpu.h                               |   29 
 include/linux/pid.h                                  |    1 
 include/linux/preempt.h                              |   59 
 include/linux/preempt_mask.h                         |   15 
 include/linux/printk.h                               |    3 
 include/linux/radix-tree.h                           |    7 
 include/linux/random.h                               |    2 
 include/linux/rcupdate.h                             |   26 
 include/linux/rcutree.h                              |   18 
 include/linux/rtmutex.h                              |   30 
 include/linux/rwlock_rt.h                            |   99 +
 include/linux/rwlock_types.h                         |    7 
 include/linux/rwlock_types_rt.h                      |   33 
 include/linux/rwsem.h                                |    6 
 include/linux/rwsem_rt.h                             |  134 ++
 include/linux/sched.h                                |  196 ++-
 include/linux/seqlock.h                              |   56 
 include/linux/signal.h                               |    1 
 include/linux/skbuff.h                               |    7 
 include/linux/smp.h                                  |    3 
 include/linux/spinlock.h                             |   12 
 include/linux/spinlock_api_smp.h                     |    4 
 include/linux/spinlock_rt.h                          |  167 ++
 include/linux/spinlock_types.h                       |   79 -
 include/linux/spinlock_types_nort.h                  |   33 
 include/linux/spinlock_types_raw.h                   |   56 
 include/linux/spinlock_types_rt.h                    |   51 
 include/linux/srcu.h                                 |    9 
 include/linux/swap.h                                 |    4 
 include/linux/sysctl.h                               |    1 
 include/linux/thread_info.h                          |   12 
 include/linux/timer.h                                |    2 
 include/linux/uaccess.h                              |   30 
 include/linux/uprobes.h                              |    1 
 include/linux/vmstat.h                               |    4 
 include/linux/wait-simple.h                          |  207 +++
 include/linux/wait.h                                 |    1 
 include/linux/work-simple.h                          |   24 
 include/net/dst.h                                    |    2 
 include/net/neighbour.h                              |    4 
 include/net/netns/ipv4.h                             |    1 
 include/trace/events/hist.h                          |   72 +
 include/trace/events/latency_hist.h                  |   29 
 init/Kconfig                                         |    7 
 init/Makefile                                        |    2 
 init/main.c                                          |    1 
 ipc/mqueue.c                                         |   24 
 ipc/msg.c                                            |   16 
 ipc/sem.c                                            |   10 
 kernel/Kconfig.locks                                 |    4 
 kernel/Kconfig.preempt                               |   33 
 kernel/cgroup.c                                      |    9 
 kernel/cpu.c                                         |  325 ++++
 kernel/debug/kdb/kdb_io.c                            |    6 
 kernel/events/core.c                                 |    1 
 kernel/exit.c                                        |    2 
 kernel/fork.c                                        |   40 
 kernel/futex.c                                       |   84 -
 kernel/irq/handle.c                                  |    8 
 kernel/irq/manage.c                                  |  100 +
 kernel/irq/settings.h                                |   12 
 kernel/irq/spurious.c                                |    8 
 kernel/irq_work.c                                    |   51 
 kernel/ksysfs.c                                      |   12 
 kernel/locking/Makefile                              |    9 
 kernel/locking/lglock.c                              |   79 -
 kernel/locking/lockdep.c                             |    2 
 kernel/locking/percpu-rwsem.c                        |    4 
 kernel/locking/rt.c                                  |  456 ++++++
 kernel/locking/rtmutex.c                             |  734 ++++++++++-
 kernel/locking/rtmutex_common.h                      |   14 
 kernel/locking/spinlock.c                            |    7 
 kernel/locking/spinlock_debug.c                      |    5 
 kernel/panic.c                                       |    2 
 kernel/power/hibernate.c                             |    7 
 kernel/power/suspend.c                               |    4 
 kernel/printk/printk.c                               |  139 +-
 kernel/ptrace.c                                      |    7 
 kernel/rcu/tiny.c                                    |    2 
 kernel/rcu/tree.c                                    |  149 ++
 kernel/rcu/tree.h                                    |   15 
 kernel/rcu/tree_plugin.h                             |  170 --
 kernel/rcu/update.c                                  |    2 
 kernel/relay.c                                       |   14 
 kernel/res_counter.c                                 |    8 
 kernel/sched/Makefile                                |    2 
 kernel/sched/completion.c                            |   34 
 kernel/sched/core.c                                  |  465 ++++++-
 kernel/sched/cputime.c                               |   62 
 kernel/sched/deadline.c                              |    1 
 kernel/sched/debug.c                                 |    7 
 kernel/sched/fair.c                                  |   16 
 kernel/sched/features.h                              |    8 
 kernel/sched/rt.c                                    |    1 
 kernel/sched/sched.h                                 |   10 
 kernel/sched/wait-simple.c                           |  115 +
 kernel/sched/work-simple.c                           |  172 ++
 kernel/signal.c                                      |  135 +-
 kernel/softirq.c                                     |  732 +++++++++--
 kernel/stop_machine.c                                |   98 +
 kernel/time/hrtimer.c                                |  346 ++++-
 kernel/time/itimer.c                                 |    1 
 kernel/time/jiffies.c                                |    7 
 kernel/time/ntp.c                                    |   43 
 kernel/time/posix-cpu-timers.c                       |  198 ++-
 kernel/time/posix-timers.c                           |   37 
 kernel/time/tick-common.c                            |   10 
 kernel/time/tick-internal.h                          |    3 
 kernel/time/tick-sched.c                             |   40 
 kernel/time/timekeeping.c                            |    6 
 kernel/time/timer.c                                  |  109 +
 kernel/trace/Kconfig                                 |  104 +
 kernel/trace/Makefile                                |    4 
 kernel/trace/latency_hist.c                          | 1178 ++++++++++++++++++
 kernel/trace/trace.c                                 |   42 
 kernel/trace/trace.h                                 |    2 
 kernel/trace/trace_events.c                          |    2 
 kernel/trace/trace_irqsoff.c                         |   11 
 kernel/trace/trace_output.c                          |   18 
 kernel/user.c                                        |    4 
 kernel/watchdog.c                                    |   15 
 kernel/workqueue.c                                   |  223 ++-
 kernel/workqueue_internal.h                          |    5 
 lib/Kconfig                                          |    1 
 lib/Kconfig.debug                                    |    2 
 lib/debugobjects.c                                   |    5 
 lib/idr.c                                            |   36 
 lib/locking-selftest.c                               |   50 
 lib/percpu_ida.c                                     |   20 
 lib/radix-tree.c                                     |    5 
 lib/scatterlist.c                                    |    6 
 lib/smp_processor_id.c                               |    5 
 localversion-rt                                      |    1 
 mm/Kconfig                                           |    2 
 mm/filemap.c                                         |   11 
 mm/highmem.c                                         |    6 
 mm/memcontrol.c                                      |   26 
 mm/memory.c                                          |   26 
 mm/mmu_context.c                                     |    2 
 mm/page_alloc.c                                      |  142 +-
 mm/slab.h                                            |    4 
 mm/slub.c                                            |  125 +
 mm/swap.c                                            |   34 
 mm/truncate.c                                        |    7 
 mm/vmalloc.c                                         |   13 
 mm/vmstat.c                                          |    6 
 mm/workingset.c                                      |   23 
 net/core/dev.c                                       |  112 +
 net/core/skbuff.c                                    |    6 
 net/core/sock.c                                      |    3 
 net/ipv4/icmp.c                                      |   30 
 net/ipv4/sysctl_net_ipv4.c                           |    7 
 net/mac80211/rx.c                                    |    2 
 net/netfilter/core.c                                 |    6 
 net/packet/af_packet.c                               |    5 
 net/rds/ib_rdma.c                                    |    3 
 net/sched/sch_generic.c                              |    2 
 net/sunrpc/svc_xprt.c                                |    4 
 scripts/mkcompile_h                                  |    4 
 sound/core/pcm_native.c                              |    8 
 virt/kvm/async_pf.c                                  |    4 
 virt/kvm/kvm_main.c                                  |   16 
 378 files changed, 11328 insertions(+), 1777 deletions(-)

Index: linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt
===================================================================
--- /dev/null
+++ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:4 @
+Introduction:
+-------------
+
+The module hwlat_detector is a special-purpose kernel module that is used
+to detect large system latencies induced by the behavior of certain
+underlying hardware or firmware, independent of Linux itself. The code was
+originally developed to detect SMIs (System Management Interrupts) on x86
+systems; however, there is nothing x86-specific about this patchset. It
+was originally written for use by the "RT" patch, since the Real Time
+kernel is highly latency sensitive.
+
+SMIs are usually not serviced by the Linux kernel, which typically does not
+even know that they are occurring. SMIs are instead set up by BIOS code
+and are serviced by BIOS code, usually for "critical" events such as
+management of thermal sensors and fans. Sometimes, though, SMIs are used
+for other tasks, and those tasks can spend an inordinate amount of time in
+the handler (sometimes measured in milliseconds). Obviously this is a
+problem if you are trying to keep event service latencies down in the
+microsecond range.
+
+The hardware latency detector works by hogging all of the CPUs for
+configurable amounts of time (by calling stop_machine()), polling the CPU
+Time Stamp Counter for some period, then looking for gaps in the TSC data.
+Any gap indicates a time when the polling was interrupted, and since the
+machine is stopped and interrupts are turned off, the only thing that
+could cause such a gap is an SMI.
+
+Note that the SMI detector should *NEVER* be used in a production environment.
+It is intended to be run manually to determine if the hardware platform has a
+problem with long system firmware service routines.
+
+Usage:
+------
+
+Loading the module hwlat_detector with the parameter "enabled=1" (or
+toggling the "enable" entry in the "hwlat_detector" debugfs directory) is
+the only step required to start the hwlat_detector. The threshold in
+microseconds (us) above which latency spikes are taken into account can be
+redefined via the "threshold=" parameter.
+
+Example:
+
+	# modprobe hwlat_detector enabled=1 threshold=100
+
+After the module is loaded, it creates a directory named "hwlat_detector"
+under the debugfs mount point, referred to as "/debug/hwlat_detector" in
+this text. It is necessary to have debugfs mounted; the mount point might
+differ on your system (e.g. /sys/kernel/debug).
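+
+If debugfs is not mounted yet, it can be mounted manually, e.g.:
+
+	# mount -t debugfs none /sys/kernel/debug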
+
+The /debug/hwlat_detector interface contains the following files:
+
+count			- number of latency spikes observed since last reset
+enable			- a global enable/disable toggle (0/1), resets count
+max			- maximum hardware latency actually observed (usecs)
+sample			- a pipe from which to read current raw sample data
+			  in the format <timestamp> <latency observed usecs>
+			  (can be opened O_NONBLOCK for a single sample)
+threshold		- minimum latency value to be considered (usecs)
+width			- time period to sample with CPUs held (usecs)
+			  must be less than the total window size (enforced)
+window			- total period of sampling, width being inside (usecs)
+
+By default, width is set to 500,000 and window to 1,000,000, meaning that
+out of every 1,000,000 usecs (1s) the detector samples for 500,000 usecs
+(0.5s). Any observed latency that exceeds the threshold (initially 100
+usecs) is written to a global ring buffer of 8K samples, which is consumed
+by reading from the "sample" (pipe) debugfs file interface.
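+
+For example, to sample 200 ms out of every second and record any gap
+longer than 10 usecs (adjust the path for the actual debugfs mount
+point on your system):
+
+	# echo 10 >/debug/hwlat_detector/threshold
+	# echo 200000 >/debug/hwlat_detector/width
+	# echo 1000000 >/debug/hwlat_detector/window
+	# echo 1 >/debug/hwlat_detector/enable
+	# cat /debug/hwlat_detector/sample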
Index: linux-3.18.13-rt10-r7s4/Documentation/sysrq.txt
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/Documentation/sysrq.txt
+++ linux-3.18.13-rt10-r7s4/Documentation/sysrq.txt
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:62 @ On PowerPC - Press 'ALT - Print Screen (
 On other - If you know of the key combos for other architectures, please
            let me know so I can add them to this section.
 
-On all -  write a character to /proc/sysrq-trigger.  e.g.:
-
+On all -  write a character to /proc/sysrq-trigger, e.g.:
 		echo t > /proc/sysrq-trigger
 
+On all - Enable network SysRq by writing a cookie to icmp_echo_sysrq, e.g.
+		echo 0x01020304 >/proc/sys/net/ipv4/icmp_echo_sysrq
+	 Send an ICMP echo request with this pattern plus the particular
+	 SysRq command key. Example:
+		# ping -c1 -s57 -p0102030468
+	 will trigger the SysRq-H (help) command.
+
+
 *  What are the 'command' keys?
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 'b'     - Will immediately reboot the system without syncing or unmounting
Index: linux-3.18.13-rt10-r7s4/Documentation/trace/histograms.txt
===================================================================
--- /dev/null
+++ linux-3.18.13-rt10-r7s4/Documentation/trace/histograms.txt
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:4 @
+		Using the Linux Kernel Latency Histograms
+
+
+This document gives a short explanation of how to enable, configure, and
+use latency histograms. Latency histograms are primarily relevant in the
+context of real-time enabled kernels (CONFIG_PREEMPT/CONFIG_PREEMPT_RT)
+and are used in the quality management of the Linux real-time
+capabilities.
+
+
+* Purpose of latency histograms
+
+A latency histogram continuously accumulates the frequencies of latency
+data. There are two types of histograms:
+- potential sources of latencies
+- effective latencies
+
+
+* Potential sources of latencies
+
+Potential sources of latencies are code segments where interrupts,
+preemption or both are disabled (aka critical sections). To create
+histograms of potential sources of latency, the kernel stores the time
+stamp at the start of a critical section, determines the time elapsed
+when the end of the section is reached, and increments the frequency
+counter of that latency value - irrespective of whether any concurrently
+running process is affected by latency or not.
+- Configuration items (in the Kernel hacking/Tracers submenu)
+  CONFIG_INTERRUPT_OFF_LATENCY
+  CONFIG_PREEMPT_OFF_LATENCY
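+
+Once a kernel is built with these options and debugfs is mounted (see
+the Usage section below), these histograms can be enabled at run time,
+e.g.:
+
+echo 1 >/sys/kernel/debug/tracing/latency_hist/enable/preemptirqsoff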
+
+
+* Effective latencies
+
+Effective latencies are those that actually occur during wakeup of a
+process. To
+determine effective latencies, the kernel stores the time stamp when a
+process is scheduled to be woken up, and determines the duration of the
+wakeup time shortly before control is passed over to this process. Note
+that the apparent latency in user space may be somewhat longer, since the
+process may be interrupted after control is passed over to it but before
+the execution in user space takes place. Simply measuring the interval
+between enqueuing and wakeup may also not be appropriate in cases when a
+process is scheduled as a result of a timer expiration. The timer may have
+missed its deadline, e.g. due to disabled interrupts, but this latency
+would not be registered. Therefore, the offsets of missed timers are
+recorded in a separate histogram. If both wakeup latency and missed timer
+offsets are configured and enabled, a third histogram may be enabled that
+records the overall latency as a sum of the timer latency, if any, and the
+wakeup latency. This histogram is called "timerandwakeup".
+- Configuration items (in the Kernel hacking/Tracers submenu)
+  CONFIG_WAKEUP_LATENCY
+  CONFIG_MISSED_TIMER_OFFSETS
+
+
+* Usage
+
+The interface to the administration of the latency histograms is located
+in the debugfs file system. To mount it, either enter
+
+mount -t sysfs nodev /sys
+mount -t debugfs nodev /sys/kernel/debug
+
+from shell command line level, or add
+
+nodev	/sys			sysfs	defaults	0 0
+nodev	/sys/kernel/debug	debugfs	defaults	0 0
+
+to the file /etc/fstab. All latency histogram related files are then
+available in the directory /sys/kernel/debug/tracing/latency_hist. A
+particular histogram type is enabled by writing non-zero to the related
+variable in the /sys/kernel/debug/tracing/latency_hist/enable directory;
+an example follows the file list below. Select "preemptirqsoff" for the
+histograms of potential sources of latencies, "wakeup" for histograms of
+effective latencies, and so on. The histogram data - one per CPU - are
+available in the files
+
+/sys/kernel/debug/tracing/latency_hist/preemptoff/CPUx
+/sys/kernel/debug/tracing/latency_hist/irqsoff/CPUx
+/sys/kernel/debug/tracing/latency_hist/preemptirqsoff/CPUx
+/sys/kernel/debug/tracing/latency_hist/wakeup/CPUx
+/sys/kernel/debug/tracing/latency_hist/wakeup/sharedprio/CPUx
+/sys/kernel/debug/tracing/latency_hist/missed_timer_offsets/CPUx
+/sys/kernel/debug/tracing/latency_hist/timerandwakeup/CPUx
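+
+For example, to enable the wakeup latency histograms:
+
+echo 1 >/sys/kernel/debug/tracing/latency_hist/enable/wakeup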
+
+The histograms are reset by writing non-zero to the file "reset" in a
+particular latency directory. To reset all latency data, use
+
+#!/bin/sh
+
+TRACINGDIR=/sys/kernel/debug/tracing
+HISTDIR=$TRACINGDIR/latency_hist
+
+if test -d $HISTDIR
+then
+  cd $HISTDIR
+  for i in `find . | grep /reset$`
+  do
+    echo 1 >$i
+  done
+fi
+
+
+* Data format
+
+Latency data are stored with a resolution of one microsecond. The
+maximum latency is 10,240 microseconds. The data are only valid if the
+overflow register is empty. Every output line contains the latency in
+microseconds in the first column and the number of samples in the second
+column. To display only lines with a positive latency count, use, for
+example,
+
+grep -v " 0$" /sys/kernel/debug/tracing/latency_hist/preemptoff/CPU0
+
+#Minimum latency: 0 microseconds.
+#Average latency: 0 microseconds.
+#Maximum latency: 25 microseconds.
+#Total samples: 3104770694
+#There are 0 samples greater or equal than 10240 microseconds
+#usecs	         samples
+    0	      2984486876
+    1	        49843506
+    2	        58219047
+    3	         5348126
+    4	         2187960
+    5	         3388262
+    6	          959289
+    7	          208294
+    8	           40420
+    9	            4485
+   10	           14918
+   11	           18340
+   12	           25052
+   13	           19455
+   14	            5602
+   15	             969
+   16	              47
+   17	              18
+   18	              14
+   19	               1
+   20	               3
+   21	               2
+   22	               5
+   23	               2
+   25	               1
+
+
+* Wakeup latency of a selected process
+
+To only collect wakeup latency data of a particular process, write the
+PID of the requested process to
+
+/sys/kernel/debug/tracing/latency_hist/wakeup/pid
+
+PIDs are not considered if this variable is set to 0.
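+
+For example, to restrict wakeup latency recording to a process with
+PID 1234 (an arbitrary example PID):
+
+echo 1234 >/sys/kernel/debug/tracing/latency_hist/wakeup/pid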
+
+
+* Details of the process with the highest wakeup latency so far
+
+Selected data of the process that suffered from the highest wakeup
+latency that occurred on a particular CPU are available in the file
+
+/sys/kernel/debug/tracing/latency_hist/wakeup/max_latency-CPUx.
+
+In addition, other relevant system data at the time when the
+latency occurred are given.
+
+The format of the data is (all in one line):
+<PID> <Priority> <Latency> (<Timeroffset>) <Command> \
+<- <PID> <Priority> <Command> <Timestamp>
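+
+A hypothetical example line (all values are purely illustrative):
+
+1234 8 23 (0) foo <- 4321 120 bar 1278.317892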
+
+The value of <Timeroffset> is only relevant in the combined timer
+and wakeup latency recording. In the wakeup recording, it is
+always 0, in the missed_timer_offsets recording, it is the same
+as <Latency>.
+
+When retrospectively searching for the origin of a latency while
+tracing was not enabled, it may be helpful to know the name and
+some basic data of the task that (finally) switched to the
+late real-time task. In addition to the victim's data, the
+data of the possible culprit are therefore displayed after the
+"<-" symbol.
+
+Finally, the timestamp of when the latency occurred, expressed as
+<seconds>.<microseconds> since the most recent system boot, is
+provided.
+
+These data are also reset when the wakeup histogram is reset.
Index: linux-3.18.13-rt10-r7s4/arch/Kconfig
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/Kconfig
+++ linux-3.18.13-rt10-r7s4/arch/Kconfig
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:9 @ config OPROFILE
 	tristate "OProfile system profiling"
 	depends on PROFILING
 	depends on HAVE_OPROFILE
+	depends on !PREEMPT_RT_FULL
 	select RING_BUFFER
 	select RING_BUFFER_ALLOW_SWAP
 	help
Index: linux-3.18.13-rt10-r7s4/arch/alpha/mm/fault.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/alpha/mm/fault.c
+++ linux-3.18.13-rt10-r7s4/arch/alpha/mm/fault.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:110 @ do_page_fault(unsigned long address, uns
 
 	/* If we're in an interrupt context, or have no user context,
 	   we must not take the fault.  */
-	if (!mm || in_atomic())
+	if (!mm || pagefault_disabled())
 		goto no_context;
 
 #ifdef CONFIG_ALPHA_LARGE_VMALLOC
Index: linux-3.18.13-rt10-r7s4/arch/arm/Kconfig
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/Kconfig
+++ linux-3.18.13-rt10-r7s4/arch/arm/Kconfig
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:65 @ config ARM
 	select HAVE_PERF_EVENTS
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
+	select HAVE_PREEMPT_LAZY
 	select HAVE_RCU_TABLE_FREE if (SMP && ARM_LPAE)
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_SYSCALL_TRACEPOINTS
Index: linux-3.18.13-rt10-r7s4/arch/arm/include/asm/cmpxchg.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/include/asm/cmpxchg.h
+++ linux-3.18.13-rt10-r7s4/arch/arm/include/asm/cmpxchg.h
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:132 @ static inline unsigned long __xchg(unsig
 
 #else	/* min ARCH >= ARMv6 */
 
+#define __HAVE_ARCH_CMPXCHG 1
+
 extern void __bad_cmpxchg(volatile void *ptr, int size);
 
 /*
Index: linux-3.18.13-rt10-r7s4/arch/arm/include/asm/futex.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/include/asm/futex.h
+++ linux-3.18.13-rt10-r7s4/arch/arm/include/asm/futex.h
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:96 @ futex_atomic_cmpxchg_inatomic(u32 *uval,
 	if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 		return -EFAULT;
 
+	preempt_disable_rt();
+
 	__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 	"1:	" TUSER(ldr) "	%1, [%4]\n"
 	"	teq	%1, %2\n"
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:109 @ futex_atomic_cmpxchg_inatomic(u32 *uval,
 	: "cc", "memory");
 
 	*uval = val;
+
+	preempt_enable_rt();
 	return ret;
 }
 
Index: linux-3.18.13-rt10-r7s4/arch/arm/include/asm/switch_to.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/include/asm/switch_to.h
+++ linux-3.18.13-rt10-r7s4/arch/arm/include/asm/switch_to.h
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:6 @
 
 #include <linux/thread_info.h>
 
+#if defined CONFIG_PREEMPT_RT_FULL && defined CONFIG_HIGHMEM
+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
+#else
+static inline void
+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
+#endif
+
 /*
  * For v7 SMP cores running a preemptible kernel we may be pre-empted
  * during a TLB maintenance operation, so execute an inner-shareable dsb
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:32 @ extern struct task_struct *__switch_to(s
 
 #define switch_to(prev,next,last)					\
 do {									\
+	switch_kmaps(prev, next);					\
 	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next));	\
 } while (0)
 
Index: linux-3.18.13-rt10-r7s4/arch/arm/include/asm/thread_info.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/include/asm/thread_info.h
+++ linux-3.18.13-rt10-r7s4/arch/arm/include/asm/thread_info.h
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:54 @ struct cpu_context_save {
 struct thread_info {
 	unsigned long		flags;		/* low level flags */
 	int			preempt_count;	/* 0 => preemptable, <0 => bug */
+	int			preempt_lazy_count;	/* 0 => preemptable, <0 => bug */
 	mm_segment_t		addr_limit;	/* address limit */
 	struct task_struct	*task;		/* main task structure */
 	struct exec_domain	*exec_domain;	/* execution domain */
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:153 @ extern int vfp_restore_user_hwstate(stru
 #define TIF_SIGPENDING		0
 #define TIF_NEED_RESCHED	1
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
+#define TIF_NEED_RESCHED_LAZY	3
 #define TIF_UPROBE		7
 #define TIF_SYSCALL_TRACE	8
 #define TIF_SYSCALL_AUDIT	9
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:167 @ extern int vfp_restore_user_hwstate(stru
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
+#define _TIF_NEED_RESCHED_LAZY	(1 << TIF_NEED_RESCHED_LAZY)
 #define _TIF_UPROBE		(1 << TIF_UPROBE)
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
Index: linux-3.18.13-rt10-r7s4/arch/arm/kernel/asm-offsets.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/kernel/asm-offsets.c
+++ linux-3.18.13-rt10-r7s4/arch/arm/kernel/asm-offsets.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:67 @ int main(void)
   BLANK();
   DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
   DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
+  DEFINE(TI_PREEMPT_LAZY,	offsetof(struct thread_info, preempt_lazy_count));
   DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
   DEFINE(TI_TASK,		offsetof(struct thread_info, task));
   DEFINE(TI_EXEC_DOMAIN,	offsetof(struct thread_info, exec_domain));
Index: linux-3.18.13-rt10-r7s4/arch/arm/kernel/entry-armv.S
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/kernel/entry-armv.S
+++ linux-3.18.13-rt10-r7s4/arch/arm/kernel/entry-armv.S
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:210 @ __irq_svc:
 #ifdef CONFIG_PREEMPT
 	get_thread_info tsk
 	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
-	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
 	teq	r8, #0				@ if preempt count != 0
+	bne	1f				@ return from exception
+	ldr	r0, [tsk, #TI_FLAGS]		@ get flags
+	tst	r0, #_TIF_NEED_RESCHED		@ if NEED_RESCHED is set
+	blne	svc_preempt			@ preempt!
+
+	ldr	r8, [tsk, #TI_PREEMPT_LAZY]	@ get preempt lazy count
+	teq	r8, #0				@ if preempt lazy count != 0
 	movne	r0, #0				@ force flags to 0
-	tst	r0, #_TIF_NEED_RESCHED
+	tst	r0, #_TIF_NEED_RESCHED_LAZY
 	blne	svc_preempt
+1:
 #endif
 
 	svc_exit r5, irq = 1			@ return from exception
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:236 @ svc_preempt:
 1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
 	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
 	tst	r0, #_TIF_NEED_RESCHED
+	bne	1b
+	tst	r0, #_TIF_NEED_RESCHED_LAZY
 	reteq	r8				@ go again
 	b	1b
 #endif
Index: linux-3.18.13-rt10-r7s4/arch/arm/kernel/process.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/kernel/process.c
+++ linux-3.18.13-rt10-r7s4/arch/arm/kernel/process.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:434 @ unsigned long arch_randomize_brk(struct
 }
 
 #ifdef CONFIG_MMU
+/*
+ * CONFIG_SPLIT_PTLOCK_CPUS results in a page->ptl lock.  If the lock is not
+ * initialized by pgtable_page_ctor() then a coredump of the vector page will
+ * fail.
+ */
+static int __init vectors_user_mapping_init_page(void)
+{
+	struct page *page;
+	unsigned long addr = 0xffff0000;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	pgd = pgd_offset_k(addr);
+	pud = pud_offset(pgd, addr);
+	pmd = pmd_offset(pud, addr);
+	page = pmd_page(*(pmd));
+
+	pgtable_page_ctor(page);
+
+	return 0;
+}
+late_initcall(vectors_user_mapping_init_page);
+
 #ifdef CONFIG_KUSER_HELPERS
 /*
  * The vectors page is always readable from user space for the
Index: linux-3.18.13-rt10-r7s4/arch/arm/kernel/signal.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/kernel/signal.c
+++ linux-3.18.13-rt10-r7s4/arch/arm/kernel/signal.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:577 @ asmlinkage int
 do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 {
 	do {
-		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
+		if (likely(thread_flags & (_TIF_NEED_RESCHED |
+					   _TIF_NEED_RESCHED_LAZY))) {
 			schedule();
 		} else {
 			if (unlikely(!user_mode(regs)))
Index: linux-3.18.13-rt10-r7s4/arch/arm/kernel/unwind.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/kernel/unwind.c
+++ linux-3.18.13-rt10-r7s4/arch/arm/kernel/unwind.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:96 @ extern const struct unwind_idx __start_u
 static const struct unwind_idx *__origin_unwind_idx;
 extern const struct unwind_idx __stop_unwind_idx[];
 
-static DEFINE_SPINLOCK(unwind_lock);
+static DEFINE_RAW_SPINLOCK(unwind_lock);
 static LIST_HEAD(unwind_tables);
 
 /* Convert a prel31 symbol to an absolute address */
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:204 @ static const struct unwind_idx *unwind_f
 		/* module unwind tables */
 		struct unwind_table *table;
 
-		spin_lock_irqsave(&unwind_lock, flags);
+		raw_spin_lock_irqsave(&unwind_lock, flags);
 		list_for_each_entry(table, &unwind_tables, list) {
 			if (addr >= table->begin_addr &&
 			    addr < table->end_addr) {
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:216 @ static const struct unwind_idx *unwind_f
 				break;
 			}
 		}
-		spin_unlock_irqrestore(&unwind_lock, flags);
+		raw_spin_unlock_irqrestore(&unwind_lock, flags);
 	}
 
 	pr_debug("%s: idx = %p\n", __func__, idx);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:533 @ struct unwind_table *unwind_table_add(un
 	tab->begin_addr = text_addr;
 	tab->end_addr = text_addr + text_size;
 
-	spin_lock_irqsave(&unwind_lock, flags);
+	raw_spin_lock_irqsave(&unwind_lock, flags);
 	list_add_tail(&tab->list, &unwind_tables);
-	spin_unlock_irqrestore(&unwind_lock, flags);
+	raw_spin_unlock_irqrestore(&unwind_lock, flags);
 
 	return tab;
 }
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:547 @ void unwind_table_del(struct unwind_tabl
 	if (!tab)
 		return;
 
-	spin_lock_irqsave(&unwind_lock, flags);
+	raw_spin_lock_irqsave(&unwind_lock, flags);
 	list_del(&tab->list);
-	spin_unlock_irqrestore(&unwind_lock, flags);
+	raw_spin_unlock_irqrestore(&unwind_lock, flags);
 
 	kfree(tab);
 }
Index: linux-3.18.13-rt10-r7s4/arch/arm/kvm/arm.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/kvm/arm.c
+++ linux-3.18.13-rt10-r7s4/arch/arm/kvm/arm.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:444 @ static int kvm_vcpu_first_run_init(struc
 
 static void vcpu_pause(struct kvm_vcpu *vcpu)
 {
-	wait_queue_head_t *wq = kvm_arch_vcpu_wq(vcpu);
+	struct swait_head *wq = kvm_arch_vcpu_wq(vcpu);
 
-	wait_event_interruptible(*wq, !vcpu->arch.pause);
+	swait_event_interruptible(*wq, !vcpu->arch.pause);
 }
 
 static int kvm_vcpu_initialized(struct kvm_vcpu *vcpu)
Index: linux-3.18.13-rt10-r7s4/arch/arm/kvm/psci.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/kvm/psci.c
+++ linux-3.18.13-rt10-r7s4/arch/arm/kvm/psci.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:69 @ static unsigned long kvm_psci_vcpu_on(st
 {
 	struct kvm *kvm = source_vcpu->kvm;
 	struct kvm_vcpu *vcpu = NULL, *tmp;
-	wait_queue_head_t *wq;
+	struct swait_head *wq;
 	unsigned long cpu_id;
 	unsigned long context_id;
 	unsigned long mpidr;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:126 @ static unsigned long kvm_psci_vcpu_on(st
 	smp_mb();		/* Make sure the above is visible */
 
 	wq = kvm_arch_vcpu_wq(vcpu);
-	wake_up_interruptible(wq);
+	swait_wake_interruptible(wq);
 
 	return PSCI_RET_SUCCESS;
 }
Index: linux-3.18.13-rt10-r7s4/arch/arm/mach-at91/at91rm9200_time.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/mach-at91/at91rm9200_time.c
+++ linux-3.18.13-rt10-r7s4/arch/arm/mach-at91/at91rm9200_time.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:138 @ clkevt32k_mode(enum clock_event_mode mod
 		break;
 	case CLOCK_EVT_MODE_SHUTDOWN:
 	case CLOCK_EVT_MODE_UNUSED:
+		remove_irq(NR_IRQS_LEGACY + AT91_ID_SYS, &at91rm9200_timer_irq);
 	case CLOCK_EVT_MODE_RESUME:
 		irqmask = 0;
 		break;
Index: linux-3.18.13-rt10-r7s4/arch/arm/mach-exynos/platsmp.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/mach-exynos/platsmp.c
+++ linux-3.18.13-rt10-r7s4/arch/arm/mach-exynos/platsmp.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:140 @ static void __iomem *scu_base_addr(void)
 	return (void __iomem *)(S5P_VA_SCU);
 }
 
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
 
 static void exynos_secondary_init(unsigned int cpu)
 {
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:153 @ static void exynos_secondary_init(unsign
 	/*
 	 * Synchronise with the boot thread.
 	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 }
 
 static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:168 @ static int exynos_boot_secondary(unsigne
 	 * Set synchronisation state between this boot processor
 	 * and the secondary one
 	 */
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);
 
 	/*
 	 * The secondary processor is waiting to be released from
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:195 @ static int exynos_boot_secondary(unsigne
 
 		if (timeout == 0) {
 			printk(KERN_ERR "cpu1 power enable failed");
-			spin_unlock(&boot_lock);
+			raw_spin_unlock(&boot_lock);
 			return -ETIMEDOUT;
 		}
 	}
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:245 @ static int exynos_boot_secondary(unsigne
 	 * calibrations, then wait for it to finish
 	 */
 fail:
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 
 	return pen_release != -1 ? ret : 0;
 }
Index: linux-3.18.13-rt10-r7s4/arch/arm/mach-hisi/platmcpm.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/mach-hisi/platmcpm.c
+++ linux-3.18.13-rt10-r7s4/arch/arm/mach-hisi/platmcpm.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:60 @
 
 static void __iomem *sysctrl, *fabric;
 static int hip04_cpu_table[HIP04_MAX_CLUSTERS][HIP04_MAX_CPUS_PER_CLUSTER];
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
 static u32 fabric_phys_addr;
 /*
  * [0]: bootwrapper physical address
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:107 @ static int hip04_mcpm_power_up(unsigned
 	if (cluster >= HIP04_MAX_CLUSTERS || cpu >= HIP04_MAX_CPUS_PER_CLUSTER)
 		return -EINVAL;
 
-	spin_lock_irq(&boot_lock);
+	raw_spin_lock_irq(&boot_lock);
 
 	if (hip04_cpu_table[cluster][cpu])
 		goto out;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:136 @ static int hip04_mcpm_power_up(unsigned
 	udelay(20);
 out:
 	hip04_cpu_table[cluster][cpu]++;
-	spin_unlock_irq(&boot_lock);
+	raw_spin_unlock_irq(&boot_lock);
 
 	return 0;
 }
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:152 @ static void hip04_mcpm_power_down(void)
 
 	__mcpm_cpu_going_down(cpu, cluster);
 
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);
 	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);
 	hip04_cpu_table[cluster][cpu]--;
 	if (hip04_cpu_table[cluster][cpu] == 1) {
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:165 @ static void hip04_mcpm_power_down(void)
 
 	last_man = hip04_cluster_is_down(cluster);
 	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
-		spin_unlock(&boot_lock);
+		raw_spin_unlock(&boot_lock);
 		/* Since it's Cortex A15, disable L2 prefetching. */
 		asm volatile(
 		"mcr	p15, 1, %0, c15, c0, 3 \n\t"
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:176 @ static void hip04_mcpm_power_down(void)
 		hip04_set_snoop_filter(cluster, 0);
 		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
 	} else {
-		spin_unlock(&boot_lock);
+		raw_spin_unlock(&boot_lock);
 		v7_exit_coherency_flush(louis);
 	}
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:195 @ static int hip04_mcpm_wait_for_powerdown
 	       cpu >= HIP04_MAX_CPUS_PER_CLUSTER);
 
 	count = TIMEOUT_MSEC / POLL_MSEC;
-	spin_lock_irq(&boot_lock);
+	raw_spin_lock_irq(&boot_lock);
 	for (tries = 0; tries < count; tries++) {
 		if (hip04_cpu_table[cluster][cpu]) {
 			ret = -EBUSY;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:205 @ static int hip04_mcpm_wait_for_powerdown
 		data = readl_relaxed(sysctrl + SC_CPU_RESET_STATUS(cluster));
 		if (data & CORE_WFI_STATUS(cpu))
 			break;
-		spin_unlock_irq(&boot_lock);
+		raw_spin_unlock_irq(&boot_lock);
 		/* Wait for clean L2 when the whole cluster is down. */
 		msleep(POLL_MSEC);
-		spin_lock_irq(&boot_lock);
+		raw_spin_lock_irq(&boot_lock);
 	}
 	if (tries >= count)
 		goto err;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:223 @ static int hip04_mcpm_wait_for_powerdown
 	}
 	if (tries >= count)
 		goto err;
-	spin_unlock_irq(&boot_lock);
+	raw_spin_unlock_irq(&boot_lock);
 	return 0;
 err:
-	spin_unlock_irq(&boot_lock);
+	raw_spin_unlock_irq(&boot_lock);
 	return ret;
 }
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:238 @ static void hip04_mcpm_powered_up(void)
 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
 
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);
 	if (!hip04_cpu_table[cluster][cpu])
 		hip04_cpu_table[cluster][cpu] = 1;
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 }
 
 static void __naked hip04_mcpm_power_up_setup(unsigned int affinity_level)
Index: linux-3.18.13-rt10-r7s4/arch/arm/mach-omap2/omap-smp.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/mach-omap2/omap-smp.c
+++ linux-3.18.13-rt10-r7s4/arch/arm/mach-omap2/omap-smp.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:46 @
 /* SCU base address */
 static void __iomem *scu_base;
 
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
 
 void __iomem *omap4_get_scu_base(void)
 {
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:77 @ static void omap4_secondary_init(unsigne
 	/*
 	 * Synchronise with the boot thread.
 	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 }
 
 static int omap4_boot_secondary(unsigned int cpu, struct task_struct *idle)
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:92 @ static int omap4_boot_secondary(unsigned
 	 * Set synchronisation state between this boot processor
 	 * and the secondary one
 	 */
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);
 
 	/*
 	 * Update the AuxCoreBoot0 with boot state for secondary core.
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:169 @ static int omap4_boot_secondary(unsigned
 	 * Now the secondary core is starting up let it run its
 	 * calibrations, then wait for it to finish
 	 */
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 
 	return 0;
 }
Index: linux-3.18.13-rt10-r7s4/arch/arm/mach-prima2/platsmp.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/mach-prima2/platsmp.c
+++ linux-3.18.13-rt10-r7s4/arch/arm/mach-prima2/platsmp.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:26 @
 static void __iomem *scu_base;
 static void __iomem *rsc_base;
 
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
 
 static struct map_desc scu_io_desc __initdata = {
 	.length		= SZ_4K,
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:59 @ static void sirfsoc_secondary_init(unsig
 	/*
 	 * Synchronise with the boot thread.
 	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 }
 
 static struct of_device_id rsc_ids[]  = {
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:98 @ static int sirfsoc_boot_secondary(unsign
 	/* make sure write buffer is drained */
 	mb();
 
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);
 
 	/*
 	 * The secondary processor is waiting to be released from
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:130 @ static int sirfsoc_boot_secondary(unsign
 	 * now the secondary core is starting up let it run its
 	 * calibrations, then wait for it to finish
 	 */
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 
 	return pen_release != -1 ? -ENOSYS : 0;
 }
Index: linux-3.18.13-rt10-r7s4/arch/arm/mach-qcom/platsmp.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/mach-qcom/platsmp.c
+++ linux-3.18.13-rt10-r7s4/arch/arm/mach-qcom/platsmp.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:49 @
 
 extern void secondary_startup(void);
 
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
 
 #ifdef CONFIG_HOTPLUG_CPU
 static void __ref qcom_cpu_die(unsigned int cpu)
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:63 @ static void qcom_secondary_init(unsigned
 	/*
 	 * Synchronise with the boot thread.
 	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 }
 
 static int scss_release_secondary(unsigned int cpu)
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:287 @ static int qcom_boot_secondary(unsigned
 	 * set synchronisation state between this boot processor
 	 * and the secondary one
 	 */
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);
 
 	/*
 	 * Send the secondary CPU a soft interrupt, thereby causing
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:300 @ static int qcom_boot_secondary(unsigned
 	 * now the secondary core is starting up let it run its
 	 * calibrations, then wait for it to finish
 	 */
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 
 	return ret;
 }
Index: linux-3.18.13-rt10-r7s4/arch/arm/mach-spear/platsmp.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/mach-spear/platsmp.c
+++ linux-3.18.13-rt10-r7s4/arch/arm/mach-spear/platsmp.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:35 @ static void write_pen_release(int val)
 	sync_cache_w(&pen_release);
 }
 
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
 
 static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:50 @ static void spear13xx_secondary_init(uns
 	/*
 	 * Synchronise with the boot thread.
 	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 }
 
 static int spear13xx_boot_secondary(unsigned int cpu, struct task_struct *idle)
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:62 @ static int spear13xx_boot_secondary(unsi
 	 * set synchronisation state between this boot processor
 	 * and the secondary one
 	 */
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);
 
 	/*
 	 * The secondary processor is waiting to be released from
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:87 @ static int spear13xx_boot_secondary(unsi
 	 * now the secondary core is starting up let it run its
 	 * calibrations, then wait for it to finish
 	 */
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 
 	return pen_release != -1 ? -ENOSYS : 0;
 }
Index: linux-3.18.13-rt10-r7s4/arch/arm/mach-sti/platsmp.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/mach-sti/platsmp.c
+++ linux-3.18.13-rt10-r7s4/arch/arm/mach-sti/platsmp.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:37 @ static void write_pen_release(int val)
 	sync_cache_w(&pen_release);
 }
 
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
 
 static void sti_secondary_init(unsigned int cpu)
 {
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:52 @ static void sti_secondary_init(unsigned
 	/*
 	 * Synchronise with the boot thread.
 	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 }
 
 static int sti_boot_secondary(unsigned int cpu, struct task_struct *idle)
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:64 @ static int sti_boot_secondary(unsigned i
 	 * set synchronisation state between this boot processor
 	 * and the secondary one
 	 */
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);
 
 	/*
 	 * The secondary processor is waiting to be released from
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:95 @ static int sti_boot_secondary(unsigned i
 	 * now the secondary core is starting up let it run its
 	 * calibrations, then wait for it to finish
 	 */
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 
 	return pen_release != -1 ? -ENOSYS : 0;
 }
Index: linux-3.18.13-rt10-r7s4/arch/arm/mach-ux500/platsmp.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/mach-ux500/platsmp.c
+++ linux-3.18.13-rt10-r7s4/arch/arm/mach-ux500/platsmp.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:54 @ static void __iomem *scu_base_addr(void)
 	return NULL;
 }
 
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
 
 static void ux500_secondary_init(unsigned int cpu)
 {
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:67 @ static void ux500_secondary_init(unsigne
 	/*
 	 * Synchronise with the boot thread.
 	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 }
 
 static int ux500_boot_secondary(unsigned int cpu, struct task_struct *idle)
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:79 @ static int ux500_boot_secondary(unsigned
 	 * set synchronisation state between this boot processor
 	 * and the secondary one
 	 */
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);
 
 	/*
 	 * The secondary processor is waiting to be released from
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:100 @ static int ux500_boot_secondary(unsigned
 	 * now the secondary core is starting up let it run its
 	 * calibrations, then wait for it to finish
 	 */
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 
 	return pen_release != -1 ? -ENOSYS : 0;
 }
Index: linux-3.18.13-rt10-r7s4/arch/arm/mm/fault.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/mm/fault.c
+++ linux-3.18.13-rt10-r7s4/arch/arm/mm/fault.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:280 @ do_page_fault(unsigned long addr, unsign
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_atomic() || !mm)
+	if (!mm || pagefault_disabled())
 		goto no_context;
 
 	if (user_mode(regs))
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:434 @ do_translation_fault(unsigned long addr,
 	if (addr < TASK_SIZE)
 		return do_page_fault(addr, fsr, regs);
 
+	if (interrupts_enabled(regs))
+		local_irq_enable();
+
 	if (user_mode(regs))
 		goto bad_area;
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:504 @ do_translation_fault(unsigned long addr,
 static int
 do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
+	if (interrupts_enabled(regs))
+		local_irq_enable();
+
 	do_bad_area(addr, fsr, regs);
 	return 0;
 }
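
The in_atomic() test keyed off preempt_count; on RT, pagefault_disable() deliberately stops raising preempt_count (otherwise every kmap_atomic or futex user-copy would become a preempt-off region), so the state moves into a per-task counter and each arch fault handler is switched to test that instead. The helpers this series adds are shaped roughly as below; the s390 hunk further down shows the tsk->pagefault_disabled field directly, but treat the exact bodies here as a sketch:

	static inline void pagefault_disable(void)
	{
		current->pagefault_disabled++;
		barrier();	/* order the increment before the faulting access */
	}

	static inline void pagefault_enable(void)
	{
		barrier();
		current->pagefault_disabled--;
	}

	static inline bool pagefault_disabled(void)
	{
		return current->pagefault_disabled != 0;
	}
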
Index: linux-3.18.13-rt10-r7s4/arch/arm/mm/highmem.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/mm/highmem.c
+++ linux-3.18.13-rt10-r7s4/arch/arm/mm/highmem.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:56 @ EXPORT_SYMBOL(kunmap);
 
 void *kmap_atomic(struct page *page)
 {
+	pte_t pte = mk_pte(page, kmap_prot);
 	unsigned int idx;
 	unsigned long vaddr;
 	void *kmap;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:95 @ void *kmap_atomic(struct page *page)
 	 * in place, so the contained TLB flush ensures the TLB is updated
 	 * with the new mapping.
 	 */
-	set_fixmap_pte(idx, mk_pte(page, kmap_prot));
+#ifdef CONFIG_PREEMPT_RT_FULL
+	current->kmap_pte[type] = pte;
+#endif
+	set_fixmap_pte(idx, pte);
 
 	return (void *)vaddr;
 }
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:115 @ void __kunmap_atomic(void *kvaddr)
 
 		if (cache_is_vivt())
 			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
+#ifdef CONFIG_PREEMPT_RT_FULL
+		current->kmap_pte[type] = __pte(0);
+#endif
 #ifdef CONFIG_DEBUG_HIGHMEM
 		BUG_ON(vaddr != __fix_to_virt(idx));
-		set_fixmap_pte(idx, __pte(0));
 #else
 		(void) idx;  /* to kill a warning */
 #endif
+		set_fixmap_pte(idx, __pte(0));
 		kmap_atomic_idx_pop();
 	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
 		/* this address was obtained through kmap_high_get() */
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:135 @ EXPORT_SYMBOL(__kunmap_atomic);
 
 void *kmap_atomic_pfn(unsigned long pfn)
 {
+	pte_t pte = pfn_pte(pfn, kmap_prot);
 	unsigned long vaddr;
 	int idx, type;
 	struct page *page = pfn_to_page(pfn);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:150 @ void *kmap_atomic_pfn(unsigned long pfn)
 #ifdef CONFIG_DEBUG_HIGHMEM
 	BUG_ON(!pte_none(*(fixmap_page_table + idx)));
 #endif
-	set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
+#ifdef CONFIG_PREEMPT_RT_FULL
+	current->kmap_pte[type] = pte;
+#endif
+	set_fixmap_pte(idx, pte);
 
 	return (void *)vaddr;
 }
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:167 @ struct page *kmap_atomic_to_page(const v
 
 	return pte_page(get_fixmap_pte(vaddr));
 }
+
+#if defined CONFIG_PREEMPT_RT_FULL
+void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
+{
+	int i;
+
+	/*
+	 * Clear @prev_p's kmap_atomic mappings
+	 */
+	for (i = 0; i < prev_p->kmap_idx; i++) {
+		int idx = i + KM_TYPE_NR * smp_processor_id();
+
+		set_fixmap_pte(idx, __pte(0));
+	}
+	/*
+	 * Restore @next_p's kmap_atomic mappings
+	 */
+	for (i = 0; i < next_p->kmap_idx; i++) {
+		int idx = i + KM_TYPE_NR * smp_processor_id();
+
+		if (!pte_none(next_p->kmap_pte[i]))
+			set_fixmap_pte(idx, next_p->kmap_pte[i]);
+	}
+}
+#endif
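
switch_kmaps() exists because a fully preemptible kernel can switch away from a task in the middle of a kmap_atomic section; the PTEs are therefore shadowed per task (kmap_pte[], kmap_idx) and replayed on context switch. A sketch of how the hook is presumably wired into the ARM switch_to path (the actual switch_to.h change lives elsewhere in this series):

	#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_HIGHMEM)
	void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p);
	#else
	static inline void switch_kmaps(struct task_struct *prev_p,
					struct task_struct *next_p) { }
	#endif

	#define switch_to(prev, next, last)					\
	do {									\
		switch_kmaps(prev, next);					\
		last = __switch_to(prev, task_thread_info(prev),		\
				   task_thread_info(next));			\
	} while (0)
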
Index: linux-3.18.13-rt10-r7s4/arch/arm/plat-versatile/platsmp.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm/plat-versatile/platsmp.c
+++ linux-3.18.13-rt10-r7s4/arch/arm/plat-versatile/platsmp.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:33 @ static void write_pen_release(int val)
 	sync_cache_w(&pen_release);
 }
 
-static DEFINE_SPINLOCK(boot_lock);
+static DEFINE_RAW_SPINLOCK(boot_lock);
 
 void versatile_secondary_init(unsigned int cpu)
 {
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:46 @ void versatile_secondary_init(unsigned i
 	/*
 	 * Synchronise with the boot thread.
 	 */
-	spin_lock(&boot_lock);
-	spin_unlock(&boot_lock);
+	raw_spin_lock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 }
 
 int versatile_boot_secondary(unsigned int cpu, struct task_struct *idle)
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:58 @ int versatile_boot_secondary(unsigned in
 	 * Set synchronisation state between this boot processor
 	 * and the secondary one
 	 */
-	spin_lock(&boot_lock);
+	raw_spin_lock(&boot_lock);
 
 	/*
 	 * This is really belt and braces; we hold unintended secondary
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:88 @ int versatile_boot_secondary(unsigned in
 	 * now the secondary core is starting up let it run its
 	 * calibrations, then wait for it to finish
 	 */
-	spin_unlock(&boot_lock);
+	raw_spin_unlock(&boot_lock);
 
 	return pen_release != -1 ? -ENOSYS : 0;
 }
Index: linux-3.18.13-rt10-r7s4/arch/arm64/Kconfig
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm64/Kconfig
+++ linux-3.18.13-rt10-r7s4/arch/arm64/Kconfig
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:62 @ config ARM64
 	select HAVE_PERF_REGS
 	select HAVE_PERF_USER_STACK_DUMP
 	select HAVE_RCU_TABLE_FREE
+	select HAVE_PREEMPT_LAZY
 	select HAVE_SYSCALL_TRACEPOINTS
 	select IRQ_DOMAIN
+	select IRQ_FORCED_THREADING
 	select MODULES_USE_ELF_RELA
 	select NO_BOOTMEM
 	select OF
Index: linux-3.18.13-rt10-r7s4/arch/arm64/include/asm/thread_info.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm64/include/asm/thread_info.h
+++ linux-3.18.13-rt10-r7s4/arch/arm64/include/asm/thread_info.h
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:53 @ struct thread_info {
 	struct exec_domain	*exec_domain;	/* execution domain */
 	struct restart_block	restart_block;
 	int			preempt_count;	/* 0 => preemptable, <0 => bug */
+	int			preempt_lazy_count;	/* 0 => preemptable, <0 => bug */
 	int			cpu;		/* cpu */
 };
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:112 @ static inline struct thread_info *curren
 #define TIF_NEED_RESCHED	1
 #define TIF_NOTIFY_RESUME	2	/* callback before returning to user */
 #define TIF_FOREIGN_FPSTATE	3	/* CPU's FP state is not current's */
+#define TIF_NEED_RESCHED_LAZY	4
 #define TIF_NOHZ		7
 #define TIF_SYSCALL_TRACE	8
 #define TIF_SYSCALL_AUDIT	9
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:129 @ static inline struct thread_info *curren
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
 #define _TIF_NOTIFY_RESUME	(1 << TIF_NOTIFY_RESUME)
 #define _TIF_FOREIGN_FPSTATE	(1 << TIF_FOREIGN_FPSTATE)
+#define _TIF_NEED_RESCHED_LAZY	(1 << TIF_NEED_RESCHED_LAZY)
 #define _TIF_NOHZ		(1 << TIF_NOHZ)
 #define _TIF_SYSCALL_TRACE	(1 << TIF_SYSCALL_TRACE)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
Index: linux-3.18.13-rt10-r7s4/arch/arm64/kernel/asm-offsets.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm64/kernel/asm-offsets.c
+++ linux-3.18.13-rt10-r7s4/arch/arm64/kernel/asm-offsets.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:39 @ int main(void)
   BLANK();
   DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
   DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
+  DEFINE(TI_PREEMPT_LAZY,	offsetof(struct thread_info, preempt_lazy_count));
   DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
   DEFINE(TI_TASK,		offsetof(struct thread_info, task));
   DEFINE(TI_EXEC_DOMAIN,	offsetof(struct thread_info, exec_domain));
Index: linux-3.18.13-rt10-r7s4/arch/arm64/kernel/entry.S
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm64/kernel/entry.S
+++ linux-3.18.13-rt10-r7s4/arch/arm64/kernel/entry.S
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:370 @ el1_irq:
 #ifdef CONFIG_PREEMPT
 	get_thread_info tsk
 	ldr	w24, [tsk, #TI_PREEMPT]		// get preempt count
-	cbnz	w24, 1f				// preempt count != 0
+	cbnz	w24, 2f				// preempt count != 0
 	ldr	x0, [tsk, #TI_FLAGS]		// get flags
-	tbz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
-	bl	el1_preempt
+	tbnz	x0, #TIF_NEED_RESCHED, 1f	// needs rescheduling?
+
+	ldr	w24, [tsk, #TI_PREEMPT_LAZY]	// get preempt lazy count
+	cbnz	w24, 2f				// preempt lazy count != 0
+	tbz	x0, #TIF_NEED_RESCHED_LAZY, 2f	// needs rescheduling?
 1:
+	bl	el1_preempt
+2:
 #endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_on
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:393 @ el1_preempt:
 1:	bl	preempt_schedule_irq		// irq en/disable is done inside
 	ldr	x0, [tsk, #TI_FLAGS]		// get new tasks TI_FLAGS
 	tbnz	x0, #TIF_NEED_RESCHED, 1b	// needs rescheduling?
+	tbnz	x0, #TIF_NEED_RESCHED_LAZY, 1b	// needs rescheduling?
 	ret	x24
 #endif
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:630 @ fast_work_pending:
 	str	x0, [sp, #S_X0]			// returned x0
 work_pending:
 	tbnz	x1, #TIF_NEED_RESCHED, work_resched
+	tbnz	x1, #TIF_NEED_RESCHED_LAZY, work_resched
 	/* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
 	ldr	x2, [sp, #S_PSTATE]
 	mov	x0, sp				// 'regs'
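
The reworked el1_irq test is easier to audit as C; this sketch mirrors the branches above one for one:

	static bool irq_exit_should_preempt(struct thread_info *ti)
	{
		if (ti->preempt_count)			/* cbnz w24, 2f */
			return false;
		if (ti->flags & _TIF_NEED_RESCHED)	/* tbnz x0, ..., 1f */
			return true;			/* RT tasks preempt now */
		if (ti->preempt_lazy_count)		/* cbnz w24, 2f */
			return false;
		return ti->flags & _TIF_NEED_RESCHED_LAZY;	/* tbz x0, ..., 2f */
	}
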
Index: linux-3.18.13-rt10-r7s4/arch/arm64/kernel/perf_event.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/arm64/kernel/perf_event.c
+++ linux-3.18.13-rt10-r7s4/arch/arm64/kernel/perf_event.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:464 @ armpmu_reserve_hardware(struct arm_pmu *
 			}
 
 			err = request_irq(irq, armpmu->handle_irq,
-					IRQF_NOBALANCING,
+					IRQF_NOBALANCING | IRQF_NO_THREAD,
 					"arm-pmu", armpmu);
 			if (err) {
 				pr_err("unable to request IRQ%d for ARM PMU counters\n",
Index: linux-3.18.13-rt10-r7s4/arch/avr32/mm/fault.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/avr32/mm/fault.c
+++ linux-3.18.13-rt10-r7s4/arch/avr32/mm/fault.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:84 @ asmlinkage void do_page_fault(unsigned l
 	 * If we're in an interrupt or have no user context, we must
 	 * not take the fault...
 	 */
-	if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
+	if (!mm || regs->sr & SYSREG_BIT(GM) || pagefault_disabled())
 		goto no_context;
 
 	local_irq_enable();
Index: linux-3.18.13-rt10-r7s4/arch/cris/mm/fault.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/cris/mm/fault.c
+++ linux-3.18.13-rt10-r7s4/arch/cris/mm/fault.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:116 @ do_page_fault(unsigned long address, str
 	 * user context, we must not take the fault.
 	 */
 
-	if (in_atomic() || !mm)
+	if (!mm || pagefault_disabled())
 		goto no_context;
 
 	if (user_mode(regs))
Index: linux-3.18.13-rt10-r7s4/arch/frv/mm/fault.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/frv/mm/fault.c
+++ linux-3.18.13-rt10-r7s4/arch/frv/mm/fault.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:81 @ asmlinkage void do_page_fault(int datamm
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_atomic() || !mm)
+	if (!mm || pagefault_disabled())
 		goto no_context;
 
 	if (user_mode(__frame))
Index: linux-3.18.13-rt10-r7s4/arch/ia64/mm/fault.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/ia64/mm/fault.c
+++ linux-3.18.13-rt10-r7s4/arch/ia64/mm/fault.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:99 @ ia64_do_page_fault (unsigned long addres
 	/*
 	 * If we're in an interrupt or have no user context, we must not take the fault..
 	 */
-	if (in_atomic() || !mm)
+	if (!mm || pagefault_disabled())
 		goto no_context;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
Index: linux-3.18.13-rt10-r7s4/arch/m32r/mm/fault.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/m32r/mm/fault.c
+++ linux-3.18.13-rt10-r7s4/arch/m32r/mm/fault.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:117 @ asmlinkage void do_page_fault(struct pt_
 	 * If we're in an interrupt or have no user context or are running in an
 	 * atomic region then we must not take the fault..
 	 */
-	if (in_atomic() || !mm)
+	if (!mm || pagefault_disabled())
 		goto bad_area_nosemaphore;
 
 	if (error_code & ACE_USERMODE)
Index: linux-3.18.13-rt10-r7s4/arch/m68k/mm/fault.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/m68k/mm/fault.c
+++ linux-3.18.13-rt10-r7s4/arch/m68k/mm/fault.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:84 @ int do_page_fault(struct pt_regs *regs,
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_atomic() || !mm)
+	if (!mm || pagefault_disabled())
 		goto no_context;
 
 	if (user_mode(regs))
Index: linux-3.18.13-rt10-r7s4/arch/microblaze/mm/fault.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/microblaze/mm/fault.c
+++ linux-3.18.13-rt10-r7s4/arch/microblaze/mm/fault.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:110 @ void do_page_fault(struct pt_regs *regs,
 	if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
 		is_write = 0;
 
-	if (unlikely(in_atomic() || !mm)) {
+	if (unlikely(!mm || pagefault_disabled())) {
 		if (kernel_mode(regs))
 			goto bad_area_nosemaphore;
 
Index: linux-3.18.13-rt10-r7s4/arch/mips/Kconfig
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/mips/Kconfig
+++ linux-3.18.13-rt10-r7s4/arch/mips/Kconfig
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:2199 @ config CPU_R4400_WORKAROUNDS
 #
 config HIGHMEM
 	bool "High Memory Support"
-	depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA
+	depends on 32BIT && CPU_SUPPORTS_HIGHMEM && SYS_SUPPORTS_HIGHMEM && !CPU_MIPS32_3_5_EVA && !PREEMPT_RT_FULL
 
 config CPU_SUPPORTS_HIGHMEM
 	bool
Index: linux-3.18.13-rt10-r7s4/arch/mips/kernel/signal.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/mips/kernel/signal.c
+++ linux-3.18.13-rt10-r7s4/arch/mips/kernel/signal.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:616 @ asmlinkage void do_notify_resume(struct
 	__u32 thread_info_flags)
 {
 	local_irq_enable();
+	preempt_check_resched();
 
 	user_exit();
 
Index: linux-3.18.13-rt10-r7s4/arch/mips/mm/fault.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/mips/mm/fault.c
+++ linux-3.18.13-rt10-r7s4/arch/mips/mm/fault.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:92 @ static void __kprobes __do_page_fault(st
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_atomic() || !mm)
+	if (!mm || pagefault_disabled())
 		goto bad_area_nosemaphore;
 
 	if (user_mode(regs))
Index: linux-3.18.13-rt10-r7s4/arch/mips/mm/init.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/mips/mm/init.c
+++ linux-3.18.13-rt10-r7s4/arch/mips/mm/init.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:93 @ static void *__kmap_pgprot(struct page *
 
 	BUG_ON(Page_dcache_dirty(page));
 
-	pagefault_disable();
+	raw_pagefault_disable();
 	idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
 	idx += in_interrupt() ? FIX_N_COLOURS : 0;
 	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:149 @ void kunmap_coherent(void)
 	tlbw_use_hazard();
 	write_c0_entryhi(old_ctx);
 	local_irq_restore(flags);
-	pagefault_enable();
+	raw_pagefault_enable();
 }
 
 void copy_user_highpage(struct page *to, struct page *from,
Index: linux-3.18.13-rt10-r7s4/arch/mn10300/mm/fault.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/mn10300/mm/fault.c
+++ linux-3.18.13-rt10-r7s4/arch/mn10300/mm/fault.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:171 @ asmlinkage void do_page_fault(struct pt_
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_atomic() || !mm)
+	if (!mm || pagefault_disabled())
 		goto no_context;
 
 	if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
Index: linux-3.18.13-rt10-r7s4/arch/parisc/mm/fault.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/parisc/mm/fault.c
+++ linux-3.18.13-rt10-r7s4/arch/parisc/mm/fault.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:210 @ void do_page_fault(struct pt_regs *regs,
 	int fault;
 	unsigned int flags;
 
-	if (in_atomic())
+	if (pagefault_disabled())
 		goto no_context;
 
 	tsk = current;
Index: linux-3.18.13-rt10-r7s4/arch/powerpc/Kconfig
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/powerpc/Kconfig
+++ linux-3.18.13-rt10-r7s4/arch/powerpc/Kconfig
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:63 @ config LOCKDEP_SUPPORT
 
 config RWSEM_GENERIC_SPINLOCK
 	bool
+	default y if PREEMPT_RT_FULL
 
 config RWSEM_XCHGADD_ALGORITHM
 	bool
-	default y
+	default y if !PREEMPT_RT_FULL
 
 config GENERIC_LOCKBREAK
 	bool
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:140 @ config PPC
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
+	select HAVE_PREEMPT_LAZY
 	select HAVE_MOD_ARCH_SPECIFIC
 	select MODULES_USE_ELF_RELA
 	select CLONE_BACKWARDS
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:308 @ menu "Kernel options"
 
 config HIGHMEM
 	bool "High memory support"
-	depends on PPC32
+	depends on PPC32 && !PREEMPT_RT_FULL
 
 source kernel/Kconfig.hz
 source kernel/Kconfig.preempt
Index: linux-3.18.13-rt10-r7s4/arch/powerpc/include/asm/kvm_host.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/powerpc/include/asm/kvm_host.h
+++ linux-3.18.13-rt10-r7s4/arch/powerpc/include/asm/kvm_host.h
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:299 @ struct kvmppc_vcore {
 	u8 in_guest;
 	struct list_head runnable_threads;
 	spinlock_t lock;
-	wait_queue_head_t wq;
+	struct swait_head wq;
 	u64 stolen_tb;
 	u64 preempt_tb;
 	struct kvm_vcpu *runner;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:621 @ struct kvm_vcpu_arch {
 	u8 prodded;
 	u32 last_inst;
 
-	wait_queue_head_t *wqp;
+	struct swait_head *wqp;
 	struct kvmppc_vcore *vcore;
 	int ret;
 	int trap;
Index: linux-3.18.13-rt10-r7s4/arch/powerpc/include/asm/thread_info.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/powerpc/include/asm/thread_info.h
+++ linux-3.18.13-rt10-r7s4/arch/powerpc/include/asm/thread_info.h
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:46 @ struct thread_info {
 	int		cpu;			/* cpu we're on */
 	int		preempt_count;		/* 0 => preemptable,
 						   <0 => BUG */
+	int		preempt_lazy_count;	/* 0 => preemptable,
+						   <0 => BUG */
 	struct restart_block restart_block;
 	unsigned long	local_flags;		/* private flags for thread */
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:93 @ static inline struct thread_info *curren
 #define TIF_SYSCALL_TRACE	0	/* syscall trace active */
 #define TIF_SIGPENDING		1	/* signal pending */
 #define TIF_NEED_RESCHED	2	/* rescheduling necessary */
-#define TIF_POLLING_NRFLAG	3	/* true if poll_idle() is polling
-					   TIF_NEED_RESCHED */
+#define TIF_NEED_RESCHED_LAZY	3	/* lazy rescheduling necessary */
 #define TIF_32BIT		4	/* 32 bit binary */
 #define TIF_RESTORE_TM		5	/* need to restore TM FP/VEC/VSX */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:111 @ static inline struct thread_info *curren
 #if defined(CONFIG_PPC64)
 #define TIF_ELF2ABI		18	/* function descriptors must die! */
 #endif
+#define TIF_POLLING_NRFLAG	19	/* true if poll_idle() is polling
+					   TIF_NEED_RESCHED */
 
 /* as above, but as bit values */
 #define _TIF_SYSCALL_TRACE	(1<<TIF_SYSCALL_TRACE)
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:131 @ static inline struct thread_info *curren
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
 #define _TIF_EMULATE_STACK_STORE	(1<<TIF_EMULATE_STACK_STORE)
 #define _TIF_NOHZ		(1<<TIF_NOHZ)
+#define _TIF_NEED_RESCHED_LAZY	(1<<TIF_NEED_RESCHED_LAZY)
 #define _TIF_SYSCALL_T_OR_A	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
 				 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
 				 _TIF_NOHZ)
 
 #define _TIF_USER_WORK_MASK	(_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
 				 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
-				 _TIF_RESTORE_TM)
+				 _TIF_RESTORE_TM | _TIF_NEED_RESCHED_LAZY)
 #define _TIF_PERSYSCALL_MASK	(_TIF_RESTOREALL|_TIF_NOERROR)
+#define _TIF_NEED_RESCHED_MASK	(_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
 
 /* Bits in local_flags */
 /* Don't move TLF_NAPPING without adjusting the code in entry_32.S */
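
TIF_NEED_RESCHED_LAZY must sit in the low 16 flag bits because entry_32.S and entry_64.S test it with andi., whose immediate is only 16 bits wide; TIF_POLLING_NRFLAG is only ever touched from C, so it can be evicted to bit 19. A build-time check along these lines would catch a future regression (illustrative, not part of the patch):

	static inline void check_resched_flag_bits(void)
	{
		/* both reschedule flags must be reachable by a 16-bit andi. */
		BUILD_BUG_ON(_TIF_NEED_RESCHED_MASK & ~0xffffUL);
	}
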
Index: linux-3.18.13-rt10-r7s4/arch/powerpc/kernel/asm-offsets.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/powerpc/kernel/asm-offsets.c
+++ linux-3.18.13-rt10-r7s4/arch/powerpc/kernel/asm-offsets.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:162 @ int main(void)
 	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
 	DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
 	DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
+	DEFINE(TI_PREEMPT_LAZY, offsetof(struct thread_info, preempt_lazy_count));
 	DEFINE(TI_TASK, offsetof(struct thread_info, task));
 	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
 
Index: linux-3.18.13-rt10-r7s4/arch/powerpc/kernel/entry_32.S
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/powerpc/kernel/entry_32.S
+++ linux-3.18.13-rt10-r7s4/arch/powerpc/kernel/entry_32.S
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:893 @ resume_kernel:
 	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
 	bne	restore
 	andi.	r8,r8,_TIF_NEED_RESCHED
+	bne+	1f
+	lwz	r0,TI_PREEMPT_LAZY(r9)
+	cmpwi	0,r0,0		/* if non-zero, just restore regs and return */
+	bne	restore
+	lwz	r0,TI_FLAGS(r9)
+	andi.	r0,r0,_TIF_NEED_RESCHED_LAZY
 	beq+	restore
+1:
 	lwz	r3,_MSR(r1)
 	andi.	r0,r3,MSR_EE	/* interrupts off? */
 	beq	restore		/* don't schedule if so */
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:911 @ resume_kernel:
 	 */
 	bl	trace_hardirqs_off
 #endif
-1:	bl	preempt_schedule_irq
+2:	bl	preempt_schedule_irq
 	CURRENT_THREAD_INFO(r9, r1)
 	lwz	r3,TI_FLAGS(r9)
-	andi.	r0,r3,_TIF_NEED_RESCHED
-	bne-	1b
+	andi.	r0,r3,_TIF_NEED_RESCHED_MASK
+	bne-	2b
 #ifdef CONFIG_TRACE_IRQFLAGS
 	/* And now, to properly rebalance the above, we tell lockdep they
 	 * are being turned back on, which will happen when we return
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1236 @ global_dbcr0:
 #endif /* !(CONFIG_4xx || CONFIG_BOOKE) */
 
 do_work:			/* r10 contains MSR_KERNEL here */
-	andi.	r0,r9,_TIF_NEED_RESCHED
+	andi.	r0,r9,_TIF_NEED_RESCHED_MASK
 	beq	do_user_signal
 
 do_resched:			/* r10 contains MSR_KERNEL here */
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1257 @ recheck:
 	MTMSRD(r10)		/* disable interrupts */
 	CURRENT_THREAD_INFO(r9, r1)
 	lwz	r9,TI_FLAGS(r9)
-	andi.	r0,r9,_TIF_NEED_RESCHED
+	andi.	r0,r9,_TIF_NEED_RESCHED_MASK
 	bne-	do_resched
 	andi.	r0,r9,_TIF_USER_WORK_MASK
 	beq	restore_user
Index: linux-3.18.13-rt10-r7s4/arch/powerpc/kernel/entry_64.S
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/powerpc/kernel/entry_64.S
+++ linux-3.18.13-rt10-r7s4/arch/powerpc/kernel/entry_64.S
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:647 @ _GLOBAL(ret_from_except_lite)
 #else
 	beq	restore
 #endif
-1:	andi.	r0,r4,_TIF_NEED_RESCHED
+1:	andi.	r0,r4,_TIF_NEED_RESCHED_MASK
 	beq	2f
 	bl	restore_interrupts
 	SCHEDULE_USER
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:709 @ resume_kernel:
 
 #ifdef CONFIG_PREEMPT
 	/* Check if we need to preempt */
+	lwz	r8,TI_PREEMPT(r9)
+	cmpwi	0,r8,0		/* if non-zero, just restore regs and return */
+	bne	restore
 	andi.	r0,r4,_TIF_NEED_RESCHED
+	bne+	check_count
+
+	andi.	r0,r4,_TIF_NEED_RESCHED_LAZY
 	beq+	restore
+	lwz	r8,TI_PREEMPT_LAZY(r9)
+
 	/* Check that preempt_count() == 0 and interrupts are enabled */
-	lwz	r8,TI_PREEMPT(r9)
+check_count:
 	cmpwi	cr1,r8,0
 	ld	r0,SOFTE(r1)
 	cmpdi	r0,0
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:737 @ resume_kernel:
 	/* Re-test flags and eventually loop */
 	CURRENT_THREAD_INFO(r9, r1)
 	ld	r4,TI_FLAGS(r9)
-	andi.	r0,r4,_TIF_NEED_RESCHED
+	andi.	r0,r4,_TIF_NEED_RESCHED_MASK
 	bne	1b
 
 	/*
Index: linux-3.18.13-rt10-r7s4/arch/powerpc/kernel/irq.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/powerpc/kernel/irq.c
+++ linux-3.18.13-rt10-r7s4/arch/powerpc/kernel/irq.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:618 @ void irq_ctx_init(void)
 	}
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 void do_softirq_own_stack(void)
 {
 	struct thread_info *curtp, *irqtp;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:636 @ void do_softirq_own_stack(void)
 	if (irqtp->flags)
 		set_bits(irqtp->flags, &curtp->flags);
 }
+#endif
 
 irq_hw_number_t virq_to_hw(unsigned int virq)
 {
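
Under PREEMPT_RT_FULL softirqs are always processed in thread context, so the switch to a dedicated softirq stack never happens and do_softirq_own_stack(), together with the call_do_softirq asm helpers below, is compiled out. For reference, the guarded function condensed (flag handling abridged):

	#ifndef CONFIG_PREEMPT_RT_FULL
	void do_softirq_own_stack(void)
	{
		struct thread_info *curtp = current_thread_info();
		struct thread_info *irqtp = softirq_ctx[smp_processor_id()];

		irqtp->task = curtp->task;
		call_do_softirq(irqtp);	/* runs __do_softirq on irqtp's stack */
		irqtp->task = NULL;
	}
	#endif
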
Index: linux-3.18.13-rt10-r7s4/arch/powerpc/kernel/misc_32.S
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/powerpc/kernel/misc_32.S
+++ linux-3.18.13-rt10-r7s4/arch/powerpc/kernel/misc_32.S
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:43 @
  * We store the saved ksp_limit in the unused part
  * of the STACK_FRAME_OVERHEAD
  */
+#ifndef CONFIG_PREEMPT_RT_FULL
 _GLOBAL(call_do_softirq)
 	mflr	r0
 	stw	r0,4(r1)
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:60 @ _GLOBAL(call_do_softirq)
 	stw	r10,THREAD+KSP_LIMIT(r2)
 	mtlr	r0
 	blr
+#endif
 
 /*
  * void call_do_irq(struct pt_regs *regs, struct thread_info *irqtp);
Index: linux-3.18.13-rt10-r7s4/arch/powerpc/kernel/misc_64.S
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/powerpc/kernel/misc_64.S
+++ linux-3.18.13-rt10-r7s4/arch/powerpc/kernel/misc_64.S
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:32 @
 
 	.text
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 _GLOBAL(call_do_softirq)
 	mflr	r0
 	std	r0,16(r1)
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:43 @ _GLOBAL(call_do_softirq)
 	ld	r0,16(r1)
 	mtlr	r0
 	blr
+#endif
 
 _GLOBAL(call_do_irq)
 	mflr	r0
Index: linux-3.18.13-rt10-r7s4/arch/powerpc/kernel/time.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/powerpc/kernel/time.c
+++ linux-3.18.13-rt10-r7s4/arch/powerpc/kernel/time.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:427 @ unsigned long profile_pc(struct pt_regs
 EXPORT_SYMBOL(profile_pc);
 #endif
 
-#ifdef CONFIG_IRQ_WORK
+#if defined(CONFIG_IRQ_WORK)
 
 /*
  * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable...
Index: linux-3.18.13-rt10-r7s4/arch/powerpc/kvm/Kconfig
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/powerpc/kvm/Kconfig
+++ linux-3.18.13-rt10-r7s4/arch/powerpc/kvm/Kconfig
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:160 @ config KVM_E500MC
 config KVM_MPIC
 	bool "KVM in-kernel MPIC emulation"
 	depends on KVM && E500
+	depends on !PREEMPT_RT_FULL
 	select HAVE_KVM_IRQCHIP
 	select HAVE_KVM_IRQFD
 	select HAVE_KVM_IRQ_ROUTING
Index: linux-3.18.13-rt10-r7s4/arch/powerpc/kvm/book3s_hv.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/powerpc/kvm/book3s_hv.c
+++ linux-3.18.13-rt10-r7s4/arch/powerpc/kvm/book3s_hv.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:87 @ static void kvmppc_fast_vcpu_kick_hv(str
 {
 	int me;
 	int cpu = vcpu->cpu;
-	wait_queue_head_t *wqp;
+	struct swait_head *wqp;
 
 	wqp = kvm_arch_vcpu_wq(vcpu);
-	if (waitqueue_active(wqp)) {
-		wake_up_interruptible(wqp);
+	if (swaitqueue_active(wqp)) {
+		swait_wake_interruptible(wqp);
 		++vcpu->stat.halt_wakeup;
 	}
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:642 @ int kvmppc_pseries_do_hcall(struct kvm_v
 		tvcpu->arch.prodded = 1;
 		smp_mb();
 		if (vcpu->arch.ceded) {
-			if (waitqueue_active(&vcpu->wq)) {
-				wake_up_interruptible(&vcpu->wq);
+			if (swaitqueue_active(&vcpu->wq)) {
+				swait_wake_interruptible(&vcpu->wq);
 				vcpu->stat.halt_wakeup++;
 			}
 		}
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1360 @ static struct kvmppc_vcore *kvmppc_vcore
 
 	INIT_LIST_HEAD(&vcore->runnable_threads);
 	spin_lock_init(&vcore->lock);
-	init_waitqueue_head(&vcore->wq);
+	init_swait_head(&vcore->wq);
 	vcore->preempt_tb = TB_NIL;
 	vcore->lpcr = kvm->arch.lpcr;
 	vcore->first_vcpuid = core * threads_per_subcore;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1829 @ static void kvmppc_wait_for_exec(struct
  */
 static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
 {
-	DEFINE_WAIT(wait);
+	DEFINE_SWAITER(wait);
 
-	prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
+	swait_prepare(&vc->wq, &wait, TASK_INTERRUPTIBLE);
 	vc->vcore_state = VCORE_SLEEPING;
 	spin_unlock(&vc->lock);
 	schedule();
-	finish_wait(&vc->wq, &wait);
+	swait_finish(&vc->wq, &wait);
 	spin_lock(&vc->lock);
 	vc->vcore_state = VCORE_INACTIVE;
 }
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1876 @ static int kvmppc_run_vcpu(struct kvm_ru
 			kvmppc_create_dtl_entry(vcpu, vc);
 			kvmppc_start_thread(vcpu);
 		} else if (vc->vcore_state == VCORE_SLEEPING) {
-			wake_up(&vc->wq);
+			swait_wake(&vc->wq);
 		}
 
 	}
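
These wait-queue conversions swap the regular waitqueue for the simple-wait flavour introduced by this series, whose head lock can stay a raw spinlock because wakeups never run custom callbacks, which is what the RT guest-entry paths need. Condensed usage as exercised by the hunks above (signatures inferred from the call sites, not quoted from swait.h):

	static struct swait_head wq;	/* init_swait_head(&wq) at setup time */
	static bool done;

	static void waiter(void)
	{
		DEFINE_SWAITER(wait);

		swait_prepare(&wq, &wait, TASK_INTERRUPTIBLE);
		if (!done)
			schedule();
		swait_finish(&wq, &wait);
	}

	static void waker(void)
	{
		done = true;
		if (swaitqueue_active(&wq))
			swait_wake_interruptible(&wq);
	}
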
Index: linux-3.18.13-rt10-r7s4/arch/powerpc/mm/fault.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/powerpc/mm/fault.c
+++ linux-3.18.13-rt10-r7s4/arch/powerpc/mm/fault.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:276 @ int __kprobes do_page_fault(struct pt_re
 	if (!arch_irq_disabled_regs(regs))
 		local_irq_enable();
 
-	if (in_atomic() || mm == NULL) {
+	if (in_atomic() || mm == NULL || pagefault_disabled()) {
 		if (!user_mode(regs)) {
 			rc = SIGSEGV;
 			goto bail;
Index: linux-3.18.13-rt10-r7s4/arch/s390/include/asm/kvm_host.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/s390/include/asm/kvm_host.h
+++ linux-3.18.13-rt10-r7s4/arch/s390/include/asm/kvm_host.h
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:314 @ struct kvm_s390_local_interrupt {
 	struct list_head list;
 	atomic_t active;
 	struct kvm_s390_float_interrupt *float_int;
-	wait_queue_head_t *wq;
+	struct swait_head *wq;
 	atomic_t *cpuflags;
 	unsigned int action_bits;
 };
Index: linux-3.18.13-rt10-r7s4/arch/s390/kvm/interrupt.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/s390/kvm/interrupt.c
+++ linux-3.18.13-rt10-r7s4/arch/s390/kvm/interrupt.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:622 @ no_timer:
 
 void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
 {
-	if (waitqueue_active(&vcpu->wq)) {
+	if (swaitqueue_active(&vcpu->wq)) {
 		/*
 		 * The vcpu gave up the cpu voluntarily, mark it as a good
 		 * yield-candidate.
 		 */
 		vcpu->preempted = true;
-		wake_up_interruptible(&vcpu->wq);
+		swait_wake_interruptible(&vcpu->wq);
 		vcpu->stat.halt_wakeup++;
 	}
 }
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:749 @ int kvm_s390_inject_program_int(struct k
 	spin_lock(&li->lock);
 	list_add(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
-	BUG_ON(waitqueue_active(li->wq));
+	BUG_ON(swaitqueue_active(li->wq));
 	spin_unlock(&li->lock);
 	return 0;
 }
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:774 @ int kvm_s390_inject_prog_irq(struct kvm_
 	spin_lock(&li->lock);
 	list_add(&inti->list, &li->list);
 	atomic_set(&li->active, 1);
-	BUG_ON(waitqueue_active(li->wq));
+	BUG_ON(swaitqueue_active(li->wq));
 	spin_unlock(&li->lock);
 	return 0;
 }
Index: linux-3.18.13-rt10-r7s4/arch/s390/mm/fault.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/s390/mm/fault.c
+++ linux-3.18.13-rt10-r7s4/arch/s390/mm/fault.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:438 @ static inline int do_exception(struct pt
 	 * user context.
 	 */
 	fault = VM_FAULT_BADCONTEXT;
-	if (unlikely(!user_space_fault(regs) || in_atomic() || !mm))
+	if (unlikely(!user_space_fault(regs) || !mm ||
+		     tsk->pagefault_disabled))
 		goto out;
 
 	address = trans_exc_code & __FAIL_ADDR_MASK;
Index: linux-3.18.13-rt10-r7s4/arch/score/mm/fault.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/score/mm/fault.c
+++ linux-3.18.13-rt10-r7s4/arch/score/mm/fault.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:76 @ asmlinkage void do_page_fault(struct pt_
 	* If we're in an interrupt or have no user
 	* context, we must not take the fault..
 	*/
-	if (in_atomic() || !mm)
+	if (!mm || pagefault_disabled())
 		goto bad_area_nosemaphore;
 
 	if (user_mode(regs))
Index: linux-3.18.13-rt10-r7s4/arch/sh/kernel/irq.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/sh/kernel/irq.c
+++ linux-3.18.13-rt10-r7s4/arch/sh/kernel/irq.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:152 @ void irq_ctx_exit(int cpu)
 	hardirq_ctx[cpu] = NULL;
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 void do_softirq_own_stack(void)
 {
 	struct thread_info *curctx;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:180 @ void do_softirq_own_stack(void)
 		  "r5", "r6", "r7", "r8", "r9", "r15", "t", "pr"
 	);
 }
+#endif
 #else
 static inline void handle_one_irq(unsigned int irq)
 {
Index: linux-3.18.13-rt10-r7s4/arch/sh/mm/fault.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/sh/mm/fault.c
+++ linux-3.18.13-rt10-r7s4/arch/sh/mm/fault.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:443 @ asmlinkage void __kprobes do_page_fault(
 	 * If we're in an interrupt, have no user context or are running
 	 * in an atomic region then we must not take the fault:
 	 */
-	if (unlikely(in_atomic() || !mm)) {
+	if (unlikely(!mm || pagefault_disabled())) {
 		bad_area_nosemaphore(regs, error_code, address);
 		return;
 	}
Index: linux-3.18.13-rt10-r7s4/arch/sparc/Kconfig
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/sparc/Kconfig
+++ linux-3.18.13-rt10-r7s4/arch/sparc/Kconfig
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:185 @ config NR_CPUS
 source kernel/Kconfig.hz
 
 config RWSEM_GENERIC_SPINLOCK
-	bool
-	default y if SPARC32
+	def_bool PREEMPT_RT_FULL
 
 config RWSEM_XCHGADD_ALGORITHM
-	bool
-	default y if SPARC64
+	def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
 
 config GENERIC_HWEIGHT
 	bool
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:529 @ menu "Executable file formats"
 
 source "fs/Kconfig.binfmt"
 
+config EARLY_PRINTK
+	bool
+	default y
+
 config COMPAT
 	bool
 	depends on SPARC64
Index: linux-3.18.13-rt10-r7s4/arch/sparc/kernel/irq_64.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/sparc/kernel/irq_64.c
+++ linux-3.18.13-rt10-r7s4/arch/sparc/kernel/irq_64.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:852 @ void __irq_entry handler_irq(int pil, st
 	set_irq_regs(old_regs);
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 void do_softirq_own_stack(void)
 {
 	void *orig_sp, *sp = softirq_stack[smp_processor_id()];
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:867 @ void do_softirq_own_stack(void)
 	__asm__ __volatile__("mov %0, %%sp"
 			     : : "r" (orig_sp));
 }
+#endif
 
 #ifdef CONFIG_HOTPLUG_CPU
 void fixup_irqs(void)
Index: linux-3.18.13-rt10-r7s4/arch/sparc/kernel/setup_32.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/sparc/kernel/setup_32.c
+++ linux-3.18.13-rt10-r7s4/arch/sparc/kernel/setup_32.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:312 @ void __init setup_arch(char **cmdline_p)
 
 	boot_flags_init(*cmdline_p);
 
+	early_console = &prom_early_console;
 	register_console(&prom_early_console);
 
 	printk("ARCH: ");
Index: linux-3.18.13-rt10-r7s4/arch/sparc/kernel/setup_64.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/sparc/kernel/setup_64.c
+++ linux-3.18.13-rt10-r7s4/arch/sparc/kernel/setup_64.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:566 @ static void __init init_sparc64_elf_hwca
 		pause_patch();
 }
 
+static inline void register_prom_console(void)
+{
+	early_console = &prom_early_console;
+	register_console(&prom_early_console);
+}
+
 void __init setup_arch(char **cmdline_p)
 {
 	/* Initialize PROM console and command line. */
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:583 @ void __init setup_arch(char **cmdline_p)
 #ifdef CONFIG_EARLYFB
 	if (btext_find_display())
 #endif
-		register_console(&prom_early_console);
+		register_prom_console();
 
 	if (tlb_type == hypervisor)
 		printk("ARCH: SUN4V\n");
Index: linux-3.18.13-rt10-r7s4/arch/sparc/mm/fault_32.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/sparc/mm/fault_32.c
+++ linux-3.18.13-rt10-r7s4/arch/sparc/mm/fault_32.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:199 @ asmlinkage void do_sparc_fault(struct pt
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_atomic() || !mm)
+	if (!mm || pagefault_disabled())
 		goto no_context;
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
Index: linux-3.18.13-rt10-r7s4/arch/sparc/mm/fault_64.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/sparc/mm/fault_64.c
+++ linux-3.18.13-rt10-r7s4/arch/sparc/mm/fault_64.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:333 @ asmlinkage void __kprobes do_sparc64_fau
 	 * If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_atomic() || !mm)
+	if (!mm || pagefault_disabled())
 		goto intr_or_no_mm;
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
Index: linux-3.18.13-rt10-r7s4/arch/tile/mm/fault.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/tile/mm/fault.c
+++ linux-3.18.13-rt10-r7s4/arch/tile/mm/fault.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:360 @ static int handle_page_fault(struct pt_r
 	 * If we're in an interrupt, have no user context or are running in an
 	 * atomic region then we must not take the fault.
 	 */
-	if (in_atomic() || !mm) {
+	if (!mm || pagefault_disabled()) {
 		vma = NULL;  /* happy compiler */
 		goto bad_area_nosemaphore;
 	}
Index: linux-3.18.13-rt10-r7s4/arch/um/kernel/trap.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/um/kernel/trap.c
+++ linux-3.18.13-rt10-r7s4/arch/um/kernel/trap.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:41 @ int handle_page_fault(unsigned long addr
 	 * If the fault was during atomic operation, don't take the fault, just
 	 * fail.
 	 */
-	if (in_atomic())
+	if (pagefault_disabled())
 		goto out_nosemaphore;
 
 	if (is_user)
Index: linux-3.18.13-rt10-r7s4/arch/x86/Kconfig
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/Kconfig
+++ linux-3.18.13-rt10-r7s4/arch/x86/Kconfig
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:24 @ config X86_64
 ### Arch settings
 config X86
 	def_bool y
+	select HAVE_PREEMPT_LAZY
 	select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
 	select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
 	select ARCH_HAS_FAST_MULTIPLIER
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:201 @ config ARCH_MAY_HAVE_PC_FDC
 	def_bool y
 	depends on ISA_DMA_API
 
+config RWSEM_GENERIC_SPINLOCK
+	def_bool PREEMPT_RT_FULL
+
 config RWSEM_XCHGADD_ALGORITHM
-	def_bool y
+	def_bool !RWSEM_GENERIC_SPINLOCK && !PREEMPT_RT_FULL
 
 config GENERIC_CALIBRATE_DELAY
 	def_bool y
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:818 @ config IOMMU_HELPER
 config MAXSMP
 	bool "Enable Maximum number of SMP Processors and NUMA Nodes"
 	depends on X86_64 && SMP && DEBUG_KERNEL
-	select CPUMASK_OFFSTACK
+	select CPUMASK_OFFSTACK if !PREEMPT_RT_FULL
 	---help---
 	  Enable maximum number of CPUS and NUMA Nodes for this architecture.
 	  If unsure, say N.
Index: linux-3.18.13-rt10-r7s4/arch/x86/crypto/aesni-intel_glue.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/crypto/aesni-intel_glue.c
+++ linux-3.18.13-rt10-r7s4/arch/x86/crypto/aesni-intel_glue.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:384 @ static int ecb_encrypt(struct blkcipher_
 	err = blkcipher_walk_virt(desc, &walk);
 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_ecb_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
-			      nbytes & AES_BLOCK_MASK);
+				nbytes & AES_BLOCK_MASK);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-	kernel_fpu_end();
 
 	return err;
 }
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:408 @ static int ecb_decrypt(struct blkcipher_
 	err = blkcipher_walk_virt(desc, &walk);
 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_ecb_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-	kernel_fpu_end();
 
 	return err;
 }
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:432 @ static int cbc_encrypt(struct blkcipher_
 	err = blkcipher_walk_virt(desc, &walk);
 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_cbc_enc(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK, walk.iv);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-	kernel_fpu_end();
 
 	return err;
 }
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:456 @ static int cbc_decrypt(struct blkcipher_
 	err = blkcipher_walk_virt(desc, &walk);
 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes)) {
+		kernel_fpu_begin();
 		aesni_cbc_dec(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 			      nbytes & AES_BLOCK_MASK, walk.iv);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-	kernel_fpu_end();
 
 	return err;
 }
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:515 @ static int ctr_crypt(struct blkcipher_de
 	err = blkcipher_walk_virt_block(desc, &walk, AES_BLOCK_SIZE);
 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
-	kernel_fpu_begin();
 	while ((nbytes = walk.nbytes) >= AES_BLOCK_SIZE) {
+		kernel_fpu_begin();
 		aesni_ctr_enc_tfm(ctx, walk.dst.virt.addr, walk.src.virt.addr,
 				  nbytes & AES_BLOCK_MASK, walk.iv);
+		kernel_fpu_end();
 		nbytes &= AES_BLOCK_SIZE - 1;
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
 	if (walk.nbytes) {
+		kernel_fpu_begin();
 		ctr_crypt_final(ctx, &walk);
+		kernel_fpu_end();
 		err = blkcipher_walk_done(desc, &walk, 0);
 	}
-	kernel_fpu_end();
 
 	return err;
 }
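
The aesni rework shrinks the kernel_fpu_begin()/end() bracket from the whole walk to a single chunk: kernel_fpu_begin() disables preemption for as long as the FPU is borrowed, and blkcipher_walk_done() can sleep, so the old placement was both a latency problem and, on RT, a correctness one. The cast5 and glue_helper hunks below repeat the same transformation. The shape of the fix, independent of the particular cipher (encrypt_chunk() is a stand-in for the aesni_* calls):

	while ((nbytes = walk.nbytes)) {
		kernel_fpu_begin();	/* preempt off only for this chunk */
		encrypt_chunk(ctx, walk.dst.virt.addr, walk.src.virt.addr,
			      nbytes & AES_BLOCK_MASK);
		kernel_fpu_end();	/* preemptible again before walk_done */
		nbytes &= AES_BLOCK_SIZE - 1;
		err = blkcipher_walk_done(desc, &walk, nbytes); /* may sleep */
	}
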
Index: linux-3.18.13-rt10-r7s4/arch/x86/crypto/cast5_avx_glue.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/crypto/cast5_avx_glue.c
+++ linux-3.18.13-rt10-r7s4/arch/x86/crypto/cast5_avx_glue.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:63 @ static inline void cast5_fpu_end(bool fp
 static int ecb_crypt(struct blkcipher_desc *desc, struct blkcipher_walk *walk,
 		     bool enc)
 {
-	bool fpu_enabled = false;
+	bool fpu_enabled;
 	struct cast5_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
 	const unsigned int bsize = CAST5_BLOCK_SIZE;
 	unsigned int nbytes;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:79 @ static int ecb_crypt(struct blkcipher_de
 		u8 *wsrc = walk->src.virt.addr;
 		u8 *wdst = walk->dst.virt.addr;
 
-		fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
+		fpu_enabled = cast5_fpu_begin(false, nbytes);
 
 		/* Process multi-block batch */
 		if (nbytes >= bsize * CAST5_PARALLEL_BLOCKS) {
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:107 @ static int ecb_crypt(struct blkcipher_de
 		} while (nbytes >= bsize);
 
 done:
+		cast5_fpu_end(fpu_enabled);
 		err = blkcipher_walk_done(desc, walk, nbytes);
 	}
-
-	cast5_fpu_end(fpu_enabled);
 	return err;
 }
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:230 @ done:
 static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		       struct scatterlist *src, unsigned int nbytes)
 {
-	bool fpu_enabled = false;
+	bool fpu_enabled;
 	struct blkcipher_walk walk;
 	int err;
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:239 @ static int cbc_decrypt(struct blkcipher_
 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	while ((nbytes = walk.nbytes)) {
-		fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
+		fpu_enabled = cast5_fpu_begin(false, nbytes);
 		nbytes = __cbc_decrypt(desc, &walk);
+		cast5_fpu_end(fpu_enabled);
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
-
-	cast5_fpu_end(fpu_enabled);
 	return err;
 }
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:313 @ done:
 static int ctr_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
 		     struct scatterlist *src, unsigned int nbytes)
 {
-	bool fpu_enabled = false;
+	bool fpu_enabled;
 	struct blkcipher_walk walk;
 	int err;
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:322 @ static int ctr_crypt(struct blkcipher_de
 	desc->flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
 
 	while ((nbytes = walk.nbytes) >= CAST5_BLOCK_SIZE) {
-		fpu_enabled = cast5_fpu_begin(fpu_enabled, nbytes);
+		fpu_enabled = cast5_fpu_begin(false, nbytes);
 		nbytes = __ctr_crypt(desc, &walk);
+		cast5_fpu_end(fpu_enabled);
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
 
-	cast5_fpu_end(fpu_enabled);
-
 	if (walk.nbytes) {
 		ctr_crypt_final(desc, &walk);
 		err = blkcipher_walk_done(desc, &walk, 0);
Index: linux-3.18.13-rt10-r7s4/arch/x86/crypto/glue_helper.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/crypto/glue_helper.c
+++ linux-3.18.13-rt10-r7s4/arch/x86/crypto/glue_helper.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:42 @ static int __glue_ecb_crypt_128bit(const
 	void *ctx = crypto_blkcipher_ctx(desc->tfm);
 	const unsigned int bsize = 128 / 8;
 	unsigned int nbytes, i, func_bytes;
-	bool fpu_enabled = false;
+	bool fpu_enabled;
 	int err;
 
 	err = blkcipher_walk_virt(desc, walk);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:52 @ static int __glue_ecb_crypt_128bit(const
 		u8 *wdst = walk->dst.virt.addr;
 
 		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
-					     desc, fpu_enabled, nbytes);
+					     desc, false, nbytes);
 
 		for (i = 0; i < gctx->num_funcs; i++) {
 			func_bytes = bsize * gctx->funcs[i].num_blocks;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:74 @ static int __glue_ecb_crypt_128bit(const
 		}
 
 done:
+		glue_fpu_end(fpu_enabled);
 		err = blkcipher_walk_done(desc, walk, nbytes);
 	}
 
-	glue_fpu_end(fpu_enabled);
 	return err;
 }
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:197 @ int glue_cbc_decrypt_128bit(const struct
 			    struct scatterlist *src, unsigned int nbytes)
 {
 	const unsigned int bsize = 128 / 8;
-	bool fpu_enabled = false;
+	bool fpu_enabled;
 	struct blkcipher_walk walk;
 	int err;
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:206 @ int glue_cbc_decrypt_128bit(const struct
 
 	while ((nbytes = walk.nbytes)) {
 		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
-					     desc, fpu_enabled, nbytes);
+					     desc, false, nbytes);
 		nbytes = __glue_cbc_decrypt_128bit(gctx, desc, &walk);
+		glue_fpu_end(fpu_enabled);
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
 
-	glue_fpu_end(fpu_enabled);
 	return err;
 }
 EXPORT_SYMBOL_GPL(glue_cbc_decrypt_128bit);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:281 @ int glue_ctr_crypt_128bit(const struct c
 			  struct scatterlist *src, unsigned int nbytes)
 {
 	const unsigned int bsize = 128 / 8;
-	bool fpu_enabled = false;
+	bool fpu_enabled;
 	struct blkcipher_walk walk;
 	int err;
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:290 @ int glue_ctr_crypt_128bit(const struct c
 
 	while ((nbytes = walk.nbytes) >= bsize) {
 		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
-					     desc, fpu_enabled, nbytes);
+					     desc, false, nbytes);
 		nbytes = __glue_ctr_crypt_128bit(gctx, desc, &walk);
+		glue_fpu_end(fpu_enabled);
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 	}
 
-	glue_fpu_end(fpu_enabled);
-
 	if (walk.nbytes) {
 		glue_ctr_crypt_final_128bit(
 			gctx->funcs[gctx->num_funcs - 1].fn_u.ctr, desc, &walk);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:350 @ int glue_xts_crypt_128bit(const struct c
 			  void *tweak_ctx, void *crypt_ctx)
 {
 	const unsigned int bsize = 128 / 8;
-	bool fpu_enabled = false;
+	bool fpu_enabled;
 	struct blkcipher_walk walk;
 	int err;
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:363 @ int glue_xts_crypt_128bit(const struct c
 
 	/* set minimum length to bsize, for tweak_fn */
 	fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
-				     desc, fpu_enabled,
+				     desc, false,
 				     nbytes < bsize ? bsize : nbytes);
-
 	/* calculate first value of T */
 	tweak_fn(tweak_ctx, walk.iv, walk.iv);
+	glue_fpu_end(fpu_enabled);
 
 	while (nbytes) {
+		fpu_enabled = glue_fpu_begin(bsize, gctx->fpu_blocks_limit,
+				desc, false, nbytes);
 		nbytes = __glue_xts_crypt_128bit(gctx, crypt_ctx, desc, &walk);
 
+		glue_fpu_end(fpu_enabled);
 		err = blkcipher_walk_done(desc, &walk, nbytes);
 		nbytes = walk.nbytes;
 	}
-
-	glue_fpu_end(fpu_enabled);
-
 	return err;
 }
 EXPORT_SYMBOL_GPL(glue_xts_crypt_128bit);
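Every conversion in these two crypto files follows the same rule: on RT the FPU section must not be held across blkcipher_walk_done(), which may sleep, so each loop iteration begins and ends its own FPU section. A minimal sketch of the pattern, where fpu_begin()/fpu_end()/do_crypt() are stand-ins for the cast5_fpu_begin()/glue_fpu_begin() family used above:

	/* Sketch only: fpu_begin()/fpu_end()/do_crypt() stand in for
	 * the cast5/glue helpers patched above.
	 */
	while ((nbytes = walk.nbytes) >= bsize) {
		bool fpu = fpu_begin(false, nbytes);	/* per-iteration begin */
		nbytes = do_crypt(desc, &walk);		/* FPU-using work only */
		fpu_end(fpu);				/* end before anything ... */
		err = blkcipher_walk_done(desc, &walk, nbytes); /* ... that may sleep */
	}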
Index: linux-3.18.13-rt10-r7s4/arch/x86/include/asm/preempt.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/include/asm/preempt.h
+++ linux-3.18.13-rt10-r7s4/arch/x86/include/asm/preempt.h
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:88 @ static __always_inline void __preempt_co
  * a decrement which hits zero means we have no preempt_count and should
  * reschedule.
  */
-static __always_inline bool __preempt_count_dec_and_test(void)
+static __always_inline bool ____preempt_count_dec_and_test(void)
 {
 	GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
 }
 
+static __always_inline bool __preempt_count_dec_and_test(void)
+{
+	if (____preempt_count_dec_and_test())
+		return true;
+#ifdef CONFIG_PREEMPT_LAZY
+	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+#else
+	return false;
+#endif
+}
+
 /*
  * Returns true when we need to resched and can (barring IRQ state).
  */
 static __always_inline bool should_resched(void)
 {
+#ifdef CONFIG_PREEMPT_LAZY
+	return unlikely(!raw_cpu_read_4(__preempt_count) || \
+			test_thread_flag(TIF_NEED_RESCHED_LAZY));
+#else
 	return unlikely(!raw_cpu_read_4(__preempt_count));
+#endif
 }
 
 #ifdef CONFIG_PREEMPT
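For reference, the lazy variant only differs in that a zero preempt count is no longer the sole trigger; condensed into one helper (a sketch equivalent to the patched code above):

	/* Condensed sketch of the CONFIG_PREEMPT_LAZY decision above. */
	static __always_inline bool need_preempt(void)
	{
		if (!raw_cpu_read_4(__preempt_count))	/* count 0 + NEED_RESCHED */
			return true;
	#ifdef CONFIG_PREEMPT_LAZY
		return test_thread_flag(TIF_NEED_RESCHED_LAZY);
	#else
		return false;
	#endif
	}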
Index: linux-3.18.13-rt10-r7s4/arch/x86/include/asm/signal.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/include/asm/signal.h
+++ linux-3.18.13-rt10-r7s4/arch/x86/include/asm/signal.h
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:26 @ typedef struct {
 	unsigned long sig[_NSIG_WORDS];
 } sigset_t;
 
+/*
+ * Because some traps use the IST stack, we must keep preemption
+ * disabled while calling do_trap(); but do_trap() may call
+ * force_sig_info(), which grabs the task's signal spinlocks, and
+ * on PREEMPT_RT_FULL those are sleeping mutexes.  When
+ * ARCH_RT_DELAYS_SIGNAL_SEND is defined, force_sig_info() instead
+ * sets TIF_NOTIFY_RESUME and arranges for the signal to be sent on
+ * exit from the trap.
+ */
+#if defined(CONFIG_PREEMPT_RT_FULL) && defined(CONFIG_X86_64)
+#define ARCH_RT_DELAYS_SIGNAL_SEND
+#endif
+
 #ifndef CONFIG_COMPAT
 typedef sigset_t compat_sigset_t;
 #endif
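The matching consumer is the do_notify_resume() hunk later in this series; the sending side (a sketch, assuming the per-task 'forced_info' siginfo slot that the RT patch adds to task_struct) parks the signal instead of taking the now-sleeping sighand locks:

	/* Sketch of force_sig_info() under ARCH_RT_DELAYS_SIGNAL_SEND. */
	if (in_atomic()) {
		current->forced_info = *info;		/* park the siginfo ... */
		set_thread_flag(TIF_NOTIFY_RESUME);	/* ... deliver on trap exit */
		return 0;
	}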
Index: linux-3.18.13-rt10-r7s4/arch/x86/include/asm/stackprotector.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/include/asm/stackprotector.h
+++ linux-3.18.13-rt10-r7s4/arch/x86/include/asm/stackprotector.h
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:60 @
  */
 static __always_inline void boot_init_stack_canary(void)
 {
-	u64 canary;
+	u64 uninitialized_var(canary);
 	u64 tsc;
 
 #ifdef CONFIG_X86_64
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:71 @ static __always_inline void boot_init_st
 	 * of randomness. The TSC only matters for very early init,
 	 * there it already has some randomness on most systems. Later
 	 * on during the bootup the random pool has true entropy too.
+	 *
+	 * For preempt-rt we have to weaken the randomness a bit: we
+	 * cannot call into the random generator from atomic context
+	 * due to locking constraints, so we leave the canary
+	 * uninitialized and rely on the TSC-based randomness added on
+	 * top of it.
 	 */
+#ifndef CONFIG_PREEMPT_RT_FULL
 	get_random_bytes(&canary, sizeof(canary));
+#endif
 	tsc = __native_read_tsc();
 	canary += tsc + (tsc << 32UL);
 
Index: linux-3.18.13-rt10-r7s4/arch/x86/include/asm/thread_info.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/include/asm/thread_info.h
+++ linux-3.18.13-rt10-r7s4/arch/x86/include/asm/thread_info.h
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:33 @ struct thread_info {
 	__u32			status;		/* thread synchronous flags */
 	__u32			cpu;		/* current CPU */
 	int			saved_preempt_count;
+	int			preempt_lazy_count;	/* 0 => lazy preemptable,
+							   <0 => BUG */
 	mm_segment_t		addr_limit;
 	struct restart_block    restart_block;
 	void __user		*sysenter_return;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:80 @ struct thread_info {
 #define TIF_SYSCALL_EMU		6	/* syscall emulation active */
 #define TIF_SYSCALL_AUDIT	7	/* syscall auditing active */
 #define TIF_SECCOMP		8	/* secure computing */
+#define TIF_NEED_RESCHED_LAZY	9	/* lazy rescheduling necessary */
 #define TIF_MCE_NOTIFY		10	/* notify userspace of an MCE */
 #define TIF_USER_RETURN_NOTIFY	11	/* notify kernel of userspace return */
 #define TIF_UPROBE		12	/* breakpointed or singlestepping */
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:106 @ struct thread_info {
 #define _TIF_SYSCALL_EMU	(1 << TIF_SYSCALL_EMU)
 #define _TIF_SYSCALL_AUDIT	(1 << TIF_SYSCALL_AUDIT)
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
+#define _TIF_NEED_RESCHED_LAZY	(1 << TIF_NEED_RESCHED_LAZY)
 #define _TIF_MCE_NOTIFY		(1 << TIF_MCE_NOTIFY)
 #define _TIF_USER_RETURN_NOTIFY	(1 << TIF_USER_RETURN_NOTIFY)
 #define _TIF_UPROBE		(1 << TIF_UPROBE)
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:157 @ struct thread_info {
 #define _TIF_WORK_CTXSW_PREV (_TIF_WORK_CTXSW|_TIF_USER_RETURN_NOTIFY)
 #define _TIF_WORK_CTXSW_NEXT (_TIF_WORK_CTXSW)
 
+#define _TIF_NEED_RESCHED_MASK (_TIF_NEED_RESCHED | _TIF_NEED_RESCHED_LAZY)
+
 #define STACK_WARN		(THREAD_SIZE/8)
 #define KERNEL_STACK_OFFSET	(5*(BITS_PER_LONG/8))
 
Index: linux-3.18.13-rt10-r7s4/arch/x86/include/asm/uv/uv_bau.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/include/asm/uv/uv_bau.h
+++ linux-3.18.13-rt10-r7s4/arch/x86/include/asm/uv/uv_bau.h
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:618 @ struct bau_control {
 	cycles_t		send_message;
 	cycles_t		period_end;
 	cycles_t		period_time;
-	spinlock_t		uvhub_lock;
-	spinlock_t		queue_lock;
-	spinlock_t		disable_lock;
+	raw_spinlock_t		uvhub_lock;
+	raw_spinlock_t		queue_lock;
+	raw_spinlock_t		disable_lock;
 	/* tunables */
 	int			max_concurr;
 	int			max_concurr_const;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:779 @ static inline int atom_asr(short i, stru
  * to be lowered below the current 'v'.  atomic_add_unless can only stop
  * on equal.
  */
-static inline int atomic_inc_unless_ge(spinlock_t *lock, atomic_t *v, int u)
+static inline int atomic_inc_unless_ge(raw_spinlock_t *lock, atomic_t *v, int u)
 {
-	spin_lock(lock);
+	raw_spin_lock(lock);
 	if (atomic_read(v) >= u) {
-		spin_unlock(lock);
+		raw_spin_unlock(lock);
 		return 0;
 	}
 	atomic_inc(v);
-	spin_unlock(lock);
+	raw_spin_unlock(lock);
 	return 1;
 }
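The raw_spinlock_t conversion is needed because on PREEMPT_RT a plain spinlock_t becomes a sleeping rtmutex, which must not be taken from the effectively-atomic BAU interrupt paths; raw_spinlock_t keeps the classic busy-spinning behaviour. In sketch form:

	static DEFINE_RAW_SPINLOCK(hw_lock);	/* spins even on RT */

	raw_spin_lock(&hw_lock);	/* legal in truly atomic context */
	/* short, bounded critical section touching hardware state */
	raw_spin_unlock(&hw_lock);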
 
Index: linux-3.18.13-rt10-r7s4/arch/x86/include/asm/uv/uv_hub.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/include/asm/uv/uv_hub.h
+++ linux-3.18.13-rt10-r7s4/arch/x86/include/asm/uv/uv_hub.h
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:495 @ struct uv_blade_info {
 	unsigned short	nr_online_cpus;
 	unsigned short	pnode;
 	short		memory_nid;
-	spinlock_t	nmi_lock;	/* obsolete, see uv_hub_nmi */
+	raw_spinlock_t	nmi_lock;	/* obsolete, see uv_hub_nmi */
 	unsigned long	nmi_count;	/* obsolete, see uv_hub_nmi */
 };
 extern struct uv_blade_info *uv_blade_info;
Index: linux-3.18.13-rt10-r7s4/arch/x86/kernel/apic/io_apic.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/kernel/apic/io_apic.c
+++ linux-3.18.13-rt10-r7s4/arch/x86/kernel/apic/io_apic.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:2497 @ static bool io_apic_level_ack_pending(st
 static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
 {
 	/* If we are moving the irq we need to mask it */
-	if (unlikely(irqd_is_setaffinity_pending(data))) {
+	if (unlikely(irqd_is_setaffinity_pending(data) &&
+		     !irqd_irq_inprogress(data))) {
 		mask_ioapic(cfg);
 		return true;
 	}
Index: linux-3.18.13-rt10-r7s4/arch/x86/kernel/apic/x2apic_uv_x.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/kernel/apic/x2apic_uv_x.c
+++ linux-3.18.13-rt10-r7s4/arch/x86/kernel/apic/x2apic_uv_x.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:921 @ void __init uv_system_init(void)
 			uv_blade_info[blade].pnode = pnode;
 			uv_blade_info[blade].nr_possible_cpus = 0;
 			uv_blade_info[blade].nr_online_cpus = 0;
-			spin_lock_init(&uv_blade_info[blade].nmi_lock);
+			raw_spin_lock_init(&uv_blade_info[blade].nmi_lock);
 			min_pnode = min(pnode, min_pnode);
 			max_pnode = max(pnode, max_pnode);
 			blade++;
Index: linux-3.18.13-rt10-r7s4/arch/x86/kernel/asm-offsets.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/kernel/asm-offsets.c
+++ linux-3.18.13-rt10-r7s4/arch/x86/kernel/asm-offsets.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:35 @ void common(void) {
 	OFFSET(TI_flags, thread_info, flags);
 	OFFSET(TI_status, thread_info, status);
 	OFFSET(TI_addr_limit, thread_info, addr_limit);
+	OFFSET(TI_preempt_lazy_count, thread_info, preempt_lazy_count);
 
 	BLANK();
 	OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:75 @ void common(void) {
 
 	BLANK();
 	DEFINE(PTREGS_SIZE, sizeof(struct pt_regs));
+	DEFINE(_PREEMPT_ENABLED, PREEMPT_ENABLED);
 }
Index: linux-3.18.13-rt10-r7s4/arch/x86/kernel/cpu/mcheck/mce.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/kernel/cpu/mcheck/mce.c
+++ linux-3.18.13-rt10-r7s4/arch/x86/kernel/cpu/mcheck/mce.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:44 @
 #include <linux/debugfs.h>
 #include <linux/irq_work.h>
 #include <linux/export.h>
+#include <linux/jiffies.h>
+#include <linux/work-simple.h>
 
 #include <asm/processor.h>
 #include <asm/mce.h>
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1271 @ void mce_log_therm_throt_event(__u64 sta
 static unsigned long check_interval = 5 * 60; /* 5 minutes */
 
 static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
-static DEFINE_PER_CPU(struct timer_list, mce_timer);
+static DEFINE_PER_CPU(struct hrtimer, mce_timer);
 
 static unsigned long mce_adjust_timer_default(unsigned long interval)
 {
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1288 @ static int cmc_error_seen(void)
 	return test_and_clear_bit(0, v);
 }
 
-static void mce_timer_fn(unsigned long data)
+static enum hrtimer_restart mce_timer_fn(struct hrtimer *timer)
 {
-	struct timer_list *t = this_cpu_ptr(&mce_timer);
 	unsigned long iv;
 	int notify;
 
-	WARN_ON(smp_processor_id() != data);
-
 	if (mce_available(this_cpu_ptr(&cpu_info))) {
 		machine_check_poll(MCP_TIMESTAMP,
 				this_cpu_ptr(&mce_poll_banks));
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1315 @ static void mce_timer_fn(unsigned long d
 	__this_cpu_write(mce_next_interval, iv);
 	/* Might have become 0 after CMCI storm subsided */
 	if (iv) {
-		t->expires = jiffies + iv;
-		add_timer_on(t, smp_processor_id());
+		hrtimer_forward_now(timer, ns_to_ktime(
+					jiffies_to_usecs(iv) * 1000ULL));
+		return HRTIMER_RESTART;
 	}
+	return HRTIMER_NORESTART;
 }
 
 /*
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1327 @ static void mce_timer_fn(unsigned long d
  */
 void mce_timer_kick(unsigned long interval)
 {
-	struct timer_list *t = this_cpu_ptr(&mce_timer);
-	unsigned long when = jiffies + interval;
+	struct hrtimer *t = this_cpu_ptr(&mce_timer);
 	unsigned long iv = __this_cpu_read(mce_next_interval);
 
-	if (timer_pending(t)) {
-		if (time_before(when, t->expires))
-			mod_timer_pinned(t, when);
+	if (hrtimer_active(t)) {
+		s64 exp;
+		s64 intv_us;
+
+		intv_us = jiffies_to_usecs(interval);
+		exp = ktime_to_us(hrtimer_expires_remaining(t));
+		if (intv_us < exp) {
+			hrtimer_cancel(t);
+			hrtimer_start_range_ns(t,
+					ns_to_ktime(intv_us * 1000),
+					0, HRTIMER_MODE_REL_PINNED);
+		}
 	} else {
-		t->expires = round_jiffies(when);
-		add_timer_on(t, smp_processor_id());
+		hrtimer_start_range_ns(t,
+			ns_to_ktime(jiffies_to_usecs(interval) * 1000ULL),
+				0, HRTIMER_MODE_REL_PINNED);
 	}
 	if (interval < iv)
 		__this_cpu_write(mce_next_interval, interval);
 }
 
-/* Must not be called in IRQ context where del_timer_sync() can deadlock */
+/* Must not be called in IRQ context where hrtimer_cancel() can deadlock */
 static void mce_timer_delete_all(void)
 {
 	int cpu;
 
 	for_each_online_cpu(cpu)
-		del_timer_sync(&per_cpu(mce_timer, cpu));
+		hrtimer_cancel(&per_cpu(mce_timer, cpu));
 }
 
 static void mce_do_trigger(struct work_struct *work)
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1367 @ static void mce_do_trigger(struct work_s
 
 static DECLARE_WORK(mce_trigger_work, mce_do_trigger);
 
+static void __mce_notify_work(struct swork_event *event)
+{
+	/* Not more than two messages every minute */
+	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
+
+	/* wake processes polling /dev/mcelog */
+	wake_up_interruptible(&mce_chrdev_wait);
+
+	/*
+	 * There is no risk of missing notifications because the
+	 * work's pending bit is always cleared before the callback
+	 * is executed.
+	 */
+	if (mce_helper[0] && !work_pending(&mce_trigger_work))
+		schedule_work(&mce_trigger_work);
+
+	if (__ratelimit(&ratelimit))
+		pr_info(HW_ERR "Machine check events logged\n");
+}
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+static bool notify_work_ready __read_mostly;
+static struct swork_event notify_work;
+
+static int mce_notify_work_init(void)
+{
+	int err;
+
+	err = swork_get();
+	if (err)
+		return err;
+
+	INIT_SWORK(&notify_work, __mce_notify_work);
+	notify_work_ready = true;
+	return 0;
+}
+
+static void mce_notify_work(void)
+{
+	if (notify_work_ready)
+		swork_queue(&notify_work);
+}
+#else
+static void mce_notify_work(void)
+{
+	__mce_notify_work(NULL);
+}
+static inline int mce_notify_work_init(void) { return 0; }
+#endif
+
 /*
  * Notify the user(s) about new machine check events.
  * Can be called from interrupt context, but not from machine check/NMI
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1424 @ static DECLARE_WORK(mce_trigger_work, mc
  */
 int mce_notify_irq(void)
 {
-	/* Not more than two messages every minute */
-	static DEFINE_RATELIMIT_STATE(ratelimit, 60*HZ, 2);
-
 	if (test_and_clear_bit(0, &mce_need_notify)) {
-		/* wake processes polling /dev/mcelog */
-		wake_up_interruptible(&mce_chrdev_wait);
-
-		if (mce_helper[0])
-			schedule_work(&mce_trigger_work);
-
-		if (__ratelimit(&ratelimit))
-			pr_info(HW_ERR "Machine check events logged\n");
-
+		mce_notify_work();
 		return 1;
 	}
 	return 0;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1696 @ static void __mcheck_cpu_init_vendor(str
 	}
 }
 
-static void mce_start_timer(unsigned int cpu, struct timer_list *t)
+static void mce_start_timer(unsigned int cpu, struct hrtimer *t)
 {
 	unsigned long iv = check_interval * HZ;
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1705 @ static void mce_start_timer(unsigned int
 
 	per_cpu(mce_next_interval, cpu) = iv;
 
-	t->expires = round_jiffies(jiffies + iv);
-	add_timer_on(t, cpu);
+	hrtimer_start_range_ns(t, ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL),
+			0, HRTIMER_MODE_REL_PINNED);
 }
 
 static void __mcheck_cpu_init_timer(void)
 {
-	struct timer_list *t = this_cpu_ptr(&mce_timer);
+	struct hrtimer *t = this_cpu_ptr(&mce_timer);
 	unsigned int cpu = smp_processor_id();
 
-	setup_timer(t, mce_timer_fn, cpu);
+	hrtimer_init(t, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	t->function = mce_timer_fn;
 	mce_start_timer(cpu, t);
 }
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:2392 @ static void mce_disable_cpu(void *h)
 	if (!mce_available(raw_cpu_ptr(&cpu_info)))
 		return;
 
+	hrtimer_cancel(this_cpu_ptr(&mce_timer));
+
 	if (!(action & CPU_TASKS_FROZEN))
 		cmci_clear();
 	for (i = 0; i < mca_cfg.banks; i++) {
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:2420 @ static void mce_reenable_cpu(void *h)
 		if (b->init)
 			wrmsrl(MSR_IA32_MCx_CTL(i), b->ctl);
 	}
+	__mcheck_cpu_init_timer();
 }
 
 /* Get notified when a cpu comes on/off. Be hotplug friendly. */
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:2428 @ static int
 mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
 	unsigned int cpu = (unsigned long)hcpu;
-	struct timer_list *t = &per_cpu(mce_timer, cpu);
 
 	switch (action & ~CPU_TASKS_FROZEN) {
 	case CPU_ONLINE:
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:2447 @ mce_cpu_callback(struct notifier_block *
 		break;
 	case CPU_DOWN_PREPARE:
 		smp_call_function_single(cpu, mce_disable_cpu, &action, 1);
-		del_timer_sync(t);
 		break;
 	case CPU_DOWN_FAILED:
 		smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
-		mce_start_timer(cpu, t);
 		break;
 	}
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:2488 @ static __init int mcheck_init_device(voi
 		goto err_out;
 	}
 
+	err = mce_notify_work_init();
+	if (err)
+		goto err_out;
+
 	if (!zalloc_cpumask_var(&mce_device_initialized, GFP_KERNEL)) {
 		err = -ENOMEM;
 		goto err_out;
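All three timer sites above open-code the same jiffies-to-ktime conversion; written once as a helper (the helper name is illustrative only):

	#include <linux/jiffies.h>
	#include <linux/ktime.h>

	/* Hypothetical helper matching the open-coded conversion in
	 * mce_timer_fn(), mce_timer_kick() and mce_start_timer().
	 */
	static inline ktime_t mce_iv_to_ktime(unsigned long iv)
	{
		return ns_to_ktime(jiffies_to_usecs(iv) * 1000ULL);
	}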
Index: linux-3.18.13-rt10-r7s4/arch/x86/kernel/entry_32.S
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/kernel/entry_32.S
+++ linux-3.18.13-rt10-r7s4/arch/x86/kernel/entry_32.S
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:362 @ END(ret_from_exception)
 ENTRY(resume_kernel)
 	DISABLE_INTERRUPTS(CLBR_ANY)
 need_resched:
+	# preempt count == 0 + NEED_RESCHED set?
 	cmpl $0,PER_CPU_VAR(__preempt_count)
+#ifndef CONFIG_PREEMPT_LAZY
 	jnz restore_all
+#else
+	jz test_int_off
+
+	# at least preempt count == 0 ?
+	cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
+	jne restore_all
+
+	cmpl $0,TI_preempt_lazy_count(%ebp)	# non-zero preempt_lazy_count ?
+	jnz restore_all
+
+	testl $_TIF_NEED_RESCHED_LAZY, TI_flags(%ebp)
+	jz restore_all
+test_int_off:
+#endif
 	testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)	# interrupts off (exception path) ?
 	jz restore_all
 	call preempt_schedule_irq
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:610 @ ENDPROC(system_call)
 	ALIGN
 	RING0_PTREGS_FRAME		# can't unwind into user space anyway
 work_pending:
-	testb $_TIF_NEED_RESCHED, %cl
+	testl $_TIF_NEED_RESCHED_MASK, %ecx
 	jz work_notifysig
 work_resched:
 	call schedule
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:623 @ work_resched:
 	andl $_TIF_WORK_MASK, %ecx	# is there any work to be done other
 					# than syscall tracing?
 	jz restore_all
-	testb $_TIF_NEED_RESCHED, %cl
+	testl $_TIF_NEED_RESCHED_MASK, %ecx
 	jnz work_resched
 
 work_notifysig:				# deal with pending signals and
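Rendered as C, the patched resume_kernel check reads roughly as below (a sketch; %ebp holds the thread_info pointer, and x86's __preempt_count folds the inverted NEED_RESCHED bit in, so a value of zero means "preemptible and resched requested"):

	/* C-level sketch of the CONFIG_PREEMPT_LAZY path above. */
	if (__preempt_count != 0) {			/* not the regular case */
		if (__preempt_count != _PREEMPT_ENABLED)
			goto restore_all;		/* preemption disabled */
		if (ti->preempt_lazy_count != 0)
			goto restore_all;		/* lazy preemption disabled */
		if (!(ti->flags & _TIF_NEED_RESCHED_LAZY))
			goto restore_all;		/* nothing lazily pending */
	}
	if (regs->flags & X86_EFLAGS_IF)		/* irqs were on? */
		preempt_schedule_irq();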
Index: linux-3.18.13-rt10-r7s4/arch/x86/kernel/entry_64.S
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/kernel/entry_64.S
+++ linux-3.18.13-rt10-r7s4/arch/x86/kernel/entry_64.S
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:457 @ sysret_check:
 	/* Handle reschedules */
 	/* edx:	work, edi: workmask */
 sysret_careful:
-	bt $TIF_NEED_RESCHED,%edx
-	jnc sysret_signal
+	testl $_TIF_NEED_RESCHED_MASK,%edx
+	jz sysret_signal
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushq_cfi %rdi
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:557 @ GLOBAL(int_with_check)
 	/* First do a reschedule test. */
 	/* edx:	work, edi: workmask */
 int_careful:
-	bt $TIF_NEED_RESCHED,%edx
-	jnc  int_very_careful
+	testl $_TIF_NEED_RESCHED_MASK,%edx
+	jz  int_very_careful
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushq_cfi %rdi
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:873 @ native_irq_return_ldt:
 	/* edi: workmask, edx: work */
 retint_careful:
 	CFI_RESTORE_STATE
-	bt    $TIF_NEED_RESCHED,%edx
-	jnc   retint_signal
+	testl $_TIF_NEED_RESCHED_MASK,%edx
+	jz   retint_signal
 	TRACE_IRQS_ON
 	ENABLE_INTERRUPTS(CLBR_NONE)
 	pushq_cfi %rdi
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:906 @ retint_signal:
 	/* rcx:	 threadinfo. interrupts off. */
 ENTRY(retint_kernel)
 	cmpl $0,PER_CPU_VAR(__preempt_count)
+#ifndef CONFIG_PREEMPT_LAZY
 	jnz  retint_restore_args
+#else
+	jz  check_int_off
+
+	# at least preempt count == 0 ?
+	cmpl $_PREEMPT_ENABLED,PER_CPU_VAR(__preempt_count)
+	jnz retint_restore_args
+
+	cmpl $0, TI_preempt_lazy_count(%rcx)
+	jnz retint_restore_args
+
+	bt $TIF_NEED_RESCHED_LAZY,TI_flags(%rcx)
+	jnc  retint_restore_args
+check_int_off:
+#endif
 	bt   $9,EFLAGS-ARGOFFSET(%rsp)	/* interrupts off? */
 	jnc  retint_restore_args
 	call preempt_schedule_irq
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1137 @ bad_gs:
 	jmp  2b
 	.previous
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /* Call softirq on interrupt stack. Interrupts are off. */
 ENTRY(do_softirq_own_stack)
 	CFI_STARTPROC
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1157 @ ENTRY(do_softirq_own_stack)
 	ret
 	CFI_ENDPROC
 END(do_softirq_own_stack)
+#endif
 
 #ifdef CONFIG_XEN
 idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1322 @ paranoid_userspace:
 	movq %rsp,%rdi			/* &pt_regs */
 	call sync_regs
 	movq %rax,%rsp			/* switch stack for scheduling */
-	testl $_TIF_NEED_RESCHED,%ebx
+	testl $_TIF_NEED_RESCHED_MASK,%ebx
 	jnz paranoid_schedule
 	movl %ebx,%edx			/* arg3: thread flags */
 	TRACE_IRQS_ON
Index: linux-3.18.13-rt10-r7s4/arch/x86/kernel/irq_32.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/kernel/irq_32.c
+++ linux-3.18.13-rt10-r7s4/arch/x86/kernel/irq_32.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:145 @ void irq_ctx_init(int cpu)
 	       cpu, per_cpu(hardirq_stack, cpu),  per_cpu(softirq_stack, cpu));
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 void do_softirq_own_stack(void)
 {
 	struct thread_info *curstk;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:164 @ void do_softirq_own_stack(void)
 
 	call_on_stack(__do_softirq, isp);
 }
+#endif
 
 bool handle_irq(unsigned irq, struct pt_regs *regs)
 {
Index: linux-3.18.13-rt10-r7s4/arch/x86/kernel/process_32.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/kernel/process_32.c
+++ linux-3.18.13-rt10-r7s4/arch/x86/kernel/process_32.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:38 @
 #include <linux/uaccess.h>
 #include <linux/io.h>
 #include <linux/kdebug.h>
+#include <linux/highmem.h>
 
 #include <asm/pgtable.h>
 #include <asm/ldt.h>
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:218 @ start_thread(struct pt_regs *regs, unsig
 }
 EXPORT_SYMBOL_GPL(start_thread);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static void switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p)
+{
+	int i;
+
+	/*
+	 * Clear @prev_p's kmap_atomic mappings
+	 */
+	for (i = 0; i < prev_p->kmap_idx; i++) {
+		int idx = i + KM_TYPE_NR * smp_processor_id();
+		pte_t *ptep = kmap_pte - idx;
+
+		kpte_clear_flush(ptep, __fix_to_virt(FIX_KMAP_BEGIN + idx));
+	}
+	/*
+	 * Restore @next_p's kmap_atomic mappings
+	 */
+	for (i = 0; i < next_p->kmap_idx; i++) {
+		int idx = i + KM_TYPE_NR * smp_processor_id();
+
+		if (!pte_none(next_p->kmap_pte[i]))
+			set_pte(kmap_pte - idx, next_p->kmap_pte[i]);
+	}
+}
+#else
+static inline void
+switch_kmaps(struct task_struct *prev_p, struct task_struct *next_p) { }
+#endif
+
 
 /*
  *	switch_to(x,y) should switch tasks from x to y.
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:334 @ __switch_to(struct task_struct *prev_p,
 		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
 		__switch_to_xtra(prev_p, next_p, tss);
 
+	switch_kmaps(prev_p, next_p);
+
 	/*
 	 * Leave lazy mode, flushing any hypercalls made here.
 	 * This must be done before restoring TLS segments so
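switch_kmaps() only works because, on RT, kmap_atomic() records each mapping's pte in the task (see the highmem_32.c and iomap_32.c hunks below). Under that assumption a kmap_atomic section may be preempted and is simply replayed when the task runs again:

	/* On RT this section is preemptible; the pte is stashed in
	 * current->kmap_pte[] and restored by switch_kmaps().
	 */
	void *vaddr = kmap_atomic(page);
	memcpy(vaddr + offset, src, len);
	kunmap_atomic(vaddr);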
Index: linux-3.18.13-rt10-r7s4/arch/x86/kernel/signal.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/kernel/signal.c
+++ linux-3.18.13-rt10-r7s4/arch/x86/kernel/signal.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:749 @ do_notify_resume(struct pt_regs *regs, v
 		mce_notify_process();
 #endif /* CONFIG_X86_64 && CONFIG_X86_MCE */
 
+#ifdef ARCH_RT_DELAYS_SIGNAL_SEND
+	if (unlikely(current->forced_info.si_signo)) {
+		struct task_struct *t = current;
+		force_sig_info(t->forced_info.si_signo, &t->forced_info, t);
+		t->forced_info.si_signo = 0;
+	}
+#endif
+
 	if (thread_info_flags & _TIF_UPROBE)
 		uprobe_notify_resume(regs);
 
Index: linux-3.18.13-rt10-r7s4/arch/x86/kernel/traps.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/kernel/traps.c
+++ linux-3.18.13-rt10-r7s4/arch/x86/kernel/traps.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:90 @ static inline void conditional_sti(struc
 		local_irq_enable();
 }
 
-static inline void preempt_conditional_sti(struct pt_regs *regs)
+static inline void conditional_sti_ist(struct pt_regs *regs)
 {
+#ifdef CONFIG_X86_64
+	/*
+	 * X86_64 uses a per-CPU IST stack for certain traps such as
+	 * int3. A task cannot be preempted while it is on one of
+	 * these stacks, so preemption must be disabled here;
+	 * otherwise the stack would be corrupted if the task were
+	 * scheduled out and another task came in and used the same
+	 * stack.
+	 *
+	 * On x86_32 the task stays on its own stack, so it is fine
+	 * if it schedules out.
+	 */
 	preempt_count_inc();
+#endif
 	if (regs->flags & X86_EFLAGS_IF)
 		local_irq_enable();
 }
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:115 @ static inline void conditional_cli(struc
 		local_irq_disable();
 }
 
-static inline void preempt_conditional_cli(struct pt_regs *regs)
+static inline void conditional_cli_ist(struct pt_regs *regs)
 {
 	if (regs->flags & X86_EFLAGS_IF)
 		local_irq_disable();
+#ifdef CONFIG_X86_64
 	preempt_count_dec();
+#endif
 }
 
 static nokprobe_inline int
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:389 @ dotraplinkage void notrace do_int3(struc
 	 * as we may switch to the interrupt stack.
 	 */
 	debug_stack_usage_inc();
-	preempt_conditional_sti(regs);
+	conditional_sti_ist(regs);
 	do_trap(X86_TRAP_BP, SIGTRAP, "int3", regs, error_code, NULL);
-	preempt_conditional_cli(regs);
+	conditional_cli_ist(regs);
 	debug_stack_usage_dec();
 exit:
 	exception_exit(prev_state);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:534 @ dotraplinkage void do_debug(struct pt_re
 	debug_stack_usage_inc();
 
 	/* It's safe to allow irq's after DR6 has been saved */
-	preempt_conditional_sti(regs);
+	conditional_sti_ist(regs);
 
 	if (regs->flags & X86_VM_MASK) {
 		handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
 					X86_TRAP_DB);
-		preempt_conditional_cli(regs);
+		conditional_cli_ist(regs);
 		debug_stack_usage_dec();
 		goto exit;
 	}
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:559 @ dotraplinkage void do_debug(struct pt_re
 	si_code = get_si_code(tsk->thread.debugreg6);
 	if (tsk->thread.debugreg6 & (DR_STEP | DR_TRAP_BITS) || user_icebp)
 		send_sigtrap(tsk, regs, error_code, si_code);
-	preempt_conditional_cli(regs);
+	conditional_cli_ist(regs);
 	debug_stack_usage_dec();
 
 exit:
Index: linux-3.18.13-rt10-r7s4/arch/x86/kvm/lapic.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/kvm/lapic.c
+++ linux-3.18.13-rt10-r7s4/arch/x86/kvm/lapic.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1037 @ static void update_divide_count(struct k
 				   apic->divide_count);
 }
 
+
+static enum hrtimer_restart apic_timer_fn(struct hrtimer *data);
+
+static void apic_timer_expired(struct hrtimer *data)
+{
+	int ret, i = 0;
+	enum hrtimer_restart r;
+	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
+
+	r = apic_timer_fn(data);
+
+	if (r == HRTIMER_RESTART) {
+		do {
+			ret = hrtimer_start_expires(data, HRTIMER_MODE_ABS);
+			if (ret == -ETIME)
+				hrtimer_add_expires_ns(&ktimer->timer,
+							ktimer->period);
+			i++;
+		} while (ret == -ETIME && i < 10);
+
+		if (ret == -ETIME) {
+			printk_once(KERN_ERR "%s: failed to reprogram timer\n",
+			       __func__);
+			WARN_ON_ONCE(1);
+		}
+	}
+}
+
 static void start_apic_timer(struct kvm_lapic *apic)
 {
+	int ret;
 	ktime_t now;
 	atomic_set(&apic->lapic_timer.pending, 0);
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1098 @ static void start_apic_timer(struct kvm_
 			}
 		}
 
-		hrtimer_start(&apic->lapic_timer.timer,
+		ret = hrtimer_start(&apic->lapic_timer.timer,
 			      ktime_add_ns(now, apic->lapic_timer.period),
 			      HRTIMER_MODE_ABS);
+		if (ret == -ETIME)
+			apic_timer_expired(&apic->lapic_timer.timer);
 
 		apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
 			   PRIx64 ", "
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1132 @ static void start_apic_timer(struct kvm_
 			ns = (tscdeadline - guest_tsc) * 1000000ULL;
 			do_div(ns, this_tsc_khz);
 		}
-		hrtimer_start(&apic->lapic_timer.timer,
+		ret = hrtimer_start(&apic->lapic_timer.timer,
 			ktime_add_ns(now, ns), HRTIMER_MODE_ABS);
+		if (ret == -ETIME)
+			apic_timer_expired(&apic->lapic_timer.timer);
 
 		local_irq_restore(flags);
 	}
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1576 @ static enum hrtimer_restart apic_timer_f
 	struct kvm_timer *ktimer = container_of(data, struct kvm_timer, timer);
 	struct kvm_lapic *apic = container_of(ktimer, struct kvm_lapic, lapic_timer);
 	struct kvm_vcpu *vcpu = apic->vcpu;
-	wait_queue_head_t *q = &vcpu->wq;
+	struct swait_head *q = &vcpu->wq;
 
 	/*
 	 * There is a race window between reading and incrementing, but we do
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1590 @ static enum hrtimer_restart apic_timer_f
 		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
 	}
 
-	if (waitqueue_active(q))
-		wake_up_interruptible(q);
+	if (swaitqueue_active(q))
+		swait_wake_interruptible(q);
 
 	if (lapic_is_periodic(apic)) {
 		hrtimer_add_expires_ns(&ktimer->timer, ktimer->period);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1624 @ int kvm_create_lapic(struct kvm_vcpu *vc
 	hrtimer_init(&apic->lapic_timer.timer, CLOCK_MONOTONIC,
 		     HRTIMER_MODE_ABS);
 	apic->lapic_timer.timer.function = apic_timer_fn;
+	apic->lapic_timer.timer.irqsafe = 1;
 
 	/*
 	 * APIC is created enabled. This will prevent kvm_lapic_set_base from
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1745 @ void __kvm_migrate_apic_timer(struct kvm
 
 	timer = &vcpu->arch.apic->lapic_timer.timer;
 	if (hrtimer_cancel(timer))
-		hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
+		if (hrtimer_start_expires(timer, HRTIMER_MODE_ABS) == -ETIME)
+			apic_timer_expired(timer);
 }
 
 /*
Index: linux-3.18.13-rt10-r7s4/arch/x86/kvm/x86.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/kvm/x86.c
+++ linux-3.18.13-rt10-r7s4/arch/x86/kvm/x86.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:5775 @ int kvm_arch_init(void *opaque)
 		goto out;
 	}
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
+		printk(KERN_ERR "RT requires X86_FEATURE_CONSTANT_TSC\n");
+		return -EOPNOTSUPP;
+	}
+#endif
+
 	r = kvm_mmu_module_init();
 	if (r)
 		goto out_free_percpu;
Index: linux-3.18.13-rt10-r7s4/arch/x86/mm/fault.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/mm/fault.c
+++ linux-3.18.13-rt10-r7s4/arch/x86/mm/fault.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1131 @ __do_page_fault(struct pt_regs *regs, un
 	 * If we're in an interrupt, have no user context or are running
 	 * in an atomic region then we must not take the fault:
 	 */
-	if (unlikely(in_atomic() || !mm)) {
+	if (unlikely(!mm || pagefault_disabled())) {
 		bad_area_nosemaphore(regs, error_code, address);
 		return;
 	}
Index: linux-3.18.13-rt10-r7s4/arch/x86/mm/highmem_32.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/mm/highmem_32.c
+++ linux-3.18.13-rt10-r7s4/arch/x86/mm/highmem_32.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:35 @ EXPORT_SYMBOL(kunmap);
  */
 void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 {
+	pte_t pte = mk_pte(page, prot);
 	unsigned long vaddr;
 	int idx, type;
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:49 @ void *kmap_atomic_prot(struct page *page
 	idx = type + KM_TYPE_NR*smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	BUG_ON(!pte_none(*(kmap_pte-idx)));
-	set_pte(kmap_pte-idx, mk_pte(page, prot));
+#ifdef CONFIG_PREEMPT_RT_FULL
+	current->kmap_pte[type] = pte;
+#endif
+	set_pte(kmap_pte-idx, pte);
 	arch_flush_lazy_mmu_mode();
 
 	return (void *)vaddr;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:95 @ void __kunmap_atomic(void *kvaddr)
 		 * is a bad idea also, in case the page changes cacheability
 		 * attributes or becomes a protected page in a hypervisor.
 		 */
+#ifdef CONFIG_PREEMPT_RT_FULL
+		current->kmap_pte[type] = __pte(0);
+#endif
 		kpte_clear_flush(kmap_pte-idx, vaddr);
 		kmap_atomic_idx_pop();
 		arch_flush_lazy_mmu_mode();
Index: linux-3.18.13-rt10-r7s4/arch/x86/mm/iomap_32.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/mm/iomap_32.c
+++ linux-3.18.13-rt10-r7s4/arch/x86/mm/iomap_32.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:59 @ EXPORT_SYMBOL_GPL(iomap_free);
 
 void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
+	pte_t pte = pfn_pte(pfn, prot);
 	unsigned long vaddr;
 	int idx, type;
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:68 @ void *kmap_atomic_prot_pfn(unsigned long
 	type = kmap_atomic_idx_push();
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
-	set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
+	WARN_ON(!pte_none(*(kmap_pte - idx)));
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+	current->kmap_pte[type] = pte;
+#endif
+	set_pte(kmap_pte - idx, pte);
 	arch_flush_lazy_mmu_mode();
 
 	return (void *)vaddr;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:119 @ iounmap_atomic(void __iomem *kvaddr)
 		 * is a bad idea also, in case the page changes cacheability
 		 * attributes or becomes a protected page in a hypervisor.
 		 */
+#ifdef CONFIG_PREEMPT_RT_FULL
+		current->kmap_pte[type] = __pte(0);
+#endif
 		kpte_clear_flush(kmap_pte-idx, vaddr);
 		kmap_atomic_idx_pop();
 	}
Index: linux-3.18.13-rt10-r7s4/arch/x86/platform/uv/tlb_uv.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/platform/uv/tlb_uv.c
+++ linux-3.18.13-rt10-r7s4/arch/x86/platform/uv/tlb_uv.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:717 @ static void destination_plugged(struct b
 
 		quiesce_local_uvhub(hmaster);
 
-		spin_lock(&hmaster->queue_lock);
+		raw_spin_lock(&hmaster->queue_lock);
 		reset_with_ipi(&bau_desc->distribution, bcp);
-		spin_unlock(&hmaster->queue_lock);
+		raw_spin_unlock(&hmaster->queue_lock);
 
 		end_uvhub_quiesce(hmaster);
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:739 @ static void destination_timeout(struct b
 
 		quiesce_local_uvhub(hmaster);
 
-		spin_lock(&hmaster->queue_lock);
+		raw_spin_lock(&hmaster->queue_lock);
 		reset_with_ipi(&bau_desc->distribution, bcp);
-		spin_unlock(&hmaster->queue_lock);
+		raw_spin_unlock(&hmaster->queue_lock);
 
 		end_uvhub_quiesce(hmaster);
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:762 @ static void disable_for_period(struct ba
 	cycles_t tm1;
 
 	hmaster = bcp->uvhub_master;
-	spin_lock(&hmaster->disable_lock);
+	raw_spin_lock(&hmaster->disable_lock);
 	if (!bcp->baudisabled) {
 		stat->s_bau_disabled++;
 		tm1 = get_cycles();
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:775 @ static void disable_for_period(struct ba
 			}
 		}
 	}
-	spin_unlock(&hmaster->disable_lock);
+	raw_spin_unlock(&hmaster->disable_lock);
 }
 
 static void count_max_concurr(int stat, struct bau_control *bcp,
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:838 @ static void record_send_stats(cycles_t t
  */
 static void uv1_throttle(struct bau_control *hmaster, struct ptc_stats *stat)
 {
-	spinlock_t *lock = &hmaster->uvhub_lock;
+	raw_spinlock_t *lock = &hmaster->uvhub_lock;
 	atomic_t *v;
 
 	v = &hmaster->active_descriptor_count;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:971 @ static int check_enable(struct bau_contr
 	struct bau_control *hmaster;
 
 	hmaster = bcp->uvhub_master;
-	spin_lock(&hmaster->disable_lock);
+	raw_spin_lock(&hmaster->disable_lock);
 	if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) {
 		stat->s_bau_reenabled++;
 		for_each_present_cpu(tcpu) {
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:983 @ static int check_enable(struct bau_contr
 				tbcp->period_giveups = 0;
 			}
 		}
-		spin_unlock(&hmaster->disable_lock);
+		raw_spin_unlock(&hmaster->disable_lock);
 		return 0;
 	}
-	spin_unlock(&hmaster->disable_lock);
+	raw_spin_unlock(&hmaster->disable_lock);
 	return -1;
 }
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1902 @ static void __init init_per_cpu_tunables
 		bcp->cong_reps			= congested_reps;
 		bcp->disabled_period =		sec_2_cycles(disabled_period);
 		bcp->giveup_limit =		giveup_limit;
-		spin_lock_init(&bcp->queue_lock);
-		spin_lock_init(&bcp->uvhub_lock);
-		spin_lock_init(&bcp->disable_lock);
+		raw_spin_lock_init(&bcp->queue_lock);
+		raw_spin_lock_init(&bcp->uvhub_lock);
+		raw_spin_lock_init(&bcp->disable_lock);
 	}
 }
 
Index: linux-3.18.13-rt10-r7s4/arch/x86/platform/uv/uv_time.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/x86/platform/uv/uv_time.c
+++ linux-3.18.13-rt10-r7s4/arch/x86/platform/uv/uv_time.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:61 @ static DEFINE_PER_CPU(struct clock_event
 
 /* There is one of these allocated per node */
 struct uv_rtc_timer_head {
-	spinlock_t	lock;
+	raw_spinlock_t	lock;
 	/* next cpu waiting for timer, local node relative: */
 	int		next_cpu;
 	/* number of cpus on this node: */
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:181 @ static __init int uv_rtc_allocate_timers
 				uv_rtc_deallocate_timers();
 				return -ENOMEM;
 			}
-			spin_lock_init(&head->lock);
+			raw_spin_lock_init(&head->lock);
 			head->ncpus = uv_blade_nr_possible_cpus(bid);
 			head->next_cpu = -1;
 			blade_info[bid] = head;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:235 @ static int uv_rtc_set_timer(int cpu, u64
 	unsigned long flags;
 	int next_cpu;
 
-	spin_lock_irqsave(&head->lock, flags);
+	raw_spin_lock_irqsave(&head->lock, flags);
 
 	next_cpu = head->next_cpu;
 	*t = expires;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:247 @ static int uv_rtc_set_timer(int cpu, u64
 		if (uv_setup_intr(cpu, expires)) {
 			*t = ULLONG_MAX;
 			uv_rtc_find_next_timer(head, pnode);
-			spin_unlock_irqrestore(&head->lock, flags);
+			raw_spin_unlock_irqrestore(&head->lock, flags);
 			return -ETIME;
 		}
 	}
 
-	spin_unlock_irqrestore(&head->lock, flags);
+	raw_spin_unlock_irqrestore(&head->lock, flags);
 	return 0;
 }
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:271 @ static int uv_rtc_unset_timer(int cpu, i
 	unsigned long flags;
 	int rc = 0;
 
-	spin_lock_irqsave(&head->lock, flags);
+	raw_spin_lock_irqsave(&head->lock, flags);
 
 	if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
 		rc = 1;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:283 @ static int uv_rtc_unset_timer(int cpu, i
 			uv_rtc_find_next_timer(head, pnode);
 	}
 
-	spin_unlock_irqrestore(&head->lock, flags);
+	raw_spin_unlock_irqrestore(&head->lock, flags);
 
 	return rc;
 }
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:303 @ static int uv_rtc_unset_timer(int cpu, i
 static cycle_t uv_read_rtc(struct clocksource *cs)
 {
 	unsigned long offset;
+	cycle_t cycles;
 
+	preempt_disable();
 	if (uv_get_min_hub_revision_id() == 1)
 		offset = 0;
 	else
 		offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
 
-	return (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
+	cycles = (cycle_t)uv_read_local_mmr(UVH_RTC | offset);
+	preempt_enable();
+
+	return cycles;
 }
 
 /*
Index: linux-3.18.13-rt10-r7s4/arch/xtensa/mm/fault.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/arch/xtensa/mm/fault.c
+++ linux-3.18.13-rt10-r7s4/arch/xtensa/mm/fault.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:60 @ void do_page_fault(struct pt_regs *regs)
 	/* If we're in an interrupt or have no user
 	 * context, we must not take the fault..
 	 */
-	if (in_atomic() || !mm) {
+	if (!mm || pagefault_disabled()) {
 		bad_page_fault(regs, address, SIGSEGV);
 		return;
 	}
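The in_atomic() test in these fault handlers is replaced because on RT spinlock-held sections are preemptible, so in_atomic() no longer tells the handler whether the faulting context may sleep; pagefault_disabled() checks the explicit per-task flag instead. That flag is set by the stock pagefault_disable() API, roughly used like this:

	#include <linux/uaccess.h>

	unsigned long left;

	pagefault_disable();		/* faults must not be serviced here */
	left = __copy_from_user_inatomic(dst, src, len); /* errors out, never sleeps */
	pagefault_enable();
	if (left) {
		/* fall back to a sleepable copy_from_user() path */
	}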
Index: linux-3.18.13-rt10-r7s4/block/blk-core.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/block/blk-core.c
+++ linux-3.18.13-rt10-r7s4/block/blk-core.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:103 @ void blk_rq_init(struct request_queue *q
 
 	INIT_LIST_HEAD(&rq->queuelist);
 	INIT_LIST_HEAD(&rq->timeout_list);
+#ifdef CONFIG_PREEMPT_RT_FULL
+	INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
 	rq->cpu = -1;
 	rq->q = q;
 	rq->__sector = (sector_t) -1;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:200 @ EXPORT_SYMBOL(blk_delay_queue);
  **/
 void blk_start_queue(struct request_queue *q)
 {
-	WARN_ON(!irqs_disabled());
+	WARN_ON_NONRT(!irqs_disabled());
 
 	queue_flag_clear(QUEUE_FLAG_STOPPED, q);
 	__blk_run_queue(q);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:633 @ struct request_queue *blk_alloc_queue_no
 	q->bypass_depth = 1;
 	__set_bit(QUEUE_FLAG_BYPASS, &q->queue_flags);
 
-	init_waitqueue_head(&q->mq_freeze_wq);
+	init_swait_head(&q->mq_freeze_wq);
 
 	if (blkcg_init_queue(q))
 		goto fail_bdi;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:3043 @ static void queue_unplugged(struct reque
 		blk_run_queue_async(q);
 	else
 		__blk_run_queue(q);
-	spin_unlock(q->queue_lock);
+	spin_unlock_irq(q->queue_lock);
 }
 
 static void flush_plug_callbacks(struct blk_plug *plug, bool from_schedule)
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:3091 @ EXPORT_SYMBOL(blk_check_plugged);
 void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
 	struct request_queue *q;
-	unsigned long flags;
 	struct request *rq;
 	LIST_HEAD(list);
 	unsigned int depth;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:3110 @ void blk_flush_plug_list(struct blk_plug
 	q = NULL;
 	depth = 0;
 
-	/*
-	 * Save and disable interrupts here, to avoid doing it for every
-	 * queue lock we have to take.
-	 */
-	local_irq_save(flags);
 	while (!list_empty(&list)) {
 		rq = list_entry_rq(list.next);
 		list_del_init(&rq->queuelist);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:3122 @ void blk_flush_plug_list(struct blk_plug
 				queue_unplugged(q, depth, from_schedule);
 			q = rq->q;
 			depth = 0;
-			spin_lock(q->queue_lock);
+			spin_lock_irq(q->queue_lock);
 		}
 
 		/*
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:3149 @ void blk_flush_plug_list(struct blk_plug
 	 */
 	if (q)
 		queue_unplugged(q, depth, from_schedule);
-
-	local_irq_restore(flags);
 }
 
 void blk_finish_plug(struct blk_plug *plug)
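The effect of the spin_lock_irq()/spin_unlock_irq() pairs is that interrupts are disabled only while a queue lock is actually held, not across the whole plug flush; on RT this also avoids taking the now-sleeping queue lock with interrupts hard-disabled. Schematically:

	/* before: local_irq_save() around the entire flush loop
	 * after:  irqs off only inside each per-queue lock section
	 */
	spin_lock_irq(q->queue_lock);
	/* ... dispatch the requests batched for this queue ... */
	spin_unlock_irq(q->queue_lock);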
Index: linux-3.18.13-rt10-r7s4/block/blk-ioc.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/block/blk-ioc.c
+++ linux-3.18.13-rt10-r7s4/block/blk-ioc.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:10 @
 #include <linux/bio.h>
 #include <linux/blkdev.h>
 #include <linux/slab.h>
+#include <linux/delay.h>
 
 #include "blk.h"
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:113 @ static void ioc_release_fn(struct work_s
 			spin_unlock(q->queue_lock);
 		} else {
 			spin_unlock_irqrestore(&ioc->lock, flags);
-			cpu_relax();
+			cpu_chill();
 			spin_lock_irqsave_nested(&ioc->lock, flags, 1);
 		}
 	}
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:191 @ retry:
 			spin_unlock(icq->q->queue_lock);
 		} else {
 			spin_unlock_irqrestore(&ioc->lock, flags);
-			cpu_relax();
+			cpu_chill();
 			goto retry;
 		}
 	}
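cpu_chill() is the RT replacement for cpu_relax() in retry loops: the lock holder being waited on may be a preempted task rather than another CPU, so on RT cpu_chill() sleeps briefly to let it run, while on !RT it degrades to cpu_relax(). The retry pattern, sketched (progress_possible() is a made-up predicate):

	#include <linux/delay.h>	/* cpu_chill() on RT kernels */

	retry:
		spin_lock_irqsave(&ioc->lock, flags);
		if (!progress_possible(ioc)) {	/* hypothetical predicate */
			spin_unlock_irqrestore(&ioc->lock, flags);
			cpu_chill();	/* RT: brief sleep, lets the holder run */
			goto retry;
		}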
Index: linux-3.18.13-rt10-r7s4/block/blk-iopoll.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/block/blk-iopoll.c
+++ linux-3.18.13-rt10-r7s4/block/blk-iopoll.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:38 @ void blk_iopoll_sched(struct blk_iopoll
 	list_add_tail(&iop->list, this_cpu_ptr(&blk_cpu_iopoll));
 	__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 }
 EXPORT_SYMBOL(blk_iopoll_sched);
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:136 @ static void blk_iopoll_softirq(struct so
 		__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
 
 	local_irq_enable();
+	preempt_check_resched_rt();
 }
 
 /**
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:206 @ static int blk_iopoll_cpu_notify(struct
 				 this_cpu_ptr(&blk_cpu_iopoll));
 		__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);
 		local_irq_enable();
+		preempt_check_resched_rt();
 	}
 
 	return NOTIFY_OK;
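preempt_check_resched_rt() is the RT-patch hook that follows each of these irq-restore points: raising a softirq from here only marks it pending, and on RT the ksoftirqd thread that will run it needs a scheduling point; on !RT the call compiles away. The pattern:

	local_irq_save(flags);
	__raise_softirq_irqoff(BLOCK_IOPOLL_SOFTIRQ);	/* mark pending only */
	local_irq_restore(flags);
	preempt_check_resched_rt();	/* RT: give ksoftirqd a chance to run */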
Index: linux-3.18.13-rt10-r7s4/block/blk-mq-cpu.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/block/blk-mq-cpu.c
+++ linux-3.18.13-rt10-r7s4/block/blk-mq-cpu.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:19 @
 #include "blk-mq.h"
 
 static LIST_HEAD(blk_mq_cpu_notify_list);
-static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
+static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
 
 static int blk_mq_main_cpu_notify(struct notifier_block *self,
 				  unsigned long action, void *hcpu)
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:28 @ static int blk_mq_main_cpu_notify(struct
 	struct blk_mq_cpu_notifier *notify;
 	int ret = NOTIFY_OK;
 
-	raw_spin_lock(&blk_mq_cpu_notify_lock);
+	if (action != CPU_POST_DEAD)
+		return NOTIFY_OK;
+
+	spin_lock(&blk_mq_cpu_notify_lock);
 
 	list_for_each_entry(notify, &blk_mq_cpu_notify_list, list) {
 		ret = notify->notify(notify->data, action, cpu);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:39 @ static int blk_mq_main_cpu_notify(struct
 			break;
 	}
 
-	raw_spin_unlock(&blk_mq_cpu_notify_lock);
+	spin_unlock(&blk_mq_cpu_notify_lock);
 	return ret;
 }
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:47 @ void blk_mq_register_cpu_notifier(struct
 {
 	BUG_ON(!notifier->notify);
 
-	raw_spin_lock(&blk_mq_cpu_notify_lock);
+	spin_lock(&blk_mq_cpu_notify_lock);
 	list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
-	raw_spin_unlock(&blk_mq_cpu_notify_lock);
+	spin_unlock(&blk_mq_cpu_notify_lock);
 }
 
 void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
 {
-	raw_spin_lock(&blk_mq_cpu_notify_lock);
+	spin_lock(&blk_mq_cpu_notify_lock);
 	list_del(&notifier->list);
-	raw_spin_unlock(&blk_mq_cpu_notify_lock);
+	spin_unlock(&blk_mq_cpu_notify_lock);
 }
 
 void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
Index: linux-3.18.13-rt10-r7s4/block/blk-mq.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/block/blk-mq.c
+++ linux-3.18.13-rt10-r7s4/block/blk-mq.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:88 @ static int blk_mq_queue_enter(struct req
 		if (percpu_ref_tryget_live(&q->mq_usage_counter))
 			return 0;
 
-		ret = wait_event_interruptible(q->mq_freeze_wq,
+		ret = swait_event_interruptible(q->mq_freeze_wq,
 				!q->mq_freeze_depth || blk_queue_dying(q));
 		if (blk_queue_dying(q))
 			return -ENODEV;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:107 @ static void blk_mq_usage_counter_release
 	struct request_queue *q =
 		container_of(ref, struct request_queue, mq_usage_counter);
 
-	wake_up_all(&q->mq_freeze_wq);
+	swait_wake_all(&q->mq_freeze_wq);
 }
 
 static void blk_mq_freeze_queue_start(struct request_queue *q)
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:126 @ static void blk_mq_freeze_queue_start(st
 
 static void blk_mq_freeze_queue_wait(struct request_queue *q)
 {
-	wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
+	swait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
 }
 
 /*
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:149 @ static void blk_mq_unfreeze_queue(struct
 	spin_unlock_irq(q->queue_lock);
 	if (wake) {
 		percpu_ref_reinit(&q->mq_usage_counter);
-		wake_up_all(&q->mq_freeze_wq);
+		swait_wake_all(&q->mq_freeze_wq);
 	}
 }
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:197 @ static void blk_mq_rq_ctx_init(struct re
 	rq->resid_len = 0;
 	rq->sense = NULL;
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+	INIT_WORK(&rq->work, __blk_mq_complete_request_remote_work);
+#endif
 	INIT_LIST_HEAD(&rq->timeout_list);
 	rq->timeout = 0;
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:319 @ void blk_mq_end_request(struct request *
 }
 EXPORT_SYMBOL(blk_mq_end_request);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+
+void __blk_mq_complete_request_remote_work(struct work_struct *work)
+{
+	struct request *rq = container_of(work, struct request, work);
+
+	rq->q->softirq_done_fn(rq);
+}
+
+#else
+
 static void __blk_mq_complete_request_remote(void *data)
 {
 	struct request *rq = data;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:337 @ static void __blk_mq_complete_request_re
 	rq->q->softirq_done_fn(rq);
 }
 
+#endif
+
 static void blk_mq_ipi_complete_request(struct request *rq)
 {
 	struct blk_mq_ctx *ctx = rq->mq_ctx;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:350 @ static void blk_mq_ipi_complete_request(
 		return;
 	}
 
-	cpu = get_cpu();
+	cpu = get_cpu_light();
 	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &rq->q->queue_flags))
 		shared = cpus_share_cache(cpu, ctx->cpu);
 
 	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
+#ifdef CONFIG_PREEMPT_RT_FULL
+		schedule_work_on(ctx->cpu, &rq->work);
+#else
 		rq->csd.func = __blk_mq_complete_request_remote;
 		rq->csd.info = rq;
 		rq->csd.flags = 0;
 		smp_call_function_single_async(ctx->cpu, &rq->csd);
+#endif
 	} else {
 		rq->q->softirq_done_fn(rq);
 	}
-	put_cpu();
+	put_cpu_light();
 }
 
 void __blk_mq_complete_request(struct request *rq)
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:837 @ void blk_mq_run_queues(struct request_qu
 		    test_bit(BLK_MQ_S_STOPPED, &hctx->state))
 			continue;
 
-		preempt_disable();
+		migrate_disable();
 		blk_mq_run_hw_queue(hctx, async);
-		preempt_enable();
+		migrate_enable();
 	}
 }
 EXPORT_SYMBOL(blk_mq_run_queues);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:866 @ void blk_mq_start_hw_queue(struct blk_mq
 {
 	clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
 
-	preempt_disable();
+	migrate_disable();
 	blk_mq_run_hw_queue(hctx, false);
-	preempt_enable();
+	migrate_enable();
 }
 EXPORT_SYMBOL(blk_mq_start_hw_queue);
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:893 @ void blk_mq_start_stopped_hw_queues(stru
 			continue;
 
 		clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
-		preempt_disable();
+		migrate_disable();
 		blk_mq_run_hw_queue(hctx, async);
-		preempt_enable();
+		migrate_enable();
 	}
 }
 EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1517 @ static int blk_mq_hctx_notify(void *data
 {
 	struct blk_mq_hw_ctx *hctx = data;
 
-	if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
+	if (action == CPU_POST_DEAD)
 		return blk_mq_hctx_cpu_offline(hctx, cpu);
 	else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
 		return blk_mq_hctx_cpu_online(hctx, cpu);
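Condensing the completion path above: from the now-preemptible section opened by get_cpu_light() (the RT-patch primitive used throughout this file), RT cannot send the completion IPI, so it bounces the request to a work item on the submitting CPU instead:

	/* Condensed from blk_mq_ipi_complete_request() above;
	 * csd setup omitted for brevity.
	 */
	cpu = get_cpu_light();	/* RT: preemptible, migration disabled */
	if (cpu != ctx->cpu && !shared && cpu_online(ctx->cpu)) {
	#ifdef CONFIG_PREEMPT_RT_FULL
		schedule_work_on(ctx->cpu, &rq->work);	/* work item, may sleep */
	#else
		smp_call_function_single_async(ctx->cpu, &rq->csd);	/* IPI */
	#endif
	} else {
		rq->q->softirq_done_fn(rq);
	}
	put_cpu_light();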
Index: linux-3.18.13-rt10-r7s4/block/blk-mq.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/block/blk-mq.h
+++ linux-3.18.13-rt10-r7s4/block/blk-mq.h
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:76 @ struct blk_align_bitmap {
 static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q,
 					   unsigned int cpu)
 {
-	return per_cpu_ptr(q->queue_ctx, cpu);
+	struct blk_mq_ctx *ctx;
+
+	ctx = per_cpu_ptr(q->queue_ctx, cpu);
+	return ctx;
 }
 
 /*
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:90 @ static inline struct blk_mq_ctx *__blk_m
  */
 static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q)
 {
-	return __blk_mq_get_ctx(q, get_cpu());
+	return __blk_mq_get_ctx(q, get_cpu_light());
 }
 
 static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx)
 {
-	put_cpu();
+	put_cpu_light();
 }
 
 struct blk_mq_alloc_data {
Index: linux-3.18.13-rt10-r7s4/block/blk-softirq.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/block/blk-softirq.c
+++ linux-3.18.13-rt10-r7s4/block/blk-softirq.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:54 @ static void trigger_softirq(void *data)
 		raise_softirq_irqoff(BLOCK_SOFTIRQ);
 
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 }
 
 /*
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:97 @ static int blk_cpu_notify(struct notifie
 				 this_cpu_ptr(&blk_cpu_done));
 		raise_softirq_irqoff(BLOCK_SOFTIRQ);
 		local_irq_enable();
+		preempt_check_resched_rt();
 	}
 
 	return NOTIFY_OK;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:155 @ do_local:
 		goto do_local;
 
 	local_irq_restore(flags);
+	preempt_check_resched_rt();
 }
 
 /**
Index: linux-3.18.13-rt10-r7s4/block/bounce.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/block/bounce.c
+++ linux-3.18.13-rt10-r7s4/block/bounce.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:57 @ static void bounce_copy_vec(struct bio_v
 	unsigned long flags;
 	unsigned char *vto;
 
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	vto = kmap_atomic(to->bv_page);
 	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
 	kunmap_atomic(vto);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
 }
 
 #else /* CONFIG_HIGHMEM */
Index: linux-3.18.13-rt10-r7s4/crypto/algapi.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/crypto/algapi.c
+++ linux-3.18.13-rt10-r7s4/crypto/algapi.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:701 @ EXPORT_SYMBOL_GPL(crypto_spawn_tfm2);
 
 int crypto_register_notifier(struct notifier_block *nb)
 {
-	return blocking_notifier_chain_register(&crypto_chain, nb);
+	return srcu_notifier_chain_register(&crypto_chain, nb);
 }
 EXPORT_SYMBOL_GPL(crypto_register_notifier);
 
 int crypto_unregister_notifier(struct notifier_block *nb)
 {
-	return blocking_notifier_chain_unregister(&crypto_chain, nb);
+	return srcu_notifier_chain_unregister(&crypto_chain, nb);
 }
 EXPORT_SYMBOL_GPL(crypto_unregister_notifier);
 
Index: linux-3.18.13-rt10-r7s4/crypto/api.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/crypto/api.c
+++ linux-3.18.13-rt10-r7s4/crypto/api.c
@ linux-3.18.13-rt10-r7s4/crypto/api.c:34 @ EXPORT_SYMBOL_GPL(crypto_alg_list);
 DECLARE_RWSEM(crypto_alg_sem);
 EXPORT_SYMBOL_GPL(crypto_alg_sem);
 
-BLOCKING_NOTIFIER_HEAD(crypto_chain);
+SRCU_NOTIFIER_HEAD(crypto_chain);
 EXPORT_SYMBOL_GPL(crypto_chain);
 
 static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg);
@ linux-3.18.13-rt10-r7s4/crypto/api.c:239 @ int crypto_probing_notify(unsigned long
 {
 	int ok;
 
-	ok = blocking_notifier_call_chain(&crypto_chain, val, v);
+	ok = srcu_notifier_call_chain(&crypto_chain, val, v);
 	if (ok == NOTIFY_DONE) {
 		request_module("cryptomgr");
-		ok = blocking_notifier_call_chain(&crypto_chain, val, v);
+		ok = srcu_notifier_call_chain(&crypto_chain, val, v);
 	}
 
 	return ok;
Index: linux-3.18.13-rt10-r7s4/crypto/internal.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/crypto/internal.h
+++ linux-3.18.13-rt10-r7s4/crypto/internal.h
@ linux-3.18.13-rt10-r7s4/crypto/internal.h:51 @ struct crypto_larval {
 
 extern struct list_head crypto_alg_list;
 extern struct rw_semaphore crypto_alg_sem;
-extern struct blocking_notifier_head crypto_chain;
+extern struct srcu_notifier_head crypto_chain;
 
 #ifdef CONFIG_PROC_FS
 void __init crypto_init_proc(void);
@ linux-3.18.13-rt10-r7s4/crypto/internal.h:145 @ static inline int crypto_is_moribund(str
 
 static inline void crypto_notify(unsigned long val, void *v)
 {
-	blocking_notifier_call_chain(&crypto_chain, val, v);
+	srcu_notifier_call_chain(&crypto_chain, val, v);
 }
 
 #endif	/* _CRYPTO_INTERNAL_H */
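
The conversion above swaps the rwsem-backed blocking notifier chain for an
SRCU chain, so that calling the chain only enters an SRCU read side while
registration remains a blocking operation. A minimal usage sketch of the
stock SRCU notifier API from kernel/notifier.c (all names hypothetical):

	SRCU_NOTIFIER_HEAD(example_chain);

	static int example_cb(struct notifier_block *nb,
			      unsigned long val, void *v)
	{
		return NOTIFY_OK;
	}

	static struct notifier_block example_nb = {
		.notifier_call	= example_cb,
	};

	/* register/unregister may block; calling the chain does not */
	srcu_notifier_chain_register(&example_chain, &example_nb);
	srcu_notifier_call_chain(&example_chain, 0, NULL);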
Index: linux-3.18.13-rt10-r7s4/drivers/acpi/acpica/acglobal.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/acpi/acpica/acglobal.h
+++ linux-3.18.13-rt10-r7s4/drivers/acpi/acpica/acglobal.h
@ linux-3.18.13-rt10-r7s4/drivers/acpi/acpica/acglobal.h:115 @ ACPI_GLOBAL(u8, acpi_gbl_global_lock_pen
  * interrupt level
  */
 ACPI_GLOBAL(acpi_spinlock, acpi_gbl_gpe_lock);	/* For GPE data structs and registers */
-ACPI_GLOBAL(acpi_spinlock, acpi_gbl_hardware_lock);	/* For ACPI H/W except GPE registers */
+ACPI_GLOBAL(acpi_raw_spinlock, acpi_gbl_hardware_lock);	/* For ACPI H/W except GPE registers */
 ACPI_GLOBAL(acpi_spinlock, acpi_gbl_reference_count_lock);
 
 /* Mutex for _OSI support */
Index: linux-3.18.13-rt10-r7s4/drivers/acpi/acpica/hwregs.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/acpi/acpica/hwregs.c
+++ linux-3.18.13-rt10-r7s4/drivers/acpi/acpica/hwregs.c
@ linux-3.18.13-rt10-r7s4/drivers/acpi/acpica/hwregs.c:272 @ acpi_status acpi_hw_clear_acpi_status(vo
 			  ACPI_BITMASK_ALL_FIXED_STATUS,
 			  ACPI_FORMAT_UINT64(acpi_gbl_xpm1a_status.address)));
 
-	lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
+	raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
 
 	/* Clear the fixed events in PM1 A/B */
 
 	status = acpi_hw_register_write(ACPI_REGISTER_PM1_STATUS,
 					ACPI_BITMASK_ALL_FIXED_STATUS);
 
-	acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
+	raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
 
 	if (ACPI_FAILURE(status)) {
 		goto exit;
Index: linux-3.18.13-rt10-r7s4/drivers/acpi/acpica/hwxface.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/acpi/acpica/hwxface.c
+++ linux-3.18.13-rt10-r7s4/drivers/acpi/acpica/hwxface.c
@ linux-3.18.13-rt10-r7s4/drivers/acpi/acpica/hwxface.c:377 @ acpi_status acpi_write_bit_register(u32
 		return_ACPI_STATUS(AE_BAD_PARAMETER);
 	}
 
-	lock_flags = acpi_os_acquire_lock(acpi_gbl_hardware_lock);
+	raw_spin_lock_irqsave(acpi_gbl_hardware_lock, lock_flags);
 
 	/*
 	 * At this point, we know that the parent register is one of the
@ linux-3.18.13-rt10-r7s4/drivers/acpi/acpica/hwxface.c:438 @ acpi_status acpi_write_bit_register(u32
 
 unlock_and_exit:
 
-	acpi_os_release_lock(acpi_gbl_hardware_lock, lock_flags);
+	raw_spin_unlock_irqrestore(acpi_gbl_hardware_lock, lock_flags);
 	return_ACPI_STATUS(status);
 }
 
Index: linux-3.18.13-rt10-r7s4/drivers/acpi/acpica/utmutex.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/acpi/acpica/utmutex.c
+++ linux-3.18.13-rt10-r7s4/drivers/acpi/acpica/utmutex.c
@ linux-3.18.13-rt10-r7s4/drivers/acpi/acpica/utmutex.c:91 @ acpi_status acpi_ut_mutex_initialize(voi
 		return_ACPI_STATUS (status);
 	}
 
-	status = acpi_os_create_lock (&acpi_gbl_hardware_lock);
+	status = acpi_os_create_raw_lock (&acpi_gbl_hardware_lock);
 	if (ACPI_FAILURE (status)) {
 		return_ACPI_STATUS (status);
 	}
@ linux-3.18.13-rt10-r7s4/drivers/acpi/acpica/utmutex.c:144 @ void acpi_ut_mutex_terminate(void)
 	/* Delete the spinlocks */
 
 	acpi_os_delete_lock(acpi_gbl_gpe_lock);
-	acpi_os_delete_lock(acpi_gbl_hardware_lock);
+	acpi_os_delete_raw_lock(acpi_gbl_hardware_lock);
 	acpi_os_delete_lock(acpi_gbl_reference_count_lock);
 
 	/* Delete the reader/writer lock */
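
On RT a plain spinlock_t becomes a sleeping lock, which must not be taken in
the interrupt-disabled low-level paths that grab the ACPI hardware lock;
converting it to acpi_raw_spinlock keeps it a genuine spinning lock. The
resulting pattern reduces to (sketch, mirroring the hunks above):

	static DEFINE_RAW_SPINLOCK(hw_lock);	/* stays a spinlock on RT */
	unsigned long flags;

	raw_spin_lock_irqsave(&hw_lock, flags);
	/* ... program the PM1 status/enable registers ... */
	raw_spin_unlock_irqrestore(&hw_lock, flags);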
Index: linux-3.18.13-rt10-r7s4/drivers/ata/libata-sff.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/ata/libata-sff.c
+++ linux-3.18.13-rt10-r7s4/drivers/ata/libata-sff.c
@ linux-3.18.13-rt10-r7s4/drivers/ata/libata-sff.c:681 @ unsigned int ata_sff_data_xfer_noirq(str
 	unsigned long flags;
 	unsigned int consumed;
 
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	consumed = ata_sff_data_xfer32(dev, buf, buflen, rw);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
 
 	return consumed;
 }
@ linux-3.18.13-rt10-r7s4/drivers/ata/libata-sff.c:722 @ static void ata_pio_sector(struct ata_qu
 		unsigned long flags;
 
 		/* FIXME: use a bounce buffer */
-		local_irq_save(flags);
+		local_irq_save_nort(flags);
 		buf = kmap_atomic(page);
 
 		/* do the actual data transfer */
@ linux-3.18.13-rt10-r7s4/drivers/ata/libata-sff.c:730 @ static void ata_pio_sector(struct ata_qu
 				       do_write);
 
 		kunmap_atomic(buf);
-		local_irq_restore(flags);
+		local_irq_restore_nort(flags);
 	} else {
 		buf = page_address(page);
 		ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
@ linux-3.18.13-rt10-r7s4/drivers/ata/libata-sff.c:867 @ next_sg:
 		unsigned long flags;
 
 		/* FIXME: use bounce buffer */
-		local_irq_save(flags);
+		local_irq_save_nort(flags);
 		buf = kmap_atomic(page);
 
 		/* do the actual data transfer */
@ linux-3.18.13-rt10-r7s4/drivers/ata/libata-sff.c:875 @ next_sg:
 								count, rw);
 
 		kunmap_atomic(buf);
-		local_irq_restore(flags);
+		local_irq_restore_nort(flags);
 	} else {
 		buf = page_address(page);
 		consumed = ap->ops->sff_data_xfer(dev,  buf + offset,
Index: linux-3.18.13-rt10-r7s4/drivers/char/random.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/char/random.c
+++ linux-3.18.13-rt10-r7s4/drivers/char/random.c
@ linux-3.18.13-rt10-r7s4/drivers/char/random.c:779 @ static void add_timer_randomness(struct
 	} sample;
 	long delta, delta2, delta3;
 
-	preempt_disable();
-
 	sample.jiffies = jiffies;
 	sample.cycles = random_get_entropy();
 	sample.num = num;
@ linux-3.18.13-rt10-r7s4/drivers/char/random.c:819 @ static void add_timer_randomness(struct
 		 */
 		credit_entropy_bits(r, min_t(int, fls(delta>>1), 11));
 	}
-	preempt_enable();
 }
 
 void add_input_randomness(unsigned int type, unsigned int code,
@ linux-3.18.13-rt10-r7s4/drivers/char/random.c:871 @ static __u32 get_reg(struct fast_pool *f
 	return *(ptr + f->reg_idx++);
 }
 
-void add_interrupt_randomness(int irq, int irq_flags)
+void add_interrupt_randomness(int irq, int irq_flags, __u64 ip)
 {
 	struct entropy_store	*r;
 	struct fast_pool	*fast_pool = this_cpu_ptr(&irq_randomness);
-	struct pt_regs		*regs = get_irq_regs();
 	unsigned long		now = jiffies;
 	cycles_t		cycles = random_get_entropy();
 	__u32			c_high, j_high;
-	__u64			ip;
 	unsigned long		seed;
 	int			credit = 0;
 
 	if (cycles == 0)
-		cycles = get_reg(fast_pool, regs);
+		cycles = get_reg(fast_pool, NULL);
 	c_high = (sizeof(cycles) > 4) ? cycles >> 32 : 0;
 	j_high = (sizeof(now) > 4) ? now >> 32 : 0;
 	fast_pool->pool[0] ^= cycles ^ j_high ^ irq;
 	fast_pool->pool[1] ^= now ^ c_high;
-	ip = regs ? instruction_pointer(regs) : _RET_IP_;
+	if (!ip)
+		ip = _RET_IP_;
 	fast_pool->pool[2] ^= ip;
 	fast_pool->pool[3] ^= (sizeof(ip) > 4) ? ip >> 32 :
-		get_reg(fast_pool, regs);
+		get_reg(fast_pool, NULL);
 
 	fast_mix(fast_pool);
 	add_interrupt_bench(cycles);
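
With threaded interrupts, add_interrupt_randomness() no longer runs in hard
irq context, so get_irq_regs() would not describe the interrupted context
anymore. The instruction pointer is therefore captured by the caller while
still in the hard irq and handed in; a caller-side sketch (the matching
call-site change lives in the generic irq code elsewhere in this series):

	/* in hard interrupt context: */
	u64 ip = instruction_pointer(get_irq_regs());
	/* ... the handler is then deferred to its irq thread ... */

	/* later, from the irq thread: */
	add_interrupt_randomness(irq, 0, ip);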
Index: linux-3.18.13-rt10-r7s4/drivers/clocksource/tcb_clksrc.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/clocksource/tcb_clksrc.c
+++ linux-3.18.13-rt10-r7s4/drivers/clocksource/tcb_clksrc.c
@ linux-3.18.13-rt10-r7s4/drivers/clocksource/tcb_clksrc.c:27 @
  *     this 32 bit free-running counter. the second channel is not used.
  *
  *   - The third channel may be used to provide a 16-bit clockevent
- *     source, used in either periodic or oneshot mode.  This runs
- *     at 32 KiHZ, and can handle delays of up to two seconds.
+ *     source, used in either periodic or oneshot mode.
  *
  * A boot clocksource and clockevent source are also currently needed,
  * unless the relevant platforms (ARM/AT91, AVR32/AT32) are changed so
@ linux-3.18.13-rt10-r7s4/drivers/clocksource/tcb_clksrc.c:85 @ static struct clocksource clksrc = {
 struct tc_clkevt_device {
 	struct clock_event_device	clkevt;
 	struct clk			*clk;
+	u32				freq;
 	void __iomem			*regs;
 };
 
@ linux-3.18.13-rt10-r7s4/drivers/clocksource/tcb_clksrc.c:94 @ static struct tc_clkevt_device *to_tc_cl
 	return container_of(clkevt, struct tc_clkevt_device, clkevt);
 }
 
-/* For now, we always use the 32K clock ... this optimizes for NO_HZ,
- * because using one of the divided clocks would usually mean the
- * tick rate can never be less than several dozen Hz (vs 0.5 Hz).
- *
- * A divided clock could be good for high resolution timers, since
- * 30.5 usec resolution can seem "low".
- */
 static u32 timer_clock;
 
 static void tc_mode(enum clock_event_mode m, struct clock_event_device *d)
@ linux-3.18.13-rt10-r7s4/drivers/clocksource/tcb_clksrc.c:116 @ static void tc_mode(enum clock_event_mod
 	case CLOCK_EVT_MODE_PERIODIC:
 		clk_enable(tcd->clk);
 
-		/* slow clock, count up to RC, then irq and restart */
+		/* count up to RC, then irq and restart */
 		__raw_writel(timer_clock
 				| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
 				regs + ATMEL_TC_REG(2, CMR));
-		__raw_writel((32768 + HZ/2) / HZ, tcaddr + ATMEL_TC_REG(2, RC));
+		__raw_writel((tcd->freq + HZ / 2) / HZ,
+			     tcaddr + ATMEL_TC_REG(2, RC));
 
 		/* Enable clock and interrupts on RC compare */
 		__raw_writel(ATMEL_TC_CPCS, regs + ATMEL_TC_REG(2, IER));
@ linux-3.18.13-rt10-r7s4/drivers/clocksource/tcb_clksrc.c:134 @ static void tc_mode(enum clock_event_mod
 	case CLOCK_EVT_MODE_ONESHOT:
 		clk_enable(tcd->clk);
 
-		/* slow clock, count up to RC, then irq and stop */
+		/* count up to RC, then irq and stop */
 		__raw_writel(timer_clock | ATMEL_TC_CPCSTOP
 				| ATMEL_TC_WAVE | ATMEL_TC_WAVESEL_UP_AUTO,
 				regs + ATMEL_TC_REG(2, CMR));
@ linux-3.18.13-rt10-r7s4/drivers/clocksource/tcb_clksrc.c:163 @ static struct tc_clkevt_device clkevt =
 		.name		= "tc_clkevt",
 		.features	= CLOCK_EVT_FEAT_PERIODIC
 					| CLOCK_EVT_FEAT_ONESHOT,
+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
 		/* Should be lower than at91rm9200's system timer */
 		.rating		= 125,
+#else
+		.rating		= 200,
+#endif
 		.set_next_event	= tc_next_event,
 		.set_mode	= tc_mode,
 	},
@ linux-3.18.13-rt10-r7s4/drivers/clocksource/tcb_clksrc.c:188 @ static irqreturn_t ch2_irq(int irq, void
 	return IRQ_NONE;
 }
 
-static int __init setup_clkevents(struct atmel_tc *tc, int clk32k_divisor_idx)
+static int __init setup_clkevents(struct atmel_tc *tc, int divisor_idx)
 {
+	unsigned divisor = atmel_tc_divisors[divisor_idx];
 	int ret;
 	struct clk *t2_clk = tc->clk[2];
 	int irq = tc->irq[2];
@ linux-3.18.13-rt10-r7s4/drivers/clocksource/tcb_clksrc.c:204 @ static int __init setup_clkevents(struct
 	clkevt.regs = tc->regs;
 	clkevt.clk = t2_clk;
 
-	timer_clock = clk32k_divisor_idx;
+	timer_clock = divisor_idx;
+	if (!divisor)
+		clkevt.freq = 32768;
+	else
+		clkevt.freq = clk_get_rate(t2_clk) / divisor;
 
 	clkevt.clkevt.cpumask = cpumask_of(0);
 
@ linux-3.18.13-rt10-r7s4/drivers/clocksource/tcb_clksrc.c:218 @ static int __init setup_clkevents(struct
 		return ret;
 	}
 
-	clockevents_config_and_register(&clkevt.clkevt, 32768, 1, 0xffff);
+	clockevents_config_and_register(&clkevt.clkevt, clkevt.freq, 1, 0xffff);
 
 	return ret;
 }
@ linux-3.18.13-rt10-r7s4/drivers/clocksource/tcb_clksrc.c:361 @ static int __init tcb_clksrc_init(void)
 	local_irq_restore(flags);
 
 	/* channel 2:  periodic and oneshot timer support */
+#ifdef CONFIG_ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
 	ret = setup_clkevents(tc, clk32k_divisor_idx);
+#else
+	ret = setup_clkevents(tc, best_divisor_idx);
+#endif
 	if (ret)
 		goto err_unregister_clksrc;
 
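With a divided master clock the periodic RC reload value is computed exactly
as before, rounding to the nearest tick count. A worked example with assumed
rates (illustration only):

	u32 freq_slow = 32768;			 /* 32 KiHz slow clock */
	u32 freq_fast = 66000000 / 32;		 /* e.g. 66 MHz MCK, divisor 32 */
	u32 rc_slow = (freq_slow + HZ / 2) / HZ; /* HZ=100: 328 ticks/jiffy */
	u32 rc_fast = (freq_fast + HZ / 2) / HZ; /* HZ=100: 20625 ticks/jiffy */

Both values fit the 16-bit RC register, but the faster clock improves the
clockevent resolution from roughly 30.5 us per tick to well under one
microsecond, which is why the RT configuration defaults away from the slow
clock.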
Index: linux-3.18.13-rt10-r7s4/drivers/clocksource/timer-atmel-pit.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/clocksource/timer-atmel-pit.c
+++ linux-3.18.13-rt10-r7s4/drivers/clocksource/timer-atmel-pit.c
@ linux-3.18.13-rt10-r7s4/drivers/clocksource/timer-atmel-pit.c:93 @ static cycle_t read_pit_clk(struct clock
 	return elapsed;
 }
 
+static struct irqaction at91sam926x_pit_irq;
 /*
  * Clockevent device:  interrupts every 1/HZ (== pit_cycles * MCK/16)
  */
@ linux-3.18.13-rt10-r7s4/drivers/clocksource/timer-atmel-pit.c:104 @ pit_clkevt_mode(enum clock_event_mode mo
 
 	switch (mode) {
 	case CLOCK_EVT_MODE_PERIODIC:
+		/* Set up irq handler */
+		setup_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq);
 		/* update clocksource counter */
 		data->cnt += data->cycle * PIT_PICNT(pit_read(data->base, AT91_PIT_PIVR));
 		pit_write(data->base, AT91_PIT_MR,
@ linux-3.18.13-rt10-r7s4/drivers/clocksource/timer-atmel-pit.c:119 @ pit_clkevt_mode(enum clock_event_mode mo
 		/* disable irq, leaving the clocksource active */
 		pit_write(data->base, AT91_PIT_MR,
 			  (data->cycle - 1) | AT91_PIT_PITEN);
+		remove_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq);
 		break;
 	case CLOCK_EVT_MODE_RESUME:
 		break;
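
The forward declaration lets the mode-switch code attach the PIT interrupt
only while the clockevent is in periodic use and detach it again when the
device is shut down or unused. The full irqaction must be defined for this
to link; roughly (field values assumed from the mainline driver):

	static struct irqaction at91sam926x_pit_irq = {
		.name		= "at91_tick",
		.flags		= IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL,
		.handler	= at91sam926x_pit_interrupt,
	};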
Index: linux-3.18.13-rt10-r7s4/drivers/cpufreq/Kconfig.x86
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/cpufreq/Kconfig.x86
+++ linux-3.18.13-rt10-r7s4/drivers/cpufreq/Kconfig.x86
@ linux-3.18.13-rt10-r7s4/drivers/cpufreq/Kconfig.x86:116 @ config X86_POWERNOW_K7_ACPI
 
 config X86_POWERNOW_K8
 	tristate "AMD Opteron/Athlon64 PowerNow!"
-	depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ
+	depends on ACPI && ACPI_PROCESSOR && X86_ACPI_CPUFREQ && !PREEMPT_RT_BASE
 	help
 	  This adds the CPUFreq driver for K8/early Opteron/Athlon64 processors.
 	  Support for K10 and newer processors is now in acpi-cpufreq.
Index: linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/gpio/gpio-omap.c
+++ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c
@ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c:60 @ struct gpio_bank {
 	u32 saved_datain;
 	u32 level_mask;
 	u32 toggle_mask;
-	spinlock_t lock;
+	raw_spinlock_t lock;
 	struct gpio_chip chip;
 	struct clk *dbck;
 	u32 mod_usage;
@ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c:506 @ static int omap_gpio_irq_type(struct irq
 		(type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
 		return -EINVAL;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	offset = GPIO_INDEX(bank, gpio);
 	retval = omap_set_gpio_triggering(bank, offset, type);
 	if (!LINE_USED(bank->mod_usage, offset)) {
 		omap_enable_gpio_module(bank, offset);
 		omap_set_gpio_direction(bank, offset, 1);
 	} else if (!omap_gpio_is_input(bank, BIT(offset))) {
-		spin_unlock_irqrestore(&bank->lock, flags);
+		raw_spin_unlock_irqrestore(&bank->lock, flags);
 		return -EINVAL;
 	}
 
 	bank->irq_usage |= BIT(GPIO_INDEX(bank, gpio));
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
 		__irq_set_handler_locked(d->irq, handle_level_irq);
@ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c:636 @ static int omap_set_gpio_wakeup(struct g
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	if (enable)
 		bank->context.wake_en |= gpio_bit;
 	else
 		bank->context.wake_en &= ~gpio_bit;
 
 	writel_relaxed(bank->context.wake_en, bank->base + bank->regs->wkup_en);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c:678 @ static int omap_gpio_request(struct gpio
 	if (!BANK_USED(bank))
 		pm_runtime_get_sync(bank->dev);
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	/* Set trigger to none. You need to enable the desired trigger with
 	 * request_irq() or set_irq_type(). Only do this if the IRQ line has
 	 * not already been requested.
@ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c:688 @ static int omap_gpio_request(struct gpio
 		omap_enable_gpio_module(bank, offset);
 	}
 	bank->mod_usage |= BIT(offset);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c:698 @ static void omap_gpio_free(struct gpio_c
 	struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip);
 	unsigned long flags;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	bank->mod_usage &= ~(BIT(offset));
 	omap_disable_gpio_module(bank, offset);
 	omap_reset_gpio(bank, bank->chip.base + offset);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	/*
 	 * If this is the last gpio to be freed in the bank,
@ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c:802 @ static void omap_gpio_irq_shutdown(struc
 	unsigned long flags;
 	unsigned offset = GPIO_INDEX(bank, gpio);
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	gpiochip_unlock_as_irq(&bank->chip, offset);
 	bank->irq_usage &= ~(BIT(offset));
 	omap_disable_gpio_module(bank, offset);
 	omap_reset_gpio(bank, gpio);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	/*
 	 * If this is the last IRQ to be freed in the bank,
@ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c:831 @ static void omap_gpio_mask_irq(struct ir
 	unsigned int gpio = omap_irq_to_gpio(bank, d->hwirq);
 	unsigned long flags;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	omap_set_gpio_irqenable(bank, gpio, 0);
 	omap_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 }
 
 static void omap_gpio_unmask_irq(struct irq_data *d)
@ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c:845 @ static void omap_gpio_unmask_irq(struct
 	u32 trigger = irqd_get_trigger_type(d);
 	unsigned long flags;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	if (trigger)
 		omap_set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), trigger);
 
@ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c:857 @ static void omap_gpio_unmask_irq(struct
 	}
 
 	omap_set_gpio_irqenable(bank, gpio, 1);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 }
 
 /*---------------------------------------------------------------------*/
@ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c:870 @ static int omap_mpuio_suspend_noirq(stru
 					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
 	unsigned long		flags;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c:885 @ static int omap_mpuio_resume_noirq(struc
 					OMAP_MPUIO_GPIO_MASKIT / bank->stride;
 	unsigned long		flags;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	writel_relaxed(bank->context.wake_en, mask_reg);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c:933 @ static int omap_gpio_get_direction(struc
 
 	bank = container_of(chip, struct gpio_bank, chip);
 	reg = bank->base + bank->regs->direction;
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	dir = !!(readl_relaxed(reg) & BIT(offset));
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 	return dir;
 }
 
@ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c:945 @ static int omap_gpio_input(struct gpio_c
 	unsigned long flags;
 
 	bank = container_of(chip, struct gpio_bank, chip);
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	omap_set_gpio_direction(bank, offset, 1);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 	return 0;
 }
 
@ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c:971 @ static int omap_gpio_output(struct gpio_
 	unsigned long flags;
 
 	bank = container_of(chip, struct gpio_bank, chip);
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	bank->set_dataout(bank, offset, value);
 	omap_set_gpio_direction(bank, offset, 0);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 	return 0;
 }
 
@ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c:986 @ static int omap_gpio_debounce(struct gpi
 
 	bank = container_of(chip, struct gpio_bank, chip);
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	omap2_set_gpio_debounce(bank, offset, debounce);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c:999 @ static void omap_gpio_set(struct gpio_ch
 	unsigned long flags;
 
 	bank = container_of(chip, struct gpio_bank, chip);
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 	bank->set_dataout(bank, offset, value);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 }
 
 /*---------------------------------------------------------------------*/
@ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c:1226 @ static int omap_gpio_probe(struct platfo
 	else
 		bank->set_dataout = omap_set_gpio_dataout_mask;
 
-	spin_lock_init(&bank->lock);
+	raw_spin_lock_init(&bank->lock);
 
 	/* Static mapping, never released */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c:1273 @ static int omap_gpio_runtime_suspend(str
 	unsigned long flags;
 	u32 wake_low, wake_hi;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 
 	/*
 	 * Only edges can generate a wakeup event to the PRCM.
@ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c:1326 @ update_gpio_context_count:
 				bank->get_context_loss_count(bank->dev);
 
 	omap_gpio_dbck_disable(bank);
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
@ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c:1341 @ static int omap_gpio_runtime_resume(stru
 	unsigned long flags;
 	int c;
 
-	spin_lock_irqsave(&bank->lock, flags);
+	raw_spin_lock_irqsave(&bank->lock, flags);
 
 	/*
 	 * On the first resume during the probe, the context has not
@ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c:1377 @ static int omap_gpio_runtime_resume(stru
 			if (c != bank->context_loss_count) {
 				omap_gpio_restore_context(bank);
 			} else {
-				spin_unlock_irqrestore(&bank->lock, flags);
+				raw_spin_unlock_irqrestore(&bank->lock, flags);
 				return 0;
 			}
 		}
 	}
 
 	if (!bank->workaround_enabled) {
-		spin_unlock_irqrestore(&bank->lock, flags);
+		raw_spin_unlock_irqrestore(&bank->lock, flags);
 		return 0;
 	}
 
@ linux-3.18.13-rt10-r7s4/drivers/gpio/gpio-omap.c:1439 @ static int omap_gpio_runtime_resume(stru
 	}
 
 	bank->workaround_enabled = false;
-	spin_unlock_irqrestore(&bank->lock, flags);
+	raw_spin_unlock_irqrestore(&bank->lock, flags);
 
 	return 0;
 }
Index: linux-3.18.13-rt10-r7s4/drivers/gpu/drm/i915/i915_gem.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/gpu/drm/i915/i915_gem.c
+++ linux-3.18.13-rt10-r7s4/drivers/gpu/drm/i915/i915_gem.c
@ linux-3.18.13-rt10-r7s4/drivers/gpu/drm/i915/i915_gem.c:5147 @ static bool mutex_is_locked_by(struct mu
 	if (!mutex_is_locked(mutex))
 		return false;
 
-#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES)
+#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) && !defined(CONFIG_PREEMPT_RT_BASE)
 	return mutex->owner == task;
 #else
 	/* Since UP may be pre-empted, we cannot assume that we own the lock */
Index: linux-3.18.13-rt10-r7s4/drivers/gpu/drm/i915/i915_gem_execbuffer.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ linux-3.18.13-rt10-r7s4/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@ linux-3.18.13-rt10-r7s4/drivers/gpu/drm/i915/i915_gem_execbuffer.c:1173 @ i915_gem_ringbuffer_submission(struct dr
 			return ret;
 	}
 
+#ifndef CONFIG_PREEMPT_RT_BASE
 	trace_i915_gem_ring_dispatch(ring, intel_ring_get_seqno(ring), flags);
+#endif
 
 	i915_gem_execbuffer_move_to_active(vmas, ring);
 	i915_gem_execbuffer_retire_commands(dev, file, ring, batch_obj);
Index: linux-3.18.13-rt10-r7s4/drivers/i2c/busses/i2c-omap.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/i2c/busses/i2c-omap.c
+++ linux-3.18.13-rt10-r7s4/drivers/i2c/busses/i2c-omap.c
@ linux-3.18.13-rt10-r7s4/drivers/i2c/busses/i2c-omap.c:878 @ omap_i2c_isr(int irq, void *dev_id)
 	u16 mask;
 	u16 stat;
 
-	spin_lock(&dev->lock);
-	mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
 	stat = omap_i2c_read_reg(dev, OMAP_I2C_STAT_REG);
+	mask = omap_i2c_read_reg(dev, OMAP_I2C_IE_REG);
 
 	if (stat & mask)
 		ret = IRQ_WAKE_THREAD;
 
-	spin_unlock(&dev->lock);
-
 	return ret;
 }
 
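The quick-check ISR above runs in hard irq context even on RT, so it can no
longer take dev->lock (a sleeping lock on RT); it now merely reads the
status and enable registers and defers the locked work to the irq thread.
This relies on the driver's split-handler registration, roughly (sketch;
names and flags assumed from the non-RT driver):

	r = devm_request_threaded_irq(&pdev->dev, dev->irq,
				      omap_i2c_isr,	   /* hard irq: quick check */
				      omap_i2c_isr_thread, /* thread: real work */
				      IRQF_ONESHOT, pdev->name, dev);
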
Index: linux-3.18.13-rt10-r7s4/drivers/ide/alim15x3.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/ide/alim15x3.c
+++ linux-3.18.13-rt10-r7s4/drivers/ide/alim15x3.c
@ linux-3.18.13-rt10-r7s4/drivers/ide/alim15x3.c:237 @ static int init_chipset_ali15x3(struct p
 
 	isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
 
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 
 	if (m5229_revision < 0xC2) {
 		/*
@ linux-3.18.13-rt10-r7s4/drivers/ide/alim15x3.c:328 @ out:
 	}
 	pci_dev_put(north);
 	pci_dev_put(isa_dev);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
 	return 0;
 }
 
Index: linux-3.18.13-rt10-r7s4/drivers/ide/hpt366.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/ide/hpt366.c
+++ linux-3.18.13-rt10-r7s4/drivers/ide/hpt366.c
@ linux-3.18.13-rt10-r7s4/drivers/ide/hpt366.c:1244 @ static int init_dma_hpt366(ide_hwif_t *h
 
 	dma_old = inb(base + 2);
 
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 
 	dma_new = dma_old;
 	pci_read_config_byte(dev, hwif->channel ? 0x4b : 0x43, &masterdma);
@ linux-3.18.13-rt10-r7s4/drivers/ide/hpt366.c:1255 @ static int init_dma_hpt366(ide_hwif_t *h
 	if (dma_new != dma_old)
 		outb(dma_new, base + 2);
 
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
 
 	printk(KERN_INFO "    %s: BM-DMA at 0x%04lx-0x%04lx\n",
 			 hwif->name, base, base + 7);
Index: linux-3.18.13-rt10-r7s4/drivers/ide/ide-io-std.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/ide/ide-io-std.c
+++ linux-3.18.13-rt10-r7s4/drivers/ide/ide-io-std.c
@ linux-3.18.13-rt10-r7s4/drivers/ide/ide-io-std.c:178 @ void ide_input_data(ide_drive_t *drive,
 		unsigned long uninitialized_var(flags);
 
 		if ((io_32bit & 2) && !mmio) {
-			local_irq_save(flags);
+			local_irq_save_nort(flags);
 			ata_vlb_sync(io_ports->nsect_addr);
 		}
 
@ linux-3.18.13-rt10-r7s4/drivers/ide/ide-io-std.c:189 @ void ide_input_data(ide_drive_t *drive,
 			insl(data_addr, buf, words);
 
 		if ((io_32bit & 2) && !mmio)
-			local_irq_restore(flags);
+			local_irq_restore_nort(flags);
 
 		if (((len + 1) & 3) < 2)
 			return;
@ linux-3.18.13-rt10-r7s4/drivers/ide/ide-io-std.c:222 @ void ide_output_data(ide_drive_t *drive,
 		unsigned long uninitialized_var(flags);
 
 		if ((io_32bit & 2) && !mmio) {
-			local_irq_save(flags);
+			local_irq_save_nort(flags);
 			ata_vlb_sync(io_ports->nsect_addr);
 		}
 
@ linux-3.18.13-rt10-r7s4/drivers/ide/ide-io-std.c:233 @ void ide_output_data(ide_drive_t *drive,
 			outsl(data_addr, buf, words);
 
 		if ((io_32bit & 2) && !mmio)
-			local_irq_restore(flags);
+			local_irq_restore_nort(flags);
 
 		if (((len + 1) & 3) < 2)
 			return;
Index: linux-3.18.13-rt10-r7s4/drivers/ide/ide-io.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/ide/ide-io.c
+++ linux-3.18.13-rt10-r7s4/drivers/ide/ide-io.c
@ linux-3.18.13-rt10-r7s4/drivers/ide/ide-io.c:662 @ void ide_timer_expiry (unsigned long dat
 		/* disable_irq_nosync ?? */
 		disable_irq(hwif->irq);
 		/* local CPU only, as if we were handling an interrupt */
-		local_irq_disable();
+		local_irq_disable_nort();
 		if (hwif->polling) {
 			startstop = handler(drive);
 		} else if (drive_is_ready(drive)) {
Index: linux-3.18.13-rt10-r7s4/drivers/ide/ide-iops.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/ide/ide-iops.c
+++ linux-3.18.13-rt10-r7s4/drivers/ide/ide-iops.c
@ linux-3.18.13-rt10-r7s4/drivers/ide/ide-iops.c:132 @ int __ide_wait_stat(ide_drive_t *drive,
 				if ((stat & ATA_BUSY) == 0)
 					break;
 
-				local_irq_restore(flags);
+				local_irq_restore_nort(flags);
 				*rstat = stat;
 				return -EBUSY;
 			}
 		}
-		local_irq_restore(flags);
+		local_irq_restore_nort(flags);
 	}
 	/*
 	 * Allow status to settle, then read it again.
Index: linux-3.18.13-rt10-r7s4/drivers/ide/ide-probe.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/ide/ide-probe.c
+++ linux-3.18.13-rt10-r7s4/drivers/ide/ide-probe.c
@ linux-3.18.13-rt10-r7s4/drivers/ide/ide-probe.c:199 @ static void do_identify(ide_drive_t *dri
 	int bswap = 1;
 
 	/* local CPU only; some systems need this */
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	/* read 512 bytes of id info */
 	hwif->tp_ops->input_data(drive, NULL, id, SECTOR_SIZE);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
 
 	drive->dev_flags |= IDE_DFLAG_ID_READ;
 #ifdef DEBUG
Index: linux-3.18.13-rt10-r7s4/drivers/ide/ide-taskfile.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/ide/ide-taskfile.c
+++ linux-3.18.13-rt10-r7s4/drivers/ide/ide-taskfile.c
@ linux-3.18.13-rt10-r7s4/drivers/ide/ide-taskfile.c:253 @ void ide_pio_bytes(ide_drive_t *drive, s
 
 		page_is_high = PageHighMem(page);
 		if (page_is_high)
-			local_irq_save(flags);
+			local_irq_save_nort(flags);
 
 		buf = kmap_atomic(page) + offset;
 
@ linux-3.18.13-rt10-r7s4/drivers/ide/ide-taskfile.c:274 @ void ide_pio_bytes(ide_drive_t *drive, s
 		kunmap_atomic(buf);
 
 		if (page_is_high)
-			local_irq_restore(flags);
+			local_irq_restore_nort(flags);
 
 		len -= nr_bytes;
 	}
@ linux-3.18.13-rt10-r7s4/drivers/ide/ide-taskfile.c:417 @ static ide_startstop_t pre_task_out_intr
 	}
 
 	if ((drive->dev_flags & IDE_DFLAG_UNMASK) == 0)
-		local_irq_disable();
+		local_irq_disable_nort();
 
 	ide_set_handler(drive, &task_pio_intr, WAIT_WORSTCASE);
 
Index: linux-3.18.13-rt10-r7s4/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ linux-3.18.13-rt10-r7s4/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@ linux-3.18.13-rt10-r7s4/drivers/infiniband/ulp/ipoib/ipoib_multicast.c:799 @ void ipoib_mcast_restart_task(struct wor
 
 	ipoib_mcast_stop_thread(dev, 0);
 
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	netif_addr_lock(dev);
 	spin_lock(&priv->lock);
 
@ linux-3.18.13-rt10-r7s4/drivers/infiniband/ulp/ipoib/ipoib_multicast.c:881 @ void ipoib_mcast_restart_task(struct wor
 
 	spin_unlock(&priv->lock);
 	netif_addr_unlock(dev);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
 
 	/* We have to cancel outside of the spinlock */
 	list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
Index: linux-3.18.13-rt10-r7s4/drivers/input/gameport/gameport.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/input/gameport/gameport.c
+++ linux-3.18.13-rt10-r7s4/drivers/input/gameport/gameport.c
@ linux-3.18.13-rt10-r7s4/drivers/input/gameport/gameport.c:127 @ static int old_gameport_measure_speed(st
 	tx = 1 << 30;
 
 	for(i = 0; i < 50; i++) {
-		local_irq_save(flags);
+		local_irq_save_nort(flags);
 		GET_TIME(t1);
 		for (t = 0; t < 50; t++) gameport_read(gameport);
 		GET_TIME(t2);
 		GET_TIME(t3);
-		local_irq_restore(flags);
+		local_irq_restore_nort(flags);
 		udelay(i * 10);
 		if ((t = DELTA(t2,t1) - DELTA(t3,t2)) < tx) tx = t;
 	}
@ linux-3.18.13-rt10-r7s4/drivers/input/gameport/gameport.c:151 @ static int old_gameport_measure_speed(st
 	tx = 1 << 30;
 
 	for(i = 0; i < 50; i++) {
-		local_irq_save(flags);
+		local_irq_save_nort(flags);
 		rdtscl(t1);
 		for (t = 0; t < 50; t++) gameport_read(gameport);
 		rdtscl(t2);
-		local_irq_restore(flags);
+		local_irq_restore_nort(flags);
 		udelay(i * 10);
 		if (t2 - t1 < tx) tx = t2 - t1;
 	}
Index: linux-3.18.13-rt10-r7s4/drivers/leds/trigger/Kconfig
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/leds/trigger/Kconfig
+++ linux-3.18.13-rt10-r7s4/drivers/leds/trigger/Kconfig
@ linux-3.18.13-rt10-r7s4/drivers/leds/trigger/Kconfig:64 @ config LEDS_TRIGGER_BACKLIGHT
 
 config LEDS_TRIGGER_CPU
 	bool "LED CPU Trigger"
-	depends on LEDS_TRIGGERS
+	depends on LEDS_TRIGGERS && !PREEMPT_RT_BASE
 	help
 	  This allows LEDs to be controlled by active CPUs. This shows
 	  the active CPUs across an array of LEDs so you can see which
Index: linux-3.18.13-rt10-r7s4/drivers/md/bcache/Kconfig
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/md/bcache/Kconfig
+++ linux-3.18.13-rt10-r7s4/drivers/md/bcache/Kconfig
@ linux-3.18.13-rt10-r7s4/drivers/md/bcache/Kconfig:4 @
 
 config BCACHE
 	tristate "Block device as cache"
+	depends on !PREEMPT_RT_FULL
 	---help---
 	Allows a block device to be used as cache for other devices; uses
 	a btree for indexing and the layout is optimized for SSDs.
Index: linux-3.18.13-rt10-r7s4/drivers/md/dm.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/md/dm.c
+++ linux-3.18.13-rt10-r7s4/drivers/md/dm.c
@ linux-3.18.13-rt10-r7s4/drivers/md/dm.c:1901 @ static void dm_request_fn(struct request
 		if (map_request(ti, clone, md))
 			goto requeued;
 
-		BUG_ON(!irqs_disabled());
+		BUG_ON_NONRT(!irqs_disabled());
 		spin_lock(q->queue_lock);
 	}
 
 	goto out;
 
 requeued:
-	BUG_ON(!irqs_disabled());
+	BUG_ON_NONRT(!irqs_disabled());
 	spin_lock(q->queue_lock);
 
 delay_and_out:
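
BUG_ON_NONRT() keeps the irqs-disabled assertion for mainline but compiles
it away on RT, where this path legitimately runs with interrupts enabled. A
sketch of the helper added elsewhere in this series
(include/asm-generic/bug.h):

	#ifdef CONFIG_PREEMPT_RT_BASE
	# define BUG_ON_NONRT(c)	do { } while (0)
	#else
	# define BUG_ON_NONRT(c)	BUG_ON(c)
	#endif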
Index: linux-3.18.13-rt10-r7s4/drivers/md/raid5.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/md/raid5.c
+++ linux-3.18.13-rt10-r7s4/drivers/md/raid5.c
@ linux-3.18.13-rt10-r7s4/drivers/md/raid5.c:1652 @ static void raid_run_ops(struct stripe_h
 	struct raid5_percpu *percpu;
 	unsigned long cpu;
 
-	cpu = get_cpu();
+	cpu = get_cpu_light();
 	percpu = per_cpu_ptr(conf->percpu, cpu);
+	spin_lock(&percpu->lock);
 	if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) {
 		ops_run_biofill(sh);
 		overlap_clear++;
@ linux-3.18.13-rt10-r7s4/drivers/md/raid5.c:1706 @ static void raid_run_ops(struct stripe_h
 			if (test_and_clear_bit(R5_Overlap, &dev->flags))
 				wake_up(&sh->raid_conf->wait_for_overlap);
 		}
-	put_cpu();
+	spin_unlock(&percpu->lock);
+	put_cpu_light();
 }
 
 static int grow_one_stripe(struct r5conf *conf, int hash)
@ linux-3.18.13-rt10-r7s4/drivers/md/raid5.c:5713 @ static int raid5_alloc_percpu(struct r5c
 			       __func__, cpu);
 			break;
 		}
+		spin_lock_init(&per_cpu_ptr(conf->percpu, cpu)->lock);
 	}
 	put_online_cpus();
 
Index: linux-3.18.13-rt10-r7s4/drivers/md/raid5.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/md/raid5.h
+++ linux-3.18.13-rt10-r7s4/drivers/md/raid5.h
@ linux-3.18.13-rt10-r7s4/drivers/md/raid5.h:460 @ struct r5conf {
 	int			recovery_disabled;
 	/* per cpu variables */
 	struct raid5_percpu {
+		spinlock_t	lock;	     /* Protection for -RT */
 		struct page	*spare_page; /* Used when checking P/Q in raid6 */
 		void		*scribble;   /* space for constructing buffer
 					      * lists and performing address
Index: linux-3.18.13-rt10-r7s4/drivers/misc/Kconfig
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/misc/Kconfig
+++ linux-3.18.13-rt10-r7s4/drivers/misc/Kconfig
@ linux-3.18.13-rt10-r7s4/drivers/misc/Kconfig:57 @ config AD525X_DPOT_SPI
 config ATMEL_TCLIB
 	bool "Atmel AT32/AT91 Timer/Counter Library"
 	depends on (AVR32 || ARCH_AT91)
+	default y if PREEMPT_RT_FULL
 	help
 	  Select this if you want a library to allocate the Timer/Counter
 	  blocks found on many Atmel processors.  This facilitates using
@ linux-3.18.13-rt10-r7s4/drivers/misc/Kconfig:73 @ config ATMEL_TCB_CLKSRC
 	  are combined to make a single 32-bit timer.
 
 	  When GENERIC_CLOCKEVENTS is defined, the third timer channel
-	  may be used as a clock event device supporting oneshot mode
-	  (delays of up to two seconds) based on the 32 KiHz clock.
+	  may be used as a clock event device supporting oneshot mode.
 
 config ATMEL_TCB_CLKSRC_BLOCK
 	int
@ linux-3.18.13-rt10-r7s4/drivers/misc/Kconfig:87 @ config ATMEL_TCB_CLKSRC_BLOCK
 	  TC can be used for other purposes, such as PWM generation and
 	  interval timing.
 
+config ATMEL_TCB_CLKSRC_USE_SLOW_CLOCK
+	bool "TC Block use 32 KiHz clock"
+	depends on ATMEL_TCB_CLKSRC
+	default y if !PREEMPT_RT_FULL
+	help
+	  Select this to use 32 KiHz base clock rate as TC block clock
+	  source for clock events.
+
 config DUMMY_IRQ
 	tristate "Dummy IRQ handler"
 	default n
@ linux-3.18.13-rt10-r7s4/drivers/misc/Kconfig:125 @ config IBM_ASM
 	  for information on the specific driver level and support statement
 	  for your IBM server.
 
+config HWLAT_DETECTOR
+	tristate "Testing module to detect hardware-induced latencies"
+	depends on DEBUG_FS
+	depends on RING_BUFFER
+	default m
+	---help---
+	  A simple hardware latency detector. Use this module to detect
+	  large latencies introduced by the behavior of the underlying
+	  system firmware external to Linux. We do this by polling the
+	  CPU timestamp counter with interrupts disabled for configurable
+	  intervals, looking for unexplained gaps. By default, the module
+	  is not enabled until the "enable" file within the
+	  "hwlat_detector" debugfs directory is toggled.
+
+	  This module is often used to detect SMIs (System Management
+	  Interrupts) on x86 systems, though it is not x86 specific. To
+	  this end, we default to using a sample window of 1 second,
+	  during which we sample for 0.5 seconds. If an SMI or similar
+	  event occurs during that time, it is recorded into a global
+	  ring buffer of up to 8K samples until retrieved.
+
+	  WARNING: This software should never be enabled (it can be
+	  built, but should not be turned on after it is loaded) in a
+	  production environment where high latencies are a concern,
+	  since the sampling mechanism actually introduces latencies for
+	  regular tasks while the CPU(s) are being held.
+
+	  If unsure, say N.
+
 config PHANTOM
 	tristate "Sensable PHANToM (PCI)"
 	depends on PCI
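
Once built and loaded, the detector is driven entirely through debugfs. A
minimal userspace reader for the sample stream (file names taken from the
help text and Documentation/hwlat_detector.txt; illustration only):

	#include <stdio.h>

	int main(void)
	{
		char line[128];
		FILE *f = fopen("/sys/kernel/debug/hwlat_detector/sample", "r");

		if (!f)
			return 1;
		while (fgets(line, sizeof(line), f))	/* blocks for new samples */
			fputs(line, stdout);
		fclose(f);
		return 0;
	}

Writing 1 to the "enable" file in the same directory starts the sampling
kthread; "threshold", "window" and "width" tune it.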
Index: linux-3.18.13-rt10-r7s4/drivers/misc/Makefile
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/misc/Makefile
+++ linux-3.18.13-rt10-r7s4/drivers/misc/Makefile
@ linux-3.18.13-rt10-r7s4/drivers/misc/Makefile:41 @ obj-$(CONFIG_C2PORT)		+= c2port/
 obj-$(CONFIG_HMC6352)		+= hmc6352.o
 obj-y				+= eeprom/
 obj-y				+= cb710/
+obj-$(CONFIG_HWLAT_DETECTOR)	+= hwlat_detector.o
 obj-$(CONFIG_SPEAR13XX_PCIE_GADGET)	+= spear13xx_pcie_gadget.o
 obj-$(CONFIG_VMWARE_BALLOON)	+= vmw_balloon.o
 obj-$(CONFIG_ARM_CHARLCD)	+= arm-charlcd.o
Index: linux-3.18.13-rt10-r7s4/drivers/misc/hwlat_detector.c
===================================================================
--- /dev/null
+++ linux-3.18.13-rt10-r7s4/drivers/misc/hwlat_detector.c
@ linux-3.18.13-rt10-r7s4/drivers/misc/hwlat_detector.c:4 @
+/*
+ * hwlat_detector.c - A simple Hardware Latency detector.
+ *
+ * Use this module to detect large system latencies induced by the behavior of
+ * certain underlying system hardware or firmware, independent of Linux itself.
+ * The code was developed originally to detect the presence of SMIs on Intel
+ * and AMD systems, although there is no dependency upon x86 herein.
+ *
+ * The classic example usage of this module is detecting the presence of
+ * SMIs, or System Management Interrupts, on Intel and AMD systems. An SMI
+ * is a special form of hardware interrupt, spawned from earlier CPU debug
+ * modes, in which the (BIOS/EFI/etc.) firmware arranges for the South Bridge
+ * LPC (or another device) to generate an interrupt under certain
+ * circumstances, for example, upon expiration of a special SMI timer device,
+ * due to certain external thermal readings, on certain I/O address accesses,
+ * and other situations. An SMI hits a dedicated CPU pin, triggers a special
+ * SMI mode (complete with its own memory map), and the OS is unaware of it.
+ *
+ * Although certain hardware-induced latencies are necessary (for example,
+ * a modern system often requires an SMI handler for correct thermal control
+ * and remote management), they can wreak havoc upon any OS-level low-latency
+ * performance guarantees, especially when the OS is not even made aware of
+ * the presence of these interrupts. For this reason, we need a somewhat
+ * brute-force mechanism to detect them. In this case, we do it by hogging
+ * the CPU(s) for configurable timer intervals, sampling the built-in CPU
+ * timer and looking for discontiguous readings.
+ *
+ * WARNING: This implementation necessarily introduces latencies. Therefore,
+ *          you should NEVER use this module in a production environment
+ *          requiring any kind of low-latency performance guarantee(s).
+ *
+ * Copyright (C) 2008-2009 Jon Masters, Red Hat, Inc. <jcm@redhat.com>
+ *
+ * Includes useful feedback from Clark Williams <clark@redhat.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/ring_buffer.h>
+#include <linux/time.h>
+#include <linux/hrtimer.h>
+#include <linux/kthread.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/trace_clock.h>
+
+#define BUF_SIZE_DEFAULT	262144UL		/* 8K*(sizeof(entry)) */
+#define BUF_FLAGS		(RB_FL_OVERWRITE)	/* no block on full */
+#define U64STR_SIZE		22			/* 20 digits max */
+
+#define VERSION			"1.0.0"
+#define BANNER			"hwlat_detector: "
+#define DRVNAME			"hwlat_detector"
+#define DEFAULT_SAMPLE_WINDOW	1000000			/* 1s */
+#define DEFAULT_SAMPLE_WIDTH	500000			/* 0.5s */
+#define DEFAULT_LAT_THRESHOLD	10			/* 10us */
+
+/* Module metadata */
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Jon Masters <jcm@redhat.com>");
+MODULE_DESCRIPTION("A simple hardware latency detector");
+MODULE_VERSION(VERSION);
+
+/* Module parameters */
+
+static int debug;
+static int enabled;
+static int threshold;
+
+module_param(debug, int, 0);			/* enable debug */
+module_param(enabled, int, 0);			/* enable detector */
+module_param(threshold, int, 0);		/* latency threshold */
+
+/* Buffering and sampling */
+
+static struct ring_buffer *ring_buffer;		/* sample buffer */
+static DEFINE_MUTEX(ring_buffer_mutex);		/* lock changes */
+static unsigned long buf_size = BUF_SIZE_DEFAULT;
+static struct task_struct *kthread;		/* sampling thread */
+
+/* DebugFS filesystem entries */
+
+static struct dentry *debug_dir;		/* debugfs directory */
+static struct dentry *debug_max;		/* maximum TSC delta */
+static struct dentry *debug_count;		/* total detect count */
+static struct dentry *debug_sample_width;	/* sample width us */
+static struct dentry *debug_sample_window;	/* sample window us */
+static struct dentry *debug_sample;		/* raw samples us */
+static struct dentry *debug_threshold;		/* threshold us */
+static struct dentry *debug_enable;		/* enable/disable */
+
+/* Individual samples and global state */
+
+struct sample;					/* latency sample */
+struct data;					/* Global state */
+
+/* Sampling functions */
+static int __buffer_add_sample(struct sample *sample);
+static struct sample *buffer_get_sample(struct sample *sample);
+
+/* Threading and state */
+static int kthread_fn(void *unused);
+static int start_kthread(void);
+static int stop_kthread(void);
+static void __reset_stats(void);
+static int init_stats(void);
+
+/* Debugfs interface */
+static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
+				size_t cnt, loff_t *ppos, const u64 *entry);
+static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
+				 size_t cnt, loff_t *ppos, u64 *entry);
+static int debug_sample_fopen(struct inode *inode, struct file *filp);
+static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
+				  size_t cnt, loff_t *ppos);
+static int debug_sample_release(struct inode *inode, struct file *filp);
+static int debug_enable_fopen(struct inode *inode, struct file *filp);
+static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
+				  size_t cnt, loff_t *ppos);
+static ssize_t debug_enable_fwrite(struct file *file,
+				   const char __user *user_buffer,
+				   size_t user_size, loff_t *offset);
+
+/* Initialization functions */
+static int init_debugfs(void);
+static void free_debugfs(void);
+static int detector_init(void);
+static void detector_exit(void);
+
+/* Individual latency samples are stored here when detected and packed into
+ * the ring_buffer circular buffer, where they are overwritten when
+ * more than buf_size/sizeof(sample) samples are received. */
+struct sample {
+	u64		seqnum;		/* unique sequence */
+	u64		duration;	/* ktime delta */
+	u64		outer_duration;	/* ktime delta (outer loop) */
+	struct timespec	timestamp;	/* wall time */
+	unsigned long   lost;
+};
+
+/* keep the global state somewhere. */
+static struct data {
+
+	struct mutex lock;		/* protect changes */
+
+	u64	count;			/* total since reset */
+	u64	max_sample;		/* max hardware latency */
+	u64	threshold;		/* sample threshold level */
+
+	u64	sample_window;		/* total sampling window (on+off) */
+	u64	sample_width;		/* active sampling portion of window */
+
+	atomic_t sample_open;		/* whether the sample file is open */
+
+	wait_queue_head_t wq;		/* waitqueue for new sample values */
+
+} data;
+
+/**
+ * __buffer_add_sample - add a new latency sample recording to the ring buffer
+ * @sample: The new latency sample value
+ *
+ * This receives a new latency sample and records it in a global ring buffer.
+ * No additional locking is used in this case.
+ */
+static int __buffer_add_sample(struct sample *sample)
+{
+	return ring_buffer_write(ring_buffer,
+				 sizeof(struct sample), sample);
+}
+
+/**
+ * buffer_get_sample - remove a hardware latency sample from the ring buffer
+ * @sample: Pre-allocated storage for the sample
+ *
+ * This retrieves a hardware latency sample from the global circular buffer
+ */
+static struct sample *buffer_get_sample(struct sample *sample)
+{
+	struct ring_buffer_event *e = NULL;
+	struct sample *s = NULL;
+	unsigned int cpu = 0;
+
+	if (!sample)
+		return NULL;
+
+	mutex_lock(&ring_buffer_mutex);
+	for_each_online_cpu(cpu) {
+		e = ring_buffer_consume(ring_buffer, cpu, NULL, &sample->lost);
+		if (e)
+			break;
+	}
+
+	if (e) {
+		s = ring_buffer_event_data(e);
+		memcpy(sample, s, sizeof(struct sample));
+	} else {
+		sample = NULL;
+	}
+	mutex_unlock(&ring_buffer_mutex);
+
+	return sample;
+}
+
+#ifndef CONFIG_TRACING
+#define time_type	ktime_t
+#define time_get()	ktime_get()
+#define time_to_us(x)	ktime_to_us(x)
+#define time_sub(a, b)	ktime_sub(a, b)
+#define init_time(a, b)	(a).tv64 = b
+#define time_u64(a)	((a).tv64)
+#else
+#define time_type	u64
+#define time_get()	trace_clock_local()
+#define time_to_us(x)	div_u64(x, 1000)
+#define time_sub(a, b)	((a) - (b))
+#define init_time(a, b)	(a = b)
+#define time_u64(a)	a
+#endif
+/**
+ * get_sample - sample the CPU TSC and look for likely hardware latencies
+ *
+ * Used to repeatedly capture the CPU TSC (or similar), looking for potential
+ * hardware-induced latency. Called with interrupts disabled and with
+ * data.lock held.
+ */
+static int get_sample(void)
+{
+	time_type start, t1, t2, last_t2;
+	s64 diff, total = 0;
+	u64 sample = 0;
+	u64 outer_sample = 0;
+	int ret = -1;
+
+	init_time(last_t2, 0);
+	start = time_get(); /* start timestamp */
+
+	do {
+
+		t1 = time_get();	/* we'll look for a discontinuity */
+		t2 = time_get();
+
+		if (time_u64(last_t2)) {
+			/* Check the delta from outer loop (t2 to next t1) */
+			diff = time_to_us(time_sub(t1, last_t2));
+			/* This shouldn't happen */
+			if (diff < 0) {
+				pr_err(BANNER "time running backwards\n");
+				goto out;
+			}
+			if (diff > outer_sample)
+				outer_sample = diff;
+		}
+		last_t2 = t2;
+
+		total = time_to_us(time_sub(t2, start)); /* sample width */
+
+		/* This checks the inner loop (t1 to t2) */
+		diff = time_to_us(time_sub(t2, t1));     /* current diff */
+
+		/* This shouldn't happen */
+		if (diff < 0) {
+			pr_err(BANNER "time running backwards\n");
+			goto out;
+		}
+
+		if (diff > sample)
+			sample = diff; /* only want highest value */
+
+	} while (total <= data.sample_width);
+
+	ret = 0;
+
+	/* If we exceed the threshold value, we have found a hardware latency */
+	if (sample > data.threshold || outer_sample > data.threshold) {
+		struct sample s;
+
+		ret = 1;
+
+		data.count++;
+		s.seqnum = data.count;
+		s.duration = sample;
+		s.outer_duration = outer_sample;
+		s.timestamp = CURRENT_TIME;
+		__buffer_add_sample(&s);
+
+		/* Keep a running maximum ever recorded hardware latency */
+		if (sample > data.max_sample)
+			data.max_sample = sample;
+	}
+
+out:
+	return ret;
+}
+
+/**
+ * kthread_fn - The CPU time sampling/hardware latency detection kernel thread
+ * @unused: A required part of the kthread API.
+ *
+ * Used to periodically sample the CPU TSC via a call to get_sample. We
+ * disable interrupts, which does (intentionally) introduce latency since we
+ * need to ensure nothing else might be running (and thus preempting us).
+ * Obviously this should never be used in production environments.
+ *
+ * Currently this runs on whichever CPU it was scheduled on; most
+ * real-world hardware latency situations occur across several CPUs,
+ * but we might later generalize this if we find there are any actual
+ * systems with alternate SMI delivery or other hardware latencies.
+ */
+static int kthread_fn(void *unused)
+{
+	int ret;
+	u64 interval;
+
+	while (!kthread_should_stop()) {
+
+		mutex_lock(&data.lock);
+
+		local_irq_disable();
+		ret = get_sample();
+		local_irq_enable();
+
+		if (ret > 0)
+			wake_up(&data.wq); /* wake up reader(s) */
+
+		interval = data.sample_window - data.sample_width;
+		do_div(interval, USEC_PER_MSEC); /* modifies interval value */
+
+		mutex_unlock(&data.lock);
+
+		if (msleep_interruptible(interval))
+			break;
+	}
+
+	return 0;
+}
+
+/**
+ * start_kthread - Kick off the hardware latency sampling/detector kthread
+ *
+ * This starts a kernel thread that will sit and sample the CPU timestamp
+ * counter (TSC or similar) and look for potential hardware latencies.
+ */
+static int start_kthread(void)
+{
+	kthread = kthread_run(kthread_fn, NULL, DRVNAME);
+	if (IS_ERR(kthread)) {
+		pr_err(BANNER "could not start sampling thread\n");
+		enabled = 0;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * stop_kthread - Inform the hardware latency sampling/detector kthread to stop
+ *
+ * This kicks the running hardware latency sampling/detector kernel thread and
+ * tells it to stop sampling now. Use this on unload and at system shutdown.
+ */
+static int stop_kthread(void)
+{
+	int ret;
+
+	ret = kthread_stop(kthread);
+
+	return ret;
+}
+
+/**
+ * __reset_stats - Reset statistics for the hardware latency detector
+ *
+ * We use data to store various statistics and global state. We call this
+ * function in order to reset those when "enable" is toggled on or off, and
+ * also at initialization. Should be called with data.lock held.
+ */
+static void __reset_stats(void)
+{
+	data.count = 0;
+	data.max_sample = 0;
+	ring_buffer_reset(ring_buffer); /* flush out old sample entries */
+}
+
+/**
+ * init_stats - Setup global state statistics for the hardware latency detector
+ *
+ * We use data to store various statistics and global state. We also use
+ * a global ring buffer (ring_buffer) to keep raw samples of detected hardware
+ * induced system latencies. This function initializes these structures and
+ * allocates the global ring buffer also.
+ */
+static int init_stats(void)
+{
+	int ret = -ENOMEM;
+
+	mutex_init(&data.lock);
+	init_waitqueue_head(&data.wq);
+	atomic_set(&data.sample_open, 0);
+
+	ring_buffer = ring_buffer_alloc(buf_size, BUF_FLAGS);
+
+	if (WARN(!ring_buffer, KERN_ERR BANNER
+			       "failed to allocate ring buffer!\n"))
+		goto out;
+
+	__reset_stats();
+	data.threshold = threshold ?: DEFAULT_LAT_THRESHOLD; /* threshold us */
+	data.sample_window = DEFAULT_SAMPLE_WINDOW; /* window us */
+	data.sample_width = DEFAULT_SAMPLE_WIDTH;   /* width us */
+
+	ret = 0;
+
+out:
+	return ret;
+}
+
+/**
+ * simple_data_read - Wrapper read function for global state debugfs entries
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ * @entry: The entry to read from
+ *
+ * This function provides a generic read implementation for the global state
+ * "data" structure debugfs filesystem entries. It would be nice to use
+ * simple_attr_read directly, but we need to make sure that the data.lock
+ * is held during the actual read.
+ */
+static ssize_t simple_data_read(struct file *filp, char __user *ubuf,
+				size_t cnt, loff_t *ppos, const u64 *entry)
+{
+	char buf[U64STR_SIZE];
+	u64 val = 0;
+	int len = 0;
+
+	memset(buf, 0, sizeof(buf));
+
+	if (!entry)
+		return -EFAULT;
+
+	mutex_lock(&data.lock);
+	val = *entry;
+	mutex_unlock(&data.lock);
+
+	len = snprintf(buf, sizeof(buf), "%llu\n", (unsigned long long)val);
+
+	return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
+}
+
+/*
+ * simple_data_write - Wrapper write function for global state debugfs entries
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to write value from
+ * @cnt: The maximum number of bytes to write
+ * @ppos: The current "file" position
+ * @entry: The entry to write to
+ *
+ * This function provides a generic write implementation for the global state
+ * "data" structure debugfs filesystem entries. It would be nice to use
+ * simple_attr_write directly, but we need to make sure that the data.lock
+ * is held during the actual write.
+ */
+static ssize_t simple_data_write(struct file *filp, const char __user *ubuf,
+				 size_t cnt, loff_t *ppos, u64 *entry)
+{
+	char buf[U64STR_SIZE];
+	int csize = min(cnt, sizeof(buf));
+	u64 val = 0;
+	int err = 0;
+
+	memset(buf, '\0', sizeof(buf));
+	if (copy_from_user(buf, ubuf, csize))
+		return -EFAULT;
+
+	buf[U64STR_SIZE-1] = '\0';			/* just in case */
+	err = kstrtoull(buf, 10, &val);
+	if (err)
+		return -EINVAL;
+
+	mutex_lock(&data.lock);
+	*entry = val;
+	mutex_unlock(&data.lock);
+
+	return csize;
+}
+
+/**
+ * debug_count_fopen - Open function for "count" debugfs entry
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function provides an open implementation for the "count" debugfs
+ * interface to the hardware latency detector.
+ */
+static int debug_count_fopen(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+/**
+ * debug_count_fread - Read function for "count" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function provides a read implementation for the "count" debugfs
+ * interface to the hardware latency detector. Can be used to read the
+ * number of latency readings exceeding the configured threshold since
+ * the detector was last reset (e.g. by writing a zero into "count").
+ */
+static ssize_t debug_count_fread(struct file *filp, char __user *ubuf,
+				     size_t cnt, loff_t *ppos)
+{
+	return simple_data_read(filp, ubuf, cnt, ppos, &data.count);
+}
+
+/**
+ * debug_count_fwrite - Write function for "count" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function provides a write implementation for the "count" debugfs
+ * interface to the hardware latency detector. Can be used to write a
+ * desired value, especially to zero the total count.
+ */
+static ssize_t  debug_count_fwrite(struct file *filp,
+				       const char __user *ubuf,
+				       size_t cnt,
+				       loff_t *ppos)
+{
+	return simple_data_write(filp, ubuf, cnt, ppos, &data.count);
+}
+
+/**
+ * debug_enable_fopen - Dummy open function for "enable" debugfs interface
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function provides an open implementation for the "enable" debugfs
+ * interface to the hardware latency detector.
+ */
+static int debug_enable_fopen(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+/**
+ * debug_enable_fread - Read function for "enable" debugfs interface
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function provides a read implementation for the "enable" debugfs
+ * interface to the hardware latency detector. Can be used to determine
+ * whether the detector is currently enabled ("0\n" or "1\n" returned).
+ */
+static ssize_t debug_enable_fread(struct file *filp, char __user *ubuf,
+				      size_t cnt, loff_t *ppos)
+{
+	char buf[4];
+
+	if ((cnt < sizeof(buf)) || (*ppos))
+		return 0;
+
+	buf[0] = enabled ? '1' : '0';
+	buf[1] = '\n';
+	buf[2] = '\0';
+	if (copy_to_user(ubuf, buf, strlen(buf)))
+		return -EFAULT;
+	return *ppos = strlen(buf);
+}
+
+/**
+ * debug_enable_fwrite - Write function for "enable" debugfs interface
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function provides a write implementation for the "enable" debugfs
+ * interface to the hardware latency detector. Can be used to enable or
+ * disable the detector; enabling also resets the global stats and starts
+ * the sampling kthread, while disabling stops the kthread and wakes any
+ * blocked sample readers.
+ */
+static ssize_t  debug_enable_fwrite(struct file *filp,
+					const char __user *ubuf,
+					size_t cnt,
+					loff_t *ppos)
+{
+	char buf[4];
+	int csize = min(cnt, sizeof(buf));
+	long val = 0;
+	int err = 0;
+
+	memset(buf, '\0', sizeof(buf));
+	if (copy_from_user(buf, ubuf, csize))
+		return -EFAULT;
+
+	buf[sizeof(buf)-1] = '\0';			/* just in case */
+	err = kstrtoul(buf, 10, &val);
+	if (err)
+		return -EINVAL;
+
+	if (val) {
+		if (enabled)
+			goto out;
+		enabled = 1;
+		__reset_stats();
+		if (start_kthread())
+			return -EFAULT;
+	} else {
+		if (!enabled)
+			goto out;
+		enabled = 0;
+		err = stop_kthread();
+		if (err) {
+			pr_err(BANNER "cannot stop kthread\n");
+			return -EFAULT;
+		}
+		wake_up(&data.wq);		/* reader(s) should return */
+	}
+out:
+	return csize;
+}
+
+/**
+ * debug_max_fopen - Open function for "max" debugfs entry
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function provides an open implementation for the "max" debugfs
+ * interface to the hardware latency detector.
+ */
+static int debug_max_fopen(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+/**
+ * debug_max_fread - Read function for "max" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function provides a read implementation for the "max" debugfs
+ * interface to the hardware latency detector. Can be used to determine
+ * the maximum latency value observed since it was last reset.
+ */
+static ssize_t debug_max_fread(struct file *filp, char __user *ubuf,
+				   size_t cnt, loff_t *ppos)
+{
+	return simple_data_read(filp, ubuf, cnt, ppos, &data.max_sample);
+}
+
+/**
+ * debug_max_fwrite - Write function for "max" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function provides a write implementation for the "max" debugfs
+ * interface to the hardware latency detector. Can be used to reset the
+ * maximum, or to set it to some other desired value; if subsequent
+ * measurements exceed this value, the maximum will be updated.
+ */
+static ssize_t  debug_max_fwrite(struct file *filp,
+				     const char __user *ubuf,
+				     size_t cnt,
+				     loff_t *ppos)
+{
+	return simple_data_write(filp, ubuf, cnt, ppos, &data.max_sample);
+}
+
+/**
+ * debug_sample_fopen - An open function for "sample" debugfs interface
+ * @inode: The in-kernel inode representation of this debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function handles opening the "sample" file within the hardware
+ * latency detector debugfs directory interface. This file is used to read
+ * raw samples from the global ring_buffer and allows the user to see a
+ * running latency history. The file can be opened blocking or
+ * non-blocking; in blocking mode reads wait for new samples, much like
+ * a pipe. Simple locking prevents multiple simultaneous opens.
+ */
+static int debug_sample_fopen(struct inode *inode, struct file *filp)
+{
+	if (!atomic_add_unless(&data.sample_open, 1, 1))
+		return -EBUSY;
+
+	return 0;
+}
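+/*
+ * Note on the "lock" above: atomic_add_unless(&data.sample_open, 1, 1)
+ * increments 0 -> 1 but refuses if the counter is already 1, so at most
+ * one process may hold "sample" open at a time; debug_sample_release()
+ * below decrements the counter again on close.
+ */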
+
+/**
+ * debug_sample_fread - A read function for "sample" debugfs interface
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that will contain the samples read
+ * @cnt: The maximum bytes to read from the debugfs "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function handles reading from the "sample" file within the hardware
+ * latency detector debugfs directory interface. This file is used to read
+ * raw samples from the global ring_buffer and allows the user to see a
+ * running latency history. By default a read will block until a new
+ * value is written into the sample buffer, unless values are already
+ * waiting in the buffer or the file was opened non-blocking.
+ */
+static ssize_t debug_sample_fread(struct file *filp, char __user *ubuf,
+					size_t cnt, loff_t *ppos)
+{
+	int len = 0;
+	char buf[64];
+	struct sample *sample = NULL;
+
+	if (!enabled)
+		return 0;
+
+	sample = kzalloc(sizeof(struct sample), GFP_KERNEL);
+	if (!sample)
+		return -ENOMEM;
+
+	while (!buffer_get_sample(sample)) {
+
+		DEFINE_WAIT(wait);
+
+		if (filp->f_flags & O_NONBLOCK) {
+			len = -EAGAIN;
+			goto out;
+		}
+
+		prepare_to_wait(&data.wq, &wait, TASK_INTERRUPTIBLE);
+		schedule();
+		finish_wait(&data.wq, &wait);
+
+		if (signal_pending(current)) {
+			len = -EINTR;
+			goto out;
+		}
+
+		if (!enabled) {			/* enable was toggled */
+			len = 0;
+			goto out;
+		}
+	}
+
+	len = snprintf(buf, sizeof(buf), "%010lu.%010lu\t%llu\t%llu\n",
+		       sample->timestamp.tv_sec,
+		       sample->timestamp.tv_nsec,
+		       sample->duration,
+		       sample->outer_duration);
+
+	/* handling partial reads is more trouble than it's worth */
+	if (len > cnt)
+		goto out;
+
+	if (copy_to_user(ubuf, buf, len))
+		len = -EFAULT;
+
+out:
+	kfree(sample);
+	return len;
+}
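+/*
+ * Given the snprintf() format above, each line read from "sample" looks
+ * like the following (illustrative values only):
+ *
+ *	0000001430.0000245678	18	4
+ *
+ * i.e. a zero-padded seconds.nanoseconds timestamp, then the sample's
+ * duration and outer_duration fields in us, tab-separated.
+ */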
+
+/**
+ * debug_sample_release - Release function for "sample" debugfs interface
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function completes the close of the debugfs interface "sample" file.
+ * Frees the sample_open "lock" so that other users may open the interface.
+ */
+static int debug_sample_release(struct inode *inode, struct file *filp)
+{
+	atomic_dec(&data.sample_open);
+
+	return 0;
+}
+
+/**
+ * debug_threshold_fopen - Open function for "threshold" debugfs entry
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function provides an open implementation for the "threshold" debugfs
+ * interface to the hardware latency detector.
+ */
+static int debug_threshold_fopen(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+/**
+ * debug_threshold_fread - Read function for "threshold" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function provides a read implementation for the "threshold" debugfs
+ * interface to the hardware latency detector. It can be used to determine
+ * the current threshold level at which a latency will be recorded in the
+ * global ring buffer, typically on the order of 10us.
+ */
+static ssize_t debug_threshold_fread(struct file *filp, char __user *ubuf,
+					 size_t cnt, loff_t *ppos)
+{
+	return simple_data_read(filp, ubuf, cnt, ppos, &data.threshold);
+}
+
+/**
+ * debug_threshold_fwrite - Write function for "threshold" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function provides a write implementation for the "threshold" debugfs
+ * interface to the hardware latency detector. It can be used to configure
+ * the threshold level at which any subsequently detected latencies will
+ * be recorded into the global ring buffer.
+ */
+static ssize_t  debug_threshold_fwrite(struct file *filp,
+					const char __user *ubuf,
+					size_t cnt,
+					loff_t *ppos)
+{
+	int ret;
+
+	ret = simple_data_write(filp, ubuf, cnt, ppos, &data.threshold);
+
+	if (enabled)
+		wake_up_process(kthread);
+
+	return ret;
+}
+
+/**
+ * debug_width_fopen - Open function for "width" debugfs entry
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function provides an open implementation for the "width" debugfs
+ * interface to the hardware latency detector.
+ */
+static int debug_width_fopen(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+/**
+ * debug_width_fread - Read function for "width" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function provides a read implementation for the "width" debugfs
+ * interface to the hardware latency detector. It can be used to determine
+ * for how many us of each total sample window we will actively sample for
+ * hardware-induced latency periods. Obviously, we cannot sample constantly
+ * and still have the system respond to a sample reader, or worse, have it
+ * appear to have gone out to lunch.
+ */
+static ssize_t debug_width_fread(struct file *filp, char __user *ubuf,
+				     size_t cnt, loff_t *ppos)
+{
+	return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_width);
+}
+
+/**
+ * debug_width_fwrite - Write function for "width" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function provides a write implementation for the "width" debugfs
+ * interface to the hardware latency detector. It can be used to configure
+ * for how many us of each total sample window we will actively sample for
+ * hardware-induced latency periods. Obviously, we cannot sample constantly
+ * and still have the system respond to a sample reader, or worse, have it
+ * appear to have gone out to lunch. It is enforced that the width is less
+ * than the total window size.
+ */
+static ssize_t  debug_width_fwrite(struct file *filp,
+				       const char __user *ubuf,
+				       size_t cnt,
+				       loff_t *ppos)
+{
+	char buf[U64STR_SIZE];
+	int csize = min(cnt, sizeof(buf));
+	u64 val = 0;
+	int err = 0;
+
+	memset(buf, '\0', sizeof(buf));
+	if (copy_from_user(buf, ubuf, csize))
+		return -EFAULT;
+
+	buf[U64STR_SIZE-1] = '\0';			/* just in case */
+	err = kstrtoull(buf, 10, &val);
+	if (err)
+		return -EINVAL;
+
+	mutex_lock(&data.lock);
+	if (val >= data.sample_window) {
+		mutex_unlock(&data.lock);
+		return -EINVAL;
+	}
+	data.sample_width = val;
+	mutex_unlock(&data.lock);
+
+	if (enabled)
+		wake_up_process(kthread);
+
+	return csize;
+}
+
+/**
+ * debug_window_fopen - Open function for "window" debugfs entry
+ * @inode: The in-kernel inode representation of the debugfs "file"
+ * @filp: The active open file structure for the debugfs "file"
+ *
+ * This function provides an open implementation for the "window" debugfs
+ * interface to the hardware latency detector. The window is the total time
+ * in us that will be considered one sample period. Conceptually, windows
+ * occur back-to-back and contain a sample width period during which
+ * actual sampling occurs.
+ */
+static int debug_window_fopen(struct inode *inode, struct file *filp)
+{
+	return 0;
+}
+
+/**
+ * debug_window_fread - Read function for "window" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The userspace provided buffer to read value into
+ * @cnt: The maximum number of bytes to read
+ * @ppos: The current "file" position
+ *
+ * This function provides a read implementation for the "window" debugfs
+ * interface to the hardware latency detector. The window is the total time
+ * in us that will be considered one sample period. Conceptually, windows
+ * occur back-to-back and contain a sample width period during which
+ * actual sampling occurs. Can be used to read the total window size.
+ */
+static ssize_t debug_window_fread(struct file *filp, char __user *ubuf,
+				      size_t cnt, loff_t *ppos)
+{
+	return simple_data_read(filp, ubuf, cnt, ppos, &data.sample_window);
+}
+
+/**
+ * debug_window_fwrite - Write function for "window" debugfs entry
+ * @filp: The active open file structure for the debugfs "file"
+ * @ubuf: The user buffer that contains the value to write
+ * @cnt: The maximum number of bytes to write to "file"
+ * @ppos: The current position in the debugfs "file"
+ *
+ * This function provides a write implementation for the "window" debugfs
+ * interface to the hardware latency detector. The window is the total time
+ * in us that will be considered one sample period. Conceptually, windows
+ * occur back-to-back and contain a sample width period during which
+ * actual sampling occurs. Can be used to write a new total window size. It
+ * is enforced that any value written must be greater than the sample width
+ * size, or an error results.
+ */
+static ssize_t  debug_window_fwrite(struct file *filp,
+					const char __user *ubuf,
+					size_t cnt,
+					loff_t *ppos)
+{
+	char buf[U64STR_SIZE];
+	int csize = min(cnt, sizeof(buf));
+	u64 val = 0;
+	int err = 0;
+
+	memset(buf, '\0', sizeof(buf));
+	if (copy_from_user(buf, ubuf, csize))
+		return -EFAULT;
+
+	buf[U64STR_SIZE-1] = '\0';			/* just in case */
+	err = kstrtoull(buf, 10, &val);
+	if (err)
+		return -EINVAL;
+
+	mutex_lock(&data.lock);
+	if (val <= data.sample_width) {
+		mutex_unlock(&data.lock);
+		return -EINVAL;
+	}
+	data.sample_window = val;
+	mutex_unlock(&data.lock);
+
+	return csize;
+}
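+/*
+ * The two writers above enforce a strict ordering, e.g.: with the window
+ * at 1000000us, writing 999999 to "width" succeeds while writing 1000000
+ * returns -EINVAL; conversely, "window" only accepts values strictly
+ * greater than the current sample width.
+ */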
+
+/*
+ * Function pointers for the "count" debugfs file operations
+ */
+static const struct file_operations count_fops = {
+	.open		= debug_count_fopen,
+	.read		= debug_count_fread,
+	.write		= debug_count_fwrite,
+	.owner		= THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "enable" debugfs file operations
+ */
+static const struct file_operations enable_fops = {
+	.open		= debug_enable_fopen,
+	.read		= debug_enable_fread,
+	.write		= debug_enable_fwrite,
+	.owner		= THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "max" debugfs file operations
+ */
+static const struct file_operations max_fops = {
+	.open		= debug_max_fopen,
+	.read		= debug_max_fread,
+	.write		= debug_max_fwrite,
+	.owner		= THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "sample" debugfs file operations
+ */
+static const struct file_operations sample_fops = {
+	.open		= debug_sample_fopen,
+	.read		= debug_sample_fread,
+	.release	= debug_sample_release,
+	.owner		= THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "threshold" debugfs file operations
+ */
+static const struct file_operations threshold_fops = {
+	.open		= debug_threshold_fopen,
+	.read		= debug_threshold_fread,
+	.write		= debug_threshold_fwrite,
+	.owner		= THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "width" debugfs file operations
+ */
+static const struct file_operations width_fops = {
+	.open		= debug_width_fopen,
+	.read		= debug_width_fread,
+	.write		= debug_width_fwrite,
+	.owner		= THIS_MODULE,
+};
+
+/*
+ * Function pointers for the "window" debugfs file operations
+ */
+static const struct file_operations window_fops = {
+	.open		= debug_window_fopen,
+	.read		= debug_window_fread,
+	.write		= debug_window_fwrite,
+	.owner		= THIS_MODULE,
+};
+
+/**
+ * init_debugfs - A function to initialize the debugfs interface files
+ *
+ * This function creates entries in debugfs for "hwlat_detector", including
+ * files to read values from the detector, current samples, and the
+ * maximum sample that has been captured since the hardware latency
+ * detector was started.
+ */
+static int init_debugfs(void)
+{
+	int ret = -ENOMEM;
+
+	debug_dir = debugfs_create_dir(DRVNAME, NULL);
+	if (!debug_dir)
+		goto err_debug_dir;
+
+	debug_sample = debugfs_create_file("sample", 0444,
+					       debug_dir, NULL,
+					       &sample_fops);
+	if (!debug_sample)
+		goto err_sample;
+
+	debug_count = debugfs_create_file("count", 0444,
+					      debug_dir, NULL,
+					      &count_fops);
+	if (!debug_count)
+		goto err_count;
+
+	debug_max = debugfs_create_file("max", 0444,
+					    debug_dir, NULL,
+					    &max_fops);
+	if (!debug_max)
+		goto err_max;
+
+	debug_sample_window = debugfs_create_file("window", 0644,
+						      debug_dir, NULL,
+						      &window_fops);
+	if (!debug_sample_window)
+		goto err_window;
+
+	debug_sample_width = debugfs_create_file("width", 0644,
+						     debug_dir, NULL,
+						     &width_fops);
+	if (!debug_sample_width)
+		goto err_width;
+
+	debug_threshold = debugfs_create_file("threshold", 0644,
+						  debug_dir, NULL,
+						  &threshold_fops);
+	if (!debug_threshold)
+		goto err_threshold;
+
+	debug_enable = debugfs_create_file("enable", 0644,
+					       debug_dir, &enabled,
+					       &enable_fops);
+	if (!debug_enable)
+		goto err_enable;
+
+	ret = 0;
+	goto out;
+
+err_enable:
+	debugfs_remove(debug_threshold);
+err_threshold:
+	debugfs_remove(debug_sample_width);
+err_width:
+	debugfs_remove(debug_sample_window);
+err_window:
+	debugfs_remove(debug_max);
+err_max:
+	debugfs_remove(debug_count);
+err_count:
+	debugfs_remove(debug_sample);
+err_sample:
+	debugfs_remove(debug_dir);
+err_debug_dir:
+out:
+	return ret;
+}
+
+/**
+ * free_debugfs - A function to cleanup the debugfs file interface
+ */
+static void free_debugfs(void)
+{
+	/* could also use a debugfs_remove_recursive */
+	debugfs_remove(debug_enable);
+	debugfs_remove(debug_threshold);
+	debugfs_remove(debug_sample_width);
+	debugfs_remove(debug_sample_window);
+	debugfs_remove(debug_max);
+	debugfs_remove(debug_count);
+	debugfs_remove(debug_sample);
+	debugfs_remove(debug_dir);
+}
+
+/**
+ * detector_init - Standard module initialization code
+ */
+static int detector_init(void)
+{
+	int ret = -ENOMEM;
+
+	pr_info(BANNER "version %s\n", VERSION);
+
+	ret = init_stats();
+	if (ret)
+		goto out;
+
+	ret = init_debugfs();
+	if (ret)
+		goto err_stats;
+
+	if (enabled)
+		ret = start_kthread();
+
+	goto out;
+
+err_stats:
+	ring_buffer_free(ring_buffer);
+out:
+	return ret;
+}
+
+/**
+ * detector_exit - Standard module cleanup code
+ */
+static void detector_exit(void)
+{
+	int err;
+
+	if (enabled) {
+		enabled = 0;
+		err = stop_kthread();
+		if (err)
+			pr_err(BANNER "cannot stop kthread\n");
+	}
+
+	free_debugfs();
+	ring_buffer_free(ring_buffer);	/* free up the ring buffer */
+}
+
+module_init(detector_init);
+module_exit(detector_exit);
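+
+/*
+ * Usage sketch (not part of the module; assumes debugfs is mounted at
+ * /sys/kernel/debug and that DRVNAME is "hwlat_detector"):
+ *
+ *	# echo 1 > /sys/kernel/debug/hwlat_detector/enable
+ *	# cat /sys/kernel/debug/hwlat_detector/sample
+ *	# cat /sys/kernel/debug/hwlat_detector/max
+ *	# echo 0 > /sys/kernel/debug/hwlat_detector/count
+ */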
Index: linux-3.18.13-rt10-r7s4/drivers/mmc/host/mmci.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/mmc/host/mmci.c
+++ linux-3.18.13-rt10-r7s4/drivers/mmc/host/mmci.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1156 @ static irqreturn_t mmci_pio_irq(int irq,
 	struct sg_mapping_iter *sg_miter = &host->sg_miter;
 	struct variant_data *variant = host->variant;
 	void __iomem *base = host->base;
-	unsigned long flags;
 	u32 status;
 
 	status = readl(base + MMCISTATUS);
 
 	dev_dbg(mmc_dev(host->mmc), "irq1 (pio) %08x\n", status);
 
-	local_irq_save(flags);
-
 	do {
 		unsigned int remain, len;
 		char *buffer;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1201 @ static irqreturn_t mmci_pio_irq(int irq,
 
 	sg_miter_stop(sg_miter);
 
-	local_irq_restore(flags);
-
 	/*
 	 * If we have less than the fifo 'half-full' threshold to transfer,
 	 * trigger a PIO interrupt as soon as any data is available.
Index: linux-3.18.13-rt10-r7s4/drivers/mmc/host/sdhci.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/mmc/host/sdhci.c
+++ linux-3.18.13-rt10-r7s4/drivers/mmc/host/sdhci.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:2568 @ static irqreturn_t sdhci_thread_irq(int
 	return isr ? IRQ_HANDLED : IRQ_NONE;
 }
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+static irqreturn_t sdhci_rt_irq(int irq, void *dev_id)
+{
+	irqreturn_t ret;
+
+	local_bh_disable();
+	ret = sdhci_irq(irq, dev_id);
+	local_bh_enable();
+	if (ret == IRQ_WAKE_THREAD)
+		ret = sdhci_thread_irq(irq, dev_id);
+	return ret;
+}
+#endif
+
+static int sdhci_req_irq(struct sdhci_host *host)
+{
+#ifdef CONFIG_PREEMPT_RT_BASE
+	return request_threaded_irq(host->irq, NULL, sdhci_rt_irq,
+				    IRQF_SHARED, mmc_hostname(host->mmc), host);
+#else
+	return request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
+				    IRQF_SHARED, mmc_hostname(host->mmc), host);
+#endif
+}
+
 /*****************************************************************************\
  *                                                                           *
  * Suspend/resume                                                            *
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:2660 @ int sdhci_resume_host(struct sdhci_host
 	}
 
 	if (!device_may_wakeup(mmc_dev(host->mmc))) {
-		ret = request_threaded_irq(host->irq, sdhci_irq,
-					   sdhci_thread_irq, IRQF_SHARED,
-					   mmc_hostname(host->mmc), host);
+		ret = sdhci_req_irq(host);
 		if (ret)
 			return ret;
 	} else {
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:3279 @ int sdhci_add_host(struct sdhci_host *ho
 
 	sdhci_init(host, 0);
 
-	ret = request_threaded_irq(host->irq, sdhci_irq, sdhci_thread_irq,
-				   IRQF_SHARED,	mmc_hostname(mmc), host);
+	ret = sdhci_req_irq(host);
 	if (ret) {
 		pr_err("%s: Failed to request IRQ %d: %d\n",
 		       mmc_hostname(mmc), host->irq, ret);
Index: linux-3.18.13-rt10-r7s4/drivers/net/ethernet/3com/3c59x.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/net/ethernet/3com/3c59x.c
+++ linux-3.18.13-rt10-r7s4/drivers/net/ethernet/3com/3c59x.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:845 @ static void poll_vortex(struct net_devic
 {
 	struct vortex_private *vp = netdev_priv(dev);
 	unsigned long flags;
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	(vp->full_bus_master_rx ? boomerang_interrupt:vortex_interrupt)(dev->irq,dev);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
 }
 #endif
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1919 @ static void vortex_tx_timeout(struct net
 			 * Block interrupts because vortex_interrupt does a bare spin_lock()
 			 */
 			unsigned long flags;
-			local_irq_save(flags);
+			local_irq_save_nort(flags);
 			if (vp->full_bus_master_tx)
 				boomerang_interrupt(dev->irq, dev);
 			else
 				vortex_interrupt(dev->irq, dev);
-			local_irq_restore(flags);
+			local_irq_restore_nort(flags);
 		}
 	}
 
Index: linux-3.18.13-rt10-r7s4/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ linux-3.18.13-rt10-r7s4/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:2216 @ static netdev_tx_t atl1c_xmit_frame(stru
 	}
 
 	tpd_req = atl1c_cal_tpd_req(skb);
-	if (!spin_trylock_irqsave(&adapter->tx_lock, flags)) {
-		if (netif_msg_pktdata(adapter))
-			dev_info(&adapter->pdev->dev, "tx locked\n");
-		return NETDEV_TX_LOCKED;
-	}
+	spin_lock_irqsave(&adapter->tx_lock, flags);
 
 	if (atl1c_tpd_avail(adapter, type) < tpd_req) {
 		/* no enough descriptor, just stop queue */
Index: linux-3.18.13-rt10-r7s4/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ linux-3.18.13-rt10-r7s4/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1883 @ static netdev_tx_t atl1e_xmit_frame(stru
 		return NETDEV_TX_OK;
 	}
 	tpd_req = atl1e_cal_tdp_req(skb);
-	if (!spin_trylock_irqsave(&adapter->tx_lock, flags))
-		return NETDEV_TX_LOCKED;
+	spin_lock_irqsave(&adapter->tx_lock, flags);
 
 	if (atl1e_tpd_avail(adapter) < tpd_req) {
 		/* no enough descriptor, just stop queue */
Index: linux-3.18.13-rt10-r7s4/drivers/net/ethernet/chelsio/cxgb/sge.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ linux-3.18.13-rt10-r7s4/drivers/net/ethernet/chelsio/cxgb/sge.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1666 @ static int t1_sge_tx(struct sk_buff *skb
 	struct cmdQ *q = &sge->cmdQ[qid];
 	unsigned int credits, pidx, genbit, count, use_sched_skb = 0;
 
-	if (!spin_trylock(&q->lock))
-		return NETDEV_TX_LOCKED;
+	spin_lock(&q->lock);
 
 	reclaim_completed_tx(sge, q);
 
Index: linux-3.18.13-rt10-r7s4/drivers/net/ethernet/freescale/gianfar.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/net/ethernet/freescale/gianfar.c
+++ linux-3.18.13-rt10-r7s4/drivers/net/ethernet/freescale/gianfar.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1486 @ static int gfar_suspend(struct device *d
 
 	if (netif_running(ndev)) {
 
-		local_irq_save(flags);
+		local_irq_save_nort(flags);
 		lock_tx_qs(priv);
 
 		gfar_halt_nodisable(priv);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1502 @ static int gfar_suspend(struct device *d
 		gfar_write(&regs->maccfg1, tempval);
 
 		unlock_tx_qs(priv);
-		local_irq_restore(flags);
+		local_irq_restore_nort(flags);
 
 		disable_napi(priv);
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1544 @ static int gfar_resume(struct device *de
 	/* Disable Magic Packet mode, in case something
 	 * else woke us up.
 	 */
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	lock_tx_qs(priv);
 
 	tempval = gfar_read(&regs->maccfg2);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1554 @ static int gfar_resume(struct device *de
 	gfar_start(priv);
 
 	unlock_tx_qs(priv);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
 
 	netif_device_attach(ndev);
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:3310 @ static irqreturn_t gfar_error(int irq, v
 			dev->stats.tx_dropped++;
 			atomic64_inc(&priv->extra_stats.tx_underrun);
 
-			local_irq_save(flags);
+			local_irq_save_nort(flags);
 			lock_tx_qs(priv);
 
 			/* Reactivate the Tx Queues */
 			gfar_write(&regs->tstat, gfargrp->tstat);
 
 			unlock_tx_qs(priv);
-			local_irq_restore(flags);
+			local_irq_restore_nort(flags);
 		}
 		netif_dbg(priv, tx_err, dev, "Transmit Error\n");
 	}
Index: linux-3.18.13-rt10-r7s4/drivers/net/ethernet/neterion/s2io.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/net/ethernet/neterion/s2io.c
+++ linux-3.18.13-rt10-r7s4/drivers/net/ethernet/neterion/s2io.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:4087 @ static netdev_tx_t s2io_xmit(struct sk_b
 			[skb->priority & (MAX_TX_FIFOS - 1)];
 	fifo = &mac_control->fifos[queue];
 
-	if (do_spin_lock)
-		spin_lock_irqsave(&fifo->tx_lock, flags);
-	else {
-		if (unlikely(!spin_trylock_irqsave(&fifo->tx_lock, flags)))
-			return NETDEV_TX_LOCKED;
-	}
+	spin_lock_irqsave(&fifo->tx_lock, flags);
 
 	if (sp->config.multiq) {
 		if (__netif_subqueue_stopped(dev, fifo->fifo_no)) {
Index: linux-3.18.13-rt10-r7s4/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ linux-3.18.13-rt10-r7s4/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:2140 @ static int pch_gbe_xmit_frame(struct sk_
 	struct pch_gbe_tx_ring *tx_ring = adapter->tx_ring;
 	unsigned long flags;
 
-	if (!spin_trylock_irqsave(&tx_ring->tx_lock, flags)) {
-		/* Collision - tell upper layer to requeue */
-		return NETDEV_TX_LOCKED;
-	}
+	spin_lock_irqsave(&tx_ring->tx_lock, flags);
+
 	if (unlikely(!PCH_GBE_DESC_UNUSED(tx_ring))) {
 		netif_stop_queue(netdev);
 		spin_unlock_irqrestore(&tx_ring->tx_lock, flags);
Index: linux-3.18.13-rt10-r7s4/drivers/net/ethernet/realtek/8139too.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/net/ethernet/realtek/8139too.c
+++ linux-3.18.13-rt10-r7s4/drivers/net/ethernet/realtek/8139too.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:2218 @ static void rtl8139_poll_controller(stru
 	struct rtl8139_private *tp = netdev_priv(dev);
 	const int irq = tp->pci_dev->irq;
 
-	disable_irq(irq);
+	disable_irq_nosync(irq);
 	rtl8139_interrupt(irq, dev);
 	enable_irq(irq);
 }
Index: linux-3.18.13-rt10-r7s4/drivers/net/ethernet/tehuti/tehuti.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/net/ethernet/tehuti/tehuti.c
+++ linux-3.18.13-rt10-r7s4/drivers/net/ethernet/tehuti/tehuti.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1632 @ static netdev_tx_t bdx_tx_transmit(struc
 	unsigned long flags;
 
 	ENTER;
-	local_irq_save(flags);
-	if (!spin_trylock(&priv->tx_lock)) {
-		local_irq_restore(flags);
-		DBG("%s[%s]: TX locked, returning NETDEV_TX_LOCKED\n",
-		    BDX_DRV_NAME, ndev->name);
-		return NETDEV_TX_LOCKED;
-	}
+
+	spin_lock_irqsave(&priv->tx_lock, flags);
 
 	/* build tx descriptor */
 	BDX_ASSERT(f->m.wptr >= f->m.memsz);	/* started with valid wptr */
Index: linux-3.18.13-rt10-r7s4/drivers/net/rionet.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/net/rionet.c
+++ linux-3.18.13-rt10-r7s4/drivers/net/rionet.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:177 @ static int rionet_start_xmit(struct sk_b
 	unsigned long flags;
 	int add_num = 1;
 
-	local_irq_save(flags);
-	if (!spin_trylock(&rnet->tx_lock)) {
-		local_irq_restore(flags);
-		return NETDEV_TX_LOCKED;
-	}
+	spin_lock_irqsave(&rnet->tx_lock, flags);
 
 	if (is_multicast_ether_addr(eth->h_dest))
 		add_num = nets[rnet->mport->id].nact;
Index: linux-3.18.13-rt10-r7s4/drivers/net/wireless/orinoco/orinoco_usb.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/net/wireless/orinoco/orinoco_usb.c
+++ linux-3.18.13-rt10-r7s4/drivers/net/wireless/orinoco/orinoco_usb.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:702 @ static void ezusb_req_ctx_wait(struct ez
 			while (!ctx->done.done && msecs--)
 				udelay(1000);
 		} else {
-			wait_event_interruptible(ctx->done.wait,
+			swait_event_interruptible(ctx->done.wait,
 						 ctx->done.done);
 		}
 		break;
Index: linux-3.18.13-rt10-r7s4/drivers/pci/access.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/pci/access.c
+++ linux-3.18.13-rt10-r7s4/drivers/pci/access.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:437 @ void pci_cfg_access_unlock(struct pci_de
 	WARN_ON(!dev->block_cfg_access);
 
 	dev->block_cfg_access = 0;
-	wake_up_all(&pci_cfg_wait);
+	wake_up_all_locked(&pci_cfg_wait);
 	raw_spin_unlock_irqrestore(&pci_lock, flags);
 }
 EXPORT_SYMBOL_GPL(pci_cfg_access_unlock);
Index: linux-3.18.13-rt10-r7s4/drivers/scsi/fcoe/fcoe.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/scsi/fcoe/fcoe.c
+++ linux-3.18.13-rt10-r7s4/drivers/scsi/fcoe/fcoe.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1289 @ static void fcoe_percpu_thread_destroy(u
 	struct sk_buff *skb;
 #ifdef CONFIG_SMP
 	struct fcoe_percpu_s *p0;
-	unsigned targ_cpu = get_cpu();
+	unsigned targ_cpu = get_cpu_light();
 #endif /* CONFIG_SMP */
 
 	FCOE_DBG("Destroying receive thread for CPU %d\n", cpu);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1345 @ static void fcoe_percpu_thread_destroy(u
 			kfree_skb(skb);
 		spin_unlock_bh(&p->fcoe_rx_list.lock);
 	}
-	put_cpu();
+	put_cpu_light();
 #else
 	/*
 	 * This a non-SMP scenario where the singular Rx thread is
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1569 @ err2:
 static int fcoe_alloc_paged_crc_eof(struct sk_buff *skb, int tlen)
 {
 	struct fcoe_percpu_s *fps;
-	int rc;
+	int rc, cpu = get_cpu_light();
 
-	fps = &get_cpu_var(fcoe_percpu);
+	fps = &per_cpu(fcoe_percpu, cpu);
 	rc = fcoe_get_paged_crc_eof(skb, tlen, fps);
-	put_cpu_var(fcoe_percpu);
+	put_cpu_light();
 
 	return rc;
 }
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1771 @ static inline int fcoe_filter_frames(str
 		return 0;
 	}
 
-	stats = per_cpu_ptr(lport->stats, get_cpu());
+	stats = per_cpu_ptr(lport->stats, get_cpu_light());
 	stats->InvalidCRCCount++;
 	if (stats->InvalidCRCCount < 5)
 		printk(KERN_WARNING "fcoe: dropping frame with CRC error\n");
-	put_cpu();
+	put_cpu_light();
 	return -EINVAL;
 }
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1851 @ static void fcoe_recv_frame(struct sk_bu
 		goto drop;
 
 	if (!fcoe_filter_frames(lport, fp)) {
-		put_cpu();
+		put_cpu_light();
 		fc_exch_recv(lport, fp);
 		return;
 	}
 drop:
 	stats->ErrorFrames++;
-	put_cpu();
+	put_cpu_light();
 	kfree_skb(skb);
 }
 
Index: linux-3.18.13-rt10-r7s4/drivers/scsi/fcoe/fcoe_ctlr.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/scsi/fcoe/fcoe_ctlr.c
+++ linux-3.18.13-rt10-r7s4/drivers/scsi/fcoe/fcoe_ctlr.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:834 @ static unsigned long fcoe_ctlr_age_fcfs(
 
 	INIT_LIST_HEAD(&del_list);
 
-	stats = per_cpu_ptr(fip->lp->stats, get_cpu());
+	stats = per_cpu_ptr(fip->lp->stats, get_cpu_light());
 
 	list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
 		deadline = fcf->time + fcf->fka_period + fcf->fka_period / 2;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:870 @ static unsigned long fcoe_ctlr_age_fcfs(
 				sel_time = fcf->time;
 		}
 	}
-	put_cpu();
+	put_cpu_light();
 
 	list_for_each_entry_safe(fcf, next, &del_list, list) {
 		/* Removes fcf from current list */
Index: linux-3.18.13-rt10-r7s4/drivers/scsi/libfc/fc_exch.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/scsi/libfc/fc_exch.c
+++ linux-3.18.13-rt10-r7s4/drivers/scsi/libfc/fc_exch.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:819 @ static struct fc_exch *fc_exch_em_alloc(
 	}
 	memset(ep, 0, sizeof(*ep));
 
-	cpu = get_cpu();
+	cpu = get_cpu_light();
 	pool = per_cpu_ptr(mp->pool, cpu);
 	spin_lock_bh(&pool->lock);
-	put_cpu();
+	put_cpu_light();
 
 	/* peek cache of free slot */
 	if (pool->left != FC_XID_UNKNOWN) {
Index: linux-3.18.13-rt10-r7s4/drivers/scsi/libsas/sas_ata.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/scsi/libsas/sas_ata.c
+++ linux-3.18.13-rt10-r7s4/drivers/scsi/libsas/sas_ata.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:194 @ static unsigned int sas_ata_qc_issue(str
 	/* TODO: audit callers to ensure they are ready for qc_issue to
 	 * unconditionally re-enable interrupts
 	 */
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	spin_unlock(ap->lock);
 
 	/* If the device fell off, no sense in issuing commands */
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:264 @ static unsigned int sas_ata_qc_issue(str
 
  out:
 	spin_lock(ap->lock);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
 	return ret;
 }
 
Index: linux-3.18.13-rt10-r7s4/drivers/scsi/qla2xxx/qla_inline.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/scsi/qla2xxx/qla_inline.h
+++ linux-3.18.13-rt10-r7s4/drivers/scsi/qla2xxx/qla_inline.h
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:62 @ qla2x00_poll(struct rsp_que *rsp)
 {
 	unsigned long flags;
 	struct qla_hw_data *ha = rsp->hw;
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	if (IS_P3P_TYPE(ha))
 		qla82xx_poll(0, rsp);
 	else
 		ha->isp_ops->intr_handler(0, rsp);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
 }
 
 static inline uint8_t *
Index: linux-3.18.13-rt10-r7s4/drivers/thermal/x86_pkg_temp_thermal.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/thermal/x86_pkg_temp_thermal.c
+++ linux-3.18.13-rt10-r7s4/drivers/thermal/x86_pkg_temp_thermal.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:32 @
 #include <linux/pm.h>
 #include <linux/thermal.h>
 #include <linux/debugfs.h>
+#include <linux/work-simple.h>
 #include <asm/cpu_device_id.h>
 #include <asm/mce.h>
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:356 @ static void pkg_temp_thermal_threshold_w
 	}
 }
 
-static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
+static void platform_thermal_notify_work(struct swork_event *event)
 {
 	unsigned long flags;
 	int cpu = smp_processor_id();
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:373 @ static int pkg_temp_thermal_platform_the
 			pkg_work_scheduled[phy_id]) {
 		disable_pkg_thres_interrupt();
 		spin_unlock_irqrestore(&pkg_work_lock, flags);
-		return -EINVAL;
+		return;
 	}
 	pkg_work_scheduled[phy_id] = 1;
 	spin_unlock_irqrestore(&pkg_work_lock, flags);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:382 @ static int pkg_temp_thermal_platform_the
 	schedule_delayed_work_on(cpu,
 				&per_cpu(pkg_temp_thermal_threshold_work, cpu),
 				msecs_to_jiffies(notify_delay_ms));
+}
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+static struct swork_event notify_work;
+
+static int thermal_notify_work_init(void)
+{
+	int err;
+
+	err = swork_get();
+	if (err)
+		return err;
+
+	INIT_SWORK(&notify_work, platform_thermal_notify_work);
 	return 0;
 }
 
+static void thermal_notify_work_cleanup(void)
+{
+	swork_put();
+}
+
+static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
+{
+	swork_queue(&notify_work);
+	return 0;
+}
+
+#else  /* !CONFIG_PREEMPT_RT_FULL */
+
+static int thermal_notify_work_init(void) { return 0; }
+
+static void thermal_notify_work_cleanup(void) { }
+
+static int pkg_temp_thermal_platform_thermal_notify(__u64 msr_val)
+{
+	platform_thermal_notify_work(NULL);
+
+	return 0;
+}
+#endif /* CONFIG_PREEMPT_RT_FULL */
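+/*
+ * Why defer (a sketch of the reasoning, not from the original changelog):
+ * the platform thermal notify callback is invoked from the thermal
+ * interrupt, but platform_thermal_notify_work() takes pkg_work_lock, a
+ * spinlock that becomes a sleeping lock on PREEMPT_RT_FULL. Routing the
+ * notification through the simple-work (swork) queue runs it from task
+ * context on -rt, while the non-rt build keeps calling the worker directly.
+ */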
+
 static int find_siblings_cpu(int cpu)
 {
 	int i;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:627 @ static int __init pkg_temp_thermal_init(
 	if (!x86_match_cpu(pkg_temp_thermal_ids))
 		return -ENODEV;
 
+	if (thermal_notify_work_init())
+		return -ENODEV;
+
 	spin_lock_init(&pkg_work_lock);
 	platform_thermal_package_notify =
 			pkg_temp_thermal_platform_thermal_notify;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:654 @ err_ret:
 	kfree(pkg_work_scheduled);
 	platform_thermal_package_notify = NULL;
 	platform_thermal_package_rate_control = NULL;
-
+	thermal_notify_work_cleanup();
 	return -ENODEV;
 }
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:679 @ static void __exit pkg_temp_thermal_exit
 	mutex_unlock(&phy_dev_list_mutex);
 	platform_thermal_package_notify = NULL;
 	platform_thermal_package_rate_control = NULL;
+	thermal_notify_work_cleanup();
 	for_each_online_cpu(i)
 		cancel_delayed_work_sync(
 			&per_cpu(pkg_temp_thermal_threshold_work, i));
Index: linux-3.18.13-rt10-r7s4/drivers/tty/serial/8250/8250_core.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/tty/serial/8250/8250_core.c
+++ linux-3.18.13-rt10-r7s4/drivers/tty/serial/8250/8250_core.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:40 @
 #include <linux/nmi.h>
 #include <linux/mutex.h>
 #include <linux/slab.h>
+#include <linux/kdb.h>
 #include <linux/uaccess.h>
 #include <linux/pm_runtime.h>
 #ifdef CONFIG_SPARC
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:85 @ static unsigned int skip_txen_test; /* f
 #define DEBUG_INTR(fmt...)	do { } while (0)
 #endif
 
-#define PASS_LIMIT	512
+/*
+ * On -rt we can have more delays, and legitimately
+ * so - don't drop work spuriously and spam the
+ * syslog:
+ */
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define PASS_LIMIT	1000000
+#else
+# define PASS_LIMIT	512
+#endif
 
 #define BOTH_EMPTY 	(UART_LSR_TEMT | UART_LSR_THRE)
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:3212 @ serial8250_console_write(struct console
 
 	serial8250_rpm_get(up);
 
-	if (port->sysrq || oops_in_progress)
+	if (port->sysrq || oops_in_progress || in_kdb_printk())
 		locked = spin_trylock_irqsave(&port->lock, flags);
 	else
 		spin_lock_irqsave(&port->lock, flags);
Index: linux-3.18.13-rt10-r7s4/drivers/tty/serial/amba-pl011.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/tty/serial/amba-pl011.c
+++ linux-3.18.13-rt10-r7s4/drivers/tty/serial/amba-pl011.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1939 @ pl011_console_write(struct console *co,
 
 	clk_enable(uap->clk);
 
-	local_irq_save(flags);
+	/*
+	 * local_irq_save(flags);
+	 *
+	 * This local_irq_save() is nonsense. If we come in via sysrq
+	 * handling then interrupts are already disabled. Aside from
+	 * that, the port.sysrq check is racy on SMP regardless.
+	 */
 	if (uap->port.sysrq)
 		locked = 0;
 	else if (oops_in_progress)
-		locked = spin_trylock(&uap->port.lock);
+		locked = spin_trylock_irqsave(&uap->port.lock, flags);
 	else
-		spin_lock(&uap->port.lock);
+		spin_lock_irqsave(&uap->port.lock, flags);
 
 	/*
 	 *	First save the CR then disable the interrupts
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1973 @ pl011_console_write(struct console *co,
 	writew(old_cr, uap->port.membase + UART011_CR);
 
 	if (locked)
-		spin_unlock(&uap->port.lock);
-	local_irq_restore(flags);
+		spin_unlock_irqrestore(&uap->port.lock, flags);
 
 	clk_disable(uap->clk);
 }
Index: linux-3.18.13-rt10-r7s4/drivers/tty/serial/omap-serial.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/tty/serial/omap-serial.c
+++ linux-3.18.13-rt10-r7s4/drivers/tty/serial/omap-serial.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1273 @ serial_omap_console_write(struct console
 
 	pm_runtime_get_sync(up->dev);
 
-	local_irq_save(flags);
-	if (up->port.sysrq)
-		locked = 0;
-	else if (oops_in_progress)
-		locked = spin_trylock(&up->port.lock);
+	if (up->port.sysrq || oops_in_progress)
+		locked = spin_trylock_irqsave(&up->port.lock, flags);
 	else
-		spin_lock(&up->port.lock);
+		spin_lock_irqsave(&up->port.lock, flags);
 
 	/*
 	 * First save the IER then disable the interrupts
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1305 @ serial_omap_console_write(struct console
 	pm_runtime_mark_last_busy(up->dev);
 	pm_runtime_put_autosuspend(up->dev);
 	if (locked)
-		spin_unlock(&up->port.lock);
-	local_irq_restore(flags);
+		spin_unlock_irqrestore(&up->port.lock, flags);
 }
 
 static int __init
Index: linux-3.18.13-rt10-r7s4/drivers/usb/core/hcd.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/usb/core/hcd.c
+++ linux-3.18.13-rt10-r7s4/drivers/usb/core/hcd.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1684 @ static void __usb_hcd_giveback_urb(struc
 	 * and no one may trigger the above deadlock situation when
 	 * running complete() in tasklet.
 	 */
-	local_irq_save(flags);
+	local_irq_save_nort(flags);
 	urb->complete(urb);
-	local_irq_restore(flags);
+	local_irq_restore_nort(flags);
 
 	usb_anchor_resume_wakeups(anchor);
 	atomic_dec(&urb->use_count);
Index: linux-3.18.13-rt10-r7s4/drivers/usb/gadget/function/f_fs.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/usb/gadget/function/f_fs.c
+++ linux-3.18.13-rt10-r7s4/drivers/usb/gadget/function/f_fs.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1431 @ static void ffs_data_put(struct ffs_data
 		pr_info("%s(): freeing\n", __func__);
 		ffs_data_clear(ffs);
 		BUG_ON(waitqueue_active(&ffs->ev.waitq) ||
-		       waitqueue_active(&ffs->ep0req_completion.wait));
+		       swaitqueue_active(&ffs->ep0req_completion.wait));
 		kfree(ffs->dev_name);
 		kfree(ffs);
 	}
Index: linux-3.18.13-rt10-r7s4/drivers/usb/gadget/legacy/inode.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/drivers/usb/gadget/legacy/inode.c
+++ linux-3.18.13-rt10-r7s4/drivers/usb/gadget/legacy/inode.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:342 @ ep_io (struct ep_data *epdata, void *buf
 	spin_unlock_irq (&epdata->dev->lock);
 
 	if (likely (value == 0)) {
-		value = wait_event_interruptible (done.wait, done.done);
+		value = swait_event_interruptible (done.wait, done.done);
 		if (value != 0) {
 			spin_lock_irq (&epdata->dev->lock);
 			if (likely (epdata->ep != NULL)) {
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:351 @ ep_io (struct ep_data *epdata, void *buf
 				usb_ep_dequeue (epdata->ep, epdata->req);
 				spin_unlock_irq (&epdata->dev->lock);
 
-				wait_event (done.wait, done.done);
+				swait_event (done.wait, done.done);
 				if (epdata->status == -ECONNRESET)
 					epdata->status = -EINTR;
 			} else {
Index: linux-3.18.13-rt10-r7s4/fs/aio.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/fs/aio.c
+++ linux-3.18.13-rt10-r7s4/fs/aio.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:43 @
 #include <linux/ramfs.h>
 #include <linux/percpu-refcount.h>
 #include <linux/mount.h>
+#include <linux/work-simple.h>
 
 #include <asm/kmap_types.h>
 #include <asm/uaccess.h>
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:114 @ struct kioctx {
 	struct page		**ring_pages;
 	long			nr_pages;
 
-	struct work_struct	free_work;
+	struct swork_event	free_work;
 
 	/*
 	 * signals when all in-flight requests are done
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:230 @ static int __init aio_setup(void)
 		.mount		= aio_mount,
 		.kill_sb	= kill_anon_super,
 	};
+	BUG_ON(swork_get());
 	aio_mnt = kern_mount(&aio_fs);
 	if (IS_ERR(aio_mnt))
 		panic("Failed to create aio fs mount.");
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:510 @ static int kiocb_cancel(struct kiocb *ki
 	return cancel(kiocb);
 }
 
-static void free_ioctx(struct work_struct *work)
+static void free_ioctx(struct swork_event *sev)
 {
-	struct kioctx *ctx = container_of(work, struct kioctx, free_work);
+	struct kioctx *ctx = container_of(sev, struct kioctx, free_work);
 
 	pr_debug("freeing %p\n", ctx);
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:531 @ static void free_ioctx_reqs(struct percp
 	if (ctx->requests_done)
 		complete(ctx->requests_done);
 
-	INIT_WORK(&ctx->free_work, free_ioctx);
-	schedule_work(&ctx->free_work);
+	INIT_SWORK(&ctx->free_work, free_ioctx);
+	swork_queue(&ctx->free_work);
 }
 
 /*
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:540 @ static void free_ioctx_reqs(struct percp
  * and ctx->users has dropped to 0, so we know no more kiocbs can be submitted -
  * now it's safe to cancel any that need to be.
  */
-static void free_ioctx_users(struct percpu_ref *ref)
+static void free_ioctx_users_work(struct swork_event *sev)
 {
-	struct kioctx *ctx = container_of(ref, struct kioctx, users);
+	struct kioctx *ctx = container_of(sev, struct kioctx, free_work);
 	struct kiocb *req;
 
 	spin_lock_irq(&ctx->ctx_lock);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:561 @ static void free_ioctx_users(struct perc
 	percpu_ref_put(&ctx->reqs);
 }
 
+static void free_ioctx_users(struct percpu_ref *ref)
+{
+	struct kioctx *ctx = container_of(ref, struct kioctx, users);
+
+	INIT_SWORK(&ctx->free_work, free_ioctx_users_work);
+	swork_queue(&ctx->free_work);
+}
+
 static int ioctx_add_table(struct kioctx *ctx, struct mm_struct *mm)
 {
 	unsigned i, new_nr;
Index: linux-3.18.13-rt10-r7s4/fs/autofs4/autofs_i.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/fs/autofs4/autofs_i.h
+++ linux-3.18.13-rt10-r7s4/fs/autofs4/autofs_i.h
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:37 @
 #include <linux/sched.h>
 #include <linux/mount.h>
 #include <linux/namei.h>
+#include <linux/delay.h>
 #include <asm/current.h>
 #include <asm/uaccess.h>
 
Index: linux-3.18.13-rt10-r7s4/fs/autofs4/expire.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/fs/autofs4/expire.c
+++ linux-3.18.13-rt10-r7s4/fs/autofs4/expire.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:154 @ again:
 			parent = p->d_parent;
 			if (!spin_trylock(&parent->d_lock)) {
 				spin_unlock(&p->d_lock);
-				cpu_relax();
+				cpu_chill();
 				goto relock;
 			}
 			spin_unlock(&p->d_lock);
Index: linux-3.18.13-rt10-r7s4/fs/buffer.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/fs/buffer.c
+++ linux-3.18.13-rt10-r7s4/fs/buffer.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:304 @ static void end_buffer_async_read(struct
 	 * decide that the page is now completely done.
 	 */
 	first = page_buffers(page);
-	local_irq_save(flags);
-	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+	flags = bh_uptodate_lock_irqsave(first);
 	clear_buffer_async_read(bh);
 	unlock_buffer(bh);
 	tmp = bh;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:317 @ static void end_buffer_async_read(struct
 		}
 		tmp = tmp->b_this_page;
 	} while (tmp != bh);
-	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-	local_irq_restore(flags);
+	bh_uptodate_unlock_irqrestore(first, flags);
 
 	/*
 	 * If none of the buffers had errors and they are all
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:329 @ static void end_buffer_async_read(struct
 	return;
 
 still_busy:
-	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-	local_irq_restore(flags);
-	return;
+	bh_uptodate_unlock_irqrestore(first, flags);
 }
 
 /*
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:357 @ void end_buffer_async_write(struct buffe
 	}
 
 	first = page_buffers(page);
-	local_irq_save(flags);
-	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+	flags = bh_uptodate_lock_irqsave(first);
 
 	clear_buffer_async_write(bh);
 	unlock_buffer(bh);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:369 @ void end_buffer_async_write(struct buffe
 		}
 		tmp = tmp->b_this_page;
 	}
-	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-	local_irq_restore(flags);
+	bh_uptodate_unlock_irqrestore(first, flags);
 	end_page_writeback(page);
 	return;
 
 still_busy:
-	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-	local_irq_restore(flags);
-	return;
+	bh_uptodate_unlock_irqrestore(first, flags);
 }
 EXPORT_SYMBOL(end_buffer_async_write);
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:3320 @ struct buffer_head *alloc_buffer_head(gf
 	struct buffer_head *ret = kmem_cache_zalloc(bh_cachep, gfp_flags);
 	if (ret) {
 		INIT_LIST_HEAD(&ret->b_assoc_buffers);
+		buffer_head_init_locks(ret);
 		preempt_disable();
 		__this_cpu_inc(bh_accounting.nr);
 		recalc_bh_state();
Index: linux-3.18.13-rt10-r7s4/fs/dcache.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/fs/dcache.c
+++ linux-3.18.13-rt10-r7s4/fs/dcache.c
@ linux-3.18.13-rt10-r7s4/fs/dcache.c:22 @
 #include <linux/mm.h>
 #include <linux/fs.h>
 #include <linux/fsnotify.h>
+#include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/init.h>
 #include <linux/hash.h>
@ linux-3.18.13-rt10-r7s4/fs/dcache.c:556 @ static struct dentry *dentry_kill(struct
 
 failed:
 	spin_unlock(&dentry->d_lock);
-	cpu_relax();
+	cpu_chill();
 	return dentry; /* try again with same dentry */
 }
 
@ linux-3.18.13-rt10-r7s4/fs/dcache.c:2289 @ again:
 	if (dentry->d_lockref.count == 1) {
 		if (!spin_trylock(&inode->i_lock)) {
 			spin_unlock(&dentry->d_lock);
-			cpu_relax();
+			cpu_chill();
 			goto again;
 		}
 		dentry->d_flags &= ~DCACHE_CANT_MOUNT;
Index: linux-3.18.13-rt10-r7s4/fs/eventpoll.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/fs/eventpoll.c
+++ linux-3.18.13-rt10-r7s4/fs/eventpoll.c
@ linux-3.18.13-rt10-r7s4/fs/eventpoll.c:508 @ static int ep_poll_wakeup_proc(void *pri
  */
 static void ep_poll_safewake(wait_queue_head_t *wq)
 {
-	int this_cpu = get_cpu();
+	int this_cpu = get_cpu_light();
 
 	ep_call_nested(&poll_safewake_ncalls, EP_MAX_NESTS,
 		       ep_poll_wakeup_proc, NULL, wq, (void *) (long) this_cpu);
 
-	put_cpu();
+	put_cpu_light();
 }
 
 static void ep_remove_wait_queue(struct eppoll_entry *pwq)
Index: linux-3.18.13-rt10-r7s4/fs/exec.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/fs/exec.c
+++ linux-3.18.13-rt10-r7s4/fs/exec.c
@ linux-3.18.13-rt10-r7s4/fs/exec.c:844 @ static int exec_mmap(struct mm_struct *m
 		}
 	}
 	task_lock(tsk);
+	preempt_disable_rt();
 	active_mm = tsk->active_mm;
 	tsk->mm = mm;
 	tsk->active_mm = mm;
 	activate_mm(active_mm, mm);
 	tsk->mm->vmacache_seqnum = 0;
 	vmacache_flush(tsk);
+	preempt_enable_rt();
 	task_unlock(tsk);
 	if (old_mm) {
 		up_read(&old_mm->mmap_sem);
Index: linux-3.18.13-rt10-r7s4/fs/jbd/checkpoint.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/fs/jbd/checkpoint.c
+++ linux-3.18.13-rt10-r7s4/fs/jbd/checkpoint.c
@ linux-3.18.13-rt10-r7s4/fs/jbd/checkpoint.c:132 @ void __log_wait_for_space(journal_t *jou
 		if (journal->j_flags & JFS_ABORT)
 			return;
 		spin_unlock(&journal->j_state_lock);
+		if (current->plug)
+			io_schedule();
 		mutex_lock(&journal->j_checkpoint_mutex);
 
 		/*
Index: linux-3.18.13-rt10-r7s4/fs/jbd2/checkpoint.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/fs/jbd2/checkpoint.c
+++ linux-3.18.13-rt10-r7s4/fs/jbd2/checkpoint.c
@ linux-3.18.13-rt10-r7s4/fs/jbd2/checkpoint.c:119 @ void __jbd2_log_wait_for_space(journal_t
 	nblocks = jbd2_space_needed(journal);
 	while (jbd2_log_space_left(journal) < nblocks) {
 		write_unlock(&journal->j_state_lock);
+		if (current->plug)
+			io_schedule();
 		mutex_lock(&journal->j_checkpoint_mutex);
 
 		/*
Index: linux-3.18.13-rt10-r7s4/fs/namespace.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/fs/namespace.c
+++ linux-3.18.13-rt10-r7s4/fs/namespace.c
@ linux-3.18.13-rt10-r7s4/fs/namespace.c:17 @
 #include <linux/mnt_namespace.h>
 #include <linux/user_namespace.h>
 #include <linux/namei.h>
+#include <linux/delay.h>
 #include <linux/security.h>
 #include <linux/idr.h>
 #include <linux/init.h>		/* init_rootfs */
@ linux-3.18.13-rt10-r7s4/fs/namespace.c:348 @ int __mnt_want_write(struct vfsmount *m)
 	 * incremented count after it has set MNT_WRITE_HOLD.
 	 */
 	smp_mb();
-	while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD)
-		cpu_relax();
+	while (ACCESS_ONCE(mnt->mnt.mnt_flags) & MNT_WRITE_HOLD) {
+		preempt_enable();
+		cpu_chill();
+		preempt_disable();
+	}
 	/*
 	 * After the slowpath clears MNT_WRITE_HOLD, mnt_is_readonly will
 	 * be set to match its requirements. So we must not load that until
Index: linux-3.18.13-rt10-r7s4/fs/ntfs/aops.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/fs/ntfs/aops.c
+++ linux-3.18.13-rt10-r7s4/fs/ntfs/aops.c
@ linux-3.18.13-rt10-r7s4/fs/ntfs/aops.c:110 @ static void ntfs_end_buffer_async_read(s
 				"0x%llx.", (unsigned long long)bh->b_blocknr);
 	}
 	first = page_buffers(page);
-	local_irq_save(flags);
-	bit_spin_lock(BH_Uptodate_Lock, &first->b_state);
+	flags = bh_uptodate_lock_irqsave(first);
 	clear_buffer_async_read(bh);
 	unlock_buffer(bh);
 	tmp = bh;
@ linux-3.18.13-rt10-r7s4/fs/ntfs/aops.c:125 @ static void ntfs_end_buffer_async_read(s
 		}
 		tmp = tmp->b_this_page;
 	} while (tmp != bh);
-	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-	local_irq_restore(flags);
+	bh_uptodate_unlock_irqrestore(first, flags);
 	/*
 	 * If none of the buffers had errors then we can set the page uptodate,
 	 * but we first have to perform the post read mst fixups, if the
@ linux-3.18.13-rt10-r7s4/fs/ntfs/aops.c:146 @ static void ntfs_end_buffer_async_read(s
 		recs = PAGE_CACHE_SIZE / rec_size;
 		/* Should have been verified before we got here... */
 		BUG_ON(!recs);
-		local_irq_save(flags);
+		local_irq_save_nort(flags);
 		kaddr = kmap_atomic(page);
 		for (i = 0; i < recs; i++)
 			post_read_mst_fixup((NTFS_RECORD*)(kaddr +
 					i * rec_size), rec_size);
 		kunmap_atomic(kaddr);
-		local_irq_restore(flags);
+		local_irq_restore_nort(flags);
 		flush_dcache_page(page);
 		if (likely(page_uptodate && !PageError(page)))
 			SetPageUptodate(page);
@ linux-3.18.13-rt10-r7s4/fs/ntfs/aops.c:160 @ static void ntfs_end_buffer_async_read(s
 	unlock_page(page);
 	return;
 still_busy:
-	bit_spin_unlock(BH_Uptodate_Lock, &first->b_state);
-	local_irq_restore(flags);
-	return;
+	bh_uptodate_unlock_irqrestore(first, flags);
 }
 
 /**
Index: linux-3.18.13-rt10-r7s4/fs/timerfd.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/fs/timerfd.c
+++ linux-3.18.13-rt10-r7s4/fs/timerfd.c
@ linux-3.18.13-rt10-r7s4/fs/timerfd.c:452 @ static int do_timerfd_settime(int ufd, i
 				break;
 		}
 		spin_unlock_irq(&ctx->wqh.lock);
-		cpu_relax();
+		if (isalarm(ctx))
+			hrtimer_wait_for_timer(&ctx->t.alarm.timer);
+		else
+			hrtimer_wait_for_timer(&ctx->t.tmr);
 	}
 
 	/*
Index: linux-3.18.13-rt10-r7s4/fs/xfs/xfs_linux.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/fs/xfs/xfs_linux.h
+++ linux-3.18.13-rt10-r7s4/fs/xfs/xfs_linux.h
@ linux-3.18.13-rt10-r7s4/fs/xfs/xfs_linux.h:122 @ typedef __uint64_t __psunsigned_t;
 /*
  * Feature macros (disable/enable)
  */
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) && !defined(CONFIG_PREEMPT_RT_FULL)
 #define HAVE_PERCPU_SB	/* per cpu superblock counters are a 2.6 feature */
 #else
 #undef  HAVE_PERCPU_SB	/* per cpu superblock counters are a 2.6 feature */
Index: linux-3.18.13-rt10-r7s4/include/acpi/platform/aclinux.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/acpi/platform/aclinux.h
+++ linux-3.18.13-rt10-r7s4/include/acpi/platform/aclinux.h
@ linux-3.18.13-rt10-r7s4/include/acpi/platform/aclinux.h:126 @
 
 #define acpi_cache_t                        struct kmem_cache
 #define acpi_spinlock                       spinlock_t *
+#define acpi_raw_spinlock		raw_spinlock_t *
 #define acpi_cpu_flags                      unsigned long
 
 /* Use native linux version of acpi_os_allocate_zeroed */
@ linux-3.18.13-rt10-r7s4/include/acpi/platform/aclinux.h:145 @
 #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_get_thread_id
 #define ACPI_USE_ALTERNATE_PROTOTYPE_acpi_os_create_lock
 
+#define acpi_os_create_raw_lock(__handle)			\
+({								\
+	 raw_spinlock_t *lock = ACPI_ALLOCATE(sizeof(*lock));	\
+								\
+	 if (lock) {						\
+		*(__handle) = lock;				\
+		raw_spin_lock_init(*(__handle));		\
+	 }							\
+	 lock ? AE_OK : AE_NO_MEMORY;				\
+ })
+
+#define acpi_os_delete_raw_lock(__handle)	kfree(__handle)
+
+
 /*
  * OSL interfaces used by debugger/disassembler
  */
Index: linux-3.18.13-rt10-r7s4/include/asm-generic/bug.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/asm-generic/bug.h
+++ linux-3.18.13-rt10-r7s4/include/asm-generic/bug.h
@ linux-3.18.13-rt10-r7s4/include/asm-generic/bug.h:209 @ extern void warn_slowpath_null(const cha
 # define WARN_ON_SMP(x)			({0;})
 #endif
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define BUG_ON_RT(c)			BUG_ON(c)
+# define BUG_ON_NONRT(c)		do { } while (0)
+# define WARN_ON_RT(condition)		WARN_ON(condition)
+# define WARN_ON_NONRT(condition)	do { } while (0)
+# define WARN_ON_ONCE_NONRT(condition)	do { } while (0)
+#else
+# define BUG_ON_RT(c)			do { } while (0)
+# define BUG_ON_NONRT(c)		BUG_ON(c)
+# define WARN_ON_RT(condition)		do { } while (0)
+# define WARN_ON_NONRT(condition)	WARN_ON(condition)
+# define WARN_ON_ONCE_NONRT(condition)	WARN_ON_ONCE(condition)
+#endif
+
 #endif /* __ASSEMBLY__ */
 
 #endif
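
Illustration (not part of the patch): a sketch of how the new _NONRT
variants are meant to be used. The function below is hypothetical; the
point is that an assertion which only holds while softirqs run in
non-preemptible context stays active on !RT and compiles away on RT:

    #include <linux/bug.h>
    #include <linux/hardirq.h>

    /* Hypothetical check: meaningful only where softirq processing is
     * non-preemptible, i.e. on !RT; RT builds compile it out instead
     * of triggering a false positive. */
    static void demo_check_softirq_context(void)
    {
        WARN_ON_NONRT(!in_softirq());
    }
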
Index: linux-3.18.13-rt10-r7s4/include/linux/blk-mq.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/blk-mq.h
+++ linux-3.18.13-rt10-r7s4/include/linux/blk-mq.h
@ linux-3.18.13-rt10-r7s4/include/linux/blk-mq.h:172 @ struct request *blk_mq_tag_to_rq(struct
 
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
 struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
+void __blk_mq_complete_request_remote_work(struct work_struct *work);
 
 void blk_mq_start_request(struct request *rq);
 void blk_mq_end_request(struct request *rq, int error);
Index: linux-3.18.13-rt10-r7s4/include/linux/blkdev.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/blkdev.h
+++ linux-3.18.13-rt10-r7s4/include/linux/blkdev.h
@ linux-3.18.13-rt10-r7s4/include/linux/blkdev.h:104 @ struct request {
 	struct list_head queuelist;
 	union {
 		struct call_single_data csd;
+		struct work_struct work;
 		unsigned long fifo_time;
 	};
 
@ linux-3.18.13-rt10-r7s4/include/linux/blkdev.h:482 @ struct request_queue {
 	struct throtl_data *td;
 #endif
 	struct rcu_head		rcu_head;
-	wait_queue_head_t	mq_freeze_wq;
+	struct swait_head	mq_freeze_wq;
 	struct percpu_ref	mq_usage_counter;
 	struct list_head	all_q_node;
 
Index: linux-3.18.13-rt10-r7s4/include/linux/bottom_half.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/bottom_half.h
+++ linux-3.18.13-rt10-r7s4/include/linux/bottom_half.h
@ linux-3.18.13-rt10-r7s4/include/linux/bottom_half.h:7 @
 #include <linux/preempt.h>
 #include <linux/preempt_mask.h>
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+
+extern void local_bh_disable(void);
+extern void _local_bh_enable(void);
+extern void local_bh_enable(void);
+extern void local_bh_enable_ip(unsigned long ip);
+extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
+extern void __local_bh_enable_ip(unsigned long ip, unsigned int cnt);
+
+#else
+
 #ifdef CONFIG_TRACE_IRQFLAGS
 extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
 #else
@ linux-3.18.13-rt10-r7s4/include/linux/bottom_half.h:45 @ static inline void local_bh_enable(void)
 {
 	__local_bh_enable_ip(_THIS_IP_, SOFTIRQ_DISABLE_OFFSET);
 }
+#endif
 
 #endif /* _LINUX_BH_H */
Index: linux-3.18.13-rt10-r7s4/include/linux/buffer_head.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/buffer_head.h
+++ linux-3.18.13-rt10-r7s4/include/linux/buffer_head.h
@ linux-3.18.13-rt10-r7s4/include/linux/buffer_head.h:78 @ struct buffer_head {
 	struct address_space *b_assoc_map;	/* mapping this buffer is
 						   associated with */
 	atomic_t b_count;		/* users using this buffer_head */
+#ifdef CONFIG_PREEMPT_RT_BASE
+	spinlock_t b_uptodate_lock;
+#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
+	defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
+	spinlock_t b_state_lock;
+	spinlock_t b_journal_head_lock;
+#endif
+#endif
 };
 
+static inline unsigned long bh_uptodate_lock_irqsave(struct buffer_head *bh)
+{
+	unsigned long flags;
+
+#ifndef CONFIG_PREEMPT_RT_BASE
+	local_irq_save(flags);
+	bit_spin_lock(BH_Uptodate_Lock, &bh->b_state);
+#else
+	spin_lock_irqsave(&bh->b_uptodate_lock, flags);
+#endif
+	return flags;
+}
+
+static inline void
+bh_uptodate_unlock_irqrestore(struct buffer_head *bh, unsigned long flags)
+{
+#ifndef CONFIG_PREEMPT_RT_BASE
+	bit_spin_unlock(BH_Uptodate_Lock, &bh->b_state);
+	local_irq_restore(flags);
+#else
+	spin_unlock_irqrestore(&bh->b_uptodate_lock, flags);
+#endif
+}
+
+static inline void buffer_head_init_locks(struct buffer_head *bh)
+{
+#ifdef CONFIG_PREEMPT_RT_BASE
+	spin_lock_init(&bh->b_uptodate_lock);
+#if defined(CONFIG_JBD) || defined(CONFIG_JBD_MODULE) || \
+	defined(CONFIG_JBD2) || defined(CONFIG_JBD2_MODULE)
+	spin_lock_init(&bh->b_state_lock);
+	spin_lock_init(&bh->b_journal_head_lock);
+#endif
+#endif
+}
+
 /*
  * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
  * and buffer_foo() functions.
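
Illustration (not part of the patch): a minimal sketch of the caller
pattern the helper pair enables, mirroring the fs/buffer.c and
fs/ntfs/aops.c hunks above; demo_walk_buffers() is an assumed name:

    #include <linux/buffer_head.h>
    #include <linux/mm.h>

    /* On !RT this is the old local_irq_save() + bit_spin_lock() pair;
     * on RT it is a sleepable spinlock_t, but callers look the same. */
    static void demo_walk_buffers(struct page *page)
    {
        struct buffer_head *first = page_buffers(page);
        struct buffer_head *tmp = first;
        unsigned long flags;

        flags = bh_uptodate_lock_irqsave(first);
        do {
            /* inspect or update per-buffer state here */
            tmp = tmp->b_this_page;
        } while (tmp != first);
        bh_uptodate_unlock_irqrestore(first, flags);
    }
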
Index: linux-3.18.13-rt10-r7s4/include/linux/cgroup.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/cgroup.h
+++ linux-3.18.13-rt10-r7s4/include/linux/cgroup.h
@ linux-3.18.13-rt10-r7s4/include/linux/cgroup.h:25 @
 #include <linux/seq_file.h>
 #include <linux/kernfs.h>
 #include <linux/wait.h>
+#include <linux/work-simple.h>
 
 #ifdef CONFIG_CGROUPS
 
@ linux-3.18.13-rt10-r7s4/include/linux/cgroup.h:95 @ struct cgroup_subsys_state {
 	/* percpu_ref killing and RCU release */
 	struct rcu_head rcu_head;
 	struct work_struct destroy_work;
+	struct swork_event destroy_swork;
 };
 
 /* bits in struct cgroup_subsys_state flags field */
Index: linux-3.18.13-rt10-r7s4/include/linux/completion.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/completion.h
+++ linux-3.18.13-rt10-r7s4/include/linux/completion.h
@ linux-3.18.13-rt10-r7s4/include/linux/completion.h:10 @
  * Atomic wait-for-completion handler data structures.
  * See kernel/sched/completion.c for details.
  */
-
-#include <linux/wait.h>
+#include <linux/wait-simple.h>
 
 /*
  * struct completion - structure used to maintain state for a "completion"
@ linux-3.18.13-rt10-r7s4/include/linux/completion.h:26 @
  */
 struct completion {
 	unsigned int done;
-	wait_queue_head_t wait;
+	struct swait_head wait;
 };
 
 #define COMPLETION_INITIALIZER(work) \
-	{ 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+	{ 0, SWAIT_HEAD_INITIALIZER((work).wait) }
 
 #define COMPLETION_INITIALIZER_ONSTACK(work) \
 	({ init_completion(&work); work; })
@ linux-3.18.13-rt10-r7s4/include/linux/completion.h:75 @ struct completion {
 static inline void init_completion(struct completion *x)
 {
 	x->done = 0;
-	init_waitqueue_head(&x->wait);
+	init_swait_head(&x->wait);
 }
 
 /**
Index: linux-3.18.13-rt10-r7s4/include/linux/cpu.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/cpu.h
+++ linux-3.18.13-rt10-r7s4/include/linux/cpu.h
@ linux-3.18.13-rt10-r7s4/include/linux/cpu.h:220 @ extern bool try_get_online_cpus(void);
 extern void put_online_cpus(void);
 extern void cpu_hotplug_disable(void);
 extern void cpu_hotplug_enable(void);
+extern void pin_current_cpu(void);
+extern void unpin_current_cpu(void);
 #define hotcpu_notifier(fn, pri)	cpu_notifier(fn, pri)
 #define __hotcpu_notifier(fn, pri)	__cpu_notifier(fn, pri)
 #define register_hotcpu_notifier(nb)	register_cpu_notifier(nb)
@ linux-3.18.13-rt10-r7s4/include/linux/cpu.h:240 @ static inline void cpu_hotplug_done(void
 #define put_online_cpus()	do { } while (0)
 #define cpu_hotplug_disable()	do { } while (0)
 #define cpu_hotplug_enable()	do { } while (0)
+static inline void pin_current_cpu(void) { }
+static inline void unpin_current_cpu(void) { }
 #define hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
 #define __hotcpu_notifier(fn, pri)	do { (void)(fn); } while (0)
 /* These aren't inline functions due to a GCC bug. */
Index: linux-3.18.13-rt10-r7s4/include/linux/delay.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/delay.h
+++ linux-3.18.13-rt10-r7s4/include/linux/delay.h
@ linux-3.18.13-rt10-r7s4/include/linux/delay.h:55 @ static inline void ssleep(unsigned int s
 	msleep(seconds * 1000);
 }
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+extern void cpu_chill(void);
+#else
+# define cpu_chill()	cpu_relax()
+#endif
+
 #endif /* defined(_LINUX_DELAY_H) */
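
Illustration (not part of the patch): the trylock-retry pattern
cpu_chill() exists for, as used in the fs/dcache.c and
fs/autofs4/expire.c hunks above; the lock names are assumed:

    #include <linux/delay.h>
    #include <linux/spinlock.h>

    /* On !RT cpu_chill() is cpu_relax(); on RT it sleeps briefly so a
     * preempted lock holder can run and release the lock, avoiding a
     * livelock in the retry loop. */
    static void demo_lock_pair(spinlock_t *a, spinlock_t *b)
    {
    retry:
        spin_lock(a);
        if (!spin_trylock(b)) {
            spin_unlock(a);
            cpu_chill();
            goto retry;
        }
        /* ... both locks held ... */
        spin_unlock(b);
        spin_unlock(a);
    }
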
Index: linux-3.18.13-rt10-r7s4/include/linux/ftrace_event.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/ftrace_event.h
+++ linux-3.18.13-rt10-r7s4/include/linux/ftrace_event.h
@ linux-3.18.13-rt10-r7s4/include/linux/ftrace_event.h:64 @ struct trace_entry {
 	unsigned char		flags;
 	unsigned char		preempt_count;
 	int			pid;
+	unsigned short		migrate_disable;
+	unsigned short		padding;
+	unsigned char		preempt_lazy_count;
 };
 
 #define FTRACE_MAX_EVENT						\
Index: linux-3.18.13-rt10-r7s4/include/linux/highmem.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/highmem.h
+++ linux-3.18.13-rt10-r7s4/include/linux/highmem.h
@ linux-3.18.13-rt10-r7s4/include/linux/highmem.h:10 @
 #include <linux/mm.h>
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
+#include <linux/sched.h>
 
 #include <asm/cacheflush.h>
 
@ linux-3.18.13-rt10-r7s4/include/linux/highmem.h:89 @ static inline void __kunmap_atomic(void
 
 #if defined(CONFIG_HIGHMEM) || defined(CONFIG_X86_32)
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 DECLARE_PER_CPU(int, __kmap_atomic_idx);
+#endif
 
 static inline int kmap_atomic_idx_push(void)
 {
+#ifndef CONFIG_PREEMPT_RT_FULL
 	int idx = __this_cpu_inc_return(__kmap_atomic_idx) - 1;
 
-#ifdef CONFIG_DEBUG_HIGHMEM
+# ifdef CONFIG_DEBUG_HIGHMEM
 	WARN_ON_ONCE(in_irq() && !irqs_disabled());
 	BUG_ON(idx >= KM_TYPE_NR);
-#endif
+# endif
 	return idx;
+#else
+	current->kmap_idx++;
+	BUG_ON(current->kmap_idx > KM_TYPE_NR);
+	return current->kmap_idx - 1;
+#endif
 }
 
 static inline int kmap_atomic_idx(void)
 {
+#ifndef CONFIG_PREEMPT_RT_FULL
 	return __this_cpu_read(__kmap_atomic_idx) - 1;
+#else
+	return current->kmap_idx - 1;
+#endif
 }
 
 static inline void kmap_atomic_idx_pop(void)
 {
-#ifdef CONFIG_DEBUG_HIGHMEM
+#ifndef CONFIG_PREEMPT_RT_FULL
+# ifdef CONFIG_DEBUG_HIGHMEM
 	int idx = __this_cpu_dec_return(__kmap_atomic_idx);
 
 	BUG_ON(idx < 0);
-#else
+# else
 	__this_cpu_dec(__kmap_atomic_idx);
+# endif
+#else
+	current->kmap_idx--;
+# ifdef CONFIG_DEBUG_HIGHMEM
+	BUG_ON(current->kmap_idx < 0);
+# endif
 #endif
 }
 
Index: linux-3.18.13-rt10-r7s4/include/linux/hrtimer.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/hrtimer.h
+++ linux-3.18.13-rt10-r7s4/include/linux/hrtimer.h
@ linux-3.18.13-rt10-r7s4/include/linux/hrtimer.h:114 @ struct hrtimer {
 	enum hrtimer_restart		(*function)(struct hrtimer *);
 	struct hrtimer_clock_base	*base;
 	unsigned long			state;
+	struct list_head		cb_entry;
+	int				irqsafe;
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+	ktime_t				praecox;
+#endif
 #ifdef CONFIG_TIMER_STATS
 	int				start_pid;
 	void				*start_site;
@ linux-3.18.13-rt10-r7s4/include/linux/hrtimer.h:155 @ struct hrtimer_clock_base {
 	int			index;
 	clockid_t		clockid;
 	struct timerqueue_head	active;
+	struct list_head	expired;
 	ktime_t			resolution;
 	ktime_t			(*get_time)(void);
 	ktime_t			softirq_time;
@ linux-3.18.13-rt10-r7s4/include/linux/hrtimer.h:201 @ struct hrtimer_cpu_base {
 	unsigned long			nr_hangs;
 	ktime_t				max_hang_time;
 #endif
+#ifdef CONFIG_PREEMPT_RT_BASE
+	wait_queue_head_t		wait;
+#endif
 	struct hrtimer_clock_base	clock_base[HRTIMER_MAX_CLOCK_BASES];
 };
 
@ linux-3.18.13-rt10-r7s4/include/linux/hrtimer.h:391 @ static inline int hrtimer_restart(struct
 	return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
 }
 
+/* Softirq preemption could deadlock timer removal */
+#ifdef CONFIG_PREEMPT_RT_BASE
+  extern void hrtimer_wait_for_timer(const struct hrtimer *timer);
+#else
+# define hrtimer_wait_for_timer(timer)	do { cpu_relax(); } while (0)
+#endif
+
 /* Query timers: */
 extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
 extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
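
Illustration (not part of the patch): the cancel-and-wait pattern
hrtimer_wait_for_timer() supports, matching the fs/timerfd.c hunk
above; the timer/lock pairing below is assumed:

    #include <linux/hrtimer.h>
    #include <linux/spinlock.h>

    /* Drop the lock the callback may take, wait for a callback that is
     * currently running in the timer softirq, then retry the cancel.
     * On !RT hrtimer_wait_for_timer() degrades to cpu_relax(). */
    static void demo_cancel_sync(struct hrtimer *timer, spinlock_t *lock)
    {
        for (;;) {
            spin_lock_irq(lock);
            if (hrtimer_try_to_cancel(timer) >= 0)
                break;
            spin_unlock_irq(lock);
            hrtimer_wait_for_timer(timer);
        }
        spin_unlock_irq(lock);
    }
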
Index: linux-3.18.13-rt10-r7s4/include/linux/idr.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/idr.h
+++ linux-3.18.13-rt10-r7s4/include/linux/idr.h
@ linux-3.18.13-rt10-r7s4/include/linux/idr.h:98 @ bool idr_is_empty(struct idr *idp);
  * Each idr_preload() should be matched with an invocation of this
  * function.  See idr_preload() for details.
  */
+#ifdef CONFIG_PREEMPT_RT_FULL
+void idr_preload_end(void);
+#else
 static inline void idr_preload_end(void)
 {
 	preempt_enable();
 }
+#endif
 
 /**
  * idr_find - return pointer for given id
Index: linux-3.18.13-rt10-r7s4/include/linux/init_task.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/init_task.h
+++ linux-3.18.13-rt10-r7s4/include/linux/init_task.h
@ linux-3.18.13-rt10-r7s4/include/linux/init_task.h:150 @ extern struct task_group root_task_group
 # define INIT_PERF_EVENTS(tsk)
 #endif
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define INIT_TIMER_LIST		.posix_timer_list = NULL,
+#else
+# define INIT_TIMER_LIST
+#endif
+
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 # define INIT_VTIME(tsk)						\
-	.vtime_seqlock = __SEQLOCK_UNLOCKED(tsk.vtime_seqlock),	\
+	.vtime_lock = __RAW_SPIN_LOCK_UNLOCKED(tsk.vtime_lock),	\
+	.vtime_seq = SEQCNT_ZERO(tsk.vtime_seq),			\
 	.vtime_snap = 0,				\
 	.vtime_snap_whence = VTIME_SYS,
 #else
@ linux-3.18.13-rt10-r7s4/include/linux/init_task.h:229 @ extern struct task_group root_task_group
 	.cpu_timers	= INIT_CPU_TIMERS(tsk.cpu_timers),		\
 	.pi_lock	= __RAW_SPIN_LOCK_UNLOCKED(tsk.pi_lock),	\
 	.timer_slack_ns = 50000, /* 50 usec default slack */		\
+	INIT_TIMER_LIST							\
 	.pids = {							\
 		[PIDTYPE_PID]  = INIT_PID_LINK(PIDTYPE_PID),		\
 		[PIDTYPE_PGID] = INIT_PID_LINK(PIDTYPE_PGID),		\
Index: linux-3.18.13-rt10-r7s4/include/linux/interrupt.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/interrupt.h
+++ linux-3.18.13-rt10-r7s4/include/linux/interrupt.h
@ linux-3.18.13-rt10-r7s4/include/linux/interrupt.h:64 @
  *                interrupt handler after suspending interrupts. For system
  *                wakeup devices users need to implement wakeup detection in
  *                their interrupt handlers.
+ * IRQF_NO_SOFTIRQ_CALL - Do not process softirqs in the irq thread context (RT)
  */
 #define IRQF_DISABLED		0x00000020
 #define IRQF_SHARED		0x00000080
@ linux-3.18.13-rt10-r7s4/include/linux/interrupt.h:79 @
 #define IRQF_NO_THREAD		0x00010000
 #define IRQF_EARLY_RESUME	0x00020000
 #define IRQF_COND_SUSPEND	0x00040000
+#define IRQF_NO_SOFTIRQ_CALL	0x00080000
 
 #define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
 
@ linux-3.18.13-rt10-r7s4/include/linux/interrupt.h:190 @ extern void devm_free_irq(struct device
 #ifdef CONFIG_LOCKDEP
 # define local_irq_enable_in_hardirq()	do { } while (0)
 #else
-# define local_irq_enable_in_hardirq()	local_irq_enable()
+# define local_irq_enable_in_hardirq()	local_irq_enable_nort()
 #endif
 
 extern void disable_irq_nosync(unsigned int irq);
@ linux-3.18.13-rt10-r7s4/include/linux/interrupt.h:220 @ struct irq_affinity_notify {
 	unsigned int irq;
 	struct kref kref;
 	struct work_struct work;
+	struct list_head list;
 	void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
 	void (*release)(struct kref *ref);
 };
@ linux-3.18.13-rt10-r7s4/include/linux/interrupt.h:369 @ static inline int disable_irq_wake(unsig
 
 
 #ifdef CONFIG_IRQ_FORCED_THREADING
+# ifndef CONFIG_PREEMPT_RT_BASE
 extern bool force_irqthreads;
+# else
+#  define force_irqthreads	(true)
+# endif
 #else
-#define force_irqthreads	(0)
+#define force_irqthreads	(false)
 #endif
 
 #ifndef __ARCH_SET_SOFTIRQ_PENDING
@ linux-3.18.13-rt10-r7s4/include/linux/interrupt.h:431 @ struct softirq_action
 	void	(*action)(struct softirq_action *);
 };
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 asmlinkage void do_softirq(void);
 asmlinkage void __do_softirq(void);
-
+static inline void thread_do_softirq(void) { do_softirq(); }
 #ifdef __ARCH_HAS_DO_SOFTIRQ
 void do_softirq_own_stack(void);
 #else
@ linux-3.18.13-rt10-r7s4/include/linux/interrupt.h:443 @ static inline void do_softirq_own_stack(
 	__do_softirq();
 }
 #endif
+#else
+extern void thread_do_softirq(void);
+#endif
 
 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
 extern void softirq_init(void);
@ linux-3.18.13-rt10-r7s4/include/linux/interrupt.h:453 @ extern void __raise_softirq_irqoff(unsig
 
 extern void raise_softirq_irqoff(unsigned int nr);
 extern void raise_softirq(unsigned int nr);
+extern void softirq_check_pending_idle(void);
 
 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
 
@ linux-3.18.13-rt10-r7s4/include/linux/interrupt.h:475 @ static inline struct task_struct *this_c
      to be executed on some cpu at least once after this.
    * If the tasklet is already scheduled, but its execution is still not
      started, it will be executed only once.
-   * If this tasklet is already running on another CPU (or schedule is called
-     from tasklet itself), it is rescheduled for later.
+   * If this tasklet is already running on another CPU, it is rescheduled
+     for later.
+   * Schedule must not be called from the tasklet itself (a lockup occurs)
    * Tasklet is strictly serialized wrt itself, but not
      wrt another tasklets. If client needs some intertask synchronization,
      he makes it with spinlocks.
@ linux-3.18.13-rt10-r7s4/include/linux/interrupt.h:502 @ struct tasklet_struct name = { NULL, 0,
 enum
 {
 	TASKLET_STATE_SCHED,	/* Tasklet is scheduled for execution */
-	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
+	TASKLET_STATE_RUN,	/* Tasklet is running (SMP only) */
+	TASKLET_STATE_PENDING	/* Tasklet is pending */
 };
 
-#ifdef CONFIG_SMP
+#define TASKLET_STATEF_SCHED	(1 << TASKLET_STATE_SCHED)
+#define TASKLET_STATEF_RUN	(1 << TASKLET_STATE_RUN)
+#define TASKLET_STATEF_PENDING	(1 << TASKLET_STATE_PENDING)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
 static inline int tasklet_trylock(struct tasklet_struct *t)
 {
 	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
 }
 
+static inline int tasklet_tryunlock(struct tasklet_struct *t)
+{
+	return cmpxchg(&t->state, TASKLET_STATEF_RUN, 0) == TASKLET_STATEF_RUN;
+}
+
 static inline void tasklet_unlock(struct tasklet_struct *t)
 {
 	smp_mb__before_atomic();
 	clear_bit(TASKLET_STATE_RUN, &(t)->state);
 }
 
-static inline void tasklet_unlock_wait(struct tasklet_struct *t)
-{
-	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
-}
+extern void tasklet_unlock_wait(struct tasklet_struct *t);
+
 #else
 #define tasklet_trylock(t) 1
+#define tasklet_tryunlock(t)	1
 #define tasklet_unlock_wait(t) do { } while (0)
 #define tasklet_unlock(t) do { } while (0)
 #endif
@ linux-3.18.13-rt10-r7s4/include/linux/interrupt.h:580 @ static inline void tasklet_disable(struc
 	smp_mb();
 }
 
-static inline void tasklet_enable(struct tasklet_struct *t)
-{
-	smp_mb__before_atomic();
-	atomic_dec(&t->count);
-}
-
-static inline void tasklet_hi_enable(struct tasklet_struct *t)
-{
-	smp_mb__before_atomic();
-	atomic_dec(&t->count);
-}
+extern void tasklet_enable(struct tasklet_struct *t);
+extern void tasklet_hi_enable(struct tasklet_struct *t);
 
 extern void tasklet_kill(struct tasklet_struct *t);
 extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
@ linux-3.18.13-rt10-r7s4/include/linux/interrupt.h:613 @ void tasklet_hrtimer_cancel(struct taskl
 	tasklet_kill(&ttimer->tasklet);
 }
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+extern void softirq_early_init(void);
+#else
+static inline void softirq_early_init(void) { }
+#endif
+
 /*
  * Autoprobing for irqs:
  *
Index: linux-3.18.13-rt10-r7s4/include/linux/irq.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/irq.h
+++ linux-3.18.13-rt10-r7s4/include/linux/irq.h
@ linux-3.18.13-rt10-r7s4/include/linux/irq.h:76 @ typedef	void (*irq_preflow_handler_t)(st
  * IRQ_IS_POLLED		- Always polled by another interrupt. Exclude
  *				  it from the spurious interrupt detection
  *				  mechanism and from core side polling.
+ * IRQ_NO_SOFTIRQ_CALL		- No softirq processing in the irq thread context (RT)
  */
 enum {
 	IRQ_TYPE_NONE		= 0x00000000,
@ linux-3.18.13-rt10-r7s4/include/linux/irq.h:102 @ enum {
 	IRQ_NOTHREAD		= (1 << 16),
 	IRQ_PER_CPU_DEVID	= (1 << 17),
 	IRQ_IS_POLLED		= (1 << 18),
+	IRQ_NO_SOFTIRQ_CALL     = (1 << 19),
 };
 
 #define IRQF_MODIFY_MASK	\
 	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
 	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
 	 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
-	 IRQ_IS_POLLED)
+	 IRQ_IS_POLLED | IRQ_NO_SOFTIRQ_CALL)
 
 #define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)
 
Index: linux-3.18.13-rt10-r7s4/include/linux/irq_work.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/irq_work.h
+++ linux-3.18.13-rt10-r7s4/include/linux/irq_work.h
@ linux-3.18.13-rt10-r7s4/include/linux/irq_work.h:19 @
 #define IRQ_WORK_BUSY		2UL
 #define IRQ_WORK_FLAGS		3UL
 #define IRQ_WORK_LAZY		4UL /* Doesn't want IPI, wait for tick */
+#define IRQ_WORK_HARD_IRQ	8UL /* Run hard IRQ context, even on RT */
 
 struct irq_work {
 	unsigned long flags;
Index: linux-3.18.13-rt10-r7s4/include/linux/irqdesc.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/irqdesc.h
+++ linux-3.18.13-rt10-r7s4/include/linux/irqdesc.h
@ linux-3.18.13-rt10-r7s4/include/linux/irqdesc.h:66 @ struct irq_desc {
 	unsigned int		irqs_unhandled;
 	atomic_t		threads_handled;
 	int			threads_handled_last;
+	u64			random_ip;
 	raw_spinlock_t		lock;
 	struct cpumask		*percpu_enabled;
 #ifdef CONFIG_SMP
Index: linux-3.18.13-rt10-r7s4/include/linux/irqflags.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/irqflags.h
+++ linux-3.18.13-rt10-r7s4/include/linux/irqflags.h
@ linux-3.18.13-rt10-r7s4/include/linux/irqflags.h:28 @
 # define trace_softirqs_enabled(p)	((p)->softirqs_enabled)
 # define trace_hardirq_enter()	do { current->hardirq_context++; } while (0)
 # define trace_hardirq_exit()	do { current->hardirq_context--; } while (0)
-# define lockdep_softirq_enter()	do { current->softirq_context++; } while (0)
-# define lockdep_softirq_exit()	do { current->softirq_context--; } while (0)
 # define INIT_TRACE_IRQFLAGS	.softirqs_enabled = 1,
 #else
 # define trace_hardirqs_on()		do { } while (0)
@ linux-3.18.13-rt10-r7s4/include/linux/irqflags.h:40 @
 # define trace_softirqs_enabled(p)	0
 # define trace_hardirq_enter()		do { } while (0)
 # define trace_hardirq_exit()		do { } while (0)
+# define INIT_TRACE_IRQFLAGS
+#endif
+
+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT_FULL)
+# define lockdep_softirq_enter() do { current->softirq_context++; } while (0)
+# define lockdep_softirq_exit()	 do { current->softirq_context--; } while (0)
+#else
 # define lockdep_softirq_enter()	do { } while (0)
 # define lockdep_softirq_exit()		do { } while (0)
-# define INIT_TRACE_IRQFLAGS
 #endif
 
 #if defined(CONFIG_IRQSOFF_TRACER) || \
@ linux-3.18.13-rt10-r7s4/include/linux/irqflags.h:154 @
 
 #endif /* CONFIG_TRACE_IRQFLAGS_SUPPORT */
 
+/*
+ * local_irq* variants depending on RT/!RT
+ */
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define local_irq_disable_nort()	do { } while (0)
+# define local_irq_enable_nort()	do { } while (0)
+# define local_irq_save_nort(flags)	local_save_flags(flags)
+# define local_irq_restore_nort(flags)	(void)(flags)
+# define local_irq_disable_rt()		local_irq_disable()
+# define local_irq_enable_rt()		local_irq_enable()
+#else
+# define local_irq_disable_nort()	local_irq_disable()
+# define local_irq_enable_nort()	local_irq_enable()
+# define local_irq_save_nort(flags)	local_irq_save(flags)
+# define local_irq_restore_nort(flags)	local_irq_restore(flags)
+# define local_irq_disable_rt()		do { } while (0)
+# define local_irq_enable_rt()		do { } while (0)
+#endif
+
 #endif
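
Illustration (not part of the patch): how the _nort variants are
substituted, as in the fs/ntfs/aops.c hunk above; the body of the
critical section is assumed:

    #include <linux/irqflags.h>

    /* On !RT this disables interrupts as before; on RT the flags are
     * merely saved and the section stays fully preemptible. */
    static void demo_nort_section(void)
    {
        unsigned long flags;

        local_irq_save_nort(flags);
        /* ... work that needs interrupts off only on !RT ... */
        local_irq_restore_nort(flags);
    }
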
Index: linux-3.18.13-rt10-r7s4/include/linux/jbd_common.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/jbd_common.h
+++ linux-3.18.13-rt10-r7s4/include/linux/jbd_common.h
@ linux-3.18.13-rt10-r7s4/include/linux/jbd_common.h:18 @ static inline struct journal_head *bh2jh
 
 static inline void jbd_lock_bh_state(struct buffer_head *bh)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	bit_spin_lock(BH_State, &bh->b_state);
+#else
+	spin_lock(&bh->b_state_lock);
+#endif
 }
 
 static inline int jbd_trylock_bh_state(struct buffer_head *bh)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	return bit_spin_trylock(BH_State, &bh->b_state);
+#else
+	return spin_trylock(&bh->b_state_lock);
+#endif
 }
 
 static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	return bit_spin_is_locked(BH_State, &bh->b_state);
+#else
+	return spin_is_locked(&bh->b_state_lock);
+#endif
 }
 
 static inline void jbd_unlock_bh_state(struct buffer_head *bh)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	bit_spin_unlock(BH_State, &bh->b_state);
+#else
+	spin_unlock(&bh->b_state_lock);
+#endif
 }
 
 static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	bit_spin_lock(BH_JournalHead, &bh->b_state);
+#else
+	spin_lock(&bh->b_journal_head_lock);
+#endif
 }
 
 static inline void jbd_unlock_bh_journal_head(struct buffer_head *bh)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	bit_spin_unlock(BH_JournalHead, &bh->b_state);
+#else
+	spin_unlock(&bh->b_journal_head_lock);
+#endif
 }
 
 #endif
Index: linux-3.18.13-rt10-r7s4/include/linux/jump_label.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/jump_label.h
+++ linux-3.18.13-rt10-r7s4/include/linux/jump_label.h
@ linux-3.18.13-rt10-r7s4/include/linux/jump_label.h:58 @ extern bool static_key_initialized;
 				    "%s used before call to jump_label_init", \
 				    __func__)
 
-#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL)
+#if defined(CC_HAVE_ASM_GOTO) && defined(CONFIG_JUMP_LABEL) && \
+	!defined(CONFIG_PREEMPT_BASE)
 
 struct static_key {
 	atomic_t enabled;
Index: linux-3.18.13-rt10-r7s4/include/linux/kdb.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/kdb.h
+++ linux-3.18.13-rt10-r7s4/include/linux/kdb.h
@ linux-3.18.13-rt10-r7s4/include/linux/kdb.h:119 @ extern int kdb_trap_printk;
 extern __printf(1, 0) int vkdb_printf(const char *fmt, va_list args);
 extern __printf(1, 2) int kdb_printf(const char *, ...);
 typedef __printf(1, 2) int (*kdb_printf_t)(const char *, ...);
-
+#define in_kdb_printk() (kdb_trap_printk)
 extern void kdb_init(int level);
 
 /* Access to kdb specific polling devices */
@ linux-3.18.13-rt10-r7s4/include/linux/kdb.h:154 @ extern int kdb_register_repeat(char *, k
 extern int kdb_unregister(char *);
 #else /* ! CONFIG_KGDB_KDB */
 static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) { return 0; }
+#define in_kdb_printk() (0)
 static inline void kdb_init(int level) {}
 static inline int kdb_register(char *cmd, kdb_func_t func, char *usage,
 			       char *help, short minlen) { return 0; }
Index: linux-3.18.13-rt10-r7s4/include/linux/kernel.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/kernel.h
+++ linux-3.18.13-rt10-r7s4/include/linux/kernel.h
@ linux-3.18.13-rt10-r7s4/include/linux/kernel.h:454 @ extern enum system_states {
 	SYSTEM_HALT,
 	SYSTEM_POWER_OFF,
 	SYSTEM_RESTART,
+	SYSTEM_SUSPEND,
 } system_state;
 
 #define TAINT_PROPRIETARY_MODULE	0
Index: linux-3.18.13-rt10-r7s4/include/linux/kvm_host.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/kvm_host.h
+++ linux-3.18.13-rt10-r7s4/include/linux/kvm_host.h
@ linux-3.18.13-rt10-r7s4/include/linux/kvm_host.h:247 @ struct kvm_vcpu {
 
 	int fpu_active;
 	int guest_fpu_loaded, guest_xcr0_loaded;
-	wait_queue_head_t wq;
+	struct swait_head wq;
 	struct pid *pid;
 	int sigset_active;
 	sigset_t sigset;
@ linux-3.18.13-rt10-r7s4/include/linux/kvm_host.h:690 @ static inline bool kvm_arch_has_noncoher
 }
 #endif
 
-static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
+static inline struct swait_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
 {
 #ifdef __KVM_HAVE_ARCH_WQP
 	return vcpu->arch.wqp;
Index: linux-3.18.13-rt10-r7s4/include/linux/lglock.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/lglock.h
+++ linux-3.18.13-rt10-r7s4/include/linux/lglock.h
@ linux-3.18.13-rt10-r7s4/include/linux/lglock.h:37 @
 #endif
 
 struct lglock {
+#ifndef CONFIG_PREEMPT_RT_FULL
 	arch_spinlock_t __percpu *lock;
+#else
+	struct rt_mutex __percpu *lock;
+#endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 	struct lock_class_key lock_key;
 	struct lockdep_map    lock_dep_map;
 #endif
 };
 
-#define DEFINE_LGLOCK(name)						\
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define DEFINE_LGLOCK(name)						\
 	static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock)		\
 	= __ARCH_SPIN_LOCK_UNLOCKED;					\
 	struct lglock name = { .lock = &name ## _lock }
 
-#define DEFINE_STATIC_LGLOCK(name)					\
+# define DEFINE_STATIC_LGLOCK(name)					\
 	static DEFINE_PER_CPU(arch_spinlock_t, name ## _lock)		\
 	= __ARCH_SPIN_LOCK_UNLOCKED;					\
 	static struct lglock name = { .lock = &name ## _lock }
+#else
+
+# define DEFINE_LGLOCK(name)						\
+	static DEFINE_PER_CPU(struct rt_mutex, name ## _lock)		\
+	= __RT_MUTEX_INITIALIZER( name ## _lock);			\
+	struct lglock name = { .lock = &name ## _lock }
+
+# define DEFINE_STATIC_LGLOCK(name)					\
+	static DEFINE_PER_CPU(struct rt_mutex, name ## _lock)		\
+	= __RT_MUTEX_INITIALIZER( name ## _lock);			\
+	static struct lglock name = { .lock = &name ## _lock }
+#endif
 
 void lg_lock_init(struct lglock *lg, char *name);
 void lg_local_lock(struct lglock *lg);
@ linux-3.18.13-rt10-r7s4/include/linux/lglock.h:79 @ void lg_local_unlock_cpu(struct lglock *
 void lg_global_lock(struct lglock *lg);
 void lg_global_unlock(struct lglock *lg);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+#define lg_global_trylock_relax(name)	lg_global_lock(name)
+#else
+void lg_global_trylock_relax(struct lglock *lg);
+#endif
+
 #else
 /* When !CONFIG_SMP, map lglock to spinlock */
 #define lglock spinlock
Index: linux-3.18.13-rt10-r7s4/include/linux/list_bl.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/list_bl.h
+++ linux-3.18.13-rt10-r7s4/include/linux/list_bl.h
@ linux-3.18.13-rt10-r7s4/include/linux/list_bl.h:5 @
 #define _LINUX_LIST_BL_H
 
 #include <linux/list.h>
+#include <linux/spinlock.h>
 #include <linux/bit_spinlock.h>
 
 /*
@ linux-3.18.13-rt10-r7s4/include/linux/list_bl.h:36 @
 
 struct hlist_bl_head {
 	struct hlist_bl_node *first;
+#ifdef CONFIG_PREEMPT_RT_BASE
+	raw_spinlock_t lock;
+#endif
 };
 
 struct hlist_bl_node {
 	struct hlist_bl_node *next, **pprev;
 };
-#define INIT_HLIST_BL_HEAD(ptr) \
-	((ptr)->first = NULL)
+
+static inline void INIT_HLIST_BL_HEAD(struct hlist_bl_head *h)
+{
+	h->first = NULL;
+#ifdef CONFIG_PREEMPT_RT_BASE
+	raw_spin_lock_init(&h->lock);
+#endif
+}
 
 static inline void INIT_HLIST_BL_NODE(struct hlist_bl_node *h)
 {
@ linux-3.18.13-rt10-r7s4/include/linux/list_bl.h:130 @ static inline void hlist_bl_del_init(str
 
 static inline void hlist_bl_lock(struct hlist_bl_head *b)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	bit_spin_lock(0, (unsigned long *)b);
+#else
+	raw_spin_lock(&b->lock);
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	__set_bit(0, (unsigned long *)b);
+#endif
+#endif
 }
 
 static inline void hlist_bl_unlock(struct hlist_bl_head *b)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	__bit_spin_unlock(0, (unsigned long *)b);
+#else
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+	__clear_bit(0, (unsigned long *)b);
+#endif
+	raw_spin_unlock(&b->lock);
+#endif
 }
 
 static inline bool hlist_bl_is_locked(struct hlist_bl_head *b)
Index: linux-3.18.13-rt10-r7s4/include/linux/locallock.h
===================================================================
--- /dev/null
+++ linux-3.18.13-rt10-r7s4/include/linux/locallock.h
@ linux-3.18.13-rt10-r7s4/include/linux/locallock.h:4 @
+#ifndef _LINUX_LOCALLOCK_H
+#define _LINUX_LOCALLOCK_H
+
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+
+#ifdef CONFIG_PREEMPT_RT_BASE
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define LL_WARN(cond)	WARN_ON(cond)
+#else
+# define LL_WARN(cond)	do { } while (0)
+#endif
+
+/*
+ * per cpu lock based substitute for local_irq_*()
+ */
+struct local_irq_lock {
+	spinlock_t		lock;
+	struct task_struct	*owner;
+	int			nestcnt;
+	unsigned long		flags;
+};
+
+#define DEFINE_LOCAL_IRQ_LOCK(lvar)					\
+	DEFINE_PER_CPU(struct local_irq_lock, lvar) = {			\
+		.lock = __SPIN_LOCK_UNLOCKED((lvar).lock) }
+
+#define DECLARE_LOCAL_IRQ_LOCK(lvar)					\
+	DECLARE_PER_CPU(struct local_irq_lock, lvar)
+
+#define local_irq_lock_init(lvar)					\
+	do {								\
+		int __cpu;						\
+		for_each_possible_cpu(__cpu)				\
+			spin_lock_init(&per_cpu(lvar, __cpu).lock);	\
+	} while (0)
+
+/*
+ * spin_lock|trylock|unlock_local flavour that does not migrate disable
+ * used for __local_lock|trylock|unlock where get_local_var/put_local_var
+ * already takes care of the migrate_disable/enable
+ * for CONFIG_PREEMPT_BASE map to the normal spin_* calls.
+ */
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define spin_lock_local(lock)			rt_spin_lock(lock)
+# define spin_trylock_local(lock)		rt_spin_trylock(lock)
+# define spin_unlock_local(lock)		rt_spin_unlock(lock)
+#else
+# define spin_lock_local(lock)			spin_lock(lock)
+# define spin_trylock_local(lock)		spin_trylock(lock)
+# define spin_unlock_local(lock)		spin_unlock(lock)
+#endif
+
+static inline void __local_lock(struct local_irq_lock *lv)
+{
+	if (lv->owner != current) {
+		spin_lock_local(&lv->lock);
+		LL_WARN(lv->owner);
+		LL_WARN(lv->nestcnt);
+		lv->owner = current;
+	}
+	lv->nestcnt++;
+}
+
+#define local_lock(lvar)					\
+	do { __local_lock(&get_local_var(lvar)); } while (0)
+
+static inline int __local_trylock(struct local_irq_lock *lv)
+{
+	if (lv->owner != current && spin_trylock_local(&lv->lock)) {
+		LL_WARN(lv->owner);
+		LL_WARN(lv->nestcnt);
+		lv->owner = current;
+		lv->nestcnt = 1;
+		return 1;
+	}
+	return 0;
+}
+
+#define local_trylock(lvar)						\
+	({								\
+		int __locked;						\
+		__locked = __local_trylock(&get_local_var(lvar));	\
+		if (!__locked)						\
+			put_local_var(lvar);				\
+		__locked;						\
+	})
+
+static inline void __local_unlock(struct local_irq_lock *lv)
+{
+	LL_WARN(lv->nestcnt == 0);
+	LL_WARN(lv->owner != current);
+	if (--lv->nestcnt)
+		return;
+
+	lv->owner = NULL;
+	spin_unlock_local(&lv->lock);
+}
+
+#define local_unlock(lvar)					\
+	do {							\
+		__local_unlock(&__get_cpu_var(lvar));		\
+		put_local_var(lvar);				\
+	} while (0)
+
+static inline void __local_lock_irq(struct local_irq_lock *lv)
+{
+	spin_lock_irqsave(&lv->lock, lv->flags);
+	LL_WARN(lv->owner);
+	LL_WARN(lv->nestcnt);
+	lv->owner = current;
+	lv->nestcnt = 1;
+}
+
+#define local_lock_irq(lvar)						\
+	do { __local_lock_irq(&get_local_var(lvar)); } while (0)
+
+#define local_lock_irq_on(lvar, cpu)					\
+	do { __local_lock_irq(&per_cpu(lvar, cpu)); } while (0)
+
+static inline void __local_unlock_irq(struct local_irq_lock *lv)
+{
+	LL_WARN(!lv->nestcnt);
+	LL_WARN(lv->owner != current);
+	lv->owner = NULL;
+	lv->nestcnt = 0;
+	spin_unlock_irq(&lv->lock);
+}
+
+#define local_unlock_irq(lvar)						\
+	do {								\
+		__local_unlock_irq(&__get_cpu_var(lvar));		\
+		put_local_var(lvar);					\
+	} while (0)
+
+#define local_unlock_irq_on(lvar, cpu)					\
+	do {								\
+		__local_unlock_irq(&per_cpu(lvar, cpu));		\
+	} while (0)
+
+static inline int __local_lock_irqsave(struct local_irq_lock *lv)
+{
+	if (lv->owner != current) {
+		__local_lock_irq(lv);
+		return 0;
+	} else {
+		lv->nestcnt++;
+		return 1;
+	}
+}
+
+#define local_lock_irqsave(lvar, _flags)				\
+	do {								\
+		if (__local_lock_irqsave(&get_local_var(lvar)))		\
+			put_local_var(lvar);				\
+		_flags = __get_cpu_var(lvar).flags;			\
+	} while (0)
+
+#define local_lock_irqsave_on(lvar, _flags, cpu)			\
+	do {								\
+		__local_lock_irqsave(&per_cpu(lvar, cpu));		\
+		_flags = per_cpu(lvar, cpu).flags;			\
+	} while (0)
+
+static inline int __local_unlock_irqrestore(struct local_irq_lock *lv,
+					    unsigned long flags)
+{
+	LL_WARN(!lv->nestcnt);
+	LL_WARN(lv->owner != current);
+	if (--lv->nestcnt)
+		return 0;
+
+	lv->owner = NULL;
+	spin_unlock_irqrestore(&lv->lock, lv->flags);
+	return 1;
+}
+
+#define local_unlock_irqrestore(lvar, flags)				\
+	do {								\
+		if (__local_unlock_irqrestore(&__get_cpu_var(lvar), flags)) \
+			put_local_var(lvar);				\
+	} while (0)
+
+#define local_unlock_irqrestore_on(lvar, flags, cpu)			\
+	do {								\
+		__local_unlock_irqrestore(&per_cpu(lvar, cpu), flags);	\
+	} while (0)
+
+#define local_spin_trylock_irq(lvar, lock)				\
+	({								\
+		int __locked;						\
+		local_lock_irq(lvar);					\
+		__locked = spin_trylock(lock);				\
+		if (!__locked)						\
+			local_unlock_irq(lvar);				\
+		__locked;						\
+	})
+
+#define local_spin_lock_irq(lvar, lock)					\
+	do {								\
+		local_lock_irq(lvar);					\
+		spin_lock(lock);					\
+	} while (0)
+
+#define local_spin_unlock_irq(lvar, lock)				\
+	do {								\
+		spin_unlock(lock);					\
+		local_unlock_irq(lvar);					\
+	} while (0)
+
+#define local_spin_lock_irqsave(lvar, lock, flags)			\
+	do {								\
+		local_lock_irqsave(lvar, flags);			\
+		spin_lock(lock);					\
+	} while (0)
+
+#define local_spin_unlock_irqrestore(lvar, lock, flags)			\
+	do {								\
+		spin_unlock(lock);					\
+		local_unlock_irqrestore(lvar, flags);			\
+	} while (0)
+
+#define get_locked_var(lvar, var)					\
+	(*({								\
+		local_lock(lvar);					\
+		&__get_cpu_var(var);					\
+	}))
+
+#define put_locked_var(lvar, var)	local_unlock(lvar)
+
+#define local_lock_cpu(lvar)						\
+	({								\
+		local_lock(lvar);					\
+		smp_processor_id();					\
+	})
+
+#define local_unlock_cpu(lvar)			local_unlock(lvar)
+
+#else /* PREEMPT_RT_BASE */
+
+#define DEFINE_LOCAL_IRQ_LOCK(lvar)		__typeof__(const int) lvar
+#define DECLARE_LOCAL_IRQ_LOCK(lvar)		extern __typeof__(const int) lvar
+
+static inline void local_irq_lock_init(int lvar) { }
+
+#define local_lock(lvar)			preempt_disable()
+#define local_unlock(lvar)			preempt_enable()
+#define local_lock_irq(lvar)			local_irq_disable()
+#define local_unlock_irq(lvar)			local_irq_enable()
+#define local_lock_irqsave(lvar, flags)		local_irq_save(flags)
+#define local_unlock_irqrestore(lvar, flags)	local_irq_restore(flags)
+
+#define local_spin_trylock_irq(lvar, lock)	spin_trylock_irq(lock)
+#define local_spin_lock_irq(lvar, lock)		spin_lock_irq(lock)
+#define local_spin_unlock_irq(lvar, lock)	spin_unlock_irq(lock)
+#define local_spin_lock_irqsave(lvar, lock, flags)	\
+	spin_lock_irqsave(lock, flags)
+#define local_spin_unlock_irqrestore(lvar, lock, flags)	\
+	spin_unlock_irqrestore(lock, flags)
+
+#define get_locked_var(lvar, var)		get_cpu_var(var)
+#define put_locked_var(lvar, var)		put_cpu_var(var)
+
+#define local_lock_cpu(lvar)			get_cpu()
+#define local_unlock_cpu(lvar)			put_cpu()
+
+#endif
+
+#endif
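
Illustration (not part of the patch): minimal use of the new local
locks; the variable names are assumed. A per-CPU object previously
protected by a bare local_irq_save() gets a named lock that maps back
to local_irq_save() on !RT:

    #include <linux/locallock.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(int, demo_count);
    /* One lock instance per CPU, replacing open-coded IRQ disabling */
    static DEFINE_LOCAL_IRQ_LOCK(demo_lock);

    static void demo_inc(void)
    {
        unsigned long flags;

        /* !RT: local_irq_save(); RT: per-CPU spinlock + migrate_disable() */
        local_lock_irqsave(demo_lock, flags);
        __get_cpu_var(demo_count)++;
        local_unlock_irqrestore(demo_lock, flags);
    }
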
Index: linux-3.18.13-rt10-r7s4/include/linux/mm_types.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/mm_types.h
+++ linux-3.18.13-rt10-r7s4/include/linux/mm_types.h
@ linux-3.18.13-rt10-r7s4/include/linux/mm_types.h:14 @
 #include <linux/completion.h>
 #include <linux/cpumask.h>
 #include <linux/page-debug-flags.h>
+#include <linux/rcupdate.h>
 #include <linux/uprobes.h>
 #include <linux/page-flags-layout.h>
 #include <asm/page.h>
@ linux-3.18.13-rt10-r7s4/include/linux/mm_types.h:458 @ struct mm_struct {
 	bool tlb_flush_pending;
 #endif
 	struct uprobes_state uprobes_state;
+#ifdef CONFIG_PREEMPT_RT_BASE
+	struct rcu_head delayed_drop;
+#endif
 };
 
 static inline void mm_init_cpumask(struct mm_struct *mm)
Index: linux-3.18.13-rt10-r7s4/include/linux/mutex.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/mutex.h
+++ linux-3.18.13-rt10-r7s4/include/linux/mutex.h
@ linux-3.18.13-rt10-r7s4/include/linux/mutex.h:22 @
 #include <asm/processor.h>
 #include <linux/osq_lock.h>
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+	, .dep_map = { .name = #lockname }
+#else
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
+#endif
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+# include <linux/mutex_rt.h>
+#else
+
 /*
  * Simple, straightforward mutexes with strict semantics:
  *
@ linux-3.18.13-rt10-r7s4/include/linux/mutex.h:114 @ do {							\
 static inline void mutex_destroy(struct mutex *lock) {}
 #endif
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
-		, .dep_map = { .name = #lockname }
-#else
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
-#endif
-
 #define __MUTEX_INITIALIZER(lockname) \
 		{ .count = ATOMIC_INIT(1) \
 		, .wait_lock = __SPIN_LOCK_UNLOCKED(lockname.wait_lock) \
@ linux-3.18.13-rt10-r7s4/include/linux/mutex.h:181 @ extern int __must_check mutex_lock_killa
 extern int mutex_trylock(struct mutex *lock);
 extern void mutex_unlock(struct mutex *lock);
 
+#endif /* !PREEMPT_RT_FULL */
+
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
 #endif /* __LINUX_MUTEX_H */
Index: linux-3.18.13-rt10-r7s4/include/linux/mutex_rt.h
===================================================================
--- /dev/null
+++ linux-3.18.13-rt10-r7s4/include/linux/mutex_rt.h
@ linux-3.18.13-rt10-r7s4/include/linux/mutex_rt.h:4 @
+#ifndef __LINUX_MUTEX_RT_H
+#define __LINUX_MUTEX_RT_H
+
+#ifndef __LINUX_MUTEX_H
+#error "Please include mutex.h"
+#endif
+
+#include <linux/rtmutex.h>
+
+/* FIXME: Just for __lockfunc */
+#include <linux/spinlock.h>
+
+struct mutex {
+	struct rt_mutex		lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map	dep_map;
+#endif
+};
+
+#define __MUTEX_INITIALIZER(mutexname)					\
+	{								\
+		.lock = __RT_MUTEX_INITIALIZER(mutexname.lock)		\
+		__DEP_MAP_MUTEX_INITIALIZER(mutexname)			\
+	}
+
+#define DEFINE_MUTEX(mutexname)						\
+	struct mutex mutexname = __MUTEX_INITIALIZER(mutexname)
+
+extern void __mutex_do_init(struct mutex *lock, const char *name, struct lock_class_key *key);
+extern void __lockfunc _mutex_lock(struct mutex *lock);
+extern int __lockfunc _mutex_lock_interruptible(struct mutex *lock);
+extern int __lockfunc _mutex_lock_killable(struct mutex *lock);
+extern void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass);
+extern void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest_lock);
+extern int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass);
+extern int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass);
+extern int __lockfunc _mutex_trylock(struct mutex *lock);
+extern void __lockfunc _mutex_unlock(struct mutex *lock);
+
+#define mutex_is_locked(l)		rt_mutex_is_locked(&(l)->lock)
+#define mutex_lock(l)			_mutex_lock(l)
+#define mutex_lock_interruptible(l)	_mutex_lock_interruptible(l)
+#define mutex_lock_killable(l)		_mutex_lock_killable(l)
+#define mutex_trylock(l)		_mutex_trylock(l)
+#define mutex_unlock(l)			_mutex_unlock(l)
+#define mutex_destroy(l)		rt_mutex_destroy(&(l)->lock)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define mutex_lock_nested(l, s)	_mutex_lock_nested(l, s)
+# define mutex_lock_interruptible_nested(l, s) \
+					_mutex_lock_interruptible_nested(l, s)
+# define mutex_lock_killable_nested(l, s) \
+					_mutex_lock_killable_nested(l, s)
+
+# define mutex_lock_nest_lock(lock, nest_lock)				\
+do {									\
+	typecheck(struct lockdep_map *, &(nest_lock)->dep_map);		\
+	_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map);		\
+} while (0)
+
+#else
+# define mutex_lock_nested(l, s)	_mutex_lock(l)
+# define mutex_lock_interruptible_nested(l, s) \
+					_mutex_lock_interruptible(l)
+# define mutex_lock_killable_nested(l, s) \
+					_mutex_lock_killable(l)
+# define mutex_lock_nest_lock(lock, nest_lock) mutex_lock(lock)
+#endif
+
+# define mutex_init(mutex)				\
+do {							\
+	static struct lock_class_key __key;		\
+							\
+	rt_mutex_init(&(mutex)->lock);			\
+	__mutex_do_init((mutex), #mutex, &__key);	\
+} while (0)
+
+# define __mutex_init(mutex, name, key)			\
+do {							\
+	rt_mutex_init(&(mutex)->lock);			\
+	__mutex_do_init((mutex), name, key);		\
+} while (0)
+
+#endif
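
Illustration (not part of the patch): callers are source-compatible
with the substitution; the names below are assumed. The same code
builds against either mutex implementation:

    #include <linux/mutex.h>

    /* On RT this mutex is backed by an rt_mutex with priority
     * inheritance via the wrappers above; on !RT it is unchanged. */
    static DEFINE_MUTEX(demo_mutex);

    static void demo_serialized(void)
    {
        mutex_lock(&demo_mutex);
        /* ... serialized work ... */
        mutex_unlock(&demo_mutex);
    }
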
Index: linux-3.18.13-rt10-r7s4/include/linux/netdevice.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/netdevice.h
+++ linux-3.18.13-rt10-r7s4/include/linux/netdevice.h
@ linux-3.18.13-rt10-r7s4/include/linux/netdevice.h:2354 @ struct softnet_data {
 	unsigned int		dropped;
 	struct sk_buff_head	input_pkt_queue;
 	struct napi_struct	backlog;
+	struct sk_buff_head	tofree_queue;
 
 #ifdef CONFIG_NET_FLOW_LIMIT
 	struct sd_flow_limit __rcu *flow_limit;
Index: linux-3.18.13-rt10-r7s4/include/linux/netfilter/x_tables.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/netfilter/x_tables.h
+++ linux-3.18.13-rt10-r7s4/include/linux/netfilter/x_tables.h
@ linux-3.18.13-rt10-r7s4/include/linux/netfilter/x_tables.h:6 @
 
 
 #include <linux/netdevice.h>
+#include <linux/locallock.h>
 #include <uapi/linux/netfilter/x_tables.h>
 
 /**
@ linux-3.18.13-rt10-r7s4/include/linux/netfilter/x_tables.h:286 @ void xt_free_table_info(struct xt_table_
  */
 DECLARE_PER_CPU(seqcount_t, xt_recseq);
 
+DECLARE_LOCAL_IRQ_LOCK(xt_write_lock);
+
 /**
  * xt_write_recseq_begin - start of a write section
  *
@ linux-3.18.13-rt10-r7s4/include/linux/netfilter/x_tables.h:302 @ static inline unsigned int xt_write_recs
 {
 	unsigned int addend;
 
+	/* RT protection */
+	local_lock(xt_write_lock);
+
 	/*
 	 * Low order bit of sequence is set if we already
 	 * called xt_write_recseq_begin().
@ linux-3.18.13-rt10-r7s4/include/linux/netfilter/x_tables.h:335 @ static inline void xt_write_recseq_end(u
 	/* this is kind of a write_seqcount_end(), but addend is 0 or 1 */
 	smp_wmb();
 	__this_cpu_add(xt_recseq.sequence, addend);
+	local_unlock(xt_write_lock);
 }
 
 /*
Index: linux-3.18.13-rt10-r7s4/include/linux/notifier.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/notifier.h
+++ linux-3.18.13-rt10-r7s4/include/linux/notifier.h
@ linux-3.18.13-rt10-r7s4/include/linux/notifier.h:9 @
  *
  *				Alan Cox <Alan.Cox@linux.org>
  */
- 
+
 #ifndef _LINUX_NOTIFIER_H
 #define _LINUX_NOTIFIER_H
 #include <linux/errno.h>
@ linux-3.18.13-rt10-r7s4/include/linux/notifier.h:45 @
  * in srcu_notifier_call_chain(): no cache bounces and no memory barriers.
  * As compensation, srcu_notifier_chain_unregister() is rather expensive.
  * SRCU notifier chains should be used when the chain will be called very
- * often but notifier_blocks will seldom be removed.  Also, SRCU notifier
- * chains are slightly more difficult to use because they require special
- * runtime initialization.
+ * often but notifier_blocks will seldom be removed.
  */
 
 typedef	int (*notifier_fn_t)(struct notifier_block *nb,
@ linux-3.18.13-rt10-r7s4/include/linux/notifier.h:89 @ struct srcu_notifier_head {
 		(name)->head = NULL;		\
 	} while (0)
 
-/* srcu_notifier_heads must be initialized and cleaned up dynamically */
+/* srcu_notifier_heads must be cleaned up dynamically */
 extern void srcu_init_notifier_head(struct srcu_notifier_head *nh);
 #define srcu_cleanup_notifier_head(name)	\
 		cleanup_srcu_struct(&(name)->srcu);
@ linux-3.18.13-rt10-r7s4/include/linux/notifier.h:102 @ extern void srcu_init_notifier_head(stru
 		.head = NULL }
 #define RAW_NOTIFIER_INIT(name)	{				\
 		.head = NULL }
-/* srcu_notifier_heads cannot be initialized statically */
+
+#define SRCU_NOTIFIER_INIT(name, pcpu)				\
+	{							\
+		.mutex = __MUTEX_INITIALIZER(name.mutex),	\
+		.head = NULL,					\
+		.srcu = __SRCU_STRUCT_INIT(name.srcu, pcpu),	\
+	}
 
 #define ATOMIC_NOTIFIER_HEAD(name)				\
 	struct atomic_notifier_head name =			\
@ linux-3.18.13-rt10-r7s4/include/linux/notifier.h:120 @ extern void srcu_init_notifier_head(stru
 	struct raw_notifier_head name =				\
 		RAW_NOTIFIER_INIT(name)
 
+#define _SRCU_NOTIFIER_HEAD(name, mod)				\
+	static DEFINE_PER_CPU(struct srcu_struct_array,		\
+			name##_head_srcu_array);		\
+	mod struct srcu_notifier_head name =			\
+			SRCU_NOTIFIER_INIT(name, name##_head_srcu_array)
+
+#define SRCU_NOTIFIER_HEAD(name)				\
+	_SRCU_NOTIFIER_HEAD(name, )
+
+#define SRCU_NOTIFIER_HEAD_STATIC(name)				\
+	_SRCU_NOTIFIER_HEAD(name, static)
+
 #ifdef __KERNEL__
 
 extern int atomic_notifier_chain_register(struct atomic_notifier_head *nh,
@ linux-3.18.13-rt10-r7s4/include/linux/notifier.h:201 @ static inline int notifier_to_errno(int
 
 /*
  *	Declared notifiers so far. I can imagine quite a few more chains
- *	over time (eg laptop power reset chains, reboot chain (to clean 
+ *	over time (eg laptop power reset chains, reboot chain (to clean
  *	device units up), device [un]mount chain, module load/unload chain,
- *	low memory chain, screenblank chain (for plug in modular screenblankers) 
+ *	low memory chain, screenblank chain (for plug in modular screenblankers)
  *	VC switch chains (for loadable kernel svgalib VC switch helpers) etc...
  */
- 
+
 /* CPU notfiers are defined in include/linux/cpu.h. */
 
 /* netdevice notifiers are defined in include/linux/netdevice.h */
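
The new SRCU_NOTIFIER_INIT and SRCU_NOTIFIER_HEAD{,_STATIC} macros make SRCU notifier heads initializable at build time, which is why the two comments about mandatory runtime initialization are dropped above. The sketch below is a deliberately simplified, SRCU-free model of a statically initialized notifier chain, just to show the shape of compile-time init; none of these names are the kernel's:

#include <stdio.h>

/* Simplified model of a statically initialized notifier chain; the
 * SRCU machinery is elided, only the compile-time-init shape remains. */
struct notifier_block {
	int (*call)(void *data);
	struct notifier_block *next;
};

struct notifier_head {
	struct notifier_block *head;
};

#define NOTIFIER_HEAD(name) struct notifier_head name = { .head = NULL }

static int notifier_call_chain(struct notifier_head *nh, void *data)
{
	struct notifier_block *nb;
	int ret = 0;

	for (nb = nh->head; nb; nb = nb->next)
		ret = nb->call(data);
	return ret;
}

static int hello(void *data) { printf("notified: %s\n", (char *)data); return 0; }
static struct notifier_block hello_nb = { hello, NULL };

NOTIFIER_HEAD(demo_chain);	/* usable before any runtime init ran */

int main(void)
{
	demo_chain.head = &hello_nb;	/* register a callback */
	return notifier_call_chain(&demo_chain, "event");
}
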
Index: linux-3.18.13-rt10-r7s4/include/linux/percpu.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/percpu.h
+++ linux-3.18.13-rt10-r7s4/include/linux/percpu.h
@ linux-3.18.13-rt10-r7s4/include/linux/percpu.h:26 @
 	 PERCPU_MODULE_RESERVE)
 #endif
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+
+#define get_local_var(var) (*({		\
+	       migrate_disable();	\
+	       &__get_cpu_var(var);	}))
+
+#define put_local_var(var) do {	\
+	(void)&(var);		\
+	migrate_enable();	\
+} while (0)
+
+# define get_local_ptr(var) ({		\
+		migrate_disable();	\
+		this_cpu_ptr(var);	})
+
+# define put_local_ptr(var) do {	\
+	(void)(var);			\
+	migrate_enable();		\
+} while (0)
+
+#else
+
+#define get_local_var(var)	get_cpu_var(var)
+#define put_local_var(var)	put_cpu_var(var)
+#define get_local_ptr(var)	get_cpu_ptr(var)
+#define put_local_ptr(var)	put_cpu_ptr(var)
+
+#endif
+
 /* minimum unit size, also is the maximum supported allocation size */
 #define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(32 << 10)
 
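
get_local_var()/get_local_ptr() are the RT replacements for get_cpu_var()/get_cpu_ptr(): they pin the task to its CPU with migrate_disable() but leave it preemptible, so sleeping locks may be taken while the per-CPU reference is held. A compilable userspace sketch of the pattern (GNU C statement expressions as in the kernel; the migrate hooks and counter are hypothetical stand-ins):

#include <stdio.h>

/* Illustrative stand-ins: on RT, migrate_disable() pins the task to
 * its current CPU but leaves it preemptible. */
static int migrate_disable_depth;

static void migrate_disable(void) { migrate_disable_depth++; }
static void migrate_enable(void)  { migrate_disable_depth--; }

static int per_cpu_counter;	/* pretend this is a per-CPU variable */

#define get_local_var(var) (*({ migrate_disable(); &(var); }))
#define put_local_var(var) do { (void)&(var); migrate_enable(); } while (0)

int main(void)
{
	int *p = &get_local_var(per_cpu_counter);

	(*p)++;			/* safe: we cannot change CPUs here */
	put_local_var(per_cpu_counter);

	printf("counter=%d, depth back to %d\n",
	       per_cpu_counter, migrate_disable_depth);
	return 0;
}
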
Index: linux-3.18.13-rt10-r7s4/include/linux/pid.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/pid.h
+++ linux-3.18.13-rt10-r7s4/include/linux/pid.h
@ linux-3.18.13-rt10-r7s4/include/linux/pid.h:5 @
 #define _LINUX_PID_H
 
 #include <linux/rcupdate.h>
+#include <linux/atomic.h>
 
 enum pid_type
 {
Index: linux-3.18.13-rt10-r7s4/include/linux/preempt.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/preempt.h
+++ linux-3.18.13-rt10-r7s4/include/linux/preempt.h
@ linux-3.18.13-rt10-r7s4/include/linux/preempt.h:36 @ extern void preempt_count_sub(int val);
 #define preempt_count_inc() preempt_count_add(1)
 #define preempt_count_dec() preempt_count_sub(1)
 
+#ifdef CONFIG_PREEMPT_LAZY
+#define add_preempt_lazy_count(val)	do { preempt_lazy_count() += (val); } while (0)
+#define sub_preempt_lazy_count(val)	do { preempt_lazy_count() -= (val); } while (0)
+#define inc_preempt_lazy_count()	add_preempt_lazy_count(1)
+#define dec_preempt_lazy_count()	sub_preempt_lazy_count(1)
+#define preempt_lazy_count()		(current_thread_info()->preempt_lazy_count)
+#else
+#define add_preempt_lazy_count(val)	do { } while (0)
+#define sub_preempt_lazy_count(val)	do { } while (0)
+#define inc_preempt_lazy_count()	do { } while (0)
+#define dec_preempt_lazy_count()	do { } while (0)
+#define preempt_lazy_count()		(0)
+#endif
+
 #ifdef CONFIG_PREEMPT_COUNT
 
 #define preempt_disable() \
@ linux-3.18.13-rt10-r7s4/include/linux/preempt.h:58 @ do { \
 	barrier(); \
 } while (0)
 
+#define preempt_lazy_disable() \
+do { \
+	inc_preempt_lazy_count(); \
+	barrier(); \
+} while (0)
+
 #define sched_preempt_enable_no_resched() \
 do { \
 	barrier(); \
 	preempt_count_dec(); \
 } while (0)
 
-#define preempt_enable_no_resched() sched_preempt_enable_no_resched()
+#ifdef CONFIG_PREEMPT_RT_BASE
+# define preempt_enable_no_resched() sched_preempt_enable_no_resched()
+# define preempt_check_resched_rt() preempt_check_resched()
+#else
+# define preempt_enable_no_resched() preempt_enable()
+# define preempt_check_resched_rt() barrier();
+#endif
 
 #ifdef CONFIG_PREEMPT
 #define preempt_enable() \
@ linux-3.18.13-rt10-r7s4/include/linux/preempt.h:92 @ do { \
 		__preempt_schedule(); \
 } while (0)
 
+#define preempt_lazy_enable() \
+do { \
+	dec_preempt_lazy_count(); \
+	barrier(); \
+	preempt_check_resched(); \
+} while (0)
+
 #else
 #define preempt_enable() \
 do { \
@ linux-3.18.13-rt10-r7s4/include/linux/preempt.h:157 @ do { \
 #define preempt_disable_notrace()		barrier()
 #define preempt_enable_no_resched_notrace()	barrier()
 #define preempt_enable_notrace()		barrier()
+#define preempt_check_resched_rt()		barrier()
 
 #endif /* CONFIG_PREEMPT_COUNT */
 
@ linux-3.18.13-rt10-r7s4/include/linux/preempt.h:177 @ do { \
 } while (0)
 #define preempt_fold_need_resched() \
 do { \
-	if (tif_need_resched()) \
+	if (tif_need_resched_now()) \
 		set_preempt_need_resched(); \
 } while (0)
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define preempt_disable_rt()		preempt_disable()
+# define preempt_enable_rt()		preempt_enable()
+# define preempt_disable_nort()		barrier()
+# define preempt_enable_nort()		barrier()
+# ifdef CONFIG_SMP
+   extern void migrate_disable(void);
+   extern void migrate_enable(void);
+# else /* CONFIG_SMP */
+#  define migrate_disable()		barrier()
+#  define migrate_enable()		barrier()
+# endif /* CONFIG_SMP */
+#else
+# define preempt_disable_rt()		barrier()
+# define preempt_enable_rt()		barrier()
+# define preempt_disable_nort()		preempt_disable()
+# define preempt_enable_nort()		preempt_enable()
+# define migrate_disable()		preempt_disable()
+# define migrate_enable()		preempt_enable()
+#endif
+
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 
 struct preempt_notifier;
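
The _nort/_rt suffixes introduced here let shared code annotate a critical section once: preempt_disable_nort() is a real preempt_disable() on mainline but only a compiler barrier on RT, where a sleeping lock already serializes the data; preempt_disable_rt() is the inverse. A toy model of the config-dependent mapping (compile with or without -DPREEMPT_RT_FULL; the counter is a hypothetical stand-in for the preempt count):

#include <stdio.h>

static int preempt_count;

static void preempt_disable(void) { preempt_count++; }
static void preempt_enable(void)  { preempt_count--; }

#ifdef PREEMPT_RT_FULL
# define preempt_disable_nort()	do { } while (0)	/* barrier() on RT */
# define preempt_enable_nort()	do { } while (0)
#else
# define preempt_disable_nort()	preempt_disable()
# define preempt_enable_nort()	preempt_enable()
#endif

int main(void)
{
	preempt_disable_nort();
	printf("preempt count inside section: %d\n", preempt_count);
	preempt_enable_nort();
	return 0;
}
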
Index: linux-3.18.13-rt10-r7s4/include/linux/preempt_mask.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/preempt_mask.h
+++ linux-3.18.13-rt10-r7s4/include/linux/preempt_mask.h
@ linux-3.18.13-rt10-r7s4/include/linux/preempt_mask.h:47 @
 #define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
 #define NMI_OFFSET	(1UL << NMI_SHIFT)
 
-#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
+#else
+# define SOFTIRQ_DISABLE_OFFSET	(0)
+#endif
 
 #define PREEMPT_ACTIVE_BITS	1
 #define PREEMPT_ACTIVE_SHIFT	(NMI_SHIFT + NMI_BITS)
 #define PREEMPT_ACTIVE	(__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
 
 #define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
-#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
 #define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
 				 | NMI_MASK))
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
+# define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
+#else
+# define softirq_count()	(0UL)
+extern int in_serving_softirq(void);
+#endif
 
 /*
  * Are we doing bottom half or hardware interrupt processing?
@ linux-3.18.13-rt10-r7s4/include/linux/preempt_mask.h:77 @
 #define in_irq()		(hardirq_count())
 #define in_softirq()		(softirq_count())
 #define in_interrupt()		(irq_count())
-#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
 
 /*
  * Are we in NMI context?
Index: linux-3.18.13-rt10-r7s4/include/linux/printk.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/printk.h
+++ linux-3.18.13-rt10-r7s4/include/linux/printk.h
@ linux-3.18.13-rt10-r7s4/include/linux/printk.h:122 @ int no_printk(const char *fmt, ...)
 extern asmlinkage __printf(1, 2)
 void early_printk(const char *fmt, ...);
 void early_vprintk(const char *fmt, va_list ap);
+extern void printk_kill(void);
 #else
 static inline __printf(1, 2) __cold
 void early_printk(const char *s, ...) { }
+static inline void printk_kill(void) { }
 #endif
 
 #ifdef CONFIG_PRINTK
@ linux-3.18.13-rt10-r7s4/include/linux/printk.h:160 @ extern int __printk_ratelimit(const char
 #define printk_ratelimit() __printk_ratelimit(__func__)
 extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
 				   unsigned int interval_msec);
-
 extern int printk_delay_msec;
 extern int dmesg_restrict;
 extern int kptr_restrict;
Index: linux-3.18.13-rt10-r7s4/include/linux/radix-tree.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/radix-tree.h
+++ linux-3.18.13-rt10-r7s4/include/linux/radix-tree.h
@ linux-3.18.13-rt10-r7s4/include/linux/radix-tree.h:280 @ radix_tree_gang_lookup(struct radix_tree
 unsigned int radix_tree_gang_lookup_slot(struct radix_tree_root *root,
 			void ***results, unsigned long *indices,
 			unsigned long first_index, unsigned int max_items);
+#ifndef CONFIG_PREEMPT_RT_FULL
 int radix_tree_preload(gfp_t gfp_mask);
 int radix_tree_maybe_preload(gfp_t gfp_mask);
+#else
+static inline int radix_tree_preload(gfp_t gm) { return 0; }
+static inline int radix_tree_maybe_preload(gfp_t gfp_mask) { return 0; }
+#endif
 void radix_tree_init(void);
 void *radix_tree_tag_set(struct radix_tree_root *root,
 			unsigned long index, unsigned int tag);
@ linux-3.18.13-rt10-r7s4/include/linux/radix-tree.h:311 @ unsigned long radix_tree_locate_item(str
 
 static inline void radix_tree_preload_end(void)
 {
-	preempt_enable();
+	preempt_enable_nort();
 }
 
 /**
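
radix_tree_preload() exists so callers can pre-allocate tree nodes with their preferred GFP flags before taking a spinlock, under which allocation must not sleep. On RT that spinlock is itself sleepable and allocation can happen in place, so the preload pair collapses to a successful no-op, and radix_tree_preload_end() above becomes preempt_enable_nort(). The caller-side protocol, sketched with stubs (the real functions live in lib/radix-tree.c; these stand-ins only trace the shape):

#include <stdio.h>

static int radix_tree_preload(int gfp_mask) { (void)gfp_mask; return 0; }
static void radix_tree_preload_end(void) { }
static int radix_tree_insert(long index, void *item)
{
	(void)index; (void)item;
	return 0;
}

#define GFP_KERNEL 0

static int insert_item(long index, void *item)
{
	int err = radix_tree_preload(GFP_KERNEL);	/* may sleep (non-RT) */

	if (err)
		return err;
	/* spin_lock(&tree_lock); -- nodes already reserved, cannot sleep */
	err = radix_tree_insert(index, item);
	/* spin_unlock(&tree_lock); */
	radix_tree_preload_end();
	return err;
}

int main(void)
{
	printf("insert: %d\n", insert_item(1, "item"));
	return 0;
}
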
Index: linux-3.18.13-rt10-r7s4/include/linux/random.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/random.h
+++ linux-3.18.13-rt10-r7s4/include/linux/random.h
@ linux-3.18.13-rt10-r7s4/include/linux/random.h:14 @
 extern void add_device_randomness(const void *, unsigned int);
 extern void add_input_randomness(unsigned int type, unsigned int code,
 				 unsigned int value);
-extern void add_interrupt_randomness(int irq, int irq_flags);
+extern void add_interrupt_randomness(int irq, int irq_flags, __u64 ip);
 
 extern void get_random_bytes(void *buf, int nbytes);
 extern void get_random_bytes_arch(void *buf, int nbytes);
Index: linux-3.18.13-rt10-r7s4/include/linux/rcupdate.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/rcupdate.h
+++ linux-3.18.13-rt10-r7s4/include/linux/rcupdate.h
@ linux-3.18.13-rt10-r7s4/include/linux/rcupdate.h:150 @ void call_rcu(struct rcu_head *head,
 
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+#define call_rcu_bh	call_rcu
+#else
 /**
  * call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
  * @head: structure to be used for queueing the RCU updates.
@ linux-3.18.13-rt10-r7s4/include/linux/rcupdate.h:176 @ void call_rcu(struct rcu_head *head,
  */
 void call_rcu_bh(struct rcu_head *head,
 		 void (*func)(struct rcu_head *head));
+#endif
 
 /**
  * call_rcu_sched() - Queue an RCU for invocation after sched grace period.
@ linux-3.18.13-rt10-r7s4/include/linux/rcupdate.h:238 @ void synchronize_rcu(void);
  * types of kernel builds, the rcu_read_lock() nesting depth is unknowable.
  */
 #define rcu_preempt_depth() (current->rcu_read_lock_nesting)
+#ifndef CONFIG_PREEMPT_RT_FULL
+#define sched_rcu_preempt_depth()	rcu_preempt_depth()
+#else
+static inline int sched_rcu_preempt_depth(void) { return 0; }
+#endif
 
 #else /* #ifdef CONFIG_PREEMPT_RCU */
 
@ linux-3.18.13-rt10-r7s4/include/linux/rcupdate.h:266 @ static inline int rcu_preempt_depth(void
 	return 0;
 }
 
+#define sched_rcu_preempt_depth()	rcu_preempt_depth()
+
 #endif /* #else #ifdef CONFIG_PREEMPT_RCU */
 
 /* Internal to kernel */
@ linux-3.18.13-rt10-r7s4/include/linux/rcupdate.h:444 @ extern struct lockdep_map rcu_callback_m
 int debug_lockdep_rcu_enabled(void);
 
 int rcu_read_lock_held(void);
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline int rcu_read_lock_bh_held(void)
+{
+	return rcu_read_lock_held();
+}
+#else
 int rcu_read_lock_bh_held(void);
+#endif
 
 /**
  * rcu_read_lock_sched_held() - might we be in RCU-sched read-side critical section?
@ linux-3.18.13-rt10-r7s4/include/linux/rcupdate.h:976 @ static inline void rcu_read_unlock(void)
 static inline void rcu_read_lock_bh(void)
 {
 	local_bh_disable();
+#ifdef CONFIG_PREEMPT_RT_FULL
+	rcu_read_lock();
+#else
 	__acquire(RCU_BH);
 	rcu_lock_acquire(&rcu_bh_lock_map);
 	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_lock_bh() used illegally while idle");
+#endif
 }
 
 /*
@ linux-3.18.13-rt10-r7s4/include/linux/rcupdate.h:993 @ static inline void rcu_read_lock_bh(void
  */
 static inline void rcu_read_unlock_bh(void)
 {
+#ifdef CONFIG_PREEMPT_RT_FULL
+	rcu_read_unlock();
+#else
 	rcu_lockdep_assert(rcu_is_watching(),
 			   "rcu_read_unlock_bh() used illegally while idle");
 	rcu_lock_release(&rcu_bh_lock_map);
 	__release(RCU_BH);
+#endif
 	local_bh_enable();
 }
 
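
On RT, softirqs run in preemptible task context, so the BH flavour of RCU can no longer derive grace periods from "no preemption inside softirq"; it is folded into preemptible RCU instead: call_rcu_bh aliases call_rcu, and rcu_read_lock_bh() takes a real rcu_read_lock() under local_bh_disable(). A stub sketch of what a BH reader expands to on each kernel (tracing printfs only; names illustrative, compile with or without -DPREEMPT_RT_FULL):

#include <stdio.h>

static void local_bh_disable(void) { printf("local_bh_disable()\n"); }
static void local_bh_enable(void)  { printf("local_bh_enable()\n"); }
static void rcu_read_lock(void)    { printf("rcu_read_lock()\n"); }
static void rcu_read_unlock(void)  { printf("rcu_read_unlock()\n"); }

static void rcu_read_lock_bh(void)
{
	local_bh_disable();
#ifdef PREEMPT_RT_FULL
	rcu_read_lock();	/* BH RCU folded into preemptible RCU */
#else
	(void)rcu_read_lock;	/* non-RT: BH-off alone is the read section */
#endif
}

static void rcu_read_unlock_bh(void)
{
#ifdef PREEMPT_RT_FULL
	rcu_read_unlock();
#else
	(void)rcu_read_unlock;
#endif
	local_bh_enable();
}

int main(void)
{
	rcu_read_lock_bh();
	/* ... dereference BH-RCU-protected data here ... */
	rcu_read_unlock_bh();
	return 0;
}
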
Index: linux-3.18.13-rt10-r7s4/include/linux/rcutree.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/rcutree.h
+++ linux-3.18.13-rt10-r7s4/include/linux/rcutree.h
@ linux-3.18.13-rt10-r7s4/include/linux/rcutree.h:49 @ static inline void rcu_virt_note_context
 	rcu_note_context_switch(cpu);
 }
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define synchronize_rcu_bh	synchronize_rcu
+#else
 void synchronize_rcu_bh(void);
+#endif
 void synchronize_sched_expedited(void);
 void synchronize_rcu_expedited(void);
 
@ linux-3.18.13-rt10-r7s4/include/linux/rcutree.h:81 @ static inline void synchronize_rcu_bh_ex
 }
 
 void rcu_barrier(void);
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define rcu_barrier_bh                rcu_barrier
+#else
 void rcu_barrier_bh(void);
+#endif
 void rcu_barrier_sched(void);
 unsigned long get_state_synchronize_rcu(void);
 void cond_synchronize_rcu(unsigned long oldstate);
@ linux-3.18.13-rt10-r7s4/include/linux/rcutree.h:93 @ void cond_synchronize_rcu(unsigned long
 extern unsigned long rcutorture_testseq;
 extern unsigned long rcutorture_vernum;
 long rcu_batches_completed(void);
-long rcu_batches_completed_bh(void);
 long rcu_batches_completed_sched(void);
 void show_rcu_gp_kthreads(void);
 
 void rcu_force_quiescent_state(void);
-void rcu_bh_force_quiescent_state(void);
 void rcu_sched_force_quiescent_state(void);
 
 void exit_rcu(void);
@ linux-3.18.13-rt10-r7s4/include/linux/rcutree.h:106 @ extern int rcu_scheduler_active __read_m
 
 bool rcu_is_watching(void);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+void rcu_bh_force_quiescent_state(void);
+long rcu_batches_completed_bh(void);
+#else
+# define rcu_bh_force_quiescent_state	rcu_force_quiescent_state
+# define rcu_batches_completed_bh	rcu_batches_completed
+#endif
+
 #endif /* __LINUX_RCUTREE_H */
Index: linux-3.18.13-rt10-r7s4/include/linux/rtmutex.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/rtmutex.h
+++ linux-3.18.13-rt10-r7s4/include/linux/rtmutex.h
@ linux-3.18.13-rt10-r7s4/include/linux/rtmutex.h:17 @
 
 #include <linux/linkage.h>
 #include <linux/rbtree.h>
-#include <linux/spinlock_types.h>
+#include <linux/spinlock_types_raw.h>
 
 extern int max_lock_depth; /* for sysctl */
 
+#ifdef CONFIG_DEBUG_MUTEXES
+#include <linux/debug_locks.h>
+#endif
+
 /**
  * The rt_mutex structure
  *
@ linux-3.18.13-rt10-r7s4/include/linux/rtmutex.h:38 @ struct rt_mutex {
 	struct rb_root          waiters;
 	struct rb_node          *waiters_leftmost;
 	struct task_struct	*owner;
-#ifdef CONFIG_DEBUG_RT_MUTEXES
 	int			save_state;
+#ifdef CONFIG_DEBUG_RT_MUTEXES
 	const char 		*name, *file;
 	int			line;
 	void			*magic;
@ linux-3.18.13-rt10-r7s4/include/linux/rtmutex.h:62 @ struct hrtimer_sleeper;
 # define rt_mutex_debug_check_no_locks_held(task)	do { } while (0)
 #endif
 
+# define rt_mutex_init(mutex)					\
+	do {							\
+		raw_spin_lock_init(&(mutex)->wait_lock);	\
+		__rt_mutex_init(mutex, #mutex);			\
+	} while (0)
+
 #ifdef CONFIG_DEBUG_RT_MUTEXES
 # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
 	, .name = #mutexname, .file = __FILE__, .line = __LINE__
-# define rt_mutex_init(mutex)			__rt_mutex_init(mutex, __func__)
  extern void rt_mutex_debug_task_free(struct task_struct *tsk);
 #else
 # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
-# define rt_mutex_init(mutex)			__rt_mutex_init(mutex, NULL)
 # define rt_mutex_debug_task_free(t)			do { } while (0)
 #endif
 
-#define __RT_MUTEX_INITIALIZER(mutexname) \
-	{ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
+#define __RT_MUTEX_INITIALIZER_PLAIN(mutexname) \
+	 .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
 	, .waiters = RB_ROOT \
 	, .owner = NULL \
-	__DEBUG_RT_MUTEX_INITIALIZER(mutexname)}
+	__DEBUG_RT_MUTEX_INITIALIZER(mutexname)
+
+#define __RT_MUTEX_INITIALIZER(mutexname) \
+	{ __RT_MUTEX_INITIALIZER_PLAIN(mutexname) }
+
+#define __RT_MUTEX_INITIALIZER_SAVE_STATE(mutexname) \
+	{ __RT_MUTEX_INITIALIZER_PLAIN(mutexname)    \
+	, .save_state = 1 }
 
 #define DEFINE_RT_MUTEX(mutexname) \
 	struct rt_mutex mutexname = __RT_MUTEX_INITIALIZER(mutexname)
@ linux-3.18.13-rt10-r7s4/include/linux/rtmutex.h:109 @ extern void rt_mutex_destroy(struct rt_m
 
 extern void rt_mutex_lock(struct rt_mutex *lock);
 extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
+extern int rt_mutex_lock_killable(struct rt_mutex *lock);
 extern int rt_mutex_timed_lock(struct rt_mutex *lock,
 			       struct hrtimer_sleeper *timeout);
 
Index: linux-3.18.13-rt10-r7s4/include/linux/rwlock_rt.h
===================================================================
--- /dev/null
+++ linux-3.18.13-rt10-r7s4/include/linux/rwlock_rt.h
@ linux-3.18.13-rt10-r7s4/include/linux/rwlock_rt.h:4 @
+#ifndef __LINUX_RWLOCK_RT_H
+#define __LINUX_RWLOCK_RT_H
+
+#ifndef __LINUX_SPINLOCK_H
+#error Do not include directly. Use spinlock.h
+#endif
+
+#define rwlock_init(rwl)				\
+do {							\
+	static struct lock_class_key __key;		\
+							\
+	rt_mutex_init(&(rwl)->lock);			\
+	__rt_rwlock_init(rwl, #rwl, &__key);		\
+} while (0)
+
+extern void __lockfunc rt_write_lock(rwlock_t *rwlock);
+extern void __lockfunc rt_read_lock(rwlock_t *rwlock);
+extern int __lockfunc rt_write_trylock(rwlock_t *rwlock);
+extern int __lockfunc rt_write_trylock_irqsave(rwlock_t *trylock, unsigned long *flags);
+extern int __lockfunc rt_read_trylock(rwlock_t *rwlock);
+extern void __lockfunc rt_write_unlock(rwlock_t *rwlock);
+extern void __lockfunc rt_read_unlock(rwlock_t *rwlock);
+extern unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock);
+extern unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock);
+extern void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key);
+
+#define read_trylock(lock)	__cond_lock(lock, rt_read_trylock(lock))
+#define write_trylock(lock)	__cond_lock(lock, rt_write_trylock(lock))
+
+#define write_trylock_irqsave(lock, flags)	\
+	__cond_lock(lock, rt_write_trylock_irqsave(lock, &flags))
+
+#define read_lock_irqsave(lock, flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		flags = rt_read_lock_irqsave(lock);	\
+	} while (0)
+
+#define write_lock_irqsave(lock, flags)			\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		flags = rt_write_lock_irqsave(lock);	\
+	} while (0)
+
+#define read_lock(lock)		rt_read_lock(lock)
+
+#define read_lock_bh(lock)				\
+	do {						\
+		local_bh_disable();			\
+		rt_read_lock(lock);			\
+	} while (0)
+
+#define read_lock_irq(lock)	read_lock(lock)
+
+#define write_lock(lock)	rt_write_lock(lock)
+
+#define write_lock_bh(lock)				\
+	do {						\
+		local_bh_disable();			\
+		rt_write_lock(lock);			\
+	} while (0)
+
+#define write_lock_irq(lock)	write_lock(lock)
+
+#define read_unlock(lock)	rt_read_unlock(lock)
+
+#define read_unlock_bh(lock)				\
+	do {						\
+		rt_read_unlock(lock);			\
+		local_bh_enable();			\
+	} while (0)
+
+#define read_unlock_irq(lock)	read_unlock(lock)
+
+#define write_unlock(lock)	rt_write_unlock(lock)
+
+#define write_unlock_bh(lock)				\
+	do {						\
+		rt_write_unlock(lock);			\
+		local_bh_enable();			\
+	} while (0)
+
+#define write_unlock_irq(lock)	write_unlock(lock)
+
+#define read_unlock_irqrestore(lock, flags)		\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		(void) flags;				\
+		rt_read_unlock(lock);			\
+	} while (0)
+
+#define write_unlock_irqrestore(lock, flags) \
+	do {						\
+		typecheck(unsigned long, flags);	\
+		(void) flags;				\
+		rt_write_unlock(lock);			\
+	} while (0)
+
+#endif
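
Since an rtmutex-based rwlock never disables interrupts, the _irqsave variants above cannot capture real IRQ state: rt_write_lock_irqsave() just takes the lock and returns a dummy flags word that the matching _irqrestore only typechecks and discards. Callers keep their source unchanged, as this stub model shows (hypothetical types, not the kernel's):

#include <stdio.h>

typedef struct { int locked; } rwlock_t;

/* On RT the "flags" round-trip carries no information. */
static unsigned long rt_write_lock_irqsave(rwlock_t *l) { l->locked = 1; return 0; }
static void rt_write_unlock(rwlock_t *l) { l->locked = 0; }

#define write_lock_irqsave(lock, flags) \
	do { (flags) = rt_write_lock_irqsave(lock); } while (0)
#define write_unlock_irqrestore(lock, flags) \
	do { (void)(flags); rt_write_unlock(lock); } while (0)

int main(void)
{
	rwlock_t lock = { 0 };
	unsigned long flags;

	write_lock_irqsave(&lock, flags);	/* IRQs stay enabled on RT */
	printf("locked=%d flags=%lu\n", lock.locked, flags);
	write_unlock_irqrestore(&lock, flags);
	return 0;
}
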
Index: linux-3.18.13-rt10-r7s4/include/linux/rwlock_types.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/rwlock_types.h
+++ linux-3.18.13-rt10-r7s4/include/linux/rwlock_types.h
@ linux-3.18.13-rt10-r7s4/include/linux/rwlock_types.h:4 @
 #ifndef __LINUX_RWLOCK_TYPES_H
 #define __LINUX_RWLOCK_TYPES_H
 
+#if !defined(__LINUX_SPINLOCK_TYPES_H)
+# error "Do not include directly, include spinlock_types.h"
+#endif
+
 /*
  * include/linux/rwlock_types.h - generic rwlock type definitions
  *				  and initializers
@ linux-3.18.13-rt10-r7s4/include/linux/rwlock_types.h:50 @ typedef struct {
 				RW_DEP_MAP_INIT(lockname) }
 #endif
 
-#define DEFINE_RWLOCK(x)	rwlock_t x = __RW_LOCK_UNLOCKED(x)
+#define DEFINE_RWLOCK(name) \
+	rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
 
 #endif /* __LINUX_RWLOCK_TYPES_H */
Index: linux-3.18.13-rt10-r7s4/include/linux/rwlock_types_rt.h
===================================================================
--- /dev/null
+++ linux-3.18.13-rt10-r7s4/include/linux/rwlock_types_rt.h
@ linux-3.18.13-rt10-r7s4/include/linux/rwlock_types_rt.h:4 @
+#ifndef __LINUX_RWLOCK_TYPES_RT_H
+#define __LINUX_RWLOCK_TYPES_RT_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+#error "Do not include directly. Include spinlock_types.h instead"
+#endif
+
+/*
+ * rwlocks - rtmutex which allows single reader recursion
+ */
+typedef struct {
+	struct rt_mutex		lock;
+	int			read_depth;
+	unsigned int		break_lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map	dep_map;
+#endif
+} rwlock_t;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define RW_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
+#else
+# define RW_DEP_MAP_INIT(lockname)
+#endif
+
+#define __RW_LOCK_UNLOCKED(name) \
+	{ .lock = __RT_MUTEX_INITIALIZER_SAVE_STATE(name.lock),	\
+	  RW_DEP_MAP_INIT(name) }
+
+#define DEFINE_RWLOCK(name) \
+	rwlock_t name __cacheline_aligned_in_smp = __RW_LOCK_UNLOCKED(name)
+
+#endif
Index: linux-3.18.13-rt10-r7s4/include/linux/rwsem.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/rwsem.h
+++ linux-3.18.13-rt10-r7s4/include/linux/rwsem.h
@ linux-3.18.13-rt10-r7s4/include/linux/rwsem.h:21 @
 #include <linux/osq_lock.h>
 #endif
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+#include <linux/rwsem_rt.h>
+#else /* PREEMPT_RT_FULL */
+
 struct rw_semaphore;
 
 #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@ linux-3.18.13-rt10-r7s4/include/linux/rwsem.h:184 @ extern void up_read_non_owner(struct rw_
 # define up_read_non_owner(sem)			up_read(sem)
 #endif
 
+#endif /* !PREEMPT_RT_FULL */
+
 #endif /* _LINUX_RWSEM_H */
Index: linux-3.18.13-rt10-r7s4/include/linux/rwsem_rt.h
===================================================================
--- /dev/null
+++ linux-3.18.13-rt10-r7s4/include/linux/rwsem_rt.h
@ linux-3.18.13-rt10-r7s4/include/linux/rwsem_rt.h:4 @
+#ifndef _LINUX_RWSEM_RT_H
+#define _LINUX_RWSEM_RT_H
+
+#ifndef _LINUX_RWSEM_H
+#error "Include rwsem.h"
+#endif
+
+/*
+ * RW-semaphores are a spinlock plus a reader-depth count.
+ *
+ * Note that the semantics are different from the usual
+ * Linux rw-sems, in PREEMPT_RT mode we do not allow
+ * multiple readers to hold the lock at once, we only allow
+ * a read-lock owner to read-lock recursively. This is
+ * better for latency, makes the implementation inherently
+ * fair and makes it simpler as well.
+ */
+
+#include <linux/rtmutex.h>
+
+struct rw_semaphore {
+	struct rt_mutex		lock;
+	int			read_depth;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map	dep_map;
+#endif
+};
+
+#define __RWSEM_INITIALIZER(name) \
+	{ .lock = __RT_MUTEX_INITIALIZER(name.lock), \
+	  RW_DEP_MAP_INIT(name) }
+
+#define DECLARE_RWSEM(lockname) \
+	struct rw_semaphore lockname = __RWSEM_INITIALIZER(lockname)
+
+extern void  __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
+				     struct lock_class_key *key);
+
+#define __rt_init_rwsem(sem, name, key)			\
+	do {						\
+		rt_mutex_init(&(sem)->lock);		\
+		__rt_rwsem_init((sem), (name), (key));\
+	} while (0)
+
+#define __init_rwsem(sem, name, key) __rt_init_rwsem(sem, name, key)
+
+# define rt_init_rwsem(sem)				\
+do {							\
+	static struct lock_class_key __key;		\
+							\
+	__rt_init_rwsem((sem), #sem, &__key);		\
+} while (0)
+
+extern void  rt_down_write(struct rw_semaphore *rwsem);
+extern void rt_down_read_nested(struct rw_semaphore *rwsem, int subclass);
+extern void rt_down_write_nested(struct rw_semaphore *rwsem, int subclass);
+extern void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
+		struct lockdep_map *nest);
+extern void  rt_down_read(struct rw_semaphore *rwsem);
+extern int  rt_down_write_trylock(struct rw_semaphore *rwsem);
+extern int  rt_down_read_trylock(struct rw_semaphore *rwsem);
+extern void  rt_up_read(struct rw_semaphore *rwsem);
+extern void  rt_up_write(struct rw_semaphore *rwsem);
+extern void  rt_downgrade_write(struct rw_semaphore *rwsem);
+
+#define init_rwsem(sem)		rt_init_rwsem(sem)
+#define rwsem_is_locked(s)	rt_mutex_is_locked(&(s)->lock)
+
+static inline int rwsem_is_contended(struct rw_semaphore *sem)
+{
+	/* rt_mutex_has_waiters() */
+	return !RB_EMPTY_ROOT(&sem->lock.waiters);
+}
+
+static inline void down_read(struct rw_semaphore *sem)
+{
+	rt_down_read(sem);
+}
+
+static inline int down_read_trylock(struct rw_semaphore *sem)
+{
+	return rt_down_read_trylock(sem);
+}
+
+static inline void down_write(struct rw_semaphore *sem)
+{
+	rt_down_write(sem);
+}
+
+static inline int down_write_trylock(struct rw_semaphore *sem)
+{
+	return rt_down_write_trylock(sem);
+}
+
+static inline void up_read(struct rw_semaphore *sem)
+{
+	rt_up_read(sem);
+}
+
+static inline void up_write(struct rw_semaphore *sem)
+{
+	rt_up_write(sem);
+}
+
+static inline void downgrade_write(struct rw_semaphore *sem)
+{
+	rt_downgrade_write(sem);
+}
+
+static inline void down_read_nested(struct rw_semaphore *sem, int subclass)
+{
+	return rt_down_read_nested(sem, subclass);
+}
+
+static inline void down_write_nested(struct rw_semaphore *sem, int subclass)
+{
+	rt_down_write_nested(sem, subclass);
+}
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static inline void down_write_nest_lock(struct rw_semaphore *sem,
+		struct rw_semaphore *nest_lock)
+{
+	rt_down_write_nested_lock(sem, &nest_lock->dep_map);
+}
+
+#else
+
+static inline void down_write_nest_lock(struct rw_semaphore *sem,
+		struct rw_semaphore *nest_lock)
+{
+	rt_down_write_nested_lock(sem, NULL);
+}
+#endif
+#endif
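
The comment at the top of this header is the key design decision: an RT rwsem is one rtmutex plus a read_depth, so at most one reader holds it and "read locking" is simply taking the mutex, recursively for the owner. A userspace pthread model of that shape (no priority inheritance; link with -lpthread; names are not the kernel's):

#include <pthread.h>
#include <stdio.h>

struct rt_rwsem {
	pthread_mutex_t	lock;
	pthread_t	owner;
	int		read_depth;
};

static void down_read(struct rt_rwsem *s)
{
	/* Only the owner ever sees its own (owner, depth) pair in a
	 * stable state; any other thread falls through and blocks. */
	if (s->read_depth && pthread_equal(s->owner, pthread_self())) {
		s->read_depth++;	/* recursive read-lock by the owner */
		return;
	}
	pthread_mutex_lock(&s->lock);	/* excludes writers AND other readers */
	s->owner = pthread_self();
	s->read_depth = 1;
}

static void up_read(struct rt_rwsem *s)
{
	if (--s->read_depth == 0)
		pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct rt_rwsem sem = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

	down_read(&sem);
	down_read(&sem);	/* same thread: just bumps the depth */
	printf("read_depth=%d\n", sem.read_depth);
	up_read(&sem);
	up_read(&sem);
	return 0;
}
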
Index: linux-3.18.13-rt10-r7s4/include/linux/sched.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/sched.h
+++ linux-3.18.13-rt10-r7s4/include/linux/sched.h
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:29 @ struct sched_param {
 #include <linux/nodemask.h>
 #include <linux/mm_types.h>
 #include <linux/preempt_mask.h>
+#include <asm/kmap_types.h>
 
 #include <asm/page.h>
 #include <asm/ptrace.h>
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:60 @ struct sched_param {
 #include <linux/cred.h>
 #include <linux/llist.h>
 #include <linux/uidgid.h>
+#include <linux/hardirq.h>
 #include <linux/gfp.h>
 #include <linux/magic.h>
 
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:240 @ extern char ___assert_task_state[1 - 2*!
 				 TASK_UNINTERRUPTIBLE | __TASK_STOPPED | \
 				 __TASK_TRACED | EXIT_ZOMBIE | EXIT_DEAD)
 
-#define task_is_traced(task)	((task->state & __TASK_TRACED) != 0)
 #define task_is_stopped(task)	((task->state & __TASK_STOPPED) != 0)
-#define task_is_stopped_or_traced(task)	\
-			((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 #define task_contributes_to_load(task)	\
 				((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
 				 (task->flags & PF_FROZEN) == 0)
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:1236 @ enum perf_event_task_context {
 
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
+	volatile long saved_state;	/* saved state for "spinlock sleepers" */
 	void *stack;
 	atomic_t usage;
 	unsigned int flags;	/* per process flags, defined below */
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:1273 @ struct task_struct {
 #endif
 
 	unsigned int policy;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	int migrate_disable;
+# ifdef CONFIG_SCHED_DEBUG
+	int migrate_disable_atomic;
+# endif
+#endif
 	int nr_cpus_allowed;
 	cpumask_t cpus_allowed;
 
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:1380 @ struct task_struct {
 	struct cputime prev_cputime;
 #endif
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-	seqlock_t vtime_seqlock;
+	raw_spinlock_t vtime_lock;
+	seqcount_t vtime_seq;
 	unsigned long long vtime_snap;
 	enum {
 		VTIME_SLEEPING = 0,
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:1397 @ struct task_struct {
 
 	struct task_cputime cputime_expires;
 	struct list_head cpu_timers[3];
+#ifdef CONFIG_PREEMPT_RT_BASE
+	struct task_struct *posix_timer_list;
+#endif
 
 /* process credentials */
 	const struct cred __rcu *real_cred; /* objective and real subjective task
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:1432 @ struct task_struct {
 /* signal handlers */
 	struct signal_struct *signal;
 	struct sighand_struct *sighand;
+	struct sigqueue *sigqueue_cache;
 
 	sigset_t blocked, real_blocked;
 	sigset_t saved_sigmask;	/* restored if set_restore_sigmask() was used */
 	struct sigpending pending;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	/* TODO: move me into ->restart_block ? */
+	struct siginfo forced_info;
+#endif
 
 	unsigned long sas_ss_sp;
 	size_t sas_ss_size;
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:1478 @ struct task_struct {
 	/* mutex deadlock detection */
 	struct mutex_waiter *blocked_on;
 #endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+	int pagefault_disabled;
+#endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	unsigned int irq_events;
 	unsigned long hardirq_enable_ip;
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:1665 @ struct task_struct {
 	unsigned long trace;
 	/* bitmask and counter of trace recursion */
 	unsigned long trace_recursion;
+#ifdef CONFIG_WAKEUP_LATENCY_HIST
+	u64 preempt_timestamp_hist;
+#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
+	long timer_offset;
+#endif
+#endif
 #endif /* CONFIG_TRACING */
 #ifdef CONFIG_MEMCG /* memcg uses this to do batch job */
 	unsigned int memcg_kmem_skip_account;
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:1688 @ struct task_struct {
 	unsigned int	sequential_io;
 	unsigned int	sequential_io_avg;
 #endif
+#ifdef CONFIG_PREEMPT_RT_BASE
+	struct rcu_head put_rcu;
+	int softirq_nestcnt;
+	unsigned int softirqs_raised;
+#endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+# if defined CONFIG_HIGHMEM || defined CONFIG_X86_32
+	int kmap_idx;
+	pte_t kmap_pte[KM_TYPE_NR];
+# endif
+#endif
 };
 
-/* Future-safe accessor for struct task_struct's cpus_allowed. */
-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
-
 #define TNF_MIGRATED	0x01
 #define TNF_NO_GROUP	0x02
 #define TNF_SHARED	0x04
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:1735 @ static inline bool should_numa_migrate_m
 }
 #endif
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline bool cur_pf_disabled(void) { return current->pagefault_disabled; }
+#else
+static inline bool cur_pf_disabled(void) { return false; }
+#endif
+
+static inline bool pagefault_disabled(void)
+{
+	return in_atomic() || cur_pf_disabled();
+}
+
 static inline struct pid *task_pid(struct task_struct *task)
 {
 	return task->pids[PIDTYPE_PID].pid;
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:1899 @ extern struct pid *cad_pid;
 extern void free_task(struct task_struct *tsk);
 #define get_task_struct(tsk) do { atomic_inc(&(tsk)->usage); } while(0)
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+extern void __put_task_struct_cb(struct rcu_head *rhp);
+
+static inline void put_task_struct(struct task_struct *t)
+{
+	if (atomic_dec_and_test(&t->usage))
+		call_rcu(&t->put_rcu, __put_task_struct_cb);
+}
+#else
 extern void __put_task_struct(struct task_struct *t);
 
 static inline void put_task_struct(struct task_struct *t)
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:1915 @ static inline void put_task_struct(struc
 	if (atomic_dec_and_test(&t->usage))
 		__put_task_struct(t);
 }
+#endif
 
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
 extern void task_cputime(struct task_struct *t,
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:1954 @ extern void thread_group_cputime_adjuste
 /*
  * Per process flags
  */
+#define PF_IN_SOFTIRQ	0x00000001	/* Task is serving softirq */
 #define PF_EXITING	0x00000004	/* getting shut down */
 #define PF_EXITPIDONE	0x00000008	/* pi exit done on shut down */
 #define PF_VCPU		0x00000010	/* I'm a virtual CPU */
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:2115 @ extern void do_set_cpus_allowed(struct t
 
 extern int set_cpus_allowed_ptr(struct task_struct *p,
 				const struct cpumask *new_mask);
+int migrate_me(void);
+void tell_sched_cpu_down_begin(int cpu);
+void tell_sched_cpu_down_done(int cpu);
+
 #else
 static inline void do_set_cpus_allowed(struct task_struct *p,
 				      const struct cpumask *new_mask)
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:2131 @ static inline int set_cpus_allowed_ptr(s
 		return -EINVAL;
 	return 0;
 }
+static inline int migrate_me(void) { return 0; }
+static inline void tell_sched_cpu_down_begin(int cpu) { }
+static inline void tell_sched_cpu_down_done(int cpu) { }
 #endif
 
 #ifdef CONFIG_NO_HZ_COMMON
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:2354 @ extern void xtime_update(unsigned long t
 
 extern int wake_up_state(struct task_struct *tsk, unsigned int state);
 extern int wake_up_process(struct task_struct *tsk);
+extern int wake_up_lock_sleeper(struct task_struct * tsk);
 extern void wake_up_new_task(struct task_struct *tsk);
 #ifdef CONFIG_SMP
  extern void kick_process(struct task_struct *tsk);
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:2471 @ extern struct mm_struct * mm_alloc(void)
 
 /* mmdrop drops the mm and the page tables */
 extern void __mmdrop(struct mm_struct *);
+
 static inline void mmdrop(struct mm_struct * mm)
 {
 	if (unlikely(atomic_dec_and_test(&mm->mm_count)))
 		__mmdrop(mm);
 }
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+extern void __mmdrop_delayed(struct rcu_head *rhp);
+static inline void mmdrop_delayed(struct mm_struct *mm)
+{
+	if (atomic_dec_and_test(&mm->mm_count))
+		call_rcu(&mm->delayed_drop, __mmdrop_delayed);
+}
+#else
+# define mmdrop_delayed(mm)	mmdrop(mm)
+#endif
+
 /* mmput gets rid of the mappings and all user-space */
 extern void mmput(struct mm_struct *);
 /* Grab a reference to a task's mm, if it is not already going away */
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:2796 @ static inline int test_tsk_need_resched(
 	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED));
 }
 
+#ifdef CONFIG_PREEMPT_LAZY
+static inline void set_tsk_need_resched_lazy(struct task_struct *tsk)
+{
+	set_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
+}
+
+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk)
+{
+	clear_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY);
+}
+
+static inline int test_tsk_need_resched_lazy(struct task_struct *tsk)
+{
+	return unlikely(test_tsk_thread_flag(tsk,TIF_NEED_RESCHED_LAZY));
+}
+
+static inline int need_resched_lazy(void)
+{
+	return test_thread_flag(TIF_NEED_RESCHED_LAZY);
+}
+
+static inline int need_resched_now(void)
+{
+	return test_thread_flag(TIF_NEED_RESCHED);
+}
+
+#else
+static inline void clear_tsk_need_resched_lazy(struct task_struct *tsk) { }
+static inline int need_resched_lazy(void) { return 0; }
+
+static inline int need_resched_now(void)
+{
+	return test_thread_flag(TIF_NEED_RESCHED);
+}
+
+#endif
+
 static inline int restart_syscall(void)
 {
 	set_tsk_thread_flag(current, TIF_SIGPENDING);
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:2864 @ static inline int signal_pending_state(l
 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
 }
 
+static inline bool __task_is_stopped_or_traced(struct task_struct *task)
+{
+	if (task->state & (__TASK_STOPPED | __TASK_TRACED))
+		return true;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (task->saved_state & (__TASK_STOPPED | __TASK_TRACED))
+		return true;
+#endif
+	return false;
+}
+
+static inline bool task_is_stopped_or_traced(struct task_struct *task)
+{
+	bool traced_stopped;
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	traced_stopped = __task_is_stopped_or_traced(task);
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+#else
+	traced_stopped = __task_is_stopped_or_traced(task);
+#endif
+	return traced_stopped;
+}
+
+static inline bool task_is_traced(struct task_struct *task)
+{
+	bool traced = false;
+
+	if (task->state & __TASK_TRACED)
+		return true;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	/* in case the task is sleeping on tasklist_lock */
+	raw_spin_lock_irq(&task->pi_lock);
+	if (task->state & __TASK_TRACED)
+		traced = true;
+	else if (task->saved_state & __TASK_TRACED)
+		traced = true;
+	raw_spin_unlock_irq(&task->pi_lock);
+#endif
+	return traced;
+}
+
 /*
  * cond_resched() and cond_resched_lock(): latency reduction via
  * explicit rescheduling in places that are safe. The return
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:2925 @ extern int _cond_resched(void);
 
 extern int __cond_resched_lock(spinlock_t *lock);
 
-#ifdef CONFIG_PREEMPT_COUNT
+#if defined(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT_FULL)
 #define PREEMPT_LOCK_OFFSET	PREEMPT_OFFSET
 #else
 #define PREEMPT_LOCK_OFFSET	0
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:2936 @ extern int __cond_resched_lock(spinlock_
 	__cond_resched_lock(lock);				\
 })
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 extern int __cond_resched_softirq(void);
 
 #define cond_resched_softirq() ({					\
 	__might_sleep(__FILE__, __LINE__, SOFTIRQ_DISABLE_OFFSET);	\
 	__cond_resched_softirq();					\
 })
+#else
+# define cond_resched_softirq()		cond_resched()
+#endif
 
 static inline void cond_resched_rcu(void)
 {
@ linux-3.18.13-rt10-r7s4/include/linux/sched.h:3112 @ static inline void set_task_cpu(struct t
 
 #endif /* CONFIG_SMP */
 
+static inline int __migrate_disabled(struct task_struct *p)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+	return p->migrate_disable;
+#else
+	return 0;
+#endif
+}
+
+/* Future-safe accessor for struct task_struct's cpus_allowed. */
+static inline const struct cpumask *tsk_cpus_allowed(struct task_struct *p)
+{
+#ifdef CONFIG_PREEMPT_RT_FULL
+	if (p->migrate_disable)
+		return cpumask_of(task_cpu(p));
+#endif
+
+	return &p->cpus_allowed;
+}
+
 extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
 extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 
Index: linux-3.18.13-rt10-r7s4/include/linux/seqlock.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/seqlock.h
+++ linux-3.18.13-rt10-r7s4/include/linux/seqlock.h
@ linux-3.18.13-rt10-r7s4/include/linux/seqlock.h:222 @ static inline int read_seqcount_retry(co
 	return __read_seqcount_retry(s, start);
 }
 
-
-
-static inline void raw_write_seqcount_begin(seqcount_t *s)
+static inline void __raw_write_seqcount_begin(seqcount_t *s)
 {
 	s->sequence++;
 	smp_wmb();
 }
 
-static inline void raw_write_seqcount_end(seqcount_t *s)
+static inline void raw_write_seqcount_begin(seqcount_t *s)
+{
+	preempt_disable_rt();
+	__raw_write_seqcount_begin(s);
+}
+
+static inline void __raw_write_seqcount_end(seqcount_t *s)
 {
 	smp_wmb();
 	s->sequence++;
 }
 
+static inline void raw_write_seqcount_end(seqcount_t *s)
+{
+	__raw_write_seqcount_end(s);
+	preempt_enable_rt();
+}
+
 /*
  * raw_write_seqcount_latch - redirect readers to even/odd copy
  * @s: pointer to seqcount_t
@ linux-3.18.13-rt10-r7s4/include/linux/seqlock.h:318 @ typedef struct {
 /*
  * Read side functions for starting and finalizing a read side section.
  */
+#ifndef CONFIG_PREEMPT_RT_FULL
 static inline unsigned read_seqbegin(const seqlock_t *sl)
 {
 	return read_seqcount_begin(&sl->seqcount);
 }
+#else
+/*
+ * Starvation safe read side for RT
+ */
+static inline unsigned read_seqbegin(seqlock_t *sl)
+{
+	unsigned ret;
+
+repeat:
+	ret = ACCESS_ONCE(sl->seqcount.sequence);
+	if (unlikely(ret & 1)) {
+		/*
+		 * Take the lock and let the writer proceed (i.e. evtl
+		 * boost it), otherwise we could loop here forever.
+		 */
+		spin_unlock_wait(&sl->lock);
+		goto repeat;
+	}
+	return ret;
+}
+#endif
 
 static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
 {
@ linux-3.18.13-rt10-r7s4/include/linux/seqlock.h:358 @ static inline unsigned read_seqretry(con
 static inline void write_seqlock(seqlock_t *sl)
 {
 	spin_lock(&sl->lock);
-	write_seqcount_begin(&sl->seqcount);
+	__raw_write_seqcount_begin(&sl->seqcount);
 }
 
 static inline void write_sequnlock(seqlock_t *sl)
 {
-	write_seqcount_end(&sl->seqcount);
+	__raw_write_seqcount_end(&sl->seqcount);
 	spin_unlock(&sl->lock);
 }
 
 static inline void write_seqlock_bh(seqlock_t *sl)
 {
 	spin_lock_bh(&sl->lock);
-	write_seqcount_begin(&sl->seqcount);
+	__raw_write_seqcount_begin(&sl->seqcount);
 }
 
 static inline void write_sequnlock_bh(seqlock_t *sl)
 {
-	write_seqcount_end(&sl->seqcount);
+	__raw_write_seqcount_end(&sl->seqcount);
 	spin_unlock_bh(&sl->lock);
 }
 
 static inline void write_seqlock_irq(seqlock_t *sl)
 {
 	spin_lock_irq(&sl->lock);
-	write_seqcount_begin(&sl->seqcount);
+	__raw_write_seqcount_begin(&sl->seqcount);
 }
 
 static inline void write_sequnlock_irq(seqlock_t *sl)
 {
-	write_seqcount_end(&sl->seqcount);
+	__raw_write_seqcount_end(&sl->seqcount);
 	spin_unlock_irq(&sl->lock);
 }
 
@ linux-3.18.13-rt10-r7s4/include/linux/seqlock.h:396 @ static inline unsigned long __write_seql
 	unsigned long flags;
 
 	spin_lock_irqsave(&sl->lock, flags);
-	write_seqcount_begin(&sl->seqcount);
+	__raw_write_seqcount_begin(&sl->seqcount);
 	return flags;
 }
 
@ linux-3.18.13-rt10-r7s4/include/linux/seqlock.h:406 @ static inline unsigned long __write_seql
 static inline void
 write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
 {
-	write_seqcount_end(&sl->seqcount);
+	__raw_write_seqcount_end(&sl->seqcount);
 	spin_unlock_irqrestore(&sl->lock, flags);
 }
 
Index: linux-3.18.13-rt10-r7s4/include/linux/signal.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/signal.h
+++ linux-3.18.13-rt10-r7s4/include/linux/signal.h
@ linux-3.18.13-rt10-r7s4/include/linux/signal.h:221 @ static inline void init_sigpending(struc
 }
 
 extern void flush_sigqueue(struct sigpending *queue);
+extern void flush_task_sigqueue(struct task_struct *tsk);
 
 /* Test if 'sig' is valid signal. Use this instead of testing _NSIG directly */
 static inline int valid_signal(unsigned long sig)
Index: linux-3.18.13-rt10-r7s4/include/linux/skbuff.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/skbuff.h
+++ linux-3.18.13-rt10-r7s4/include/linux/skbuff.h
@ linux-3.18.13-rt10-r7s4/include/linux/skbuff.h:175 @ struct sk_buff_head {
 
 	__u32		qlen;
 	spinlock_t	lock;
+	raw_spinlock_t	raw_lock;
 };
 
 struct sk_buff;
@ linux-3.18.13-rt10-r7s4/include/linux/skbuff.h:1331 @ static inline void skb_queue_head_init(s
 	__skb_queue_head_init(list);
 }
 
+static inline void skb_queue_head_init_raw(struct sk_buff_head *list)
+{
+	raw_spin_lock_init(&list->raw_lock);
+	__skb_queue_head_init(list);
+}
+
 static inline void skb_queue_head_init_class(struct sk_buff_head *list,
 		struct lock_class_key *class)
 {
Index: linux-3.18.13-rt10-r7s4/include/linux/smp.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/smp.h
+++ linux-3.18.13-rt10-r7s4/include/linux/smp.h
@ linux-3.18.13-rt10-r7s4/include/linux/smp.h:181 @ static inline void wake_up_all_idle_cpus
 #define get_cpu()		({ preempt_disable(); smp_processor_id(); })
 #define put_cpu()		preempt_enable()
 
+#define get_cpu_light()		({ migrate_disable(); smp_processor_id(); })
+#define put_cpu_light()		migrate_enable()
+
 /*
  * Callback to arch code if there's nosmp or maxcpus=0 on the
  * boot command line:
Index: linux-3.18.13-rt10-r7s4/include/linux/spinlock.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/spinlock.h
+++ linux-3.18.13-rt10-r7s4/include/linux/spinlock.h
@ linux-3.18.13-rt10-r7s4/include/linux/spinlock.h:281 @ static inline void do_raw_spin_unlock(ra
 #define raw_spin_can_lock(lock)	(!raw_spin_is_locked(lock))
 
 /* Include rwlock functions */
-#include <linux/rwlock.h>
+#ifdef CONFIG_PREEMPT_RT_FULL
+# include <linux/rwlock_rt.h>
+#else
+# include <linux/rwlock.h>
+#endif
 
 /*
  * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
@ linux-3.18.13-rt10-r7s4/include/linux/spinlock.h:296 @ static inline void do_raw_spin_unlock(ra
 # include <linux/spinlock_api_up.h>
 #endif
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+# include <linux/spinlock_rt.h>
+#else /* PREEMPT_RT_FULL */
+
 /*
  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  */
@ linux-3.18.13-rt10-r7s4/include/linux/spinlock.h:429 @ extern int _atomic_dec_and_lock(atomic_t
 #define atomic_dec_and_lock(atomic, lock) \
 		__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
 
+#endif /* !PREEMPT_RT_FULL */
+
 #endif /* __LINUX_SPINLOCK_H */
Index: linux-3.18.13-rt10-r7s4/include/linux/spinlock_api_smp.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/spinlock_api_smp.h
+++ linux-3.18.13-rt10-r7s4/include/linux/spinlock_api_smp.h
@ linux-3.18.13-rt10-r7s4/include/linux/spinlock_api_smp.h:190 @ static inline int __raw_spin_trylock_bh(
 	return 0;
 }
 
-#include <linux/rwlock_api_smp.h>
+#ifndef CONFIG_PREEMPT_RT_FULL
+# include <linux/rwlock_api_smp.h>
+#endif
 
 #endif /* __LINUX_SPINLOCK_API_SMP_H */
Index: linux-3.18.13-rt10-r7s4/include/linux/spinlock_rt.h
===================================================================
--- /dev/null
+++ linux-3.18.13-rt10-r7s4/include/linux/spinlock_rt.h
@ linux-3.18.13-rt10-r7s4/include/linux/spinlock_rt.h:4 @
+#ifndef __LINUX_SPINLOCK_RT_H
+#define __LINUX_SPINLOCK_RT_H
+
+#ifndef __LINUX_SPINLOCK_H
+#error Do not include directly. Use spinlock.h
+#endif
+
+#include <linux/bug.h>
+
+extern void
+__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key);
+
+#define spin_lock_init(slock)				\
+do {							\
+	static struct lock_class_key __key;		\
+							\
+	rt_mutex_init(&(slock)->lock);			\
+	__rt_spin_lock_init(slock, #slock, &__key);	\
+} while (0)
+
+extern void __lockfunc rt_spin_lock(spinlock_t *lock);
+extern unsigned long __lockfunc rt_spin_lock_trace_flags(spinlock_t *lock);
+extern void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass);
+extern void __lockfunc rt_spin_unlock(spinlock_t *lock);
+extern void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock);
+extern void __lockfunc rt_spin_unlock_wait(spinlock_t *lock);
+extern int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags);
+extern int __lockfunc rt_spin_trylock_bh(spinlock_t *lock);
+extern int __lockfunc rt_spin_trylock(spinlock_t *lock);
+extern int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock);
+
+/*
+ * lockdep-less calls, for derived types like rwlock:
+ * (for trylock they can use rt_mutex_trylock() directly.
+ */
+extern void __lockfunc __rt_spin_lock(struct rt_mutex *lock);
+extern void __lockfunc __rt_spin_unlock(struct rt_mutex *lock);
+extern int __lockfunc __rt_spin_trylock(struct rt_mutex *lock);
+
+#define spin_lock(lock)				\
+	do {					\
+		migrate_disable();		\
+		rt_spin_lock(lock);		\
+	} while (0)
+
+#define spin_lock_bh(lock)			\
+	do {					\
+		local_bh_disable();		\
+		migrate_disable();		\
+		rt_spin_lock(lock);		\
+	} while (0)
+
+#define spin_lock_irq(lock)		spin_lock(lock)
+
+#define spin_do_trylock(lock)		__cond_lock(lock, rt_spin_trylock(lock))
+
+#define spin_trylock(lock)			\
+({						\
+	int __locked;				\
+	migrate_disable();			\
+	__locked = spin_do_trylock(lock);	\
+	if (!__locked)				\
+		migrate_enable();		\
+	__locked;				\
+})
+
+#ifdef CONFIG_LOCKDEP
+# define spin_lock_nested(lock, subclass)		\
+	do {						\
+		migrate_disable();			\
+		rt_spin_lock_nested(lock, subclass);	\
+	} while (0)
+
+# define spin_lock_irqsave_nested(lock, flags, subclass) \
+	do {						 \
+		typecheck(unsigned long, flags);	 \
+		flags = 0;				 \
+		migrate_disable();			 \
+		rt_spin_lock_nested(lock, subclass);	 \
+	} while (0)
+#else
+# define spin_lock_nested(lock, subclass)	spin_lock(lock)
+
+# define spin_lock_irqsave_nested(lock, flags, subclass) \
+	do {						 \
+		typecheck(unsigned long, flags);	 \
+		flags = 0;				 \
+		spin_lock(lock);			 \
+	} while (0)
+#endif
+
+#define spin_lock_irqsave(lock, flags)			 \
+	do {						 \
+		typecheck(unsigned long, flags);	 \
+		flags = 0;				 \
+		spin_lock(lock);			 \
+	} while (0)
+
+static inline unsigned long spin_lock_trace_flags(spinlock_t *lock)
+{
+	unsigned long flags = 0;
+#ifdef CONFIG_TRACE_IRQFLAGS
+	flags = rt_spin_lock_trace_flags(lock);
+#else
+	spin_lock(lock); /* lock_local */
+#endif
+	return flags;
+}
+
+/* FIXME: we need rt_spin_lock_nest_lock */
+#define spin_lock_nest_lock(lock, nest_lock) spin_lock_nested(lock, 0)
+
+#define spin_unlock(lock)				\
+	do {						\
+		rt_spin_unlock(lock);			\
+		migrate_enable();			\
+	} while (0)
+
+#define spin_unlock_bh(lock)				\
+	do {						\
+		rt_spin_unlock(lock);			\
+		migrate_enable();			\
+		local_bh_enable();			\
+	} while (0)
+
+#define spin_unlock_irq(lock)		spin_unlock(lock)
+
+#define spin_unlock_irqrestore(lock, flags)		\
+	do {						\
+		typecheck(unsigned long, flags);	\
+		(void) flags;				\
+		spin_unlock(lock);			\
+	} while (0)
+
+#define spin_trylock_bh(lock)	__cond_lock(lock, rt_spin_trylock_bh(lock))
+#define spin_trylock_irq(lock)	spin_trylock(lock)
+
+#define spin_trylock_irqsave(lock, flags)	\
+	rt_spin_trylock_irqsave(lock, &(flags))
+
+#define spin_unlock_wait(lock)		rt_spin_unlock_wait(lock)
+
+#ifdef CONFIG_GENERIC_LOCKBREAK
+# define spin_is_contended(lock)	((lock)->break_lock)
+#else
+# define spin_is_contended(lock)	(((void)(lock), 0))
+#endif
+
+static inline int spin_can_lock(spinlock_t *lock)
+{
+	return !rt_mutex_is_locked(&lock->lock);
+}
+
+static inline int spin_is_locked(spinlock_t *lock)
+{
+	return rt_mutex_is_locked(&lock->lock);
+}
+
+static inline void assert_spin_locked(spinlock_t *lock)
+{
+	BUG_ON(!spin_is_locked(lock));
+}
+
+#define atomic_dec_and_lock(atomic, lock) \
+	atomic_dec_and_spin_lock(atomic, lock)
+
+#endif
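
Note the spin_trylock() pattern in this header: migration is disabled before the attempt and re-enabled on failure, so a successful trylock leaves the caller pinned exactly as spin_lock() would. A stub model of that rollback (all names hypothetical):

#include <stdio.h>

static int migrate_depth;
static int lock_taken;

static void migrate_disable(void) { migrate_depth++; }
static void migrate_enable(void)  { migrate_depth--; }
static int rt_spin_trylock(void)  { return !lock_taken ? (lock_taken = 1) : 0; }

static int spin_trylock_model(void)
{
	int locked;

	migrate_disable();
	locked = rt_spin_trylock();
	if (!locked)
		migrate_enable();	/* failure must not leave us pinned */
	return locked;
}

int main(void)
{
	printf("first:  %d (depth %d)\n", spin_trylock_model(), migrate_depth);
	printf("second: %d (depth %d)\n", spin_trylock_model(), migrate_depth);
	return 0;
}
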
Index: linux-3.18.13-rt10-r7s4/include/linux/spinlock_types.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/spinlock_types.h
+++ linux-3.18.13-rt10-r7s4/include/linux/spinlock_types.h
@ linux-3.18.13-rt10-r7s4/include/linux/spinlock_types.h:12 @
  * Released under the General Public License (GPL).
  */
 
-#if defined(CONFIG_SMP)
-# include <asm/spinlock_types.h>
-#else
-# include <linux/spinlock_types_up.h>
-#endif
-
-#include <linux/lockdep.h>
-
-typedef struct raw_spinlock {
-	arch_spinlock_t raw_lock;
-#ifdef CONFIG_GENERIC_LOCKBREAK
-	unsigned int break_lock;
-#endif
-#ifdef CONFIG_DEBUG_SPINLOCK
-	unsigned int magic, owner_cpu;
-	void *owner;
-#endif
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	struct lockdep_map dep_map;
-#endif
-} raw_spinlock_t;
-
-#define SPINLOCK_MAGIC		0xdead4ead
-
-#define SPINLOCK_OWNER_INIT	((void *)-1L)
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define SPIN_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
-#else
-# define SPIN_DEP_MAP_INIT(lockname)
-#endif
+#include <linux/spinlock_types_raw.h>
 
-#ifdef CONFIG_DEBUG_SPINLOCK
-# define SPIN_DEBUG_INIT(lockname)		\
-	.magic = SPINLOCK_MAGIC,		\
-	.owner_cpu = -1,			\
-	.owner = SPINLOCK_OWNER_INIT,
+#ifndef CONFIG_PREEMPT_RT_FULL
+# include <linux/spinlock_types_nort.h>
+# include <linux/rwlock_types.h>
 #else
-# define SPIN_DEBUG_INIT(lockname)
+# include <linux/rtmutex.h>
+# include <linux/spinlock_types_rt.h>
+# include <linux/rwlock_types_rt.h>
 #endif
 
-#define __RAW_SPIN_LOCK_INITIALIZER(lockname)	\
-	{					\
-	.raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,	\
-	SPIN_DEBUG_INIT(lockname)		\
-	SPIN_DEP_MAP_INIT(lockname) }
-
-#define __RAW_SPIN_LOCK_UNLOCKED(lockname)	\
-	(raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
-
-#define DEFINE_RAW_SPINLOCK(x)	raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
-
-typedef struct spinlock {
-	union {
-		struct raw_spinlock rlock;
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
-		struct {
-			u8 __padding[LOCK_PADSIZE];
-			struct lockdep_map dep_map;
-		};
-#endif
-	};
-} spinlock_t;
-
-#define __SPIN_LOCK_INITIALIZER(lockname) \
-	{ { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
-
-#define __SPIN_LOCK_UNLOCKED(lockname) \
-	(spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
-
-#define DEFINE_SPINLOCK(x)	spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
-
-#include <linux/rwlock_types.h>
-
 #endif /* __LINUX_SPINLOCK_TYPES_H */
Index: linux-3.18.13-rt10-r7s4/include/linux/spinlock_types_nort.h
===================================================================
--- /dev/null
+++ linux-3.18.13-rt10-r7s4/include/linux/spinlock_types_nort.h
@ linux-3.18.13-rt10-r7s4/include/linux/spinlock_types_nort.h:4 @
+#ifndef __LINUX_SPINLOCK_TYPES_NORT_H
+#define __LINUX_SPINLOCK_TYPES_NORT_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+#error "Do not include directly. Include spinlock_types.h instead"
+#endif
+
+/*
+ * The non-RT version maps spinlocks to raw_spinlocks
+ */
+typedef struct spinlock {
+	union {
+		struct raw_spinlock rlock;
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define LOCK_PADSIZE (offsetof(struct raw_spinlock, dep_map))
+		struct {
+			u8 __padding[LOCK_PADSIZE];
+			struct lockdep_map dep_map;
+		};
+#endif
+	};
+} spinlock_t;
+
+#define __SPIN_LOCK_INITIALIZER(lockname) \
+	{ { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+
+#define __SPIN_LOCK_UNLOCKED(lockname) \
+	(spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_SPINLOCK(x)	spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
+
+#endif
Index: linux-3.18.13-rt10-r7s4/include/linux/spinlock_types_raw.h
===================================================================
--- /dev/null
+++ linux-3.18.13-rt10-r7s4/include/linux/spinlock_types_raw.h
@@ -0,0 +1,56 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
+#define __LINUX_SPINLOCK_TYPES_RAW_H
+
+#if defined(CONFIG_SMP)
+# include <asm/spinlock_types.h>
+#else
+# include <linux/spinlock_types_up.h>
+#endif
+
+#include <linux/lockdep.h>
+
+typedef struct raw_spinlock {
+	arch_spinlock_t raw_lock;
+#ifdef CONFIG_GENERIC_LOCKBREAK
+	unsigned int break_lock;
+#endif
+#ifdef CONFIG_DEBUG_SPINLOCK
+	unsigned int magic, owner_cpu;
+	void *owner;
+#endif
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map dep_map;
+#endif
+} raw_spinlock_t;
+
+#define SPINLOCK_MAGIC		0xdead4ead
+
+#define SPINLOCK_OWNER_INIT	((void *)-1L)
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+# define SPIN_DEP_MAP_INIT(lockname)	.dep_map = { .name = #lockname }
+#else
+# define SPIN_DEP_MAP_INIT(lockname)
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+# define SPIN_DEBUG_INIT(lockname)		\
+	.magic = SPINLOCK_MAGIC,		\
+	.owner_cpu = -1,			\
+	.owner = SPINLOCK_OWNER_INIT,
+#else
+# define SPIN_DEBUG_INIT(lockname)
+#endif
+
+#define __RAW_SPIN_LOCK_INITIALIZER(lockname)	\
+	{					\
+	.raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,	\
+	SPIN_DEBUG_INIT(lockname)		\
+	SPIN_DEP_MAP_INIT(lockname) }
+
+#define __RAW_SPIN_LOCK_UNLOCKED(lockname)	\
+	(raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
+
+#define DEFINE_RAW_SPINLOCK(x)	raw_spinlock_t x = __RAW_SPIN_LOCK_UNLOCKED(x)
+
+#endif
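
raw_spinlock_t keeps true spinning semantics on both configurations; on RT it
is the type for code that must never sleep (low-level scheduler, timer and
interrupt paths). A minimal usage sketch:

    #include <linux/spinlock.h>
    #include <linux/io.h>

    static DEFINE_RAW_SPINLOCK(hw_lock);

    /* Must not sleep even on RT, hence a raw spinlock. */
    static void hw_write_reg(void __iomem *reg, u32 val)
    {
            unsigned long flags;

            raw_spin_lock_irqsave(&hw_lock, flags);
            writel(val, reg);
            raw_spin_unlock_irqrestore(&hw_lock, flags);
    }
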
Index: linux-3.18.13-rt10-r7s4/include/linux/spinlock_types_rt.h
===================================================================
--- /dev/null
+++ linux-3.18.13-rt10-r7s4/include/linux/spinlock_types_rt.h
@@ -0,0 +1,51 @@
+#ifndef __LINUX_SPINLOCK_TYPES_RT_H
+#define __LINUX_SPINLOCK_TYPES_RT_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+#error "Do not include directly. Include spinlock_types.h instead"
+#endif
+
+#include <linux/cache.h>
+
+/*
+ * PREEMPT_RT: spinlocks - an RT mutex plus lock-break field:
+ */
+typedef struct spinlock {
+	struct rt_mutex		lock;
+	unsigned int		break_lock;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	struct lockdep_map	dep_map;
+#endif
+} spinlock_t;
+
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+# define __RT_SPIN_INITIALIZER(name) \
+	{ \
+	.wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
+	.save_state = 1, \
+	.file = __FILE__, \
+	.line = __LINE__ , \
+	}
+#else
+# define __RT_SPIN_INITIALIZER(name) \
+	{								\
+	.wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),		\
+	.save_state = 1, \
+	}
+#endif
+
+/*
+.wait_list = PLIST_HEAD_INIT_RAW((name).lock.wait_list, (name).lock.wait_lock)
+*/
+
+#define __SPIN_LOCK_UNLOCKED(name)			\
+	{ .lock = __RT_SPIN_INITIALIZER(name.lock),		\
+	  SPIN_DEP_MAP_INIT(name) }
+
+#define __DEFINE_SPINLOCK(name) \
+	spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
+
+#define DEFINE_SPINLOCK(name) \
+	spinlock_t name __cacheline_aligned_in_smp = __SPIN_LOCK_UNLOCKED(name)
+
+#endif
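
With this type, spin_lock() sections become preemptible and may block on RT,
which is also why DEFINE_SPINLOCK() gains cacheline alignment here. Callers
keep the usual API; what changes is the constraint that such a lock may not be
taken from truly atomic context. Sketch:

    static DEFINE_SPINLOCK(q_lock);  /* rt_mutex-backed on PREEMPT_RT_FULL */

    static void enqueue_item(struct list_head *q, struct list_head *item)
    {
            /* may sleep on RT: don't call with preemption or irqs hard-disabled */
            spin_lock(&q_lock);
            list_add_tail(item, q);
            spin_unlock(&q_lock);
    }
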
Index: linux-3.18.13-rt10-r7s4/include/linux/srcu.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/srcu.h
+++ linux-3.18.13-rt10-r7s4/include/linux/srcu.h
@@ -87 +87 @@ int init_srcu_struct(struct srcu_struct
 
 void process_srcu(struct work_struct *work);
 
-#define __SRCU_STRUCT_INIT(name)					\
+#define __SRCU_STRUCT_INIT(name, pcpu_name)				\
 	{								\
 		.completed = -300,					\
-		.per_cpu_ref = &name##_srcu_array,			\
+		.per_cpu_ref = &pcpu_name,				\
 		.queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock),	\
 		.running = false,					\
 		.batch_queue = RCU_BATCH_INIT(name.batch_queue),	\
@@ -107 +107 @@ void process_srcu(struct work_struct *wo
  */
 #define DEFINE_SRCU(name)						\
 	static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
-	struct srcu_struct name = __SRCU_STRUCT_INIT(name);
+	struct srcu_struct name = __SRCU_STRUCT_INIT(name, name##_srcu_array);
 
 #define DEFINE_STATIC_SRCU(name)					\
 	static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
-	static struct srcu_struct name = __SRCU_STRUCT_INIT(name);
+	static struct srcu_struct name = __SRCU_STRUCT_INIT(\
+		name, name##_srcu_array);
 
 /**
  * call_srcu() - Queue a callback for invocation after an SRCU grace period
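
Passing the per-CPU array in as pcpu_name stops __SRCU_STRUCT_INIT() from
pasting the ##_srcu_array suffix itself; the reason appears to be keeping the
macro usable when the caller controls the array's name. Rough expansion of
DEFINE_SRCU(foo) after this change (sketch, remaining fields elided):

    static DEFINE_PER_CPU(struct srcu_struct_array, foo_srcu_array);
    struct srcu_struct foo = {
            .completed   = -300,
            .per_cpu_ref = &foo_srcu_array,  /* now handed in explicitly */
            /* .queue_lock, .running, .batch_* as in __SRCU_STRUCT_INIT() */
    };
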
Index: linux-3.18.13-rt10-r7s4/include/linux/swap.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/swap.h
+++ linux-3.18.13-rt10-r7s4/include/linux/swap.h
@@ -14 +14 @@
 #include <linux/fs.h>
 #include <linux/atomic.h>
 #include <linux/page-flags.h>
+#include <linux/locallock.h>
 #include <asm/page.h>
 
 struct notifier_block;
@@ -264 +264 @@ struct swap_info_struct {
 void *workingset_eviction(struct address_space *mapping, struct page *page);
 bool workingset_refault(void *shadow);
 void workingset_activation(struct page *page);
-extern struct list_lru workingset_shadow_nodes;
+extern struct list_lru __workingset_shadow_nodes;
+DECLARE_LOCAL_IRQ_LOCK(workingset_shadow_lock);
 
 static inline unsigned int workingset_node_pages(struct radix_tree_node *node)
 {
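
The rename plus DECLARE_LOCAL_IRQ_LOCK pairs the shadow-node LRU with a local
lock, because disabling interrupts no longer serializes these paths on RT. A
call-site sketch, assuming the locallock API introduced elsewhere in this
series (local_lock_irq()/local_unlock_irq()):

    #include <linux/locallock.h>

    static void shadow_node_track(struct radix_tree_node *node)
    {
            /* per-CPU lock on RT, plain local_irq_disable() otherwise */
            local_lock_irq(workingset_shadow_lock);
            list_lru_add(&__workingset_shadow_nodes, &node->private_list);
            local_unlock_irq(workingset_shadow_lock);
    }
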
Index: linux-3.18.13-rt10-r7s4/include/linux/sysctl.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/sysctl.h
+++ linux-3.18.13-rt10-r7s4/include/linux/sysctl.h
@@ -28 +28 @@
 #include <linux/rcupdate.h>
 #include <linux/wait.h>
 #include <linux/rbtree.h>
+#include <linux/atomic.h>
 #include <uapi/linux/sysctl.h>
 
 /* For the /proc/sys support */
Index: linux-3.18.13-rt10-r7s4/include/linux/thread_info.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/thread_info.h
+++ linux-3.18.13-rt10-r7s4/include/linux/thread_info.h
@@ -105 +105 @@ static inline int test_ti_thread_flag(st
 #define test_thread_flag(flag) \
 	test_ti_thread_flag(current_thread_info(), flag)
 
-#define tif_need_resched() test_thread_flag(TIF_NEED_RESCHED)
+#ifdef CONFIG_PREEMPT_LAZY
+#define tif_need_resched()	(test_thread_flag(TIF_NEED_RESCHED) || \
+				 test_thread_flag(TIF_NEED_RESCHED_LAZY))
+#define tif_need_resched_now()	(test_thread_flag(TIF_NEED_RESCHED))
+#define tif_need_resched_lazy()	test_thread_flag(TIF_NEED_RESCHED_LAZY)
+
+#else
+#define tif_need_resched()	test_thread_flag(TIF_NEED_RESCHED)
+#define tif_need_resched_now()	test_thread_flag(TIF_NEED_RESCHED)
+#define tif_need_resched_lazy()	0
+#endif
 
 #if defined TIF_RESTORE_SIGMASK && !defined HAVE_SET_RESTORE_SIGMASK
 /*
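
Folding TIF_NEED_RESCHED_LAZY into tif_need_resched() keeps existing callers
correct, while RT-aware paths can tell a hard (immediate) reschedule from a
lazy one. A sketch of how a check might use the split, assuming the
preempt_lazy_count() helper this series adds:

    /* Sketch: RT-class preemption happens at once, SCHED_OTHER may be deferred. */
    static inline bool should_preempt_now(void)
    {
            if (tif_need_resched_now())
                    return true;
            return tif_need_resched_lazy() && !preempt_lazy_count();
    }
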
Index: linux-3.18.13-rt10-r7s4/include/linux/timer.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/timer.h
+++ linux-3.18.13-rt10-r7s4/include/linux/timer.h
@@ -244 +244 @@ extern void add_timer(struct timer_list
 
 extern int try_to_del_timer_sync(struct timer_list *timer);
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT_FULL)
   extern int del_timer_sync(struct timer_list *timer);
 #else
 # define del_timer_sync(t)		del_timer(t)
Index: linux-3.18.13-rt10-r7s4/include/linux/uaccess.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/uaccess.h
+++ linux-3.18.13-rt10-r7s4/include/linux/uaccess.h
@@ -9 +9 @@
 
 /*
  * These routines enable/disable the pagefault handler in that
- * it will not take any locks and go straight to the fixup table.
- *
- * They have great resemblance to the preempt_disable/enable calls
- * and in fact they are identical; this is because currently there is
- * no other way to make the pagefault handlers do this. So we do
- * disable preemption but we don't necessarily care about that.
+ * it will not take any MM locks and go straight to the fixup table.
  */
-static inline void pagefault_disable(void)
+static inline void raw_pagefault_disable(void)
 {
 	preempt_count_inc();
 	/*
@@ -21 +21 @@ static inline void pagefault_disable(voi
 	barrier();
 }
 
-static inline void pagefault_enable(void)
+static inline void raw_pagefault_enable(void)
 {
 #ifndef CONFIG_PREEMPT
 	/*
@@ -35 +35 @@ static inline void pagefault_enable(void
 #endif
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+static inline void pagefault_disable(void)
+{
+	raw_pagefault_disable();
+}
+
+static inline void pagefault_enable(void)
+{
+	raw_pagefault_enable();
+}
+#else
+extern void pagefault_disable(void);
+extern void pagefault_enable(void);
+#endif
+
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
 static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
@@ -89 +89 @@ static inline unsigned long __copy_from_
 		mm_segment_t old_fs = get_fs();		\
 							\
 		set_fs(KERNEL_DS);			\
-		pagefault_disable();			\
+		raw_pagefault_disable();		\
 		ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval));		\
-		pagefault_enable();			\
+		raw_pagefault_enable();			\
 		set_fs(old_fs);				\
 		ret;					\
 	})
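
The raw_ variants preserve the old preempt-count behaviour for the
inatomic-copy macros, while pagefault_disable()/pagefault_enable() become
out-of-line on RT (tracking a per-task counter, so the section stays
preemptible). The caller pattern does not change, for example:

    static int peek_user_word(const void __user *uaddr, unsigned long *val)
    {
            int ret;

            pagefault_disable();    /* faults go straight to the fixup table */
            ret = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
            pagefault_enable();

            return ret ? -EFAULT : 0;
    }
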
Index: linux-3.18.13-rt10-r7s4/include/linux/uprobes.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/uprobes.h
+++ linux-3.18.13-rt10-r7s4/include/linux/uprobes.h
@@ -30 +30 @@
 #include <linux/errno.h>
 #include <linux/rbtree.h>
 #include <linux/types.h>
+#include <linux/wait.h>
 
 struct vm_area_struct;
 struct mm_struct;
Index: linux-3.18.13-rt10-r7s4/include/linux/vmstat.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/vmstat.h
+++ linux-3.18.13-rt10-r7s4/include/linux/vmstat.h
@@ -36 +36 @@ DECLARE_PER_CPU(struct vm_event_state, v
  */
 static inline void __count_vm_event(enum vm_event_item item)
 {
+	preempt_disable_rt();
 	raw_cpu_inc(vm_event_states.event[item]);
+	preempt_enable_rt();
 }
 
 static inline void count_vm_event(enum vm_event_item item)
@@ -48 +48 @@ static inline void count_vm_event(enum v
 
 static inline void __count_vm_events(enum vm_event_item item, long delta)
 {
+	preempt_disable_rt();
 	raw_cpu_add(vm_event_states.event[item], delta);
+	preempt_enable_rt();
 }
 
 static inline void count_vm_events(enum vm_event_item item, long delta)
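
raw_cpu_inc()/raw_cpu_add() are not preemption-safe, so the now-preemptible RT
paths bracket them. Sketch of the assumed helpers (defined elsewhere in this
series; they compile to barriers without PREEMPT_RT_BASE):

    #ifdef CONFIG_PREEMPT_RT_BASE
    # define preempt_disable_rt()   preempt_disable()
    # define preempt_enable_rt()    preempt_enable()
    #else
    # define preempt_disable_rt()   barrier()
    # define preempt_enable_rt()    barrier()
    #endif
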
Index: linux-3.18.13-rt10-r7s4/include/linux/wait-simple.h
===================================================================
--- /dev/null
+++ linux-3.18.13-rt10-r7s4/include/linux/wait-simple.h
@@ -0,0 +1,207 @@
+#ifndef _LINUX_WAIT_SIMPLE_H
+#define _LINUX_WAIT_SIMPLE_H
+
+#include <linux/spinlock.h>
+#include <linux/list.h>
+
+#include <asm/current.h>
+
+struct swaiter {
+	struct task_struct	*task;
+	struct list_head	node;
+};
+
+#define DEFINE_SWAITER(name)					\
+	struct swaiter name = {					\
+		.task	= current,				\
+		.node	= LIST_HEAD_INIT((name).node),		\
+	}
+
+struct swait_head {
+	raw_spinlock_t		lock;
+	struct list_head	list;
+};
+
+#define SWAIT_HEAD_INITIALIZER(name) {				\
+		.lock	= __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
+		.list	= LIST_HEAD_INIT((name).list),		\
+	}
+
+#define DEFINE_SWAIT_HEAD(name)					\
+	struct swait_head name = SWAIT_HEAD_INITIALIZER(name)
+
+extern void __init_swait_head(struct swait_head *h, struct lock_class_key *key);
+
+#define init_swait_head(swh)					\
+	do {							\
+		static struct lock_class_key __key;		\
+								\
+		__init_swait_head((swh), &__key);		\
+	} while (0)
+
+/*
+ * Waiter functions
+ */
+extern void swait_prepare_locked(struct swait_head *head, struct swaiter *w);
+extern void swait_prepare(struct swait_head *head, struct swaiter *w, int state);
+extern void swait_finish_locked(struct swait_head *head, struct swaiter *w);
+extern void swait_finish(struct swait_head *head, struct swaiter *w);
+
+/* Check whether a head has waiters enqueued */
+static inline bool swaitqueue_active(struct swait_head *h)
+{
+	/* Make sure the condition is visible before checking list_empty() */
+	smp_mb();
+	return !list_empty(&h->list);
+}
+
+/*
+ * Wakeup functions
+ */
+extern unsigned int __swait_wake(struct swait_head *head, unsigned int state, unsigned int num);
+extern unsigned int __swait_wake_locked(struct swait_head *head, unsigned int state, unsigned int num);
+
+#define swait_wake(head)			__swait_wake(head, TASK_NORMAL, 1)
+#define swait_wake_interruptible(head)		__swait_wake(head, TASK_INTERRUPTIBLE, 1)
+#define swait_wake_all(head)			__swait_wake(head, TASK_NORMAL, 0)
+#define swait_wake_all_interruptible(head)	__swait_wake(head, TASK_INTERRUPTIBLE, 0)
+
+/*
+ * Event API
+ */
+#define __swait_event(wq, condition)					\
+do {									\
+	DEFINE_SWAITER(__wait);						\
+									\
+	for (;;) {							\
+		swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
+		if (condition)						\
+			break;						\
+		schedule();						\
+	}								\
+	swait_finish(&wq, &__wait);					\
+} while (0)
+
+/**
+ * swait_event - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * swait_wake() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ */
+#define swait_event(wq, condition)					\
+do {									\
+	if (condition)							\
+		break;							\
+	__swait_event(wq, condition);					\
+} while (0)
+
+#define __swait_event_interruptible(wq, condition, ret)			\
+do {									\
+	DEFINE_SWAITER(__wait);						\
+									\
+	for (;;) {							\
+		swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE);	\
+		if (condition)						\
+			break;						\
+		if (signal_pending(current)) {				\
+			ret = -ERESTARTSYS;				\
+			break;						\
+		}							\
+		schedule();						\
+	}								\
+	swait_finish(&wq, &__wait);					\
+} while (0)
+
+#define __swait_event_interruptible_timeout(wq, condition, ret)		\
+do {									\
+	DEFINE_SWAITER(__wait);						\
+									\
+	for (;;) {							\
+		swait_prepare(&wq, &__wait, TASK_INTERRUPTIBLE);	\
+		if (condition)						\
+			break;						\
+		if (signal_pending(current)) {				\
+			ret = -ERESTARTSYS;				\
+			break;						\
+		}							\
+		ret = schedule_timeout(ret);				\
+		if (!ret)						\
+			break;						\
+	}								\
+	swait_finish(&wq, &__wait);					\
+} while (0)
+
+/**
+ * swait_event_interruptible - sleep until a condition gets true
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ *
+ * The process is put to sleep (TASK_INTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * swait_wake() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ */
+#define swait_event_interruptible(wq, condition)			\
+({									\
+	int __ret = 0;							\
+	if (!(condition))						\
+		__swait_event_interruptible(wq, condition, __ret);	\
+	__ret;								\
+})
+
+#define swait_event_interruptible_timeout(wq, condition, timeout)	\
+({									\
+	int __ret = timeout;						\
+	if (!(condition))						\
+		__swait_event_interruptible_timeout(wq, condition, __ret);	\
+	__ret;								\
+})
+
+#define __swait_event_timeout(wq, condition, ret)			\
+do {									\
+	DEFINE_SWAITER(__wait);						\
+									\
+	for (;;) {							\
+		swait_prepare(&wq, &__wait, TASK_UNINTERRUPTIBLE);	\
+		if (condition)						\
+			break;						\
+		ret = schedule_timeout(ret);				\
+		if (!ret)						\
+			break;						\
+	}								\
+	swait_finish(&wq, &__wait);					\
+} while (0)
+
+/**
+ * swait_event_timeout - sleep until a condition gets true or a timeout elapses
+ * @wq: the waitqueue to wait on
+ * @condition: a C expression for the event to wait for
+ * @timeout: timeout, in jiffies
+ *
+ * The process is put to sleep (TASK_UNINTERRUPTIBLE) until the
+ * @condition evaluates to true. The @condition is checked each time
+ * the waitqueue @wq is woken up.
+ *
+ * swait_wake() has to be called after changing any variable that could
+ * change the result of the wait condition.
+ *
+ * The function returns 0 if the @timeout elapsed, and the remaining
+ * jiffies if the condition evaluated to true before the timeout elapsed.
+ */
+#define swait_event_timeout(wq, condition, timeout)			\
+({									\
+	long __ret = timeout;						\
+	if (!(condition))						\
+		__swait_event_timeout(wq, condition, __ret);		\
+	__ret;								\
+})
+
+#endif
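
The simple waitqueue replaces the full wait_queue_t machinery with a raw lock
and a plain list, so enqueue and wakeup stay cheap and hard-irq safe on RT. A
minimal producer/consumer sketch using the API above:

    static DEFINE_SWAIT_HEAD(data_wait);
    static int data_ready;

    static void consumer(void)
    {
            /* sleeps (TASK_UNINTERRUPTIBLE) until data_ready is set */
            swait_event(data_wait, data_ready);
    }

    static void producer(void)
    {
            data_ready = 1;              /* publish the condition first */
            swait_wake(&data_wait);      /* then wake one waiter */
    }
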
Index: linux-3.18.13-rt10-r7s4/include/linux/wait.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/linux/wait.h
+++ linux-3.18.13-rt10-r7s4/include/linux/wait.h
@@ -11 +11 @@
 #include <linux/spinlock.h>
 #include <asm/current.h>
 #include <uapi/linux/wait.h>
+#include <linux/atomic.h>
 
 typedef struct __wait_queue wait_queue_t;
 typedef int (*wait_queue_func_t)(wait_queue_t *wait, unsigned mode, int flags, void *key);
Index: linux-3.18.13-rt10-r7s4/include/linux/work-simple.h
===================================================================
--- /dev/null
+++ linux-3.18.13-rt10-r7s4/include/linux/work-simple.h
@@ -0,0 +1,24 @@
+#ifndef _LINUX_SWORK_H
+#define _LINUX_SWORK_H
+
+#include <linux/list.h>
+
+struct swork_event {
+	struct list_head item;
+	unsigned long flags;
+	void (*func)(struct swork_event *);
+};
+
+static inline void INIT_SWORK(struct swork_event *event,
+			      void (*func)(struct swork_event *))
+{
+	event->flags = 0;
+	event->func = func;
+}
+
+bool swork_queue(struct swork_event *sev);
+
+int swork_get(void);
+void swork_put(void);
+
+#endif /* _LINUX_SWORK_H */
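
swork is a minimal deferred-work facility backed by a single kernel thread;
users hold a reference via swork_get()/swork_put() around the period in which
they may queue events (kernel/cgroup.c below does exactly this). Usage sketch:

    static struct swork_event my_ev;

    static void my_ev_fn(struct swork_event *sev)
    {
            /* runs in the swork thread, fully preemptible */
    }

    static int my_setup(void)
    {
            int err = swork_get();  /* create/refcount the worker thread */

            if (err)
                    return err;
            INIT_SWORK(&my_ev, my_ev_fn);
            return 0;
    }

    static void my_teardown(void)
    {
            swork_put();
    }
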
Index: linux-3.18.13-rt10-r7s4/include/net/dst.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/net/dst.h
+++ linux-3.18.13-rt10-r7s4/include/net/dst.h
@@ -406 +406 @@ static inline void dst_confirm(struct ds
 static inline int dst_neigh_output(struct dst_entry *dst, struct neighbour *n,
 				   struct sk_buff *skb)
 {
-	const struct hh_cache *hh;
+	struct hh_cache *hh;
 
 	if (dst->pending_confirm) {
 		unsigned long now = jiffies;
Index: linux-3.18.13-rt10-r7s4/include/net/neighbour.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/net/neighbour.h
+++ linux-3.18.13-rt10-r7s4/include/net/neighbour.h
@@ -390 +390 @@ static inline int neigh_hh_bridge(struct
 }
 #endif
 
-static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
+static inline int neigh_hh_output(struct hh_cache *hh, struct sk_buff *skb)
 {
 	unsigned int seq;
 	int hh_len;
@@ -445 +445 @@ struct neighbour_cb {
 
 #define NEIGH_CB(skb)	((struct neighbour_cb *)(skb)->cb)
 
-static inline void neigh_ha_snapshot(char *dst, const struct neighbour *n,
+static inline void neigh_ha_snapshot(char *dst, struct neighbour *n,
 				     const struct net_device *dev)
 {
 	unsigned int seq;
Index: linux-3.18.13-rt10-r7s4/include/net/netns/ipv4.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/include/net/netns/ipv4.h
+++ linux-3.18.13-rt10-r7s4/include/net/netns/ipv4.h
@@ -70 +70 @@ struct netns_ipv4 {
 
 	int sysctl_icmp_echo_ignore_all;
 	int sysctl_icmp_echo_ignore_broadcasts;
+	int sysctl_icmp_echo_sysrq;
 	int sysctl_icmp_ignore_bogus_error_responses;
 	int sysctl_icmp_ratelimit;
 	int sysctl_icmp_ratemask;
Index: linux-3.18.13-rt10-r7s4/include/trace/events/hist.h
===================================================================
--- /dev/null
+++ linux-3.18.13-rt10-r7s4/include/trace/events/hist.h
@@ -0,0 +1,72 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM hist
+
+#if !defined(_TRACE_HIST_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_HIST_H
+
+#include "latency_hist.h"
+#include <linux/tracepoint.h>
+
+#if !defined(CONFIG_PREEMPT_OFF_HIST) && !defined(CONFIG_INTERRUPT_OFF_HIST)
+#define trace_preemptirqsoff_hist(a, b)
+#else
+TRACE_EVENT(preemptirqsoff_hist,
+
+	TP_PROTO(int reason, int starthist),
+
+	TP_ARGS(reason, starthist),
+
+	TP_STRUCT__entry(
+		__field(int,	reason)
+		__field(int,	starthist)
+	),
+
+	TP_fast_assign(
+		__entry->reason		= reason;
+		__entry->starthist	= starthist;
+	),
+
+	TP_printk("reason=%s starthist=%s", getaction(__entry->reason),
+		  __entry->starthist ? "start" : "stop")
+);
+#endif
+
+#ifndef CONFIG_MISSED_TIMER_OFFSETS_HIST
+#define trace_hrtimer_interrupt(a, b, c, d)
+#else
+TRACE_EVENT(hrtimer_interrupt,
+
+	TP_PROTO(int cpu, long long offset, struct task_struct *curr,
+		struct task_struct *task),
+
+	TP_ARGS(cpu, offset, curr, task),
+
+	TP_STRUCT__entry(
+		__field(int,		cpu)
+		__field(long long,	offset)
+		__array(char,		ccomm,	TASK_COMM_LEN)
+		__field(int,		cprio)
+		__array(char,		tcomm,	TASK_COMM_LEN)
+		__field(int,		tprio)
+	),
+
+	TP_fast_assign(
+		__entry->cpu	= cpu;
+		__entry->offset	= offset;
+		memcpy(__entry->ccomm, curr->comm, TASK_COMM_LEN);
+		__entry->cprio  = curr->prio;
+		memcpy(__entry->tcomm, task != NULL ? task->comm : "<none>",
+			task != NULL ? TASK_COMM_LEN : 7);
+		__entry->tprio  = task != NULL ? task->prio : -1;
+	),
+
+	TP_printk("cpu=%d offset=%lld curr=%s[%d] thread=%s[%d]",
+		__entry->cpu, __entry->offset, __entry->ccomm,
+		__entry->cprio, __entry->tcomm, __entry->tprio)
+);
+#endif
+
+#endif /* _TRACE_HIST_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
Index: linux-3.18.13-rt10-r7s4/include/trace/events/latency_hist.h
===================================================================
--- /dev/null
+++ linux-3.18.13-rt10-r7s4/include/trace/events/latency_hist.h
@@ -0,0 +1,29 @@
+#ifndef _LATENCY_HIST_H
+#define _LATENCY_HIST_H
+
+enum hist_action {
+	IRQS_ON,
+	PREEMPT_ON,
+	TRACE_STOP,
+	IRQS_OFF,
+	PREEMPT_OFF,
+	TRACE_START,
+};
+
+static char *actions[] = {
+	"IRQS_ON",
+	"PREEMPT_ON",
+	"TRACE_STOP",
+	"IRQS_OFF",
+	"PREEMPT_OFF",
+	"TRACE_START",
+};
+
+static inline char *getaction(int action)
+{
+	if (action >= 0 && action < sizeof(actions)/sizeof(actions[0]))
+		return actions[action];
+	return "unknown";
+}
+
+#endif /* _LATENCY_HIST_H */
Index: linux-3.18.13-rt10-r7s4/init/Kconfig
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/init/Kconfig
+++ linux-3.18.13-rt10-r7s4/init/Kconfig
@@ -638 +638 @@ config RCU_FANOUT_EXACT
 
 config RCU_FAST_NO_HZ
 	bool "Accelerate last non-dyntick-idle CPU's grace periods"
-	depends on NO_HZ_COMMON && SMP
+	depends on NO_HZ_COMMON && SMP && !PREEMPT_RT_FULL
 	default n
 	help
 	  This option permits CPUs to enter dynticks-idle state even if
@@ -665 +665 @@ config TREE_RCU_TRACE
 config RCU_BOOST
 	bool "Enable RCU priority boosting"
 	depends on RT_MUTEXES && PREEMPT_RCU
-	default n
+	default y if PREEMPT_RT_FULL
 	help
 	  This option boosts the priority of preempted RCU readers that
 	  block the current preemptible RCU grace period for too long.
@@ -1109 +1109 @@ config CFS_BANDWIDTH
 config RT_GROUP_SCHED
 	bool "Group scheduling for SCHED_RR/FIFO"
 	depends on CGROUP_SCHED
+	depends on !PREEMPT_RT_FULL
 	default n
 	help
 	  This feature lets you explicitly allocate real CPU bandwidth
@@ -1681 +1681 @@ choice
 
 config SLAB
 	bool "SLAB"
+	depends on !PREEMPT_RT_FULL
 	help
 	  The regular slab allocator that is established and known to work
 	  well in all environments. It organizes cache hot objects in
@@ -1700 +1700 @@ config SLUB
 config SLOB
 	depends on EXPERT
 	bool "SLOB (Simple Allocator)"
+	depends on !PREEMPT_RT_FULL
 	help
 	   SLOB replaces the stock allocator with a drastically simpler
 	   allocator. SLOB is generally more space efficient but
Index: linux-3.18.13-rt10-r7s4/init/Makefile
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/init/Makefile
+++ linux-3.18.13-rt10-r7s4/init/Makefile
@@ -36 +36 @@ silent_chk_compile.h = :
 include/generated/compile.h: FORCE
 	@$($(quiet)chk_compile.h)
 	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@ \
-	"$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CC) $(KBUILD_CFLAGS)"
+	"$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)" "$(CONFIG_PREEMPT_RT_FULL)" "$(CC) $(KBUILD_CFLAGS)"
Index: linux-3.18.13-rt10-r7s4/init/main.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/init/main.c
+++ linux-3.18.13-rt10-r7s4/init/main.c
@@ -536 +536 @@ asmlinkage __visible void __init start_k
 	setup_command_line(command_line);
 	setup_nr_cpu_ids();
 	setup_per_cpu_areas();
+	softirq_early_init();
 	smp_prepare_boot_cpu();	/* arch-specific boot-cpu hooks */
 
 	build_all_zonelists(NULL, NULL);
Index: linux-3.18.13-rt10-r7s4/ipc/mqueue.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/ipc/mqueue.c
+++ linux-3.18.13-rt10-r7s4/ipc/mqueue.c
@@ -926 +926 @@ static inline void pipelined_send(struct
 				  struct msg_msg *message,
 				  struct ext_wait_queue *receiver)
 {
+	/*
+	 * Keep them in one critical section for PREEMPT_RT:
+	 */
+	preempt_disable_rt();
 	receiver->msg = message;
 	list_del(&receiver->list);
 	receiver->state = STATE_PENDING;
 	wake_up_process(receiver->task);
 	smp_wmb();
 	receiver->state = STATE_READY;
+	preempt_enable_rt();
 }
 
 /* pipelined_receive() - if there is task waiting in sys_mq_timedsend()
@@ -950 +950 @@ static inline void pipelined_receive(str
 		wake_up_interruptible(&info->wait_q);
 		return;
 	}
-	if (msg_insert(sender->msg, info))
-		return;
-	list_del(&sender->list);
-	sender->state = STATE_PENDING;
-	wake_up_process(sender->task);
-	smp_wmb();
-	sender->state = STATE_READY;
+	/*
+	 * Keep them in one critical section for PREEMPT_RT:
+	 */
+	preempt_disable_rt();
+	if (!msg_insert(sender->msg, info)) {
+		list_del(&sender->list);
+		sender->state = STATE_PENDING;
+		wake_up_process(sender->task);
+		smp_wmb();
+		sender->state = STATE_READY;
+	}
+	preempt_enable_rt();
 }
 
 SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
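
The bracket matters because the sleeping side busy-waits on ->state: if the
waker were preempted between wake_up_process() and setting STATE_READY, the
woken task could spin on another CPU waiting for a transition the preempted
waker never completes. Condensed sketch of the waiting side being protected
(not verbatim from ipc/mqueue.c):

    /* waiter, roughly: spin until the waker finishes the handshake */
    while (ACCESS_ONCE(receiver->state) == STATE_PENDING)
            cpu_relax();
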
Index: linux-3.18.13-rt10-r7s4/ipc/msg.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/ipc/msg.c
+++ linux-3.18.13-rt10-r7s4/ipc/msg.c
@@ -191 +191 @@ static void expunge_all(struct msg_queue
 	struct msg_receiver *msr, *t;
 
 	list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) {
+		/*
+		 * Make sure that the wakeup doesn't preempt
+		 * this CPU prematurely (on PREEMPT_RT).
+		 */
+		preempt_disable_rt();
+
 		msr->r_msg = NULL; /* initialize expunge ordering */
 		wake_up_process(msr->r_tsk);
 		/*
@@ -207 +207 @@ static void expunge_all(struct msg_queue
 		 */
 		smp_mb();
 		msr->r_msg = ERR_PTR(res);
+
+		preempt_enable_rt();
 	}
 }
 
@@ -585 +585 @@ static inline int pipelined_send(struct
 		if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
 		    !security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
 					       msr->r_msgtype, msr->r_mode)) {
+			/*
+			 * Make sure that the wakeup doesn't preempt
+			 * this CPU prematurely (on PREEMPT_RT).
+			 */
+			preempt_disable_rt();
 
 			list_del(&msr->r_list);
 			if (msr->r_maxsize < msg->m_ts) {
@@ -611 +611 @@ static inline int pipelined_send(struct
 				 */
 				smp_mb();
 				msr->r_msg = msg;
+				preempt_enable_rt();
 
 				return 1;
 			}
+			preempt_enable_rt();
 		}
 	}
-
 	return 0;
 }
 
Index: linux-3.18.13-rt10-r7s4/ipc/sem.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/ipc/sem.c
+++ linux-3.18.13-rt10-r7s4/ipc/sem.c
@@ -676 +676 @@ undo:
 static void wake_up_sem_queue_prepare(struct list_head *pt,
 				struct sem_queue *q, int error)
 {
+#ifdef CONFIG_PREEMPT_RT_BASE
+	struct task_struct *p = q->sleeper;
+	get_task_struct(p);
+	q->status = error;
+	wake_up_process(p);
+	put_task_struct(p);
+#else
 	if (list_empty(pt)) {
 		/*
 		 * Hold preempt off so that we don't get preempted and have the
@@ -694 +694 @@ static void wake_up_sem_queue_prepare(st
 	q->pid = error;
 
 	list_add_tail(&q->list, pt);
+#endif
 }
 
 /**
@@ -708 +708 @@ static void wake_up_sem_queue_prepare(st
  */
 static void wake_up_sem_queue_do(struct list_head *pt)
 {
+#ifndef CONFIG_PREEMPT_RT_BASE
 	struct sem_queue *q, *t;
 	int did_something;
 
@@ -721 +721 @@ static void wake_up_sem_queue_do(struct
 	}
 	if (did_something)
 		preempt_enable();
+#endif
 }
 
 static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
Index: linux-3.18.13-rt10-r7s4/kernel/Kconfig.locks
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/Kconfig.locks
+++ linux-3.18.13-rt10-r7s4/kernel/Kconfig.locks
@@ -228 +228 @@ config ARCH_SUPPORTS_ATOMIC_RMW
 
 config MUTEX_SPIN_ON_OWNER
 	def_bool y
-	depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
+	depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
 
 config RWSEM_SPIN_ON_OWNER
        def_bool y
-       depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
+       depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW && !PREEMPT_RT_FULL
 
 config ARCH_USE_QUEUE_RWLOCK
 	bool
Index: linux-3.18.13-rt10-r7s4/kernel/Kconfig.preempt
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/Kconfig.preempt
+++ linux-3.18.13-rt10-r7s4/kernel/Kconfig.preempt
@@ -4 +4 @@
+config PREEMPT
+	bool
+	select PREEMPT_COUNT
+
+config PREEMPT_RT_BASE
+	bool
+	select PREEMPT
+
+config HAVE_PREEMPT_LAZY
+	bool
+
+config PREEMPT_LAZY
+	def_bool y if HAVE_PREEMPT_LAZY && PREEMPT_RT_FULL
 
 choice
 	prompt "Preemption Model"
@@ -49 +49 @@ config PREEMPT_VOLUNTARY
 
 	  Select this if you are building a kernel for a desktop system.
 
-config PREEMPT
+config PREEMPT__LL
 	bool "Preemptible Kernel (Low-Latency Desktop)"
-	select PREEMPT_COUNT
+	select PREEMPT
 	select UNINLINE_SPIN_UNLOCK if !ARCH_INLINE_SPIN_UNLOCK
 	help
 	  This option reduces the latency of the kernel by making
@@ -68 +68 @@ config PREEMPT
 	  embedded system with latency requirements in the milliseconds
 	  range.
 
+config PREEMPT_RTB
+	bool "Preemptible Kernel (Basic RT)"
+	select PREEMPT_RT_BASE
+	help
+	  This option is basically the same as (Low-Latency Desktop) but
+	  enables changes which are preliminary for the full preemptible
+	  RT kernel.
+
+config PREEMPT_RT_FULL
+	bool "Fully Preemptible Kernel (RT)"
+	depends on IRQ_FORCED_THREADING
+	select PREEMPT_RT_BASE
+	select PREEMPT_RCU
+	help
+	  Select this to enable every real-time preemption feature in this
+	  patch set: a fully preemptible kernel.
+
 endchoice
 
 config PREEMPT_COUNT
Index: linux-3.18.13-rt10-r7s4/kernel/cgroup.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/cgroup.c
+++ linux-3.18.13-rt10-r7s4/kernel/cgroup.c
@@ -4358 +4358 @@ static void css_free_rcu_fn(struct rcu_h
 	queue_work(cgroup_destroy_wq, &css->destroy_work);
 }
 
-static void css_release_work_fn(struct work_struct *work)
+static void css_release_work_fn(struct swork_event *sev)
 {
 	struct cgroup_subsys_state *css =
-		container_of(work, struct cgroup_subsys_state, destroy_work);
+		container_of(sev, struct cgroup_subsys_state, destroy_swork);
 	struct cgroup_subsys *ss = css->ss;
 	struct cgroup *cgrp = css->cgroup;
 
@@ -4398 +4398 @@ static void css_release(struct percpu_re
 	struct cgroup_subsys_state *css =
 		container_of(ref, struct cgroup_subsys_state, refcnt);
 
-	INIT_WORK(&css->destroy_work, css_release_work_fn);
-	queue_work(cgroup_destroy_wq, &css->destroy_work);
+	INIT_SWORK(&css->destroy_swork, css_release_work_fn);
+	swork_queue(&css->destroy_swork);
 }
 
 static void init_and_link_css(struct cgroup_subsys_state *css,
@@ -5000 +5000 @@ static int __init cgroup_wq_init(void)
 	 */
 	cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
 	BUG_ON(!cgroup_destroy_wq);
+	BUG_ON(swork_get());
 
 	/*
 	 * Used to destroy pidlists and separate to serve as flush domain.
Index: linux-3.18.13-rt10-r7s4/kernel/cpu.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/cpu.c
+++ linux-3.18.13-rt10-r7s4/kernel/cpu.c
@@ -89 +89 @@ static struct {
 #define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
 #define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
 
+/**
+ * hotplug_pcp	- per cpu hotplug descriptor
+ * @unplug:	set when pin_current_cpu() needs to sync tasks
+ * @sync_tsk:	the task that waits for tasks to finish pinned sections
+ * @refcount:	counter of tasks in pinned sections
+ * @grab_lock:	set when the tasks entering pinned sections should wait
+ * @synced:	notifier for @sync_tsk to tell cpu_down it's finished
+ * @mutex:	the mutex to make tasks wait (used when @grab_lock is true)
+ * @mutex_init:	zero if the mutex hasn't been initialized yet.
+ *
+ * Although @unplug and @sync_tsk may point to the same task, @unplug
+ * is used as a flag and still exists after @sync_tsk has exited and
+ * been set to NULL.
+ */
+struct hotplug_pcp {
+	struct task_struct *unplug;
+	struct task_struct *sync_tsk;
+	int refcount;
+	int grab_lock;
+	struct completion synced;
+	struct completion unplug_wait;
+#ifdef CONFIG_PREEMPT_RT_FULL
+	/*
+	 * Note, on PREEMPT_RT, the hotplug lock must save the state of
+	 * the task, otherwise the mutex will cause the task to fail
+	 * to sleep when required. (Because it's called from migrate_disable())
+	 *
+	 * The spinlock_t on PREEMPT_RT is a mutex that saves the task's
+	 * state.
+	 */
+	spinlock_t lock;
+#else
+	struct mutex mutex;
+#endif
+	int mutex_init;
+};
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+# define hotplug_lock(hp) rt_spin_lock(&(hp)->lock)
+# define hotplug_unlock(hp) rt_spin_unlock(&(hp)->lock)
+#else
+# define hotplug_lock(hp) mutex_lock(&(hp)->mutex)
+# define hotplug_unlock(hp) mutex_unlock(&(hp)->mutex)
+#endif
+
+static DEFINE_PER_CPU(struct hotplug_pcp, hotplug_pcp);
+
+/**
+ * pin_current_cpu - Prevent the current cpu from being unplugged
+ *
+ * Lightweight version of get_online_cpus() to prevent cpu from being
+ * unplugged when code runs in a migration disabled region.
+ *
+ * Must be called with preemption disabled (preempt_count = 1)!
+ */
+void pin_current_cpu(void)
+{
+	struct hotplug_pcp *hp;
+	int force = 0;
+
+retry:
+	hp = &__get_cpu_var(hotplug_pcp);
+
+	if (!hp->unplug || hp->refcount || force || preempt_count() > 1 ||
+	    hp->unplug == current) {
+		hp->refcount++;
+		return;
+	}
+	if (hp->grab_lock) {
+		preempt_enable();
+		hotplug_lock(hp);
+		hotplug_unlock(hp);
+	} else {
+		preempt_enable();
+		/*
+		 * Try to push this task off of this CPU.
+		 */
+		if (!migrate_me()) {
+			preempt_disable();
+			hp = &__get_cpu_var(hotplug_pcp);
+			if (!hp->grab_lock) {
+				/*
+				 * Just let it continue; it's already pinned
+				 * or about to sleep.
+				 */
+				force = 1;
+				goto retry;
+			}
+			preempt_enable();
+		}
+	}
+	preempt_disable();
+	goto retry;
+}
+
+/**
+ * unpin_current_cpu - Allow unplug of current cpu
+ *
+ * Must be called with preemption or interrupts disabled!
+ */
+void unpin_current_cpu(void)
+{
+	struct hotplug_pcp *hp = &__get_cpu_var(hotplug_pcp);
+
+	WARN_ON(hp->refcount <= 0);
+
+	/* This is safe. sync_unplug_thread is pinned to this cpu */
+	if (!--hp->refcount && hp->unplug && hp->unplug != current)
+		wake_up_process(hp->unplug);
+}
+
+static void wait_for_pinned_cpus(struct hotplug_pcp *hp)
+{
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	while (hp->refcount) {
+		schedule_preempt_disabled();
+		set_current_state(TASK_UNINTERRUPTIBLE);
+	}
+}
+
+static int sync_unplug_thread(void *data)
+{
+	struct hotplug_pcp *hp = data;
+
+	wait_for_completion(&hp->unplug_wait);
+	preempt_disable();
+	hp->unplug = current;
+	wait_for_pinned_cpus(hp);
+
+	/*
+	 * This thread will synchronize the cpu_down() with threads
+	 * that have pinned the CPU. When the pinned CPU count reaches
+	 * zero, we inform the cpu_down code to continue to the next step.
+	 */
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	preempt_enable();
+	complete(&hp->synced);
+
+	/*
+	 * If all succeeds, the next step will need tasks to wait till
+	 * the CPU is offline before continuing. To do this, the grab_lock
+	 * is set and tasks going into pin_current_cpu() will block on the
+	 * mutex. But we still need to wait for those that are already in
+	 * pinned CPU sections. If cpu_down() failed, kthread_should_stop()
+	 * will kick this thread out.
+	 */
+	while (!hp->grab_lock && !kthread_should_stop()) {
+		schedule();
+		set_current_state(TASK_UNINTERRUPTIBLE);
+	}
+
+	/* Make sure grab_lock is seen before we see a stale completion */
+	smp_mb();
+
+	/*
+	 * Now just before cpu_down() enters stop machine, we need to make
+	 * sure all tasks that are in pinned CPU sections are out, and new
+	 * tasks will now grab the lock, keeping them from entering pinned
+	 * CPU sections.
+	 */
+	if (!kthread_should_stop()) {
+		preempt_disable();
+		wait_for_pinned_cpus(hp);
+		preempt_enable();
+		complete(&hp->synced);
+	}
+
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	while (!kthread_should_stop()) {
+		schedule();
+		set_current_state(TASK_UNINTERRUPTIBLE);
+	}
+	set_current_state(TASK_RUNNING);
+
+	/*
+	 * Force this thread off this CPU as it's going down and
+	 * we don't want any more work on this CPU.
+	 */
+	current->flags &= ~PF_NO_SETAFFINITY;
+	set_cpus_allowed_ptr(current, cpu_present_mask);
+	migrate_me();
+	return 0;
+}
+
+static void __cpu_unplug_sync(struct hotplug_pcp *hp)
+{
+	wake_up_process(hp->sync_tsk);
+	wait_for_completion(&hp->synced);
+}
+
+static void __cpu_unplug_wait(unsigned int cpu)
+{
+	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
+
+	complete(&hp->unplug_wait);
+	wait_for_completion(&hp->synced);
+}
+
+/*
+ * Start the sync_unplug_thread on the target cpu and wait for it to
+ * complete.
+ */
+static int cpu_unplug_begin(unsigned int cpu)
+{
+	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
+	int err;
+
+	/* Protected by cpu_hotplug.lock */
+	if (!hp->mutex_init) {
+#ifdef CONFIG_PREEMPT_RT_FULL
+		spin_lock_init(&hp->lock);
+#else
+		mutex_init(&hp->mutex);
+#endif
+		hp->mutex_init = 1;
+	}
+
+	/* Inform the scheduler to migrate tasks off this CPU */
+	tell_sched_cpu_down_begin(cpu);
+
+	init_completion(&hp->synced);
+	init_completion(&hp->unplug_wait);
+
+	hp->sync_tsk = kthread_create(sync_unplug_thread, hp, "sync_unplug/%d", cpu);
+	if (IS_ERR(hp->sync_tsk)) {
+		err = PTR_ERR(hp->sync_tsk);
+		hp->sync_tsk = NULL;
+		return err;
+	}
+	kthread_bind(hp->sync_tsk, cpu);
+
+	/*
+	 * Wait for tasks to get out of the pinned sections,
+	 * it's still OK if new tasks enter. Some CPU notifiers will
+	 * wait for tasks that are going to enter these sections and
+	 * we must not have them block.
+	 */
+	wake_up_process(hp->sync_tsk);
+	return 0;
+}
+
+static void cpu_unplug_sync(unsigned int cpu)
+{
+	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
+
+	init_completion(&hp->synced);
+	/* The completion needs to be initialized before setting grab_lock */
+	smp_wmb();
+
+	/* Grab the mutex before setting grab_lock */
+	hotplug_lock(hp);
+	hp->grab_lock = 1;
+
+	/*
+	 * The CPU notifiers have been completed.
+	 * Wait for tasks to get out of pinned CPU sections and have new
+	 * tasks block until the CPU is completely down.
+	 */
+	__cpu_unplug_sync(hp);
+
+	/* All done with the sync thread */
+	kthread_stop(hp->sync_tsk);
+	hp->sync_tsk = NULL;
+}
+
+static void cpu_unplug_done(unsigned int cpu)
+{
+	struct hotplug_pcp *hp = &per_cpu(hotplug_pcp, cpu);
+
+	hp->unplug = NULL;
+	/* Let all tasks know cpu unplug is finished before cleaning up */
+	smp_wmb();
+
+	if (hp->sync_tsk)
+		kthread_stop(hp->sync_tsk);
+
+	if (hp->grab_lock) {
+		hotplug_unlock(hp);
+		/* protected by cpu_hotplug.lock */
+		hp->grab_lock = 0;
+	}
+	tell_sched_cpu_down_done(cpu);
+}
+
 void get_online_cpus(void)
 {
 	might_sleep();
@@ -389 +389 @@ bool try_get_online_cpus(void)
 {
 	if (cpu_hotplug.active_writer == current)
 		return true;
+
 	if (!mutex_trylock(&cpu_hotplug.lock))
 		return false;
 	cpuhp_lock_acquire_tryread();
@@ -637 +637 @@ static int __ref take_cpu_down(void *_pa
 /* Requires cpu_add_remove_lock to be held */
 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 {
-	int err, nr_calls = 0;
+	int mycpu, err, nr_calls = 0;
 	void *hcpu = (void *)(long)cpu;
 	unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
 	struct take_cpu_down_param tcd_param = {
 		.mod = mod,
 		.hcpu = hcpu,
 	};
+	cpumask_var_t cpumask;
+	cpumask_var_t cpumask_org;
 
 	if (num_online_cpus() == 1)
 		return -EBUSY;
@@ -653 +653 @@ static int __ref _cpu_down(unsigned int
 	if (!cpu_online(cpu))
 		return -EINVAL;
 
+	/* Move the downtaker off the unplug cpu */
+	if (!alloc_cpumask_var(&cpumask, GFP_KERNEL))
+		return -ENOMEM;
+	if (!alloc_cpumask_var(&cpumask_org, GFP_KERNEL))  {
+		free_cpumask_var(cpumask);
+		return -ENOMEM;
+	}
+
+	cpumask_copy(cpumask_org, tsk_cpus_allowed(current));
+	cpumask_andnot(cpumask, cpu_online_mask, cpumask_of(cpu));
+	set_cpus_allowed_ptr(current, cpumask);
+	free_cpumask_var(cpumask);
+	migrate_disable();
+	mycpu = smp_processor_id();
+	if (mycpu == cpu) {
+		printk(KERN_ERR "Yuck! Still on unplug CPU!\n");
+		migrate_enable();
+		err = -EBUSY;
+		goto restore_cpus;
+	}
+	migrate_enable();
+
 	cpu_hotplug_begin();
+	err = cpu_unplug_begin(cpu);
+	if (err) {
+		printk("cpu_unplug_begin(%d) failed\n", cpu);
+		goto out_cancel;
+	}
 
 	err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
 	if (err) {
@@ -706 +706 @@ static int __ref _cpu_down(unsigned int
 #endif
 	synchronize_rcu();
 
+	__cpu_unplug_wait(cpu);
 	smpboot_park_threads(cpu);
 
+	/* Notifiers are done. Don't let any more tasks pin this CPU. */
+	cpu_unplug_sync(cpu);
+
 	/*
 	 * So now all preempt/rcu users must observe !cpu_active().
 	 */
@@ -744 +744 @@ static int __ref _cpu_down(unsigned int
 	check_for_tasks(cpu);
 
 out_release:
+	cpu_unplug_done(cpu);
+out_cancel:
 	cpu_hotplug_done();
 	if (!err)
 		cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
+restore_cpus:
+	set_cpus_allowed_ptr(current, cpumask_org);
+	free_cpumask_var(cpumask_org);
 	return err;
 }
 
Index: linux-3.18.13-rt10-r7s4/kernel/debug/kdb/kdb_io.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/debug/kdb/kdb_io.c
+++ linux-3.18.13-rt10-r7s4/kernel/debug/kdb/kdb_io.c
@@ -557 +557 @@ int vkdb_printf(const char *fmt, va_list
 	int linecount;
 	int colcount;
 	int logging, saved_loglevel = 0;
-	int saved_trap_printk;
 	int got_printf_lock = 0;
 	int retlen = 0;
 	int fnd, len;
@@ -567 +567 @@ int vkdb_printf(const char *fmt, va_list
 	unsigned long uninitialized_var(flags);
 
 	preempt_disable();
-	saved_trap_printk = kdb_trap_printk;
-	kdb_trap_printk = 0;
 
 	/* Serialize kdb_printf if multiple cpus try to write at once.
 	 * But if any cpu goes recursive in kdb, just print the output,
@@ -833 +833 @@ kdb_print_out:
 	} else {
 		__release(kdb_printf_lock);
 	}
-	kdb_trap_printk = saved_trap_printk;
 	preempt_enable();
 	return retlen;
 }
@@ -842 +842 @@ int kdb_printf(const char *fmt, ...)
 	va_list ap;
 	int r;
 
+	kdb_trap_printk++;
 	va_start(ap, fmt);
 	r = vkdb_printf(fmt, ap);
 	va_end(ap);
+	kdb_trap_printk--;
 
 	return r;
 }
Index: linux-3.18.13-rt10-r7s4/kernel/events/core.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/events/core.c
+++ linux-3.18.13-rt10-r7s4/kernel/events/core.c
@@ -6349 +6349 @@ static void perf_swevent_init_hrtimer(st
 
 	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hwc->hrtimer.function = perf_swevent_hrtimer;
+	hwc->hrtimer.irqsafe = 1;
 
 	/*
 	 * Since hrtimers have a fixed rate, we can do a static freq->period
Index: linux-3.18.13-rt10-r7s4/kernel/exit.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/exit.c
+++ linux-3.18.13-rt10-r7s4/kernel/exit.c
@@ -150 +150 @@ static void __exit_signal(struct task_st
 	 * Do this under ->siglock, we can race with another thread
 	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
 	 */
-	flush_sigqueue(&tsk->pending);
+	flush_task_sigqueue(tsk);
 	tsk->sighand = NULL;
 	spin_unlock(&sighand->siglock);
 
Index: linux-3.18.13-rt10-r7s4/kernel/fork.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/fork.c
+++ linux-3.18.13-rt10-r7s4/kernel/fork.c
@@ -100 +100 @@ int max_threads;		/* tunable limit on nr
 
 DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 
-__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
+DEFINE_RWLOCK(tasklist_lock);  /* outer */
 
 #ifdef CONFIG_PROVE_RCU
 int lockdep_tasklist_lock_is_held(void)
@@ -236 +236 @@ static inline void put_signal_struct(str
 	if (atomic_dec_and_test(&sig->sigcnt))
 		free_signal_struct(sig);
 }
-
+#ifdef CONFIG_PREEMPT_RT_BASE
+static
+#endif
 void __put_task_struct(struct task_struct *tsk)
 {
 	WARN_ON(!tsk->exit_state);
@@ -254 +254 @@ void __put_task_struct(struct task_struc
 	if (!profile_handoff_task(tsk))
 		free_task(tsk);
 }
+#ifndef CONFIG_PREEMPT_RT_BASE
 EXPORT_SYMBOL_GPL(__put_task_struct);
+#else
+void __put_task_struct_cb(struct rcu_head *rhp)
+{
+	struct task_struct *tsk = container_of(rhp, struct task_struct, put_rcu);
+
+	__put_task_struct(tsk);
+
+}
+EXPORT_SYMBOL_GPL(__put_task_struct_cb);
+#endif
 
 void __init __weak arch_task_cache_init(void) { }
 
@@ -659 +659 @@ void __mmdrop(struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
 
+#ifdef CONFIG_PREEMPT_RT_BASE
+/*
+ * RCU callback for delayed mm drop. Not strictly RCU, but we don't
+ * want another facility to make this work.
+ */
+void __mmdrop_delayed(struct rcu_head *rhp)
+{
+	struct mm_struct *mm = container_of(rhp, struct mm_struct, delayed_drop);
+
+	__mmdrop(mm);
+}
+#endif
+
 /*
  * Decrement the use count and release all resources for an mm.
  */
@@ -1186 +1186 @@ static void rt_mutex_init_task(struct ta
  */
 static void posix_cpu_timers_init(struct task_struct *tsk)
 {
+#ifdef CONFIG_PREEMPT_RT_BASE
+	tsk->posix_timer_list = NULL;
+#endif
 	tsk->cputime_expires.prof_exp = 0;
 	tsk->cputime_expires.virt_exp = 0;
 	tsk->cputime_expires.sched_exp = 0;
@@ -1316 +1316 @@ static struct task_struct *copy_process(
 	spin_lock_init(&p->alloc_lock);
 
 	init_sigpending(&p->pending);
+	p->sigqueue_cache = NULL;
 
 	p->utime = p->stime = p->gtime = 0;
 	p->utimescaled = p->stimescaled = 0;
@@ -1324 +1324 @@ static struct task_struct *copy_process(
 	p->prev_cputime.utime = p->prev_cputime.stime = 0;
 #endif
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
-	seqlock_init(&p->vtime_seqlock);
+	raw_spin_lock_init(&p->vtime_lock);
+	seqcount_init(&p->vtime_seq);
 	p->vtime_snap = 0;
 	p->vtime_snap_whence = VTIME_SLEEPING;
 #endif
@@ -1376 +1376 @@ static struct task_struct *copy_process(
 	p->hardirq_context = 0;
 	p->softirq_context = 0;
 #endif
+#ifdef CONFIG_PREEMPT_RT_FULL
+	p->pagefault_disabled = 0;
+#endif
 #ifdef CONFIG_LOCKDEP
 	p->lockdep_depth = 0; /* no locks held yet */
 	p->curr_chain_key = 0;
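
Making __put_task_struct() static on RT goes together with deferring the final
free through RCU, because the last put can happen from contexts where the
teardown (which takes sleeping locks on RT) is not allowed. The put side is
assumed to look roughly like this (defined in sched.h elsewhere in this
series):

    static inline void put_task_struct(struct task_struct *t)
    {
            if (atomic_dec_and_test(&t->usage))
                    call_rcu(&t->put_rcu, __put_task_struct_cb);
    }
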
Index: linux-3.18.13-rt10-r7s4/kernel/futex.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/futex.c
+++ linux-3.18.13-rt10-r7s4/kernel/futex.c
@@ -741 +741 @@ void exit_pi_state_list(struct task_stru
 		 * task still owns the PI-state:
 		 */
 		if (head->next != next) {
+			raw_spin_unlock_irq(&curr->pi_lock);
 			spin_unlock(&hb->lock);
+			raw_spin_lock_irq(&curr->pi_lock);
 			continue;
 		}
 
@@ -1710 +1710 @@ retry_private:
 				requeue_pi_wake_futex(this, &key2, hb2);
 				drop_count++;
 				continue;
+			} else if (ret == -EAGAIN) {
+				/*
+				 * Waiter was woken by timeout or
+				 * signal and has set pi_blocked_on to
+				 * PI_WAKEUP_INPROGRESS before we
+				 * tried to enqueue it on the rtmutex.
+				 */
+				this->pi_state = NULL;
+				free_pi_state(pi_state);
+				continue;
 			} else if (ret) {
 				/* -EDEADLK */
 				this->pi_state = NULL;
@@ -2564 +2564 @@ static int futex_wait_requeue_pi(u32 __u
 	struct hrtimer_sleeper timeout, *to = NULL;
 	struct rt_mutex_waiter rt_waiter;
 	struct rt_mutex *pi_mutex = NULL;
-	struct futex_hash_bucket *hb;
+	struct futex_hash_bucket *hb, *hb2;
 	union futex_key key2 = FUTEX_KEY_INIT;
 	struct futex_q q = futex_q_init;
 	int res, ret;
@@ -2589 +2589 @@ static int futex_wait_requeue_pi(u32 __u
 	 * The waiter is allocated on our stack, manipulated by the requeue
 	 * code while we sleep on uaddr.
 	 */
-	debug_rt_mutex_init_waiter(&rt_waiter);
-	RB_CLEAR_NODE(&rt_waiter.pi_tree_entry);
-	RB_CLEAR_NODE(&rt_waiter.tree_entry);
-	rt_waiter.task = NULL;
+	rt_mutex_init_waiter(&rt_waiter, false);
 
 	ret = get_futex_key(uaddr2, flags & FLAGS_SHARED, &key2, VERIFY_WRITE);
 	if (unlikely(ret != 0))
@@ -2620 +2620 @@ static int futex_wait_requeue_pi(u32 __u
 	/* Queue the futex_q, drop the hb lock, wait for wakeup. */
 	futex_wait_queue_me(hb, &q, to);
 
-	spin_lock(&hb->lock);
-	ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
-	spin_unlock(&hb->lock);
-	if (ret)
-		goto out_put_keys;
+	/*
+	 * On RT we must avoid races with requeue and trying to block
+	 * on two mutexes (hb->lock and uaddr2's rtmutex) by
+	 * serializing access to pi_blocked_on with pi_lock.
+	 */
+	raw_spin_lock_irq(&current->pi_lock);
+	if (current->pi_blocked_on) {
+		/*
+		 * We have been requeued or are in the process of
+		 * being requeued.
+		 */
+		raw_spin_unlock_irq(&current->pi_lock);
+	} else {
+		/*
+		 * Setting pi_blocked_on to PI_WAKEUP_INPROGRESS
+		 * prevents a concurrent requeue from moving us to the
+		 * uaddr2 rtmutex. After that we can safely acquire
+		 * (and possibly block on) hb->lock.
+		 */
+		current->pi_blocked_on = PI_WAKEUP_INPROGRESS;
+		raw_spin_unlock_irq(&current->pi_lock);
+
+		spin_lock(&hb->lock);
+
+		/*
+		 * Clean up pi_blocked_on. We might leak it otherwise
+		 * when we succeeded with the hb->lock in the fast
+		 * path.
+		 */
+		raw_spin_lock_irq(&current->pi_lock);
+		current->pi_blocked_on = NULL;
+		raw_spin_unlock_irq(&current->pi_lock);
+
+		ret = handle_early_requeue_pi_wakeup(hb, &q, &key2, to);
+		spin_unlock(&hb->lock);
+		if (ret)
+			goto out_put_keys;
+	}
 
 	/*
-	 * In order for us to be here, we know our q.key == key2, and since
-	 * we took the hb->lock above, we also know that futex_requeue() has
-	 * completed and we no longer have to concern ourselves with a wakeup
-	 * race with the atomic proxy lock acquisition by the requeue code. The
-	 * futex_requeue dropped our key1 reference and incremented our key2
-	 * reference count.
+	 * In order to be here, we have either been requeued, are in
+	 * the process of being requeued, or requeue successfully
+	 * acquired uaddr2 on our behalf.  If pi_blocked_on was
+	 * non-null above, we may be racing with a requeue.  Do not
+	 * rely on q->lock_ptr to be hb2->lock until after blocking on
+	 * hb->lock or hb2->lock. The futex_requeue dropped our key1
+	 * reference and incremented our key2 reference count.
 	 */
+	hb2 = hash_futex(&key2);
 
 	/* Check if the requeue code acquired the second futex for us. */
 	if (!q.rt_waiter) {
@@ -2677 +2677 @@ static int futex_wait_requeue_pi(u32 __u
 		 * did a lock-steal - fix up the PI-state in that case.
 		 */
 		if (q.pi_state && (q.pi_state->owner != current)) {
-			spin_lock(q.lock_ptr);
+			spin_lock(&hb2->lock);
+			BUG_ON(&hb2->lock != q.lock_ptr);
 			ret = fixup_pi_state_owner(uaddr2, &q, current);
-			spin_unlock(q.lock_ptr);
+			spin_unlock(&hb2->lock);
 		}
 	} else {
 		/*
@@ -2693 +2693 @@ static int futex_wait_requeue_pi(u32 __u
 		ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter);
 		debug_rt_mutex_free_waiter(&rt_waiter);
 
-		spin_lock(q.lock_ptr);
+		spin_lock(&hb2->lock);
+		BUG_ON(&hb2->lock != q.lock_ptr);
 		/*
 		 * Fixup the pi_state owner and possibly acquire the lock if we
 		 * haven't already.
Index: linux-3.18.13-rt10-r7s4/kernel/irq/handle.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/irq/handle.c
+++ linux-3.18.13-rt10-r7s4/kernel/irq/handle.c
@@ -136 +136 @@ void __irq_wake_thread(struct irq_desc *
 irqreturn_t
 handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 {
+	struct pt_regs *regs = get_irq_regs();
+	u64 ip = regs ? instruction_pointer(regs) : 0;
 	irqreturn_t retval = IRQ_NONE;
 	unsigned int flags = 0, irq = desc->irq_data.irq;
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:178 @ handle_irq_event_percpu(struct irq_desc
 		action = action->next;
 	} while (action);
 
-	add_interrupt_randomness(irq, flags);
+#ifndef CONFIG_PREEMPT_RT_FULL
+	add_interrupt_randomness(irq, flags, ip);
+#else
+	desc->random_ip = ip;
+#endif
 
 	if (!noirqdebug)
 		note_interrupt(irq, desc, retval);
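
On RT, add_interrupt_randomness() cannot run from the hard interrupt because
the entropy pool locks become sleeping locks. The hunk above therefore only
captures the instruction pointer, and the threaded handler (see the
kernel/irq/manage.c hunk below) feeds it to the pool later. A minimal sketch
of that shape, with invented names:

#include <stdint.h>

extern void mix_entropy(int irq, uint64_t sample);	/* stand-in for
							 * add_interrupt_randomness() */

struct fake_desc {
	uint64_t random_ip;		/* sample captured in hard irq */
};

/* hard irq half: O(1), lock-free, just records the sample */
static void hardirq_half(struct fake_desc *d, uint64_t ip)
{
	d->random_ip = ip;
}

/* thread half: free to take sleeping locks on RT */
static void thread_half(struct fake_desc *d, int irq, uintptr_t cookie)
{
	mix_entropy(irq, d->random_ip ^ (uint64_t)cookie);
}
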
Index: linux-3.18.13-rt10-r7s4/kernel/irq/manage.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/irq/manage.c
+++ linux-3.18.13-rt10-r7s4/kernel/irq/manage.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:25 @
 #include "internals.h"
 
 #ifdef CONFIG_IRQ_FORCED_THREADING
+# ifndef CONFIG_PREEMPT_RT_BASE
 __read_mostly bool force_irqthreads;
 
 static int __init setup_forced_irqthreads(char *arg)
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:34 @ static int __init setup_forced_irqthread
 	return 0;
 }
 early_param("threadirqs", setup_forced_irqthreads);
+# endif
 #endif
 
 static void __synchronize_hardirq(struct irq_desc *desc)
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:178 @ static inline void
 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
 #endif
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static void _irq_affinity_notify(struct irq_affinity_notify *notify);
+static struct task_struct *set_affinity_helper;
+static LIST_HEAD(affinity_list);
+static DEFINE_RAW_SPINLOCK(affinity_list_lock);
+
+static int set_affinity_thread(void *unused)
+{
+	while (1) {
+		struct irq_affinity_notify *notify;
+		int empty;
+
+		set_current_state(TASK_INTERRUPTIBLE);
+
+		raw_spin_lock_irq(&affinity_list_lock);
+		empty = list_empty(&affinity_list);
+		raw_spin_unlock_irq(&affinity_list_lock);
+
+		if (empty)
+			schedule();
+		if (kthread_should_stop())
+			break;
+		set_current_state(TASK_RUNNING);
+try_next:
+		notify = NULL;
+
+		raw_spin_lock_irq(&affinity_list_lock);
+		if (!list_empty(&affinity_list)) {
+			notify = list_first_entry(&affinity_list,
+					struct irq_affinity_notify, list);
+			list_del_init(&notify->list);
+		}
+		raw_spin_unlock_irq(&affinity_list_lock);
+
+		if (!notify)
+			continue;
+		_irq_affinity_notify(notify);
+		goto try_next;
+	}
+	return 0;
+}
+
+static void init_helper_thread(void)
+{
+	if (set_affinity_helper)
+		return;
+	set_affinity_helper = kthread_run(set_affinity_thread, NULL,
+			"affinity-cb");
+	WARN_ON(IS_ERR(set_affinity_helper));
+}
+#else
+
+static inline void init_helper_thread(void) { }
+
+#endif
+
 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
 			bool force)
 {
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:272 @ int irq_set_affinity_locked(struct irq_d
 
 	if (desc->affinity_notify) {
 		kref_get(&desc->affinity_notify->kref);
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+		raw_spin_lock(&affinity_list_lock);
+		if (list_empty(&desc->affinity_notify->list))
+			list_add_tail(&desc->affinity_notify->list,
+					&affinity_list);
+		raw_spin_unlock(&affinity_list_lock);
+		wake_up_process(set_affinity_helper);
+#else
 		schedule_work(&desc->affinity_notify->work);
+#endif
 	}
 	irqd_set(data, IRQD_AFFINITY_SET);
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:317 @ int irq_set_affinity_hint(unsigned int i
 }
 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
 
-static void irq_affinity_notify(struct work_struct *work)
+static void _irq_affinity_notify(struct irq_affinity_notify *notify)
 {
-	struct irq_affinity_notify *notify =
-		container_of(work, struct irq_affinity_notify, work);
 	struct irq_desc *desc = irq_to_desc(notify->irq);
 	cpumask_var_t cpumask;
 	unsigned long flags;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:340 @ out:
 	kref_put(&notify->kref, notify->release);
 }
 
+static void irq_affinity_notify(struct work_struct *work)
+{
+	struct irq_affinity_notify *notify =
+		container_of(work, struct irq_affinity_notify, work);
+	_irq_affinity_notify(notify);
+}
+
 /**
  *	irq_set_affinity_notifier - control notification of IRQ affinity changes
  *	@irq:		Interrupt for which to enable/disable notification
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:376 @ irq_set_affinity_notifier(unsigned int i
 		notify->irq = irq;
 		kref_init(&notify->kref);
 		INIT_WORK(&notify->work, irq_affinity_notify);
+		INIT_LIST_HEAD(&notify->list);
+		init_helper_thread();
 	}
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:866 @ irq_forced_thread_fn(struct irq_desc *de
 	local_bh_disable();
 	ret = action->thread_fn(action->irq, action->dev_id);
 	irq_finalize_oneshot(desc, action);
-	local_bh_enable();
+	/*
+	 * Interrupts which have real-time requirements can be set up
+	 * to avoid softirq processing in the thread handler. This is
+	 * safe as these interrupts do not raise soft interrupts.
+	 */
+	if (irq_settings_no_softirq_call(desc))
+		_local_bh_enable();
+	else
+		local_bh_enable();
 	return ret;
 }
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:957 @ static int irq_thread(void *data)
 		if (action_ret == IRQ_HANDLED)
 			atomic_inc(&desc->threads_handled);
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+		migrate_disable();
+		add_interrupt_randomness(action->irq, 0,
+				 desc->random_ip ^ (unsigned long) action);
+		migrate_enable();
+#endif
 		wake_threads_waitq(desc);
 	}
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1276 @ __setup_irq(unsigned int irq, struct irq
 			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
 		}
 
+		if (new->flags & IRQF_NO_SOFTIRQ_CALL)
+			irq_settings_set_no_softirq_call(desc);
+
 		/* Set default affinity mask once everything is setup */
 		setup_affinity(irq, desc, mask);
 
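
The IRQF_NO_SOFTIRQ_CALL flag wired up above (and mapped to
_IRQ_NO_SOFTIRQ_CALL in the kernel/irq/settings.h hunk below) is requested by
the driver at setup time. A hypothetical user, sketch only (device name and
handler invented):

/* The threaded handler promises never to raise softirqs, so the
 * forced-threading path may use _local_bh_enable() and skip softirq
 * processing on return. */
static irqreturn_t mydev_thread_fn(int irq, void *dev_id)
{
	/* ... handle the device; must not raise softirqs ... */
	return IRQ_HANDLED;
}

static int mydev_setup_irq(int irq, void *dev)
{
	return request_threaded_irq(irq, NULL, mydev_thread_fn,
				    IRQF_ONESHOT | IRQF_NO_SOFTIRQ_CALL,
				    "mydev", dev);
}
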
Index: linux-3.18.13-rt10-r7s4/kernel/irq/settings.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/irq/settings.h
+++ linux-3.18.13-rt10-r7s4/kernel/irq/settings.h
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:18 @ enum {
 	_IRQ_NESTED_THREAD	= IRQ_NESTED_THREAD,
 	_IRQ_PER_CPU_DEVID	= IRQ_PER_CPU_DEVID,
 	_IRQ_IS_POLLED		= IRQ_IS_POLLED,
+	_IRQ_NO_SOFTIRQ_CALL	= IRQ_NO_SOFTIRQ_CALL,
 	_IRQF_MODIFY_MASK	= IRQF_MODIFY_MASK,
 };
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:32 @ enum {
 #define IRQ_NESTED_THREAD	GOT_YOU_MORON
 #define IRQ_PER_CPU_DEVID	GOT_YOU_MORON
 #define IRQ_IS_POLLED		GOT_YOU_MORON
+#define IRQ_NO_SOFTIRQ_CALL	GOT_YOU_MORON
 #undef IRQF_MODIFY_MASK
 #define IRQF_MODIFY_MASK	GOT_YOU_MORON
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:43 @ irq_settings_clr_and_set(struct irq_desc
 	desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK);
 }
 
+static inline bool irq_settings_no_softirq_call(struct irq_desc *desc)
+{
+	return desc->status_use_accessors & _IRQ_NO_SOFTIRQ_CALL;
+}
+
+static inline void irq_settings_set_no_softirq_call(struct irq_desc *desc)
+{
+	desc->status_use_accessors |= _IRQ_NO_SOFTIRQ_CALL;
+}
+
 static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
 {
 	return desc->status_use_accessors & _IRQ_PER_CPU;
Index: linux-3.18.13-rt10-r7s4/kernel/irq/spurious.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/irq/spurious.c
+++ linux-3.18.13-rt10-r7s4/kernel/irq/spurious.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:447 @ MODULE_PARM_DESC(noirqdebug, "Disable ir
 
 static int __init irqfixup_setup(char *str)
 {
+#ifdef CONFIG_PREEMPT_RT_BASE
+	pr_warn("irqfixup boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
+	return 1;
+#endif
 	irqfixup = 1;
 	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
 	printk(KERN_WARNING "This may impact system performance.\n");
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:463 @ module_param(irqfixup, int, 0644);
 
 static int __init irqpoll_setup(char *str)
 {
+#ifdef CONFIG_PREEMPT_RT_BASE
+	pr_warn("irqpoll boot option not supported w/ CONFIG_PREEMPT_RT_BASE\n");
+	return 1;
+#endif
 	irqfixup = 2;
 	printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
 				"enabled\n");
Index: linux-3.18.13-rt10-r7s4/kernel/irq_work.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/irq_work.c
+++ linux-3.18.13-rt10-r7s4/kernel/irq_work.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:20 @
 #include <linux/cpu.h>
 #include <linux/notifier.h>
 #include <linux/smp.h>
+#include <linux/interrupt.h>
 #include <asm/processor.h>
 
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:69 @ void __weak arch_irq_work_raise(void)
  */
 bool irq_work_queue_on(struct irq_work *work, int cpu)
 {
+	struct llist_head *list;
+
 	/* All work should have been flushed before going offline */
 	WARN_ON_ONCE(cpu_is_offline(cpu));
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:81 @ bool irq_work_queue_on(struct irq_work *
 	if (!irq_work_claim(work))
 		return false;
 
-	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+	if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !(work->flags & IRQ_WORK_HARD_IRQ))
+		list = &per_cpu(lazy_list, cpu);
+	else
+		list = &per_cpu(raised_list, cpu);
+
+	if (llist_add(&work->llnode, list))
 		arch_send_call_function_single_ipi(cpu);
 
 	return true;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:97 @ EXPORT_SYMBOL_GPL(irq_work_queue_on);
 /* Enqueue the irq work @work on the current CPU */
 bool irq_work_queue(struct irq_work *work)
 {
+	struct llist_head *list;
+	bool lazy_work, realtime = IS_ENABLED(CONFIG_PREEMPT_RT_FULL);
+
 	/* Only queue if not already pending */
 	if (!irq_work_claim(work))
 		return false;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:107 @ bool irq_work_queue(struct irq_work *wor
 	/* Queue the entry and raise the IPI if needed. */
 	preempt_disable();
 
-	/* If the work is "lazy", handle it from next tick if any */
-	if (work->flags & IRQ_WORK_LAZY) {
-		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
-		    tick_nohz_tick_stopped())
-			arch_irq_work_raise();
-	} else {
-		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
+	lazy_work = work->flags & IRQ_WORK_LAZY;
+
+	if (lazy_work || (realtime && !(work->flags & IRQ_WORK_HARD_IRQ)))
+		list = this_cpu_ptr(&lazy_list);
+	else
+		list = this_cpu_ptr(&raised_list);
+
+	if (llist_add(&work->llnode, list)) {
+		if (!lazy_work || tick_nohz_tick_stopped())
 			arch_irq_work_raise();
 	}
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:132 @ bool irq_work_needs_cpu(void)
 	raised = this_cpu_ptr(&raised_list);
 	lazy = this_cpu_ptr(&lazy_list);
 
-	if (llist_empty(raised) || arch_irq_work_has_interrupt())
-		if (llist_empty(lazy))
-			return false;
+	if (llist_empty(raised) && llist_empty(lazy))
+		return false;
 
 	/* All work should have been flushed before going offline */
 	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:147 @ static void irq_work_run_list(struct lli
 	struct irq_work *work;
 	struct llist_node *llnode;
 
-	BUG_ON(!irqs_disabled());
+	BUG_ON(!IS_ENABLED(CONFIG_PREEMPT_RT_FULL) && !irqs_disabled());
 
 	if (llist_empty(list))
 		return;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:184 @ static void irq_work_run_list(struct lli
 void irq_work_run(void)
 {
 	irq_work_run_list(this_cpu_ptr(&raised_list));
-	irq_work_run_list(this_cpu_ptr(&lazy_list));
+	if (IS_ENABLED(CONFIG_PREEMPT_RT_FULL)) {
+		/*
+		 * NOTE: we raise softirq via IPI for safety,
+		 * and execute in irq_work_tick() to move the
+		 * overhead from hard to soft irq context.
+		 */
+		if (!llist_empty(this_cpu_ptr(&lazy_list)))
+			raise_softirq(TIMER_SOFTIRQ);
+	} else
+		irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
 EXPORT_SYMBOL_GPL(irq_work_run);
 
 void irq_work_tick(void)
 {
-	struct llist_head *raised = &__get_cpu_var(raised_list);
+	struct llist_head *raised = this_cpu_ptr(&raised_list);
 
 	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
 		irq_work_run_list(raised);
-	irq_work_run_list(&__get_cpu_var(lazy_list));
+	irq_work_run_list(this_cpu_ptr(&lazy_list));
 }
 
 /*
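
Work items that must still run from the hard interrupt on RT (scheduler
kicks, timer reprogramming and the like) opt out of the lazy list with the
IRQ_WORK_HARD_IRQ flag tested in the queueing paths above. A hypothetical
user, as a sketch:

static void my_hard_callback(struct irq_work *work)
{
	/* runs from the hard interrupt even on RT: keep it short and
	 * stick to raw spinlocks */
}

static struct irq_work my_work = {
	.flags = IRQ_WORK_HARD_IRQ,	/* bypass lazy_list on RT */
	.func  = my_hard_callback,
};

/* from any context: irq_work_queue(&my_work); */
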
Index: linux-3.18.13-rt10-r7s4/kernel/ksysfs.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/ksysfs.c
+++ linux-3.18.13-rt10-r7s4/kernel/ksysfs.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:139 @ KERNEL_ATTR_RO(vmcoreinfo);
 
 #endif /* CONFIG_KEXEC */
 
+#if defined(CONFIG_PREEMPT_RT_FULL)
+static ssize_t  realtime_show(struct kobject *kobj,
+			      struct kobj_attribute *attr, char *buf)
+{
+	return sprintf(buf, "%d\n", 1);
+}
+KERNEL_ATTR_RO(realtime);
+#endif
+
 /* whether file capabilities are enabled */
 static ssize_t fscaps_show(struct kobject *kobj,
 				  struct kobj_attribute *attr, char *buf)
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:215 @ static struct attribute * kernel_attrs[]
 	&vmcoreinfo_attr.attr,
 #endif
 	&rcu_expedited_attr.attr,
+#ifdef CONFIG_PREEMPT_RT_FULL
+	&realtime_attr.attr,
+#endif
 	NULL
 };
 
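
User space can key off the new attribute to detect an RT kernel at run time:
/sys/kernel/realtime reads "1" when CONFIG_PREEMPT_RT_FULL is enabled and
does not exist otherwise. For example:

#include <stdio.h>

/* Returns 1 on a PREEMPT_RT_FULL kernel, 0 otherwise. */
static int kernel_is_rt(void)
{
	FILE *f = fopen("/sys/kernel/realtime", "r");
	int rt = 0;

	if (!f)
		return 0;		/* attribute absent: not an RT kernel */
	if (fscanf(f, "%d", &rt) != 1)
		rt = 0;
	fclose(f);
	return rt == 1;
}
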
Index: linux-3.18.13-rt10-r7s4/kernel/locking/Makefile
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/locking/Makefile
+++ linux-3.18.13-rt10-r7s4/kernel/locking/Makefile
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:2 @
 
-obj-y += mutex.o semaphore.o rwsem.o mcs_spinlock.o
+obj-y += semaphore.o mcs_spinlock.o
 
 ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_lockdep.o = -pg
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:11 @ CFLAGS_REMOVE_mutex-debug.o = -pg
 CFLAGS_REMOVE_rtmutex-debug.o = -pg
 endif
 
+ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
+obj-y += mutex.o
 obj-$(CONFIG_DEBUG_MUTEXES) += mutex-debug.o
+obj-y += rwsem.o
+endif
 obj-$(CONFIG_LOCKDEP) += lockdep.o
 ifeq ($(CONFIG_PROC_FS),y)
 obj-$(CONFIG_LOCKDEP) += lockdep_proc.o
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:28 @ obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmute
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
+ifneq ($(CONFIG_PREEMPT_RT_FULL),y)
 obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
+endif
 obj-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
+obj-$(CONFIG_PREEMPT_RT_FULL) += rt.o
 obj-$(CONFIG_QUEUE_RWLOCK) += qrwlock.o
 obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
Index: linux-3.18.13-rt10-r7s4/kernel/locking/lglock.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/locking/lglock.c
+++ linux-3.18.13-rt10-r7s4/kernel/locking/lglock.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:7 @
 #include <linux/cpu.h>
 #include <linux/string.h>
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+# define lg_lock_ptr		arch_spinlock_t
+# define lg_do_lock(l)		arch_spin_lock(l)
+# define lg_do_unlock(l)	arch_spin_unlock(l)
+#else
+# define lg_lock_ptr		struct rt_mutex
+# define lg_do_lock(l)		__rt_spin_lock(l)
+# define lg_do_unlock(l)	__rt_spin_unlock(l)
+#endif
 /*
  * Note there is no uninit, so lglocks cannot be defined in
  * modules (but it's fine to use them from there)
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:24 @
 
 void lg_lock_init(struct lglock *lg, char *name)
 {
+#ifdef CONFIG_PREEMPT_RT_FULL
+	int i;
+
+	for_each_possible_cpu(i) {
+		struct rt_mutex *lock = per_cpu_ptr(lg->lock, i);
+
+		rt_mutex_init(lock);
+	}
+#endif
 	LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
 }
 EXPORT_SYMBOL(lg_lock_init);
 
 void lg_local_lock(struct lglock *lg)
 {
-	arch_spinlock_t *lock;
+	lg_lock_ptr *lock;
 
-	preempt_disable();
+	migrate_disable();
 	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
 	lock = this_cpu_ptr(lg->lock);
-	arch_spin_lock(lock);
+	lg_do_lock(lock);
 }
 EXPORT_SYMBOL(lg_local_lock);
 
 void lg_local_unlock(struct lglock *lg)
 {
-	arch_spinlock_t *lock;
+	lg_lock_ptr *lock;
 
 	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	lock = this_cpu_ptr(lg->lock);
-	arch_spin_unlock(lock);
-	preempt_enable();
+	lg_do_unlock(lock);
+	migrate_enable();
 }
 EXPORT_SYMBOL(lg_local_unlock);
 
 void lg_local_lock_cpu(struct lglock *lg, int cpu)
 {
-	arch_spinlock_t *lock;
+	lg_lock_ptr *lock;
 
-	preempt_disable();
+	preempt_disable_nort();
 	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
 	lock = per_cpu_ptr(lg->lock, cpu);
-	arch_spin_lock(lock);
+	lg_do_lock(lock);
 }
 EXPORT_SYMBOL(lg_local_lock_cpu);
 
 void lg_local_unlock_cpu(struct lglock *lg, int cpu)
 {
-	arch_spinlock_t *lock;
+	lg_lock_ptr *lock;
 
 	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	lock = per_cpu_ptr(lg->lock, cpu);
-	arch_spin_unlock(lock);
-	preempt_enable();
+	lg_do_unlock(lock);
+	preempt_enable_nort();
 }
 EXPORT_SYMBOL(lg_local_unlock_cpu);
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:85 @ void lg_global_lock(struct lglock *lg)
 {
 	int i;
 
-	preempt_disable();
+	preempt_disable_nort();
 	lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
 	for_each_possible_cpu(i) {
-		arch_spinlock_t *lock;
+		lg_lock_ptr *lock;
 		lock = per_cpu_ptr(lg->lock, i);
-		arch_spin_lock(lock);
+		lg_do_lock(lock);
 	}
 }
 EXPORT_SYMBOL(lg_global_lock);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:101 @ void lg_global_unlock(struct lglock *lg)
 
 	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	for_each_possible_cpu(i) {
-		arch_spinlock_t *lock;
+		lg_lock_ptr *lock;
 		lock = per_cpu_ptr(lg->lock, i);
-		arch_spin_unlock(lock);
+		lg_do_unlock(lock);
 	}
-	preempt_enable();
+	preempt_enable_nort();
 }
 EXPORT_SYMBOL(lg_global_unlock);
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * HACK: If you use this, you get to keep the pieces.
+ * Used in queue_stop_cpus_work() when stop machinery
+ * is called from an inactive CPU, so we can't schedule.
+ */
+# define lg_do_trylock_relax(l)			\
+	do {					\
+		while (!__rt_spin_trylock(l))	\
+			cpu_relax();		\
+	} while (0)
+
+void lg_global_trylock_relax(struct lglock *lg)
+{
+	int i;
+
+	lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
+	for_each_possible_cpu(i) {
+		lg_lock_ptr *lock;
+		lock = per_cpu_ptr(lg->lock, i);
+		lg_do_trylock_relax(lock);
+	}
+}
+#endif
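
For context, a typical lglock user keeps a cheap per-CPU fast path and pays
the all-CPUs cost only on the rare global path; with the conversion above the
same calls become preemptible rt_mutex operations on RT. A sketch with
invented names:

DEFINE_STATIC_LGLOCK(my_lglock);

static int __init my_init(void)
{
	/* on RT this also runs rt_mutex_init() on every per-CPU lock,
	 * so the init call is not optional */
	lg_lock_init(&my_lglock, "my_lglock");
	return 0;
}

static void fast_path_update(void)
{
	lg_local_lock(&my_lglock);	/* this CPU's lock only */
	/* ... touch this CPU's shard of the data ... */
	lg_local_unlock(&my_lglock);
}

static void slow_path_walk(void)
{
	lg_global_lock(&my_lglock);	/* take every CPU's lock */
	/* ... observe all shards consistently ... */
	lg_global_unlock(&my_lglock);
}
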
Index: linux-3.18.13-rt10-r7s4/kernel/locking/lockdep.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/locking/lockdep.c
+++ linux-3.18.13-rt10-r7s4/kernel/locking/lockdep.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:3545 @ static void check_flags(unsigned long fl
 		}
 	}
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 	/*
 	 * We dont accurately track softirq state in e.g.
 	 * hardirq contexts (such as on 4KSTACKS), so only
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:3560 @ static void check_flags(unsigned long fl
 			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
 		}
 	}
+#endif
 
 	if (!debug_locks)
 		print_irqtrace_events(current);
Index: linux-3.18.13-rt10-r7s4/kernel/locking/percpu-rwsem.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/locking/percpu-rwsem.c
+++ linux-3.18.13-rt10-r7s4/kernel/locking/percpu-rwsem.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:87 @ void percpu_down_read(struct percpu_rw_s
 
 	down_read(&brw->rw_sem);
 	atomic_inc(&brw->slow_read_ctr);
+#ifdef CONFIG_PREEMPT_RT_FULL
+	up_read(&brw->rw_sem);
+#else
 	/* avoid up_read()->rwsem_release() */
 	__up_read(&brw->rw_sem);
+#endif
 }
 
 void percpu_up_read(struct percpu_rw_semaphore *brw)
Index: linux-3.18.13-rt10-r7s4/kernel/locking/rt.c
===================================================================
--- /dev/null
+++ linux-3.18.13-rt10-r7s4/kernel/locking/rt.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:4 @
+/*
+ * kernel/rt.c
+ *
+ * Real-Time Preemption Support
+ *
+ * started by Ingo Molnar:
+ *
+ *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *
+ * historic credit for proving that Linux spinlocks can be implemented via
+ * RT-aware mutexes goes to many people: The Pmutex project (Dirk Grambow
+ * and others) who prototyped it on 2.4 and did lots of comparative
+ * research and analysis; TimeSys, for proving that you can implement a
+ * fully preemptible kernel via the use of IRQ threading and mutexes;
+ * Bill Huey for persuasively arguing on lkml that the mutex model is the
+ * right one; and to MontaVista, who ported pmutexes to 2.6.
+ *
+ * This code is a from-scratch implementation and is not based on pmutexes,
+ * but the idea of converting spinlocks to mutexes is used here too.
+ *
+ * lock debugging, locking tree, deadlock detection:
+ *
+ *  Copyright (C) 2004, LynuxWorks, Inc., Igor Manyilov, Bill Huey
+ *  Released under the General Public License (GPL).
+ *
+ * Includes portions of the generic R/W semaphore implementation from:
+ *
+ *  Copyright (c) 2001   David Howells (dhowells@redhat.com).
+ *  - Derived partially from idea by Andrea Arcangeli <andrea@suse.de>
+ *  - Derived also from comments by Linus
+ *
+ * Pending ownership of locks and ownership stealing:
+ *
+ *  Copyright (C) 2005, Kihon Technologies Inc., Steven Rostedt
+ *
+ *   (also by Steven Rostedt)
+ *    - Converted single pi_lock to individual task locks.
+ *
+ * By Esben Nielsen:
+ *    Doing priority inheritance with help of the scheduler.
+ *
+ *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *  - major rework based on Esben Nielsens initial patch
+ *  - replaced thread_info references by task_struct refs
+ *  - removed task->pending_owner dependency
+ *  - BKL drop/reacquire for semaphore style locks to avoid deadlocks
+ *    in the scheduler return path as discussed with Steven Rostedt
+ *
+ *  Copyright (C) 2006, Kihon Technologies Inc.
+ *    Steven Rostedt <rostedt@goodmis.org>
+ *  - debugged and patched Thomas Gleixner's rework.
+ *  - added back the cmpxchg to the rework.
+ *  - turned atomic require back on for SMP.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/rtmutex.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+#include <linux/syscalls.h>
+#include <linux/interrupt.h>
+#include <linux/plist.h>
+#include <linux/fs.h>
+#include <linux/futex.h>
+#include <linux/hrtimer.h>
+
+#include "rtmutex_common.h"
+
+/*
+ * struct mutex functions
+ */
+void __mutex_do_init(struct mutex *mutex, const char *name,
+		     struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)mutex, sizeof(*mutex));
+	lockdep_init_map(&mutex->dep_map, name, key, 0);
+#endif
+	mutex->lock.save_state = 0;
+}
+EXPORT_SYMBOL(__mutex_do_init);
+
+void __lockfunc _mutex_lock(struct mutex *lock)
+{
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	rt_mutex_lock(&lock->lock);
+}
+EXPORT_SYMBOL(_mutex_lock);
+
+int __lockfunc _mutex_lock_interruptible(struct mutex *lock)
+{
+	int ret;
+
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	ret = rt_mutex_lock_interruptible(&lock->lock);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+	return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_interruptible);
+
+int __lockfunc _mutex_lock_killable(struct mutex *lock)
+{
+	int ret;
+
+	mutex_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+	ret = rt_mutex_lock_killable(&lock->lock);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+	return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_killable);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __lockfunc _mutex_lock_nested(struct mutex *lock, int subclass)
+{
+	mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
+	rt_mutex_lock(&lock->lock);
+}
+EXPORT_SYMBOL(_mutex_lock_nested);
+
+void __lockfunc _mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
+{
+	mutex_acquire_nest(&lock->dep_map, 0, 0, nest, _RET_IP_);
+	rt_mutex_lock(&lock->lock);
+}
+EXPORT_SYMBOL(_mutex_lock_nest_lock);
+
+int __lockfunc _mutex_lock_interruptible_nested(struct mutex *lock, int subclass)
+{
+	int ret;
+
+	mutex_acquire_nest(&lock->dep_map, subclass, 0, NULL, _RET_IP_);
+	ret = rt_mutex_lock_interruptible(&lock->lock);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+	return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_interruptible_nested);
+
+int __lockfunc _mutex_lock_killable_nested(struct mutex *lock, int subclass)
+{
+	int ret;
+
+	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+	ret = rt_mutex_lock_killable(&lock->lock);
+	if (ret)
+		mutex_release(&lock->dep_map, 1, _RET_IP_);
+	return ret;
+}
+EXPORT_SYMBOL(_mutex_lock_killable_nested);
+#endif
+
+int __lockfunc _mutex_trylock(struct mutex *lock)
+{
+	int ret = rt_mutex_trylock(&lock->lock);
+
+	if (ret)
+		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+
+	return ret;
+}
+EXPORT_SYMBOL(_mutex_trylock);
+
+void __lockfunc _mutex_unlock(struct mutex *lock)
+{
+	mutex_release(&lock->dep_map, 1, _RET_IP_);
+	rt_mutex_unlock(&lock->lock);
+}
+EXPORT_SYMBOL(_mutex_unlock);
+
+/*
+ * rwlock_t functions
+ */
+int __lockfunc rt_write_trylock(rwlock_t *rwlock)
+{
+	int ret;
+
+	migrate_disable();
+	ret = rt_mutex_trylock(&rwlock->lock);
+	if (ret)
+		rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
+	else
+		migrate_enable();
+
+	return ret;
+}
+EXPORT_SYMBOL(rt_write_trylock);
+
+int __lockfunc rt_write_trylock_irqsave(rwlock_t *rwlock, unsigned long *flags)
+{
+	int ret;
+
+	*flags = 0;
+	ret = rt_write_trylock(rwlock);
+	return ret;
+}
+EXPORT_SYMBOL(rt_write_trylock_irqsave);
+
+int __lockfunc rt_read_trylock(rwlock_t *rwlock)
+{
+	struct rt_mutex *lock = &rwlock->lock;
+	int ret = 1;
+
+	/*
+	 * recursive read locks succeed when current owns the lock,
+	 * but not when read_depth == 0 which means that the lock is
+	 * write locked.
+	 */
+	if (rt_mutex_owner(lock) != current) {
+		migrate_disable();
+		ret = rt_mutex_trylock(lock);
+		if (ret)
+			rwlock_acquire(&rwlock->dep_map, 0, 1, _RET_IP_);
+		else
+			migrate_enable();
+
+	} else if (!rwlock->read_depth) {
+		ret = 0;
+	}
+
+	if (ret)
+		rwlock->read_depth++;
+
+	return ret;
+}
+EXPORT_SYMBOL(rt_read_trylock);
+
+void __lockfunc rt_write_lock(rwlock_t *rwlock)
+{
+	rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+	migrate_disable();
+	__rt_spin_lock(&rwlock->lock);
+}
+EXPORT_SYMBOL(rt_write_lock);
+
+void __lockfunc rt_read_lock(rwlock_t *rwlock)
+{
+	struct rt_mutex *lock = &rwlock->lock;
+
+	/*
+	 * recursive read locks succeed when current owns the lock
+	 */
+	if (rt_mutex_owner(lock) != current) {
+		migrate_disable();
+		rwlock_acquire(&rwlock->dep_map, 0, 0, _RET_IP_);
+		__rt_spin_lock(lock);
+	}
+	rwlock->read_depth++;
+}
+EXPORT_SYMBOL(rt_read_lock);
+
+void __lockfunc rt_write_unlock(rwlock_t *rwlock)
+{
+	/* NOTE: we always pass in '1' for nested, for simplicity */
+	rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+	__rt_spin_unlock(&rwlock->lock);
+	migrate_enable();
+}
+EXPORT_SYMBOL(rt_write_unlock);
+
+void __lockfunc rt_read_unlock(rwlock_t *rwlock)
+{
+	/* Release the lock only when read_depth is down to 0 */
+	if (--rwlock->read_depth == 0) {
+		rwlock_release(&rwlock->dep_map, 1, _RET_IP_);
+		__rt_spin_unlock(&rwlock->lock);
+		migrate_enable();
+	}
+}
+EXPORT_SYMBOL(rt_read_unlock);
+
+unsigned long __lockfunc rt_write_lock_irqsave(rwlock_t *rwlock)
+{
+	rt_write_lock(rwlock);
+
+	return 0;
+}
+EXPORT_SYMBOL(rt_write_lock_irqsave);
+
+unsigned long __lockfunc rt_read_lock_irqsave(rwlock_t *rwlock)
+{
+	rt_read_lock(rwlock);
+
+	return 0;
+}
+EXPORT_SYMBOL(rt_read_lock_irqsave);
+
+void __rt_rwlock_init(rwlock_t *rwlock, char *name, struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)rwlock, sizeof(*rwlock));
+	lockdep_init_map(&rwlock->dep_map, name, key, 0);
+#endif
+	rwlock->lock.save_state = 1;
+	rwlock->read_depth = 0;
+}
+EXPORT_SYMBOL(__rt_rwlock_init);
+
+/*
+ * rw_semaphores
+ */
+
+void  rt_up_write(struct rw_semaphore *rwsem)
+{
+	rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
+	rt_mutex_unlock(&rwsem->lock);
+}
+EXPORT_SYMBOL(rt_up_write);
+
+void  rt_up_read(struct rw_semaphore *rwsem)
+{
+	rwsem_release(&rwsem->dep_map, 1, _RET_IP_);
+	if (--rwsem->read_depth == 0)
+		rt_mutex_unlock(&rwsem->lock);
+}
+EXPORT_SYMBOL(rt_up_read);
+
+/*
+ * downgrade a write lock into a read lock
+ * - on RT the writer simply becomes the sole reader; there is
+ *   no reader queue to wake
+ */
+void  rt_downgrade_write(struct rw_semaphore *rwsem)
+{
+	BUG_ON(rt_mutex_owner(&rwsem->lock) != current);
+	rwsem->read_depth = 1;
+}
+EXPORT_SYMBOL(rt_downgrade_write);
+
+int  rt_down_write_trylock(struct rw_semaphore *rwsem)
+{
+	int ret = rt_mutex_trylock(&rwsem->lock);
+
+	if (ret)
+		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
+	return ret;
+}
+EXPORT_SYMBOL(rt_down_write_trylock);
+
+void  rt_down_write(struct rw_semaphore *rwsem)
+{
+	rwsem_acquire(&rwsem->dep_map, 0, 0, _RET_IP_);
+	rt_mutex_lock(&rwsem->lock);
+}
+EXPORT_SYMBOL(rt_down_write);
+
+void  rt_down_write_nested(struct rw_semaphore *rwsem, int subclass)
+{
+	rwsem_acquire(&rwsem->dep_map, subclass, 0, _RET_IP_);
+	rt_mutex_lock(&rwsem->lock);
+}
+EXPORT_SYMBOL(rt_down_write_nested);
+
+void rt_down_write_nested_lock(struct rw_semaphore *rwsem,
+			       struct lockdep_map *nest)
+{
+	rwsem_acquire_nest(&rwsem->dep_map, 0, 0, nest, _RET_IP_);
+	rt_mutex_lock(&rwsem->lock);
+}
+EXPORT_SYMBOL(rt_down_write_nested_lock);
+
+int  rt_down_read_trylock(struct rw_semaphore *rwsem)
+{
+	struct rt_mutex *lock = &rwsem->lock;
+	int ret = 1;
+
+	/*
+	 * recursive read locks succeed when current owns the rwsem,
+	 * but not when read_depth == 0 which means that the rwsem is
+	 * write locked.
+	 */
+	if (rt_mutex_owner(lock) != current)
+		ret = rt_mutex_trylock(&rwsem->lock);
+	else if (!rwsem->read_depth)
+		ret = 0;
+
+	if (ret) {
+		rwsem->read_depth++;
+		rwsem_acquire(&rwsem->dep_map, 0, 1, _RET_IP_);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(rt_down_read_trylock);
+
+static void __rt_down_read(struct rw_semaphore *rwsem, int subclass)
+{
+	struct rt_mutex *lock = &rwsem->lock;
+
+	rwsem_acquire_read(&rwsem->dep_map, subclass, 0, _RET_IP_);
+
+	if (rt_mutex_owner(lock) != current)
+		rt_mutex_lock(&rwsem->lock);
+	rwsem->read_depth++;
+}
+
+void  rt_down_read(struct rw_semaphore *rwsem)
+{
+	__rt_down_read(rwsem, 0);
+}
+EXPORT_SYMBOL(rt_down_read);
+
+void  rt_down_read_nested(struct rw_semaphore *rwsem, int subclass)
+{
+	__rt_down_read(rwsem, subclass);
+}
+EXPORT_SYMBOL(rt_down_read_nested);
+
+void  __rt_rwsem_init(struct rw_semaphore *rwsem, const char *name,
+			      struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)rwsem, sizeof(*rwsem));
+	lockdep_init_map(&rwsem->dep_map, name, key, 0);
+#endif
+	rwsem->read_depth = 0;
+	rwsem->lock.save_state = 0;
+}
+EXPORT_SYMBOL(__rt_rwsem_init);
+
+/**
+ * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
+ * @cnt: the atomic which we are to dec
+ * @lock: the mutex to return holding if we dec to 0
+ *
+ * return true and hold lock if we dec to 0, return false otherwise
+ */
+int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
+{
+	/* dec if we can't possibly hit 0 */
+	if (atomic_add_unless(cnt, -1, 1))
+		return 0;
+	/* we might hit 0, so take the lock */
+	mutex_lock(lock);
+	if (!atomic_dec_and_test(cnt)) {
+		/* when we actually did the dec, we didn't hit 0 */
+		mutex_unlock(lock);
+		return 0;
+	}
+	/* we hit 0, and we hold the lock */
+	return 1;
+}
+EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
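
atomic_dec_and_mutex_lock() is the usual building block for last-reference
teardown, where the final put must unpublish the object under the same mutex
that lookups take. A hypothetical sketch:

struct obj {
	atomic_t refs;
	struct list_head node;		/* hangs off a global obj list */
};

static DEFINE_MUTEX(obj_list_lock);	/* protects obj list lookups */

static void put_obj(struct obj *o)
{
	/* fast path: not the last reference, no lock taken */
	if (!atomic_dec_and_mutex_lock(&o->refs, &obj_list_lock))
		return;

	/* we dropped the count to zero and now hold obj_list_lock */
	list_del(&o->node);		/* no new lookups can find it */
	mutex_unlock(&obj_list_lock);
	kfree(o);
}
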
Index: linux-3.18.13-rt10-r7s4/kernel/locking/rtmutex.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/locking/rtmutex.c
+++ linux-3.18.13-rt10-r7s4/kernel/locking/rtmutex.c
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:10 @
  *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
  *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
  *  Copyright (C) 2006 Esben Nielsen
+ *  Adaptive Spinlocks:
+ *  Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
+ *				     and Peter Morreale,
+ * Adaptive Spinlocks simplification:
+ *  Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
  *
  *  See Documentation/locking/rt-mutex-design.txt for details.
  */
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:24 @
 #include <linux/sched/rt.h>
 #include <linux/sched/deadline.h>
 #include <linux/timer.h>
+#include <linux/ww_mutex.h>
 
 #include "rtmutex_common.h"
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:78 @ static void fixup_rt_mutex_waiters(struc
 		clear_rt_mutex_waiters(lock);
 }
 
+static int rt_mutex_real_waiter(struct rt_mutex_waiter *waiter)
+{
+	return waiter && waiter != PI_WAKEUP_INPROGRESS &&
+		waiter != PI_REQUEUE_INPROGRESS;
+}
+
 /*
  * We can speed up the acquire/release, if the architecture
  * supports cmpxchg and if there's no debugging state to be set up
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:348 @ static bool rt_mutex_cond_detect_deadloc
 	return debug_rt_mutex_detect_deadlock(waiter, chwalk);
 }
 
+static void rt_mutex_wake_waiter(struct rt_mutex_waiter *waiter)
+{
+	if (waiter->savestate)
+		wake_up_lock_sleeper(waiter->task);
+	else
+		wake_up_process(waiter->task);
+}
+
 /*
  * Max number of times we'll walk the boosting chain:
  */
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:363 @ int max_lock_depth = 1024;
 
 static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
 {
-	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
+	return rt_mutex_real_waiter(p->pi_blocked_on) ?
+		p->pi_blocked_on->lock : NULL;
 }
 
 /*
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:501 @ static int rt_mutex_adjust_prio_chain(st
 	 * reached or the state of the chain has changed while we
 	 * dropped the locks.
 	 */
-	if (!waiter)
+	if (!rt_mutex_real_waiter(waiter))
 		goto out_unlock_pi;
 
 	/*
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:663 @ static int rt_mutex_adjust_prio_chain(st
 	 * follow here. This is the end of the chain we are walking.
 	 */
 	if (!rt_mutex_owner(lock)) {
+		struct rt_mutex_waiter *lock_top_waiter;
+
 		/*
 		 * If the requeue [7] above changed the top waiter,
 		 * then we need to wake the new top waiter up to try
 		 * to get the lock.
 		 */
-		if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
-			wake_up_process(rt_mutex_top_waiter(lock)->task);
+		lock_top_waiter = rt_mutex_top_waiter(lock);
+		if (prerequeue_top_waiter != lock_top_waiter)
+			rt_mutex_wake_waiter(lock_top_waiter);
 		raw_spin_unlock(&lock->wait_lock);
 		return 0;
 	}
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:765 @ static int rt_mutex_adjust_prio_chain(st
 	return ret;
 }
 
+
+#define STEAL_NORMAL  0
+#define STEAL_LATERAL 1
+
+/*
+ * Note that RT tasks are excluded from lateral-steals to prevent the
+ * introduction of an unbounded latency
+ */
+static inline int lock_is_stealable(struct task_struct *task,
+				    struct task_struct *pendowner, int mode)
+{
+	if (mode == STEAL_NORMAL || rt_task(task)) {
+		if (task->prio >= pendowner->prio)
+			return 0;
+	} else if (task->prio > pendowner->prio)
+		return 0;
+	return 1;
+}
+
 /*
  * Try to take an rt-mutex
  *
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:794 @ static int rt_mutex_adjust_prio_chain(st
  * @waiter: The waiter that is queued to the lock's wait list if the
  *	    callsite called task_blocked_on_lock(), otherwise NULL
  */
-static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
-				struct rt_mutex_waiter *waiter)
+static int __try_to_take_rt_mutex(struct rt_mutex *lock,
+				  struct task_struct *task,
+				  struct rt_mutex_waiter *waiter, int mode)
 {
 	unsigned long flags;
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:835 @ static int try_to_take_rt_mutex(struct r
 		 * If waiter is not the highest priority waiter of
 		 * @lock, give up.
 		 */
-		if (waiter != rt_mutex_top_waiter(lock))
+		if (waiter != rt_mutex_top_waiter(lock)) {
+			/* XXX lock_is_stealable() ? */
 			return 0;
+		}
 
 		/*
 		 * We can acquire the lock. Remove the waiter from the
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:856 @ static int try_to_take_rt_mutex(struct r
 		 * not need to be dequeued.
 		 */
 		if (rt_mutex_has_waiters(lock)) {
-			/*
-			 * If @task->prio is greater than or equal to
-			 * the top waiter priority (kernel view),
-			 * @task lost.
-			 */
-			if (task->prio >= rt_mutex_top_waiter(lock)->prio)
-				return 0;
+			struct task_struct *pown = rt_mutex_top_waiter(lock)->task;
 
+			if (task != pown && !lock_is_stealable(task, pown, mode))
+				return 0;
 			/*
 			 * The current top waiter stays enqueued. We
 			 * don't have to change anything in the lock
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:908 @ takeit:
 	return 1;
 }
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+/*
+ * preemptible spin_lock functions:
+ */
+static inline void rt_spin_lock_fastlock(struct rt_mutex *lock,
+					 void  (*slowfn)(struct rt_mutex *lock))
+{
+	might_sleep();
+
+	if (likely(rt_mutex_cmpxchg(lock, NULL, current)))
+		rt_mutex_deadlock_account_lock(lock, current);
+	else
+		slowfn(lock);
+}
+
+static inline void rt_spin_lock_fastunlock(struct rt_mutex *lock,
+					   void  (*slowfn)(struct rt_mutex *lock))
+{
+	if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
+		rt_mutex_deadlock_account_unlock(current);
+	else
+		slowfn(lock);
+}
+#ifdef CONFIG_SMP
+/*
+ * Note that owner is a speculative pointer and dereferencing relies
+ * on rcu_read_lock() and the check against the lock owner.
+ */
+static int adaptive_wait(struct rt_mutex *lock,
+			 struct task_struct *owner)
+{
+	int res = 0;
+
+	rcu_read_lock();
+	for (;;) {
+		if (owner != rt_mutex_owner(lock))
+			break;
+		/*
+		 * Ensure that owner->on_cpu is dereferenced _after_
+		 * checking the above to be valid.
+		 */
+		barrier();
+		if (!owner->on_cpu) {
+			res = 1;
+			break;
+		}
+		cpu_relax();
+	}
+	rcu_read_unlock();
+	return res;
+}
+#else
+static int adaptive_wait(struct rt_mutex *lock,
+			 struct task_struct *orig_owner)
+{
+	return 1;
+}
+#endif
+
+# define pi_lock(lock)		raw_spin_lock_irq(lock)
+# define pi_unlock(lock)	raw_spin_unlock_irq(lock)
+
+static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
+				   struct rt_mutex_waiter *waiter,
+				   struct task_struct *task,
+				   enum rtmutex_chainwalk chwalk);
+/*
+ * Slow path lock function spin_lock style: this variant is very
+ * careful not to miss any non-lock wakeups.
+ *
+ * We store the current state under p->pi_lock in p->saved_state and
+ * the try_to_wake_up() code handles this accordingly.
+ */
+static void  noinline __sched rt_spin_lock_slowlock(struct rt_mutex *lock)
+{
+	struct task_struct *lock_owner, *self = current;
+	struct rt_mutex_waiter waiter, *top_waiter;
+	int ret;
+
+	rt_mutex_init_waiter(&waiter, true);
+
+	raw_spin_lock(&lock->wait_lock);
+
+	if (__try_to_take_rt_mutex(lock, self, NULL, STEAL_LATERAL)) {
+		raw_spin_unlock(&lock->wait_lock);
+		return;
+	}
+
+	BUG_ON(rt_mutex_owner(lock) == self);
+
+	/*
+	 * We save whatever state the task is in and we'll restore it
+	 * after acquiring the lock taking real wakeups into account
+	 * as well. We are serialized via pi_lock against wakeups. See
+	 * try_to_wake_up().
+	 */
+	pi_lock(&self->pi_lock);
+	self->saved_state = self->state;
+	__set_current_state(TASK_UNINTERRUPTIBLE);
+	pi_unlock(&self->pi_lock);
+
+	ret = task_blocks_on_rt_mutex(lock, &waiter, self, 0);
+	BUG_ON(ret);
+
+	for (;;) {
+		/* Try to acquire the lock again. */
+		if (__try_to_take_rt_mutex(lock, self, &waiter, STEAL_LATERAL))
+			break;
+
+		top_waiter = rt_mutex_top_waiter(lock);
+		lock_owner = rt_mutex_owner(lock);
+
+		raw_spin_unlock(&lock->wait_lock);
+
+		debug_rt_mutex_print_deadlock(&waiter);
+
+		if (top_waiter != &waiter || adaptive_wait(lock, lock_owner))
+			schedule_rt_mutex(lock);
+
+		raw_spin_lock(&lock->wait_lock);
+
+		pi_lock(&self->pi_lock);
+		__set_current_state(TASK_UNINTERRUPTIBLE);
+		pi_unlock(&self->pi_lock);
+	}
+
+	/*
+	 * Restore the task state to current->saved_state. We set it
+	 * to the original state above and the try_to_wake_up() code
+	 * has possibly updated it when a real (non-rtmutex) wakeup
+	 * happened while we were blocked. Clear saved_state so
+	 * try_to_wakeup() does not get confused.
+	 */
+	pi_lock(&self->pi_lock);
+	__set_current_state(self->saved_state);
+	self->saved_state = TASK_RUNNING;
+	pi_unlock(&self->pi_lock);
+
+	/*
+	 * try_to_take_rt_mutex() sets the waiter bit
+	 * unconditionally. We might have to fix that up:
+	 */
+	fixup_rt_mutex_waiters(lock);
+
+	BUG_ON(rt_mutex_has_waiters(lock) && &waiter == rt_mutex_top_waiter(lock));
+	BUG_ON(!RB_EMPTY_NODE(&waiter.tree_entry));
+
+	raw_spin_unlock(&lock->wait_lock);
+
+	debug_rt_mutex_free_waiter(&waiter);
+}
+
+static void wakeup_next_waiter(struct rt_mutex *lock);
+/*
+ * Slow path to release a rt_mutex spin_lock style
+ */
+static void __sched __rt_spin_lock_slowunlock(struct rt_mutex *lock)
+{
+	debug_rt_mutex_unlock(lock);
+
+	rt_mutex_deadlock_account_unlock(current);
+
+	if (!rt_mutex_has_waiters(lock)) {
+		lock->owner = NULL;
+		raw_spin_unlock(&lock->wait_lock);
+		return;
+	}
+
+	wakeup_next_waiter(lock);
+
+	raw_spin_unlock(&lock->wait_lock);
+
+	/* Undo pi boosting when necessary */
+	rt_mutex_adjust_prio(current);
+}
+
+static void  noinline __sched rt_spin_lock_slowunlock(struct rt_mutex *lock)
+{
+	raw_spin_lock(&lock->wait_lock);
+	__rt_spin_lock_slowunlock(lock);
+}
+
+static void  noinline __sched rt_spin_lock_slowunlock_hirq(struct rt_mutex *lock)
+{
+	int ret;
+
+	do {
+		ret = raw_spin_trylock(&lock->wait_lock);
+	} while (!ret);
+
+	__rt_spin_lock_slowunlock(lock);
+}
+
+void __lockfunc rt_spin_lock(spinlock_t *lock)
+{
+	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+	spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
+}
+EXPORT_SYMBOL(rt_spin_lock);
+
+void __lockfunc __rt_spin_lock(struct rt_mutex *lock)
+{
+	rt_spin_lock_fastlock(lock, rt_spin_lock_slowlock);
+}
+EXPORT_SYMBOL(__rt_spin_lock);
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+void __lockfunc rt_spin_lock_nested(spinlock_t *lock, int subclass)
+{
+	rt_spin_lock_fastlock(&lock->lock, rt_spin_lock_slowlock);
+	spin_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+}
+EXPORT_SYMBOL(rt_spin_lock_nested);
+#endif
+
+void __lockfunc rt_spin_unlock(spinlock_t *lock)
+{
+	/* NOTE: we always pass in '1' for nested, for simplicity */
+	spin_release(&lock->dep_map, 1, _RET_IP_);
+	rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock);
+}
+EXPORT_SYMBOL(rt_spin_unlock);
+
+void __lockfunc rt_spin_unlock_after_trylock_in_irq(spinlock_t *lock)
+{
+	/* NOTE: we always pass in '1' for nested, for simplicity */
+	spin_release(&lock->dep_map, 1, _RET_IP_);
+	rt_spin_lock_fastunlock(&lock->lock, rt_spin_lock_slowunlock_hirq);
+}
+
+void __lockfunc __rt_spin_unlock(struct rt_mutex *lock)
+{
+	rt_spin_lock_fastunlock(lock, rt_spin_lock_slowunlock);
+}
+EXPORT_SYMBOL(__rt_spin_unlock);
+
+/*
+ * Wait for the lock to get unlocked: instead of polling for an unlock
+ * (like raw spinlocks do), we lock and unlock, to force the kernel to
+ * schedule if there's contention:
+ */
+void __lockfunc rt_spin_unlock_wait(spinlock_t *lock)
+{
+	spin_lock(lock);
+	spin_unlock(lock);
+}
+EXPORT_SYMBOL(rt_spin_unlock_wait);
+
+int __lockfunc __rt_spin_trylock(struct rt_mutex *lock)
+{
+	return rt_mutex_trylock(lock);
+}
+
+int __lockfunc rt_spin_trylock(spinlock_t *lock)
+{
+	int ret = rt_mutex_trylock(&lock->lock);
+
+	if (ret)
+		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+	return ret;
+}
+EXPORT_SYMBOL(rt_spin_trylock);
+
+int __lockfunc rt_spin_trylock_bh(spinlock_t *lock)
+{
+	int ret;
+
+	local_bh_disable();
+	ret = rt_mutex_trylock(&lock->lock);
+	if (ret) {
+		migrate_disable();
+		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+	} else
+		local_bh_enable();
+	return ret;
+}
+EXPORT_SYMBOL(rt_spin_trylock_bh);
+
+int __lockfunc rt_spin_trylock_irqsave(spinlock_t *lock, unsigned long *flags)
+{
+	int ret;
+
+	*flags = 0;
+	ret = rt_mutex_trylock(&lock->lock);
+	if (ret) {
+		migrate_disable();
+		spin_acquire(&lock->dep_map, 0, 1, _RET_IP_);
+	}
+	return ret;
+}
+EXPORT_SYMBOL(rt_spin_trylock_irqsave);
+
+int atomic_dec_and_spin_lock(atomic_t *atomic, spinlock_t *lock)
+{
+	/* Subtract 1 from counter unless that drops it to 0 (i.e. it was 1) */
+	if (atomic_add_unless(atomic, -1, 1))
+		return 0;
+	migrate_disable();
+	rt_spin_lock(lock);
+	if (atomic_dec_and_test(atomic))
+		return 1;
+	rt_spin_unlock(lock);
+	migrate_enable();
+	return 0;
+}
+EXPORT_SYMBOL(atomic_dec_and_spin_lock);
+
+void
+__rt_spin_lock_init(spinlock_t *lock, char *name, struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+	lockdep_init_map(&lock->dep_map, name, key, 0);
+#endif
+}
+EXPORT_SYMBOL(__rt_spin_lock_init);
+
+#endif /* PREEMPT_RT_FULL */
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+static inline int __sched
+__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
+	struct ww_acquire_ctx *hold_ctx = ACCESS_ONCE(ww->ctx);
+
+	if (!hold_ctx)
+		return 0;
+
+	if (unlikely(ctx == hold_ctx))
+		return -EALREADY;
+
+	if (ctx->stamp - hold_ctx->stamp <= LONG_MAX &&
+	    (ctx->stamp != hold_ctx->stamp || ctx > hold_ctx)) {
+#ifdef CONFIG_DEBUG_MUTEXES
+		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
+		ctx->contending_lock = ww;
+#endif
+		return -EDEADLK;
+	}
+
+	return 0;
+}
+#else
+static inline int __sched
+__mutex_lock_check_stamp(struct rt_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+	BUG();
+	return 0;
+}
+
+#endif
+
+static inline int
+try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
+		     struct rt_mutex_waiter *waiter)
+{
+	return __try_to_take_rt_mutex(lock, task, waiter, STEAL_NORMAL);
+}
+
 /*
  * Task blocks on lock.
  *
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1302 @ static int task_blocks_on_rt_mutex(struc
 		return -EDEADLK;
 
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
+
+	/*
+	 * In the case of futex requeue PI, this will be a proxy
+	 * lock. The task will wake unaware that it is enqueued on
+	 * this lock. Avoid blocking on two locks and corrupting
+	 * pi_blocked_on via the PI_WAKEUP_INPROGRESS
+	 * flag. futex_wait_requeue_pi() sets this when it wakes up
+	 * before requeue (due to a signal or timeout). Do not enqueue
+	 * the task if PI_WAKEUP_INPROGRESS is set.
+	 */
+	if (task != current && task->pi_blocked_on == PI_WAKEUP_INPROGRESS) {
+		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+		return -EAGAIN;
+	}
+
+	BUG_ON(rt_mutex_real_waiter(task->pi_blocked_on));
+
 	__rt_mutex_adjust_prio(task);
 	waiter->task = task;
 	waiter->lock = lock;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1342 @ static int task_blocks_on_rt_mutex(struc
 		rt_mutex_enqueue_pi(owner, waiter);
 
 		__rt_mutex_adjust_prio(owner);
-		if (owner->pi_blocked_on)
+		if (rt_mutex_real_waiter(owner->pi_blocked_on))
 			chain_walk = 1;
 	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
 		chain_walk = 1;
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1419 @ static void wakeup_next_waiter(struct rt
 	 * long as we hold lock->wait_lock. The waiter task needs to
 	 * acquire it in order to dequeue the waiter.
 	 */
-	wake_up_process(waiter->task);
+	rt_mutex_wake_waiter(waiter);
 }
 
 /*
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1433 @ static void remove_waiter(struct rt_mute
 {
 	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
 	struct task_struct *owner = rt_mutex_owner(lock);
-	struct rt_mutex *next_lock;
+	struct rt_mutex *next_lock = NULL;
 	unsigned long flags;
 
 	raw_spin_lock_irqsave(&current->pi_lock, flags);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1458 @ static void remove_waiter(struct rt_mute
 	__rt_mutex_adjust_prio(owner);
 
 	/* Store the lock on which owner is blocked or NULL */
-	next_lock = task_blocked_on_lock(owner);
+	if (rt_mutex_real_waiter(owner->pi_blocked_on))
+		next_lock = task_blocked_on_lock(owner);
 
 	raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1495 @ void rt_mutex_adjust_pi(struct task_stru
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
 
 	waiter = task->pi_blocked_on;
-	if (!waiter || (waiter->prio == task->prio &&
+	if (!rt_mutex_real_waiter(waiter) || (waiter->prio == task->prio &&
 			!dl_prio(task->prio))) {
 		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 		return;
 	}
 	next_lock = waiter->lock;
-	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
 	/* gets dropped in rt_mutex_adjust_prio_chain()! */
 	get_task_struct(task);
 
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
 				   next_lock, NULL, task);
 }
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1523 @ void rt_mutex_adjust_pi(struct task_stru
 static int __sched
 __rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		    struct hrtimer_sleeper *timeout,
-		    struct rt_mutex_waiter *waiter)
+		    struct rt_mutex_waiter *waiter,
+		    struct ww_acquire_ctx *ww_ctx)
 {
 	int ret = 0;
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1547 @ __rt_mutex_slowlock(struct rt_mutex *loc
 				break;
 		}
 
+		if (ww_ctx && ww_ctx->acquired > 0) {
+			ret = __mutex_lock_check_stamp(lock, ww_ctx);
+			if (ret)
+				break;
+		}
+
 		raw_spin_unlock(&lock->wait_lock);
 
 		debug_rt_mutex_print_deadlock(waiter);
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1586 @ static void rt_mutex_handle_deadlock(int
 	}
 }
 
+static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
+						   struct ww_acquire_ctx *ww_ctx)
+{
+#ifdef CONFIG_DEBUG_MUTEXES
+	/*
+	 * If this WARN_ON triggers, you used ww_mutex_lock to acquire,
+	 * but released with a normal mutex_unlock in this call.
+	 *
+	 * This should never happen, always use ww_mutex_unlock.
+	 */
+	DEBUG_LOCKS_WARN_ON(ww->ctx);
+
+	/*
+	 * Not quite done after calling ww_acquire_done() ?
+	 */
+	DEBUG_LOCKS_WARN_ON(ww_ctx->done_acquire);
+
+	if (ww_ctx->contending_lock) {
+		/*
+		 * After -EDEADLK you tried to
+		 * acquire a different ww_mutex? Bad!
+		 */
+		DEBUG_LOCKS_WARN_ON(ww_ctx->contending_lock != ww);
+
+		/*
+		 * You called ww_mutex_lock after receiving -EDEADLK,
+		 * but 'forgot' to unlock everything else first?
+		 */
+		DEBUG_LOCKS_WARN_ON(ww_ctx->acquired > 0);
+		ww_ctx->contending_lock = NULL;
+	}
+
+	/*
+	 * Naughty, using a different class will lead to undefined behavior!
+	 */
+	DEBUG_LOCKS_WARN_ON(ww_ctx->ww_class != ww->ww_class);
+#endif
+	ww_ctx->acquired++;
+}
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+static void ww_mutex_account_lock(struct rt_mutex *lock,
+				  struct ww_acquire_ctx *ww_ctx)
+{
+	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base.lock);
+	struct rt_mutex_waiter *waiter, *n;
+
+	/*
+	 * This branch gets optimized out for the common case,
+	 * and is only important for ww_mutex_lock.
+	 */
+	ww_mutex_lock_acquired(ww, ww_ctx);
+	ww->ctx = ww_ctx;
+
+	/*
+	 * Give any possible sleeping processes the chance to wake up,
+	 * so they can recheck if they have to back off.
+	 */
+	rbtree_postorder_for_each_entry_safe(waiter, n, &lock->waiters,
+					     tree_entry) {
+		/* XXX debug rt mutex waiter wakeup */
+
+		BUG_ON(waiter->lock != lock);
+		rt_mutex_wake_waiter(waiter);
+	}
+}
+
+#else
+
+static void ww_mutex_account_lock(struct rt_mutex *lock,
+				  struct ww_acquire_ctx *ww_ctx)
+{
+	BUG();
+}
+#endif
+
 /*
  * Slow path lock function:
  */
 static int __sched
 rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		  struct hrtimer_sleeper *timeout,
-		  enum rtmutex_chainwalk chwalk)
+		  enum rtmutex_chainwalk chwalk,
+		  struct ww_acquire_ctx *ww_ctx)
 {
 	struct rt_mutex_waiter waiter;
 	int ret = 0;
 
-	debug_rt_mutex_init_waiter(&waiter);
-	RB_CLEAR_NODE(&waiter.pi_tree_entry);
-	RB_CLEAR_NODE(&waiter.tree_entry);
+	rt_mutex_init_waiter(&waiter, false);
 
 	raw_spin_lock(&lock->wait_lock);
 
 	/* Try to acquire the lock again: */
 	if (try_to_take_rt_mutex(lock, current, NULL)) {
+		if (ww_ctx)
+			ww_mutex_account_lock(lock, ww_ctx);
 		raw_spin_unlock(&lock->wait_lock);
 		return 0;
 	}
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1698 @ rt_mutex_slowlock(struct rt_mutex *lock,
 	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
 
 	if (likely(!ret))
-		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);
+		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter, ww_ctx);
+	else if (ww_ctx) {
+		/* ww_mutex received EDEADLK, let it become EALREADY */
+		ret = __mutex_lock_check_stamp(lock, ww_ctx);
+		BUG_ON(!ret);
+	}
 
 	set_current_state(TASK_RUNNING);
 
 	if (unlikely(ret)) {
 		if (rt_mutex_has_waiters(lock))
 			remove_waiter(lock, &waiter);
-		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+		/* ww_mutex wants to report EDEADLK/EALREADY, let it */
+		if (!ww_ctx)
+			rt_mutex_handle_deadlock(ret, chwalk, &waiter);
+	} else if (ww_ctx) {
+		ww_mutex_account_lock(lock, ww_ctx);
 	}
 
 	/*
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1753 @ static inline int rt_mutex_slowtrylock(s
 	 * The mutex has currently no owner. Lock the wait lock and
 	 * try to acquire the lock.
 	 */
-	raw_spin_lock(&lock->wait_lock);
+	if (!raw_spin_trylock(&lock->wait_lock))
+		return 0;
 
 	ret = try_to_take_rt_mutex(lock, current, NULL);
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1840 @ rt_mutex_slowunlock(struct rt_mutex *loc
  */
 static inline int
 rt_mutex_fastlock(struct rt_mutex *lock, int state,
+		  struct ww_acquire_ctx *ww_ctx,
 		  int (*slowfn)(struct rt_mutex *lock, int state,
 				struct hrtimer_sleeper *timeout,
-				enum rtmutex_chainwalk chwalk))
+				enum rtmutex_chainwalk chwalk,
+				struct ww_acquire_ctx *ww_ctx))
 {
 	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
-		return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
+		return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK,
+			      ww_ctx);
 }
 
 static inline int
 rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
 			struct hrtimer_sleeper *timeout,
 			enum rtmutex_chainwalk chwalk,
+			struct ww_acquire_ctx *ww_ctx,
 			int (*slowfn)(struct rt_mutex *lock, int state,
 				      struct hrtimer_sleeper *timeout,
-				      enum rtmutex_chainwalk chwalk))
+				      enum rtmutex_chainwalk chwalk,
+				      struct ww_acquire_ctx *ww_ctx))
 {
 	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
 	    likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
-		return slowfn(lock, state, timeout, chwalk);
+		return slowfn(lock, state, timeout, chwalk, ww_ctx);
 }
 
 static inline int
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1902 @ void __sched rt_mutex_lock(struct rt_mut
 {
 	might_sleep();
 
-	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
+	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, NULL, rt_mutex_slowlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock);
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1919 @ int __sched rt_mutex_lock_interruptible(
 {
 	might_sleep();
 
-	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
+	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, NULL, rt_mutex_slowlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
@ linux-3.18.13-rt10-r7s4/Documentation/hwlat_detector.txt:1932 @ int rt_mutex_timed_futex_lock(struct rt_
 	might_sleep();
 
 	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
-				       RT_MUTEX_FULL_CHAINWALK,
+				       RT_MUTEX_FULL_CHAINWALK, NULL,
 				       rt_mutex_slowlock);
 }
 
 /**
+ * rt_mutex_lock_killable - lock a rt_mutex killable
+ *
+ * @lock:              the rt_mutex to be locked
+ *
+ * Returns:
+ *  0          on success
+ * -EINTR      when interrupted by a signal
+ * -EDEADLK    when the lock would deadlock (when deadlock detection is on)
+ */
+int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
+{
+	might_sleep();
+
+	return rt_mutex_fastlock(lock, TASK_KILLABLE, NULL, rt_mutex_slowlock);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
+
+/**
  * rt_mutex_timed_lock - lock a rt_mutex interruptible
  *			the timeout structure is provided
  *			by the caller
@@ 1975 @@ rt_mutex_timed_lock(struct rt_mutex *loc
 
 	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
 				       RT_MUTEX_MIN_CHAINWALK,
+				       NULL,
 				       rt_mutex_slowlock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
@@ 2034 @@ EXPORT_SYMBOL_GPL(rt_mutex_destroy);
 void __rt_mutex_init(struct rt_mutex *lock, const char *name)
 {
 	lock->owner = NULL;
-	raw_spin_lock_init(&lock->wait_lock);
 	lock->waiters = RB_ROOT;
 	lock->waiters_leftmost = NULL;
 
 	debug_rt_mutex_init(lock, name);
 }
-EXPORT_SYMBOL_GPL(__rt_mutex_init);
+EXPORT_SYMBOL(__rt_mutex_init);
 
 /**
  * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
@@ 2054 @@ EXPORT_SYMBOL_GPL(__rt_mutex_init);
 void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
 				struct task_struct *proxy_owner)
 {
-	__rt_mutex_init(lock, NULL);
+	rt_mutex_init(lock);
 	debug_rt_mutex_proxy_lock(lock, proxy_owner);
 	rt_mutex_set_owner(lock, proxy_owner);
 	rt_mutex_deadlock_account_lock(lock, proxy_owner);
@@ 2102 @@ int rt_mutex_start_proxy_lock(struct rt_
 		return 1;
 	}
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+	/*
+	 * In PREEMPT_RT there's an added race.
+	 * If the task that we are about to requeue times out, it
+	 * can set PI_WAKEUP_INPROGRESS. This tells the requeue code
+	 * to skip this task. But right after the task sets
+	 * its pi_blocked_on to PI_WAKEUP_INPROGRESS it can then
+	 * block on the spin_lock(&hb->lock), which in RT is an rtmutex.
+	 * This will replace the PI_WAKEUP_INPROGRESS with the actual
+	 * lock that it blocks on. We *must not* place this task
+	 * on this proxy lock in that case.
+	 *
+	 * To prevent this race, we first take the task's pi_lock
+	 * and check if it has updated its pi_blocked_on. If it has,
+	 * we assume that it woke up and we return -EAGAIN.
+	 * Otherwise, we set the task's pi_blocked_on to
+	 * PI_REQUEUE_INPROGRESS, so that if the task is waking up
+	 * it will know that we are in the process of requeuing it.
+	 */
+	raw_spin_lock_irq(&task->pi_lock);
+	if (task->pi_blocked_on) {
+		raw_spin_unlock_irq(&task->pi_lock);
+		raw_spin_unlock(&lock->wait_lock);
+		return -EAGAIN;
+	}
+	task->pi_blocked_on = PI_REQUEUE_INPROGRESS;
+	raw_spin_unlock_irq(&task->pi_lock);
+#endif
+
 	/* We enforce deadlock detection for futexes */
 	ret = task_blocks_on_rt_mutex(lock, waiter, task,
 				      RT_MUTEX_FULL_CHAINWALK);
@@ 2200 @@ int rt_mutex_finish_proxy_lock(struct rt
 
 	set_current_state(TASK_INTERRUPTIBLE);
 
-	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
+	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter, NULL);
 
 	set_current_state(TASK_RUNNING);
 
@@ 2217 @@ int rt_mutex_finish_proxy_lock(struct rt
 
 	return ret;
 }
+
+static inline int
+ww_mutex_deadlock_injection(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
+{
+#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
+	unsigned tmp;
+
+	if (ctx->deadlock_inject_countdown-- == 0) {
+		tmp = ctx->deadlock_inject_interval;
+		if (tmp > UINT_MAX/4)
+			tmp = UINT_MAX;
+		else
+			tmp = tmp*2 + tmp + tmp/2;
+
+		ctx->deadlock_inject_interval = tmp;
+		ctx->deadlock_inject_countdown = tmp;
+		ctx->contending_lock = lock;
+
+		ww_mutex_unlock(lock);
+
+		return -EDEADLK;
+	}
+#endif
+
+	return 0;
+}
+
+#ifdef CONFIG_PREEMPT_RT_FULL
+int __sched
+__ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
+{
+	int ret;
+
+	might_sleep();
+
+	mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
+	ret = rt_mutex_slowlock(&lock->base.lock, TASK_INTERRUPTIBLE, NULL, 0, ww_ctx);
+	if (ret)
+		mutex_release(&lock->base.dep_map, 1, _RET_IP_);
+	else if (!ret && ww_ctx->acquired > 1)
+		return ww_mutex_deadlock_injection(lock, ww_ctx);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__ww_mutex_lock_interruptible);
+
+int __sched
+__ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
+{
+	int ret;
+
+	might_sleep();
+
+	mutex_acquire_nest(&lock->base.dep_map, 0, 0, &ww_ctx->dep_map, _RET_IP_);
+	ret = rt_mutex_slowlock(&lock->base.lock, TASK_UNINTERRUPTIBLE, NULL, 0, ww_ctx);
+	if (ret)
+		mutex_release(&lock->base.dep_map, 1, _RET_IP_);
+	else if (!ret && ww_ctx->acquired > 1)
+		return ww_mutex_deadlock_injection(lock, ww_ctx);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__ww_mutex_lock);
+
+void __sched ww_mutex_unlock(struct ww_mutex *lock)
+{
+	int nest = !!lock->ctx;
+
+	/*
+	 * The unlocking fastpath is the 0->1 transition from 'locked'
+	 * into 'unlocked' state:
+	 */
+	if (nest) {
+#ifdef CONFIG_DEBUG_MUTEXES
+		DEBUG_LOCKS_WARN_ON(!lock->ctx->acquired);
+#endif
+		if (lock->ctx->acquired > 0)
+			lock->ctx->acquired--;
+		lock->ctx = NULL;
+	}
+
+	mutex_release(&lock->base.dep_map, nest, _RET_IP_);
+	rt_mutex_unlock(&lock->base.lock);
+}
+EXPORT_SYMBOL(ww_mutex_unlock);
+#endif
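
For reference, a minimal caller-side sketch of the ww_mutex API that the
hunks above re-implement on top of rtmutex. This is the standard
wound/wait backoff pattern (cf. Documentation/ww-mutex-design.txt);
demo_ww_class and demo_lock_both are placeholders, not part of this
patch, and the full retry loop is elided:

	#include <linux/ww_mutex.h>

	static DEFINE_WW_CLASS(demo_ww_class);

	static int demo_lock_both(struct ww_mutex *a, struct ww_mutex *b)
	{
		struct ww_acquire_ctx ctx;
		int ret;

		ww_acquire_init(&ctx, &demo_ww_class);

		ret = ww_mutex_lock(a, &ctx);	/* -EDEADLK: older ctx owns a */
		if (ret)
			goto fini;

		ret = ww_mutex_lock(b, &ctx);
		if (ret == -EDEADLK) {
			/* Back off: drop what we hold, wait for b, retry all. */
			ww_mutex_unlock(a);
			ww_mutex_lock_slow(b, &ctx);
			ww_mutex_unlock(b);
			goto fini;
		}

		/* ... work under both locks ... */
		ww_mutex_unlock(b);
		ww_mutex_unlock(a);
	fini:
		ww_acquire_fini(&ctx);
		return ret;
	}

-EALREADY, which the slowlock hunk above may convert -EDEADLK into, is
only seen when a context re-locks a ww_mutex it already owns, so callers
may treat it as "already held" rather than as a backoff request.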
Index: linux-3.18.13-rt10-r7s4/kernel/locking/rtmutex_common.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/locking/rtmutex_common.h
+++ linux-3.18.13-rt10-r7s4/kernel/locking/rtmutex_common.h
@@ 52 @@ struct rt_mutex_waiter {
 	struct rb_node          pi_tree_entry;
 	struct task_struct	*task;
 	struct rt_mutex		*lock;
+	bool			savestate;
 #ifdef CONFIG_DEBUG_RT_MUTEXES
 	unsigned long		ip;
 	struct pid		*deadlock_task_pid;
@@ 123 @@ enum rtmutex_chainwalk {
 /*
  * PI-futex support (proxy locking functions, etc.):
  */
+#define PI_WAKEUP_INPROGRESS	((struct rt_mutex_waiter *) 1)
+#define PI_REQUEUE_INPROGRESS	((struct rt_mutex_waiter *) 2)
+
 extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
 extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
 				       struct task_struct *proxy_owner);
@@ 145 @@ extern int rt_mutex_timed_futex_lock(str
 # include "rtmutex.h"
 #endif
 
+static inline void
+rt_mutex_init_waiter(struct rt_mutex_waiter *waiter, bool savestate)
+{
+	debug_rt_mutex_init_waiter(waiter);
+	waiter->task = NULL;
+	waiter->savestate = savestate;
+	RB_CLEAR_NODE(&waiter->pi_tree_entry);
+	RB_CLEAR_NODE(&waiter->tree_entry);
+}
+
 #endif
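
A short usage sketch for the new helper: acquisition paths keep the
waiter on their own stack and tag it with how the sleep should treat
task state. That the savestate=true form is reserved for the RT
"sleeping spinlock" slowpath is an assumption from the wider -rt design,
not visible in this hunk:

	struct rt_mutex_waiter waiter;

	rt_mutex_init_waiter(&waiter, false);	/* plain rt_mutex sleeper */
	rt_mutex_init_waiter(&waiter, true);	/* RT spinlock sleeper: preserve task state */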
Index: linux-3.18.13-rt10-r7s4/kernel/locking/spinlock.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/locking/spinlock.c
+++ linux-3.18.13-rt10-r7s4/kernel/locking/spinlock.c
@@ 127 @@ void __lockfunc __raw_##op##_lock_bh(loc
  *         __[spin|read|write]_lock_bh()
  */
 BUILD_LOCK_OPS(spin, raw_spinlock);
+
+#ifndef CONFIG_PREEMPT_RT_FULL
 BUILD_LOCK_OPS(read, rwlock);
 BUILD_LOCK_OPS(write, rwlock);
+#endif
 
 #endif
 
@@ 215 @@ void __lockfunc _raw_spin_unlock_bh(raw_
 EXPORT_SYMBOL(_raw_spin_unlock_bh);
 #endif
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+
 #ifndef CONFIG_INLINE_READ_TRYLOCK
 int __lockfunc _raw_read_trylock(rwlock_t *lock)
 {
@@ 361 @@ void __lockfunc _raw_write_unlock_bh(rwl
 EXPORT_SYMBOL(_raw_write_unlock_bh);
 #endif
 
+#endif /* !PREEMPT_RT_FULL */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 void __lockfunc _raw_spin_lock_nested(raw_spinlock_t *lock, int subclass)
Index: linux-3.18.13-rt10-r7s4/kernel/locking/spinlock_debug.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/locking/spinlock_debug.c
+++ linux-3.18.13-rt10-r7s4/kernel/locking/spinlock_debug.c
@@ 34 @@ void __raw_spin_lock_init(raw_spinlock_t
 
 EXPORT_SYMBOL(__raw_spin_lock_init);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 void __rwlock_init(rwlock_t *lock, const char *name,
 		   struct lock_class_key *key)
 {
@@ 52 @@ void __rwlock_init(rwlock_t *lock, const
 }
 
 EXPORT_SYMBOL(__rwlock_init);
+#endif
 
 static void spin_dump(raw_spinlock_t *lock, const char *msg)
 {
@@ 164 @@ void do_raw_spin_unlock(raw_spinlock_t *
 	arch_spin_unlock(&lock->raw_lock);
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 static void rwlock_bug(rwlock_t *lock, const char *msg)
 {
 	if (!debug_locks_off())
@@ 306 @@ void do_raw_write_unlock(rwlock_t *lock)
 	debug_write_unlock(lock);
 	arch_write_unlock(&lock->raw_lock);
 }
+
+#endif
Index: linux-3.18.13-rt10-r7s4/kernel/panic.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/panic.c
+++ linux-3.18.13-rt10-r7s4/kernel/panic.c
@@ 387 @@ static u64 oops_id;
 
 static int init_oops_id(void)
 {
+#ifndef CONFIG_PREEMPT_RT_FULL
 	if (!oops_id)
 		get_random_bytes(&oops_id, sizeof(oops_id));
 	else
+#endif
 		oops_id++;
 
 	return 0;
Index: linux-3.18.13-rt10-r7s4/kernel/power/hibernate.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/power/hibernate.c
+++ linux-3.18.13-rt10-r7s4/kernel/power/hibernate.c
@@ 290 @@ static int create_image(int platform_mod
 
 	local_irq_disable();
 
+	system_state = SYSTEM_SUSPEND;
+
 	error = syscore_suspend();
 	if (error) {
 		printk(KERN_ERR "PM: Some system devices failed to power down, "
@@ 321 @@ static int create_image(int platform_mod
 	syscore_resume();
 
  Enable_irqs:
+	system_state = SYSTEM_RUNNING;
 	local_irq_enable();
 
  Enable_cpus:
@@ 445 @@ static int resume_target_kernel(bool pla
 		goto Enable_cpus;
 
 	local_irq_disable();
+	system_state = SYSTEM_SUSPEND;
 
 	error = syscore_suspend();
 	if (error)
@@ 479 @@ static int resume_target_kernel(bool pla
 	syscore_resume();
 
  Enable_irqs:
+	system_state = SYSTEM_RUNNING;
 	local_irq_enable();
 
  Enable_cpus:
@@ 565 @@ int hibernation_platform_enter(void)
 		goto Platform_finish;
 
 	local_irq_disable();
+	system_state = SYSTEM_SUSPEND;
 	syscore_suspend();
 	if (pm_wakeup_pending()) {
 		error = -EAGAIN;
@@ 578 @@ int hibernation_platform_enter(void)
 
  Power_up:
 	syscore_resume();
+	system_state = SYSTEM_RUNNING;
 	local_irq_enable();
 	enable_nonboot_cpus();
 
Index: linux-3.18.13-rt10-r7s4/kernel/power/suspend.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/power/suspend.c
+++ linux-3.18.13-rt10-r7s4/kernel/power/suspend.c
@@ 321 @@ static int suspend_enter(suspend_state_t
 	arch_suspend_disable_irqs();
 	BUG_ON(!irqs_disabled());
 
+	system_state = SYSTEM_SUSPEND;
+
 	error = syscore_suspend();
 	if (!error) {
 		*wakeup = pm_wakeup_pending();
@@ 337 @@ static int suspend_enter(suspend_state_t
 		syscore_resume();
 	}
 
+	system_state = SYSTEM_RUNNING;
+
 	arch_suspend_enable_irqs();
 	BUG_ON(irqs_disabled());
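
The SYSTEM_SUSPEND state itself is introduced elsewhere in this series
(include/linux/kernel.h); a hypothetical consumer uses it to skip work
that is unsafe while the paths above run with IRQs hard-disabled:

	static bool demo_can_defer_work(void)
	{
		/* create_image()/suspend_enter() run with IRQs off */
		return system_state != SYSTEM_SUSPEND;
	}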
 
Index: linux-3.18.13-rt10-r7s4/kernel/printk/printk.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/printk/printk.c
+++ linux-3.18.13-rt10-r7s4/kernel/printk/printk.c
@@ 1168 @@ static int syslog_print_all(char __user
 {
 	char *text;
 	int len = 0;
+	int attempts = 0;
 
 	text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
 	if (!text)
@@ 1180 @@ static int syslog_print_all(char __user
 		u64 seq;
 		u32 idx;
 		enum log_flags prev;
-
+		int num_msg;
+try_again:
+		attempts++;
+		if (attempts > 10) {
+			len = -EBUSY;
+			goto out;
+		}
+		num_msg = 0;
 		if (clear_seq < log_first_seq) {
 			/* messages are gone, move to first available one */
 			clear_seq = log_first_seq;
@@ 1208 @@ static int syslog_print_all(char __user
 			prev = msg->flags;
 			idx = log_next(idx);
 			seq++;
+			num_msg++;
+			if (num_msg > 5) {
+				num_msg = 0;
+				raw_spin_unlock_irq(&logbuf_lock);
+				raw_spin_lock_irq(&logbuf_lock);
+				if (clear_seq < log_first_seq)
+					goto try_again;
+			}
 		}
 
 		/* move first record forward until length fits into the buffer */
@@ 1229 @@ static int syslog_print_all(char __user
 			prev = msg->flags;
 			idx = log_next(idx);
 			seq++;
+			num_msg++;
+			if (num_msg > 5) {
+				num_msg = 0;
+				raw_spin_unlock_irq(&logbuf_lock);
+				raw_spin_lock_irq(&logbuf_lock);
+				if (clear_seq < log_first_seq)
+					goto try_again;
+			}
 		}
 
 		/* last message fitting into this dump */
@@ 1277 @@ static int syslog_print_all(char __user
 		clear_seq = log_next_seq;
 		clear_idx = log_next_idx;
 	}
+out:
 	raw_spin_unlock_irq(&logbuf_lock);
 
 	kfree(text);
@@ 1435 @@ static void call_console_drivers(int lev
 	if (!console_drivers)
 		return;
 
+	migrate_disable();
 	for_each_console(con) {
 		if (exclusive_console && con != exclusive_console)
 			continue;
@@ 1448 @@ static void call_console_drivers(int lev
 			continue;
 		con->write(con, text, len);
 	}
+	migrate_enable();
 }
 
 /*
@@ 1509 @@ static inline int can_use_console(unsign
 static int console_trylock_for_printk(void)
 {
 	unsigned int cpu = smp_processor_id();
+#ifdef CONFIG_PREEMPT_RT_FULL
+	int lock = !early_boot_irqs_disabled && (preempt_count() == 0) &&
+		!irqs_disabled();
+#else
+	int lock = 1;
+#endif
+
+	if (!lock)
+		return 0;
 
 	if (!console_trylock())
 		return 0;
@@ 1652 @@ static size_t cont_print_text(char *text
 	return textlen;
 }
 
+#ifdef CONFIG_EARLY_PRINTK
+struct console *early_console;
+
+void early_vprintk(const char *fmt, va_list ap)
+{
+	if (early_console) {
+		char buf[512];
+		int n = vscnprintf(buf, sizeof(buf), fmt, ap);
+
+		early_console->write(early_console, buf, n);
+	}
+}
+
+asmlinkage void early_printk(const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	early_vprintk(fmt, ap);
+	va_end(ap);
+}
+
+/*
+ * This is independent of any log levels - a global
+ * kill switch that turns off all of printk.
+ *
+ * Used by the NMI watchdog if early-printk is enabled.
+ */
+static bool __read_mostly printk_killswitch;
+
+static int __init force_early_printk_setup(char *str)
+{
+	printk_killswitch = true;
+	return 0;
+}
+early_param("force_early_printk", force_early_printk_setup);
+
+void printk_kill(void)
+{
+	printk_killswitch = true;
+}
+
+static int forced_early_printk(const char *fmt, va_list ap)
+{
+	if (!printk_killswitch)
+		return 0;
+	early_vprintk(fmt, ap);
+	return 1;
+}
+#else
+static inline int forced_early_printk(const char *fmt, va_list ap)
+{
+	return 0;
+}
+#endif
+
 asmlinkage int vprintk_emit(int facility, int level,
 			    const char *dict, size_t dictlen,
 			    const char *fmt, va_list args)
@@ 1724 @@ asmlinkage int vprintk_emit(int facility
 	/* cpu currently holding logbuf_lock in this function */
 	static volatile unsigned int logbuf_cpu = UINT_MAX;
 
+	/*
+	 * Fall back to early_printk if a debugging subsystem has
+	 * killed printk output
+	 */
+	if (unlikely(forced_early_printk(fmt, args)))
+		return 1;
+
 	if (level == SCHED_MESSAGE_LOGLEVEL) {
 		level = -1;
 		in_sched = true;
@@ 1871 @@ asmlinkage int vprintk_emit(int facility
 		 * console_sem which would prevent anyone from printing to
 		 * console
 		 */
-		preempt_disable();
-
+		migrate_disable();
 		/*
 		 * Try to acquire and then immediately release the console
 		 * semaphore.  The release will print out buffers and wake up
@@ 1879 @@ asmlinkage int vprintk_emit(int facility
 		 */
 		if (console_trylock_for_printk())
 			console_unlock();
-		preempt_enable();
+		migrate_enable();
 		lockdep_on();
 	}
 
@@ 1979 @@ static size_t cont_print_text(char *text
 
 #endif /* CONFIG_PRINTK */
 
-#ifdef CONFIG_EARLY_PRINTK
-struct console *early_console;
-
-void early_vprintk(const char *fmt, va_list ap)
-{
-	if (early_console) {
-		char buf[512];
-		int n = vscnprintf(buf, sizeof(buf), fmt, ap);
-
-		early_console->write(early_console, buf, n);
-	}
-}
-
-asmlinkage __visible void early_printk(const char *fmt, ...)
-{
-	va_list ap;
-
-	va_start(ap, fmt);
-	early_vprintk(fmt, ap);
-	va_end(ap);
-}
-#endif
-
 static int __add_preferred_console(char *name, int idx, char *options,
 				   char *brl_options)
 {
@@ 2218 @@ static void console_cont_flush(char *tex
 		goto out;
 
 	len = cont_print_text(text, size);
+#ifndef CONFIG_PREEMPT_RT_FULL
 	raw_spin_unlock(&logbuf_lock);
 	stop_critical_timings();
 	call_console_drivers(cont.level, text, len);
 	start_critical_timings();
 	local_irq_restore(flags);
+#else
+	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+	call_console_drivers(cont.level, text, len);
+#endif
 	return;
 out:
 	raw_spin_unlock_irqrestore(&logbuf_lock, flags);
@@ 2315 @@ skip:
 		console_idx = log_next(console_idx);
 		console_seq++;
 		console_prev = msg->flags;
+#ifdef CONFIG_PREEMPT_RT_FULL
+		raw_spin_unlock_irqrestore(&logbuf_lock, flags);
+		call_console_drivers(level, text, len);
+#else
 		raw_spin_unlock(&logbuf_lock);
 
 		stop_critical_timings();	/* don't trace print latency */
 		call_console_drivers(level, text, len);
 		start_critical_timings();
 		local_irq_restore(flags);
+#endif
 	}
 	console_locked = 0;
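
A sketch of how a debugging subsystem might drive the killswitch added
above; demo_watchdog_fire() is hypothetical, while printk_kill() and
early_printk() are the functions from this hunk:

	static void demo_watchdog_fire(void)
	{
		/* Silence the normal, lock-taking printk path ... */
		printk_kill();
		/* ... and write straight to the registered early console. */
		early_printk("watchdog: CPU appears stuck\n");
	}

The same behaviour is available from boot via the force_early_printk
parameter registered above, provided an early console has been set up
(e.g. with earlyprintk= on architectures that support it).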
 
Index: linux-3.18.13-rt10-r7s4/kernel/ptrace.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/ptrace.c
+++ linux-3.18.13-rt10-r7s4/kernel/ptrace.c
@@ 132 @@ static bool ptrace_freeze_traced(struct
 
 	spin_lock_irq(&task->sighand->siglock);
 	if (task_is_traced(task) && !__fatal_signal_pending(task)) {
-		task->state = __TASK_TRACED;
+		raw_spin_lock_irq(&task->pi_lock);
+		if (task->state & __TASK_TRACED)
+			task->state = __TASK_TRACED;
+		else
+			task->saved_state = __TASK_TRACED;
+		raw_spin_unlock_irq(&task->pi_lock);
 		ret = true;
 	}
 	spin_unlock_irq(&task->sighand->siglock);
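
The rule this hunk follows, as an assumption from the -rt sleeping-lock
design: while a task sleeps on a converted spinlock its original state
is parked in ->saved_state and restored at wakeup, so a state change
must target whichever field currently carries it, under pi_lock:

	raw_spin_lock_irq(&task->pi_lock);
	if (task->state & __TASK_TRACED)
		task->state = __TASK_TRACED;	/* not blocked on an rtmutex */
	else
		task->saved_state = __TASK_TRACED; /* restored on lock wakeup */
	raw_spin_unlock_irq(&task->pi_lock);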
Index: linux-3.18.13-rt10-r7s4/kernel/rcu/tiny.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/rcu/tiny.c
+++ linux-3.18.13-rt10-r7s4/kernel/rcu/tiny.c
@@ 373 @@ void call_rcu_sched(struct rcu_head *hea
 }
 EXPORT_SYMBOL_GPL(call_rcu_sched);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Post an RCU bottom-half callback to be invoked after any subsequent
  * quiescent state.
@@ 383 @@ void call_rcu_bh(struct rcu_head *head,
 	__call_rcu(head, func, &rcu_bh_ctrlblk);
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
+#endif
 
 void rcu_init(void)
 {
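
With the BH flavor compiled out here, callers are expected to keep
building because the series maps the BH API onto the sleepable one,
roughly as follows (assumed to live in include/linux/rcupdate.h, not in
this hunk):

	#ifdef CONFIG_PREEMPT_RT_FULL
	#define call_rcu_bh	call_rcu
	#endif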
Index: linux-3.18.13-rt10-r7s4/kernel/rcu/tree.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/rcu/tree.c
+++ linux-3.18.13-rt10-r7s4/kernel/rcu/tree.c
@@ 59 @@
 #include <linux/random.h>
 #include <linux/ftrace_event.h>
 #include <linux/suspend.h>
+#include <linux/delay.h>
+#include <linux/gfp.h>
+#include <linux/oom.h>
+#include <linux/smpboot.h>
+#include "../time/tick-internal.h"
 
 #include "tree.h"
 #include "rcu.h"
@@ 160 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
  */
 static int rcu_scheduler_fully_active __read_mostly;
 
-#ifdef CONFIG_RCU_BOOST
-
 /*
  * Control variables for per-CPU and per-rcu_node kthreads.  These
  * handle all flavors of RCU.
@@ 169 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kth
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
 
-#endif /* #ifdef CONFIG_RCU_BOOST */
-
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
@@ 211 @@ void rcu_sched_qs(void)
 	}
 }
 
+#ifdef CONFIG_PREEMPT_RT_FULL
+static void rcu_preempt_qs(void);
+
+void rcu_bh_qs(void)
+{
+	unsigned long flags;
+
+	/* The callee, rcu_preempt_qs(), requires irqs to be disabled. */
+	local_irq_save(flags);
+	rcu_preempt_qs();
+	local_irq_restore(flags);
+}
+#else
 void rcu_bh_qs(void)
 {
 	if (!__this_cpu_read(rcu_bh_data.passed_quiesce)) {
@@ 233 @@ void rcu_bh_qs(void)
 		__this_cpu_write(rcu_bh_data.passed_quiesce, 1);
 	}
 }
+#endif
 
 static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
 
@@ 354 @@ long rcu_batches_completed_sched(void)
 }
 EXPORT_SYMBOL_GPL(rcu_batches_completed_sched);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Return the number of RCU BH batches processed thus far for debug & stats.
  */
@@ 382 @@ void rcu_bh_force_quiescent_state(void)
 }
 EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);
 
+#else
+void rcu_force_quiescent_state(void)
+{
+}
+EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
+#endif
+
 /*
  * Show the state of the grace-period kthreads.
  */
@@ 1437 @@ static void rcu_gp_kthread_wake(struct r
 	    !ACCESS_ONCE(rsp->gp_flags) ||
 	    !rsp->gp_kthread)
 		return;
-	wake_up(&rsp->gp_wq);
+	swait_wake(&rsp->gp_wq);
 }
 
 /*
@@ 1819 @@ static int __noreturn rcu_gp_kthread(voi
 					       ACCESS_ONCE(rsp->gpnum),
 					       TPS("reqwait"));
 			rsp->gp_state = RCU_GP_WAIT_GPS;
-			wait_event_interruptible(rsp->gp_wq,
+			swait_event_interruptible(rsp->gp_wq,
 						 ACCESS_ONCE(rsp->gp_flags) &
 						 RCU_GP_FLAG_INIT);
 			/* Locking provides needed memory barrier. */
@@ 1847 @@ static int __noreturn rcu_gp_kthread(voi
 					       ACCESS_ONCE(rsp->gpnum),
 					       TPS("fqswait"));
 			rsp->gp_state = RCU_GP_WAIT_FQS;
-			ret = wait_event_interruptible_timeout(rsp->gp_wq,
+			ret = swait_event_interruptible_timeout(rsp->gp_wq,
 					((gf = ACCESS_ONCE(rsp->gp_flags)) &
 					 RCU_GP_FLAG_FQS) ||
 					(!ACCESS_ONCE(rnp->qsmask) &&
@@ 2591 @@ __rcu_process_callbacks(struct rcu_state
 /*
  * Do RCU core processing for the current CPU.
  */
-static void rcu_process_callbacks(struct softirq_action *unused)
+static void rcu_process_callbacks(void)
 {
 	struct rcu_state *rsp;
 
 	if (cpu_is_offline(smp_processor_id()))
 		return;
-	trace_rcu_utilization(TPS("Start RCU core"));
 	for_each_rcu_flavor(rsp)
 		__rcu_process_callbacks(rsp);
-	trace_rcu_utilization(TPS("End RCU core"));
 }
 
 /*
@@ 2612 @@ static void invoke_rcu_callbacks(struct
 {
 	if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
 		return;
-	if (likely(!rsp->boost)) {
-		rcu_do_batch(rsp, rdp);
+	rcu_do_batch(rsp, rdp);
+}
+
+static void rcu_wake_cond(struct task_struct *t, int status)
+{
+	/*
+	 * If the thread is yielding, only wake it when this
+	 * is invoked from idle
+	 */
+	if (t && (status != RCU_KTHREAD_YIELDING || is_idle_task(current)))
+		wake_up_process(t);
+}
+
+/*
+ * Wake up this CPU's rcuc kthread to do RCU core processing.
+ */
+static void invoke_rcu_core(void)
+{
+	unsigned long flags;
+	struct task_struct *t;
+
+	if (!cpu_online(smp_processor_id()))
 		return;
+	local_irq_save(flags);
+	__this_cpu_write(rcu_cpu_has_work, 1);
+	t = __this_cpu_read(rcu_cpu_kthread_task);
+	if (t != NULL && current != t)
+		rcu_wake_cond(t, __this_cpu_read(rcu_cpu_kthread_status));
+	local_irq_restore(flags);
+}
+
+static void rcu_cpu_kthread_park(unsigned int cpu)
+{
+	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
+}
+
+static int rcu_cpu_kthread_should_run(unsigned int cpu)
+{
+	return __this_cpu_read(rcu_cpu_has_work);
+}
+
+/*
+ * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
+ * RCU softirq used in flavors and configurations of RCU that do not
+ * support RCU priority boosting.
+ */
+static void rcu_cpu_kthread(unsigned int cpu)
+{
+	unsigned int *statusp = &__get_cpu_var(rcu_cpu_kthread_status);
+	char work, *workp = &__get_cpu_var(rcu_cpu_has_work);
+	int spincnt;
+
+	for (spincnt = 0; spincnt < 10; spincnt++) {
+		trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
+		local_bh_disable();
+		*statusp = RCU_KTHREAD_RUNNING;
+		this_cpu_inc(rcu_cpu_kthread_loops);
+		local_irq_disable();
+		work = *workp;
+		*workp = 0;
+		local_irq_enable();
+		if (work)
+			rcu_process_callbacks();
+		local_bh_enable();
+		if (*workp == 0) {
+			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
+			*statusp = RCU_KTHREAD_WAITING;
+			return;
+		}
 	}
-	invoke_rcu_callbacks_kthread();
+	*statusp = RCU_KTHREAD_YIELDING;
+	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
+	schedule_timeout_interruptible(2);
+	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
+	*statusp = RCU_KTHREAD_WAITING;
 }
 
-static void invoke_rcu_core(void)
+static struct smp_hotplug_thread rcu_cpu_thread_spec = {
+	.store			= &rcu_cpu_kthread_task,
+	.thread_should_run	= rcu_cpu_kthread_should_run,
+	.thread_fn		= rcu_cpu_kthread,
+	.thread_comm		= "rcuc/%u",
+	.setup			= rcu_cpu_kthread_setup,
+	.park			= rcu_cpu_kthread_park,
+};
+
+/*
+ * Spawn per-CPU RCU core processing kthreads.
+ */
+static int __init rcu_spawn_core_kthreads(void)
 {
-	if (cpu_online(smp_processor_id()))
-		raise_softirq(RCU_SOFTIRQ);
+	int cpu;
+
+	for_each_possible_cpu(cpu)
+		per_cpu(rcu_cpu_has_work, cpu) = 0;
+	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
+	return 0;
 }
+early_initcall(rcu_spawn_core_kthreads);
 
 /*
  * Handle any core-RCU processing required by a call_rcu() invocation.
@@ 2845 @@ void call_rcu_sched(struct rcu_head *hea
 }
 EXPORT_SYMBOL_GPL(call_rcu_sched);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /*
  * Queue an RCU callback for invocation after a quicker grace period.
  */
@@ 2854 @@ void call_rcu_bh(struct rcu_head *head,
 	__call_rcu(head, func, &rcu_bh_state, -1, 0);
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
+#endif
 
 /*
  * Queue an RCU callback for lazy invocation after a grace period.
@@ 2946 @@ void synchronize_sched(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_sched);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /**
  * synchronize_rcu_bh - wait until an rcu_bh grace period has elapsed.
  *
@@ 2973 @@ void synchronize_rcu_bh(void)
 		wait_rcu_gp(call_rcu_bh);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
+#endif
 
 /**
  * get_state_synchronize_rcu - Snapshot current RCU state
@@ 3456 @@ static void _rcu_barrier(struct rcu_stat
 	mutex_unlock(&rsp->barrier_mutex);
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /**
  * rcu_barrier_bh - Wait until all in-flight call_rcu_bh() callbacks complete.
  */
@@ 3465 @@ void rcu_barrier_bh(void)
 	_rcu_barrier(&rcu_bh_state);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier_bh);
+#endif
 
 /**
  * rcu_barrier_sched - Wait for in-flight call_rcu_sched() callbacks.
@@ 3775 @@ static void __init rcu_init_one(struct r
 	}
 
 	rsp->rda = rda;
-	init_waitqueue_head(&rsp->gp_wq);
+	init_swait_head(&rsp->gp_wq);
 	rnp = rsp->level[rcu_num_lvls - 1];
 	for_each_possible_cpu(i) {
 		while (i > rnp->grphi)
@@ 3872 @@ void __init rcu_init(void)
 	rcu_init_one(&rcu_bh_state, &rcu_bh_data);
 	rcu_init_one(&rcu_sched_state, &rcu_sched_data);
 	__rcu_init_preempt();
-	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 
 	/*
 	 * We don't need protection against CPU-hotplug here because
Index: linux-3.18.13-rt10-r7s4/kernel/rcu/tree.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/rcu/tree.h
+++ linux-3.18.13-rt10-r7s4/kernel/rcu/tree.h
@@ 31 @@
 #include <linux/cpumask.h>
 #include <linux/seqlock.h>
 #include <linux/irq_work.h>
+#include <linux/wait-simple.h>
 
 /*
  * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
@@ 176 @@ struct rcu_node {
 				/*  queued on this rcu_node structure that */
 				/*  are blocking the current grace period, */
 				/*  there can be no such task. */
-	struct completion boost_completion;
-				/* Used to ensure that the rt_mutex used */
-				/*  to carry out the boosting is fully */
-				/*  released with no future boostee accesses */
-				/*  before that rt_mutex is re-initialized. */
 	struct rt_mutex boost_mtx;
 				/* Used only for the priority-boosting */
 				/*  side effect, not as a lock. */
@@ 207 @@ struct rcu_node {
 				/*  This can happen due to race conditions. */
 #endif /* #ifdef CONFIG_RCU_BOOST */
 #ifdef CONFIG_RCU_NOCB_CPU
-	wait_queue_head_t nocb_gp_wq[2];
+	struct swait_head nocb_gp_wq[2];
 				/* Place for rcu_nocb_kthread() to wait GP. */
 #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
 	int need_future_gp[2];
@@ 347 @@ struct rcu_data {
 	atomic_long_t nocb_follower_count_lazy; /*  (approximate). */
 	int nocb_p_count;		/* # CBs being invoked by kthread */
 	int nocb_p_count_lazy;		/*  (approximate). */
-	wait_queue_head_t nocb_wq;	/* For nocb kthreads to sleep on. */
+	struct swait_head nocb_wq;	/* For nocb kthreads to sleep on. */
 	struct task_struct *nocb_kthread;
 	int nocb_defer_wakeup;		/* Defer wakeup of nocb_kthread. */
 
@@ 438 @@ struct rcu_state {
 	unsigned long gpnum;			/* Current gp number. */
 	unsigned long completed;		/* # of last completed gp. */
 	struct task_struct *gp_kthread;		/* Task for grace periods. */
-	wait_queue_head_t gp_wq;		/* Where GP task waits. */
+	struct swait_head gp_wq;		/* Where GP task waits. */
 	short gp_flags;				/* Commands for GP task. */
 	short gp_state;				/* GP kthread sleep state. */
 
@@ 569 @@ static void rcu_report_exp_rnp(struct rc
 static void __init __rcu_init_preempt(void);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
 static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
-static void invoke_rcu_callbacks_kthread(void);
 static bool rcu_is_callbacks_kthread(void);
+static void rcu_cpu_kthread_setup(unsigned int cpu);
 #ifdef CONFIG_RCU_BOOST
-static void rcu_preempt_do_callbacks(void);
 static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
 						 struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_RCU_BOOST */
Index: linux-3.18.13-rt10-r7s4/kernel/rcu/tree_plugin.h
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/rcu/tree_plugin.h
+++ linux-3.18.13-rt10-r7s4/kernel/rcu/tree_plugin.h
@@ 27 @@
  *	   Paul E. McKenney <paulmck@linux.vnet.ibm.com>
  */
 
-#include <linux/delay.h>
-#include <linux/gfp.h>
-#include <linux/oom.h>
-#include <linux/smpboot.h>
-#include "../time/tick-internal.h"
-
 #define RCU_KTHREAD_PRIO 1
 
 #ifdef CONFIG_RCU_BOOST
@@ 332 @@ void rcu_read_unlock_special(struct task
 	}
 
 	/* Hardware IRQ handlers cannot block, complain if they get here. */
-	if (WARN_ON_ONCE(in_irq() || in_serving_softirq())) {
+	if (WARN_ON_ONCE(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET))) {
 		local_irq_restore(flags);
 		return;
 	}
@@ 395 @@ void rcu_read_unlock_special(struct task
 
 #ifdef CONFIG_RCU_BOOST
 		/* Unboost if we were boosted. */
-		if (drop_boost_mutex) {
+		if (drop_boost_mutex)
 			rt_mutex_unlock(&rnp->boost_mtx);
-			complete(&rnp->boost_completion);
-		}
 #endif /* #ifdef CONFIG_RCU_BOOST */
 
 		/*
@@ 630 @@ static void rcu_preempt_check_callbacks(
 		t->rcu_read_unlock_special.b.need_qs = true;
 }
 
-#ifdef CONFIG_RCU_BOOST
-
-static void rcu_preempt_do_callbacks(void)
-{
-	rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data));
-}
-
-#endif /* #ifdef CONFIG_RCU_BOOST */
-
 /*
  * Queue a preemptible-RCU callback for invocation after a grace period.
  */
@@ 1058 @@ void exit_rcu(void)
 
 #endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
 
+/*
+ * If boosting, set rcuc kthreads to realtime priority.
+ */
+static void rcu_cpu_kthread_setup(unsigned int cpu)
+{
+#ifdef CONFIG_RCU_BOOST
+	struct sched_param sp;
+
+	sp.sched_priority = RCU_KTHREAD_PRIO;
+	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
+#endif /* #ifdef CONFIG_RCU_BOOST */
+}
+
 #ifdef CONFIG_RCU_BOOST
 
 #include "../locking/rtmutex_common.h"
@@ 1102 @@ static void rcu_initiate_boost_trace(str
 
 #endif /* #else #ifdef CONFIG_RCU_TRACE */
 
-static void rcu_wake_cond(struct task_struct *t, int status)
-{
-	/*
-	 * If the thread is yielding, only wake it when this
-	 * is invoked from idle
-	 */
-	if (status != RCU_KTHREAD_YIELDING || is_idle_task(current))
-		wake_up_process(t);
-}
-
 /*
  * Carry out RCU priority boosting on the task indicated by ->exp_tasks
  * or ->boost_tasks, advancing the pointer to the next task in the
@@ 1164 @@ static int rcu_boost(struct rcu_node *rn
 	 */
 	t = container_of(tb, struct task_struct, rcu_node_entry);
 	rt_mutex_init_proxy_locked(&rnp->boost_mtx, t);
-	init_completion(&rnp->boost_completion);
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 	/* Lock only for side effect: boosts task t's priority. */
 	rt_mutex_lock(&rnp->boost_mtx);
 	rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
 
-	/* Wait for boostee to be done w/boost_mtx before reinitializing. */
-	wait_for_completion(&rnp->boost_completion);
-
 	return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
 	       ACCESS_ONCE(rnp->boost_tasks) != NULL;
 }
@@ 1246 @@ static void rcu_initiate_boost(struct rc
 }
 
 /*
- * Wake up the per-CPU kthread to invoke RCU callbacks.
- */
-static void invoke_rcu_callbacks_kthread(void)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__this_cpu_write(rcu_cpu_has_work, 1);
-	if (__this_cpu_read(rcu_cpu_kthread_task) != NULL &&
-	    current != __this_cpu_read(rcu_cpu_kthread_task)) {
-		rcu_wake_cond(__this_cpu_read(rcu_cpu_kthread_task),
-			      __this_cpu_read(rcu_cpu_kthread_status));
-	}
-	local_irq_restore(flags);
-}
-
-/*
  * Is the current CPU running the RCU-callbacks kthread?
  * Caller must have preemption disabled.
  */
@@ 1300 @@ static int rcu_spawn_one_boost_kthread(s
 	return 0;
 }
 
-static void rcu_kthread_do_work(void)
-{
-	rcu_do_batch(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
-	rcu_do_batch(&rcu_bh_state, this_cpu_ptr(&rcu_bh_data));
-	rcu_preempt_do_callbacks();
-}
-
-static void rcu_cpu_kthread_setup(unsigned int cpu)
-{
-	struct sched_param sp;
-
-	sp.sched_priority = RCU_KTHREAD_PRIO;
-	sched_setscheduler_nocheck(current, SCHED_FIFO, &sp);
-}
-
-static void rcu_cpu_kthread_park(unsigned int cpu)
-{
-	per_cpu(rcu_cpu_kthread_status, cpu) = RCU_KTHREAD_OFFCPU;
-}
-
-static int rcu_cpu_kthread_should_run(unsigned int cpu)
-{
-	return __this_cpu_read(rcu_cpu_has_work);
-}
-
-/*
- * Per-CPU kernel thread that invokes RCU callbacks.  This replaces the
- * RCU softirq used in flavors and configurations of RCU that do not
- * support RCU priority boosting.
- */
-static void rcu_cpu_kthread(unsigned int cpu)
-{
-	unsigned int *statusp = this_cpu_ptr(&rcu_cpu_kthread_status);
-	char work, *workp = this_cpu_ptr(&rcu_cpu_has_work);
-	int spincnt;
-
-	for (spincnt = 0; spincnt < 10; spincnt++) {
-		trace_rcu_utilization(TPS("Start CPU kthread@rcu_wait"));
-		local_bh_disable();
-		*statusp = RCU_KTHREAD_RUNNING;
-		this_cpu_inc(rcu_cpu_kthread_loops);
-		local_irq_disable();
-		work = *workp;
-		*workp = 0;
-		local_irq_enable();
-		if (work)
-			rcu_kthread_do_work();
-		local_bh_enable();
-		if (*workp == 0) {
-			trace_rcu_utilization(TPS("End CPU kthread@rcu_wait"));
-			*statusp = RCU_KTHREAD_WAITING;
-			return;
-		}
-	}
-	*statusp = RCU_KTHREAD_YIELDING;
-	trace_rcu_utilization(TPS("Start CPU kthread@rcu_yield"));
-	schedule_timeout_interruptible(2);
-	trace_rcu_utilization(TPS("End CPU kthread@rcu_yield"));
-	*statusp = RCU_KTHREAD_WAITING;
-}
-
 /*
  * Set the per-rcu_node kthread's affinity to cover all CPUs that are
  * served by the rcu_node in question.  The CPU hotplug lock is still
@@ 1333 @@ static void rcu_boost_kthread_setaffinit
 	free_cpumask_var(cm);
 }
 
-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
-	.store			= &rcu_cpu_kthread_task,
-	.thread_should_run	= rcu_cpu_kthread_should_run,
-	.thread_fn		= rcu_cpu_kthread,
-	.thread_comm		= "rcuc/%u",
-	.setup			= rcu_cpu_kthread_setup,
-	.park			= rcu_cpu_kthread_park,
-};
-
 /*
  * Spawn boost kthreads -- called as soon as the scheduler is running.
  */
 static void __init rcu_spawn_boost_kthreads(void)
 {
 	struct rcu_node *rnp;
-	int cpu;
 
-	for_each_possible_cpu(cpu)
-		per_cpu(rcu_cpu_has_work, cpu) = 0;
-	BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
 	rnp = rcu_get_root(rcu_state_p);
 	(void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
 	if (NUM_RCU_NODES > 1) {
@@ 1366 @@ static void rcu_initiate_boost(struct rc
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
-static void invoke_rcu_callbacks_kthread(void)
-{
-	WARN_ON_ONCE(1);
-}
-
 static bool rcu_is_callbacks_kthread(void)
 {
 	return false;
@@ 1389 @@ static void rcu_prepare_kthreads(int cpu
 
 #endif /* #else #ifdef CONFIG_RCU_BOOST */
 
-#if !defined(CONFIG_RCU_FAST_NO_HZ)
+#if !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL)
 
 /*
  * Check to see if any future RCU-related work will need to be done
@@ 1407 @@ int rcu_needs_cpu(int cpu, unsigned long
 	return rcu_cpu_has_callbacks(cpu, NULL);
 }
 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
+#endif /* !defined(CONFIG_RCU_FAST_NO_HZ) || defined(CONFIG_PREEMPT_RT_FULL) */
 
+#if !defined(CONFIG_RCU_FAST_NO_HZ)
 /*
  * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
  * after it.
@@ 1506 @@ static bool __maybe_unused rcu_try_advan
 	return cbs_ready;
 }
 
+#ifndef CONFIG_PREEMPT_RT_FULL
+
 /*
  * Allow the CPU to enter dyntick-idle mode unless it has callbacks ready
  * to invoke.  If the CPU has callbacks, try to advance them.  Tell the
@@ 1548 @@ int rcu_needs_cpu(int cpu, unsigned long
 	return 0;
 }
 #endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
-
+#endif /* #ifndef CONFIG_PREEMPT_RT_FULL */
 /*
  * Prepare a CPU for idle from an RCU perspective.  The first major task
  * is to sense whether nohz mode has been enabled or disabled via sysfs.
@@ 1894 @@ early_param("rcu_nocb_poll", parse_rcu_n
  */
 static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 {
-	wake_up_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
+	swait_wake_all(&rnp->nocb_gp_wq[rnp->completed & 0x1]);
 }
 
 /*
@@ 1912 @@ static void rcu_nocb_gp_set(struct rcu_n
 
 static void rcu_init_one_nocb(struct rcu_node *rnp)
 {
-	init_waitqueue_head(&rnp->nocb_gp_wq[0]);
-	init_waitqueue_head(&rnp->nocb_gp_wq[1]);
+	init_swait_head(&rnp->nocb_gp_wq[0]);
+	init_swait_head(&rnp->nocb_gp_wq[1]);
 }
 
 #ifndef CONFIG_RCU_NOCB_CPU_ALL
@@ 1938 @@ static void wake_nocb_leader(struct rcu_
 	if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
 		/* Prior smp_mb__after_atomic() orders against prior enqueue. */
 		ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
-		wake_up(&rdp_leader->nocb_wq);
+		swait_wake(&rdp_leader->nocb_wq);
 	}
 }
 
@@ 2131 @@ static void rcu_nocb_wait_gp(struct rcu_
 	 */
 	trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
 	for (;;) {
-		wait_event_interruptible(
+		swait_event_interruptible(
 			rnp->nocb_gp_wq[c & 0x1],
 			(d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c)));
 		if (likely(d))
@@ 2159 @@ wait_again:
 	/* Wait for callbacks to appear. */
 	if (!rcu_nocb_poll) {
 		trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
-		wait_event_interruptible(my_rdp->nocb_wq,
+		swait_event_interruptible(my_rdp->nocb_wq,
 				!ACCESS_ONCE(my_rdp->nocb_leader_sleep));
 		/* Memory barrier handled by smp_mb() calls below and repoll. */
 	} else if (firsttime) {
@@ 2240 @@ wait_again:
 			 * List was empty, wake up the follower.
 			 * Memory barriers supplied by atomic_long_add().
 			 */
-			wake_up(&rdp->nocb_wq);
+			swait_wake(&rdp->nocb_wq);
 		}
 	}
 
@@ 2261 @@ static void nocb_follower_wait(struct rc
 		if (!rcu_nocb_poll) {
 			trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
 					    "FollowerSleep");
-			wait_event_interruptible(rdp->nocb_wq,
+			swait_event_interruptible(rdp->nocb_wq,
 						 ACCESS_ONCE(rdp->nocb_follower_head));
 		} else if (firsttime) {
 			/* Don't drown trace log with "Poll"! */
@@ 2432 @@ void __init rcu_init_nohz(void)
 static void __init rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp)
 {
 	rdp->nocb_tail = &rdp->nocb_head;
-	init_waitqueue_head(&rdp->nocb_wq);
+	init_swait_head(&rdp->nocb_wq);
 	rdp->nocb_follower_tail = &rdp->nocb_follower_head;
 }
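
All of the conversions above follow the same simple-wait pattern (API
from include/linux/wait-simple.h in the -rt tree; the demo_* names are
placeholders):

	static struct swait_head demo_wq;
	static bool demo_done;

	static void demo_init(void)
	{
		init_swait_head(&demo_wq);
	}

	static void demo_waiter(void)
	{
		/* re-evaluates demo_done around each sleep */
		swait_event_interruptible(demo_wq, demo_done);
	}

	static void demo_waker(void)
	{
		demo_done = true;
		/* the wake side stays short and raw-lock based, so it
		 * is usable where sleeping locks are not allowed on RT */
		swait_wake(&demo_wq);	/* or swait_wake_all() */
	}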
 
Index: linux-3.18.13-rt10-r7s4/kernel/rcu/update.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/rcu/update.c
+++ linux-3.18.13-rt10-r7s4/kernel/rcu/update.c
@@ 173 @@ int rcu_read_lock_held(void)
 }
 EXPORT_SYMBOL_GPL(rcu_read_lock_held);
 
+#ifndef CONFIG_PREEMPT_RT_FULL
 /**
  * rcu_read_lock_bh_held() - might we be in RCU-bh read-side critical section?
  *
@@ 200 @@ int rcu_read_lock_bh_held(void)
 	return in_softirq() || irqs_disabled();
 }
 EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
+#endif
 
 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
Index: linux-3.18.13-rt10-r7s4/kernel/relay.c
===================================================================
--- linux-3.18.13-rt10-r7s4.orig/kernel/relay.c
+++ linux-3.18.13-rt10-r7s4/kernel/relay.c
@@ 342 @@ static void wakeup_readers(unsigned long
 {
 	struct rchan_buf *buf = (struct rchan_buf *)data;
 	wake_up_interruptible(&buf->read_wait);
+	/*
+	 * Stupid polling for now:
+	 */
+	mod_timer(&buf->timer, jiffies + 1);
 }
 
 /**
@@ 363 @@ static void __relay_reset(struct rchan_b
 		init_waitqueue_head(&buf->read_wait);
 		kref_init(&buf->kref);
 		setup_timer(&buf->timer, wakeup_readers, (unsigned long)buf);
+		mod_timer(&buf->timer, jiffies + 1);
 	} else
 		del_timer_sync(&buf->timer);
 
@@ 747 @@ size_t relay_switch_subbuf(struct rchan_
 		else
 			buf->early_bytes += buf->chan->subbuf_size -
 					    buf->padding