Diffstat (limited to 'package/valgrind/patches/patch-coregrind_m_dispatch_dispatch-arm-linux_S')
-rw-r--r--  package/valgrind/patches/patch-coregrind_m_dispatch_dispatch-arm-linux_S  94
1 file changed, 94 insertions(+), 0 deletions(-)
diff --git a/package/valgrind/patches/patch-coregrind_m_dispatch_dispatch-arm-linux_S b/package/valgrind/patches/patch-coregrind_m_dispatch_dispatch-arm-linux_S
new file mode 100644
index 000000000..676365501
--- /dev/null
+++ b/package/valgrind/patches/patch-coregrind_m_dispatch_dispatch-arm-linux_S
@@ -0,0 +1,94 @@
+--- valgrind-3.7.0.orig/coregrind/m_dispatch/dispatch-arm-linux.S 2011-10-26 23:24:42.000000000 +0200
++++ valgrind-3.7.0/coregrind/m_dispatch/dispatch-arm-linux.S 2013-07-23 12:27:45.000000000 +0200
+@@ -84,11 +84,17 @@ VG_(run_innerloop__dispatch_unprofiled):
+ /* AT ENTRY: r0 is next guest addr, r8 is possibly
+ modified guest state ptr */
+
++#if defined(ARM_ARCH_V6)
++ /* use slower code on pre-cortex architectures */
++ ldr r3, =VG_(dispatch_ctr)
++ tst r8, #1
++#else
+ /* Has the guest state pointer been messed with? If yes, exit. */
+ movw r3, #:lower16:VG_(dispatch_ctr)
+ tst r8, #1
+
+ movt r3, #:upper16:VG_(dispatch_ctr)
++#endif
+
+ bne gsp_changed
+
+@@ -104,6 +110,12 @@ VG_(run_innerloop__dispatch_unprofiled):
+
+ beq counter_is_zero
+
++#if defined(ARM_ARCH_V6)
++ /* use slower code on pre-cortex architectures */
++ ldr r1, =VG_TT_FAST_MASK // r1 = VG_TT_FAST_MASK
++ ldr r4, =VG_(tt_fast)
++ and r2, r1, r0, LSR #1 // r2 = entry #
++#else
+ /* try a fast lookup in the translation cache */
+ // r0 = next guest, r1,r2,r3,r4 scratch
+ movw r1, #VG_TT_FAST_MASK // r1 = VG_TT_FAST_MASK
+@@ -111,6 +123,7 @@ VG_(run_innerloop__dispatch_unprofiled):
+
+ and r2, r1, r0, LSR #1 // r2 = entry #
+ movt r4, #:upper16:VG_(tt_fast) // r4 = &VG_(tt_fast)
++#endif
+
+ add r1, r4, r2, LSL #3 // r1 = &tt_fast[entry#]
+
+@@ -140,11 +153,17 @@ VG_(run_innerloop__dispatch_profiled):
+ /* AT ENTRY: r0 is next guest addr, r8 is possibly
+ modified guest state ptr */
+
++#if defined(ARM_ARCH_V6)
++ /* use slower code on pre-cortex architectures */
++ ldr r3, =VG_(dispatch_ctr)
++ tst r8, #1
++#else
+ /* Has the guest state pointer been messed with? If yes, exit. */
+ movw r3, #:lower16:VG_(dispatch_ctr)
+ tst r8, #1
+
+ movt r3, #:upper16:VG_(dispatch_ctr)
++#endif
+
+ bne gsp_changed
+
+@@ -160,6 +179,12 @@ VG_(run_innerloop__dispatch_profiled):
+
+ beq counter_is_zero
+
++#if defined(ARM_ARCH_V6)
++ /* use slower code on pre-cortex architectures */
++ ldr r1, =VG_TT_FAST_MASK // r1 = VG_TT_FAST_MASK
++ ldr r4, =VG_(tt_fast)
++ and r2, r1, r0, LSR #1 // r2 = entry #
++#else
+ /* try a fast lookup in the translation cache */
+ // r0 = next guest, r1,r2,r3,r4 scratch
+ movw r1, #VG_TT_FAST_MASK // r1 = VG_TT_FAST_MASK
+@@ -167,6 +192,7 @@ VG_(run_innerloop__dispatch_profiled):
+
+ and r2, r1, r0, LSR #1 // r2 = entry #
+ movt r4, #:upper16:VG_(tt_fast) // r4 = &VG_(tt_fast)
++#endif
+
+ add r1, r4, r2, LSL #3 // r1 = &tt_fast[entry#]
+
+@@ -181,8 +207,13 @@ VG_(run_innerloop__dispatch_profiled):
+ // LIVE: r5, r8; all others dead
+
+ /* increment bb profile counter */
++#if defined(ARM_ARCH_V6)
++ /* use slower code on pre-cortex architectures */
++ ldr r0, =VG_(tt_fastN)
++#else
+ movw r0, #:lower16:VG_(tt_fastN)
+ movt r0, #:upper16:VG_(tt_fastN) // r0 = &tt_fastN[0]
++#endif
+ ldr r0, [r0, r2, LSL #2] // r0 = tt_fast[entry #]
+ ldr r3, [r0] // *r0 ++
+ add r3, r3, #1
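What the patch works around: movw/movt build a 32-bit address from two 16-bit immediates, but those instructions only exist from ARMv6T2/ARMv7 onward, so on plain ARMv6 the address has to come from a literal pool via an ldr pseudo-instruction instead. The following is a minimal stand-alone sketch of the two sequences, not taken from the patch itself; it assumes GNU as with the file run through the C preprocessor (a .S file built with gcc), that ARM_ARCH_V6 is defined by the build system as the patch expects, and it uses a placeholder symbol some_counter.

        .syntax unified
        .text
        .global load_counter_addr
load_counter_addr:
#if defined(ARM_ARCH_V6)
        /* ARMv6: no movw/movt. The assembler places the address of
           some_counter in a nearby literal pool and this becomes a
           PC-relative load -- one extra memory access, hence the
           "slower code" comments in the patch. */
        ldr     r3, =some_counter
#else
        /* ARMv6T2/ARMv7+: build the 32-bit address from two 16-bit
           immediates, no data load needed. */
        movw    r3, #:lower16:some_counter
        movt    r3, #:upper16:some_counter
#endif
        bx      lr
        .ltorg                  /* emit the literal pool close to the ldr */

        .data
some_counter:
        .word   0

Both variants leave the address of some_counter in r3; the ldr form trades the extra load for compatibility with pre-Cortex cores, which is exactly the trade-off the patch makes for VG_(dispatch_ctr), VG_(tt_fast), VG_TT_FAST_MASK and VG_(tt_fastN).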