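Make Valgrind's ARM dispatcher build for ARMv6 (pre-Cortex) CPUs: the
movw/movt instructions it uses to materialise 32-bit addresses only exist
from ARMv6T2 onwards, so when ARM_ARCH_V6 is defined, fall back to the
slower literal-pool ldr loads instead.
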
--- valgrind-3.7.0.orig/coregrind/m_dispatch/dispatch-arm-linux.S 2011-10-26 23:24:42.000000000 +0200
+++ valgrind-3.7.0/coregrind/m_dispatch/dispatch-arm-linux.S 2013-07-23 12:27:45.000000000 +0200
@@ -84,11 +84,17 @@ VG_(run_innerloop__dispatch_unprofiled):
/* AT ENTRY: r0 is next guest addr, r8 is possibly
modified guest state ptr */
+#if defined(ARM_ARCH_V6)
+   /* ARMv6 has no movw/movt: use slower literal-pool loads */
+ ldr r3, =VG_(dispatch_ctr)
+ tst r8, #1
+#else
/* Has the guest state pointer been messed with? If yes, exit. */
movw r3, #:lower16:VG_(dispatch_ctr)
tst r8, #1
movt r3, #:upper16:VG_(dispatch_ctr)
+#endif
bne gsp_changed
@@ -104,6 +110,12 @@ VG_(run_innerloop__dispatch_unprofiled):
beq counter_is_zero
+#if defined(ARM_ARCH_V6)
+   /* ARMv6 has no movw/movt: use slower literal-pool loads */
+ ldr r1, =VG_TT_FAST_MASK // r1 = VG_TT_FAST_MASK
+ ldr r4, =VG_(tt_fast)
+ and r2, r1, r0, LSR #1 // r2 = entry #
+#else
/* try a fast lookup in the translation cache */
// r0 = next guest, r1,r2,r3,r4 scratch
movw r1, #VG_TT_FAST_MASK // r1 = VG_TT_FAST_MASK
@@ -111,6 +123,7 @@ VG_(run_innerloop__dispatch_unprofiled):
and r2, r1, r0, LSR #1 // r2 = entry #
movt r4, #:upper16:VG_(tt_fast) // r4 = &VG_(tt_fast)
+#endif
add r1, r4, r2, LSL #3 // r1 = &tt_fast[entry#]
@@ -140,11 +153,17 @@ VG_(run_innerloop__dispatch_profiled):
/* AT ENTRY: r0 is next guest addr, r8 is possibly
modified guest state ptr */
+#if defined(ARM_ARCH_V6)
+   /* ARMv6 has no movw/movt: use slower literal-pool loads */
+ ldr r3, =VG_(dispatch_ctr)
+ tst r8, #1
+#else
/* Has the guest state pointer been messed with? If yes, exit. */
movw r3, #:lower16:VG_(dispatch_ctr)
tst r8, #1
movt r3, #:upper16:VG_(dispatch_ctr)
+#endif
bne gsp_changed
@@ -160,6 +179,12 @@ VG_(run_innerloop__dispatch_profiled):
beq counter_is_zero
+#if defined(ARM_ARCH_V6)
+   /* ARMv6 has no movw/movt: use slower literal-pool loads */
+ ldr r1, =VG_TT_FAST_MASK // r1 = VG_TT_FAST_MASK
+ ldr r4, =VG_(tt_fast)
+ and r2, r1, r0, LSR #1 // r2 = entry #
+#else
/* try a fast lookup in the translation cache */
// r0 = next guest, r1,r2,r3,r4 scratch
movw r1, #VG_TT_FAST_MASK // r1 = VG_TT_FAST_MASK
@@ -167,6 +192,7 @@ VG_(run_innerloop__dispatch_profiled):
and r2, r1, r0, LSR #1 // r2 = entry #
movt r4, #:upper16:VG_(tt_fast) // r4 = &VG_(tt_fast)
+#endif
add r1, r4, r2, LSL #3 // r1 = &tt_fast[entry#]
@@ -181,8 +207,13 @@ VG_(run_innerloop__dispatch_profiled):
// LIVE: r5, r8; all others dead
/* increment bb profile counter */
+#if defined(ARM_ARCH_V6)
+   /* ARMv6 has no movw/movt: use slower literal-pool loads */
+ ldr r0, =VG_(tt_fastN)
+#else
movw r0, #:lower16:VG_(tt_fastN)
movt r0, #:upper16:VG_(tt_fastN) // r0 = &tt_fastN[0]
+#endif
ldr r0, [r0, r2, LSL #2] // r0 = tt_fast[entry #]
ldr r3, [r0] // *r0 ++
add r3, r3, #1
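
For reference, here are the two address-materialisation idioms the patch
switches between, as a minimal stand-alone sketch (GNU as syntax in a
preprocessed .S file; the counter symbol and load_counter_addr label are
illustrative only, not taken from Valgrind):

        .syntax unified
        .text
        .global load_counter_addr

/* Return &counter in r0. */
load_counter_addr:
#if defined(ARM_ARCH_V6)
        /* ARMv6: 'ldr rN, =sym' makes the assembler park the address
           in a literal pool and emit a PC-relative load for it */
        ldr     r0, =counter
#else
        /* ARMv6T2/ARMv7: build the address from two immediates;
           no data access, hence faster in the hot dispatch loop */
        movw    r0, #:lower16:counter
        movt    r0, #:upper16:counter
#endif
        bx      lr
        .ltorg                  /* keep the literal pool within reach */

        .data
counter:
        .word   0

The movw/movt pair encodes the full 32-bit address in two instructions with
no memory traffic, which is why the original code keeps it on ARMv7. The
ldr =sym pseudo-instruction assembles on plain ARMv6 but costs an extra
load, and its literal pool must sit within the roughly 4 KB reach of a
PC-relative ldr, hence the explicit .ltorg in larger functions.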