Diffstat (limited to 'libpthread/nptl/allocatestack.c')
-rw-r--r--  libpthread/nptl/allocatestack.c  | 24
1 file changed, 12 insertions(+), 12 deletions(-)
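
Every hunk below applies the same kind of fix: preprocessor tests of the form "#if MACRO != 0" (or "#elif MACRO") become "#if defined MACRO && MACRO != 0" (or "#elif defined MACRO"), presumably so the conditionals no longer rely on undefined identifiers being silently replaced by 0 (the behaviour that GCC's -Wundef flags). A minimal sketch of the difference, using a hypothetical macro FOO that is not part of this file:

/* An undefined identifier inside #if is replaced by 0, so the first test
   "works" but triggers -Wundef when FOO is not defined.  Guarding the
   comparison with `defined' gives the same result without the warning.  */

#if FOO != 0                        /* warns under -Wundef if FOO is undefined */
# define HAVE_FOO 1
#endif

#if defined FOO && FOO != 0         /* FOO is only compared when it is defined */
# define HAVE_FOO_CHECKED 1
#endif
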
diff --git a/libpthread/nptl/allocatestack.c b/libpthread/nptl/allocatestack.c
index 1c549cee1..e30fe41a2 100644
--- a/libpthread/nptl/allocatestack.c
+++ b/libpthread/nptl/allocatestack.c
@@ -122,7 +122,7 @@ static uintptr_t in_flight_stack;
list_t __stack_user __attribute__ ((nocommon));
hidden_data_def (__stack_user)
-#if COLORING_INCREMENT != 0
+#if defined COLORING_INCREMENT && COLORING_INCREMENT != 0
/* Number of threads created. */
static unsigned int nptl_ncreated;
#endif
@@ -316,10 +316,10 @@ change_stack_perm (struct pthread *pd
		 + (((((pd->stackblock_size - pd->guardsize) / 2)
		      & pagemask) + pd->guardsize) & pagemask));
size_t len = pd->stackblock + pd->stackblock_size - stack;
-#elif _STACK_GROWS_DOWN
+#elif defined _STACK_GROWS_DOWN
void *stack = pd->stackblock + pd->guardsize;
size_t len = pd->stackblock_size - pd->guardsize;
-#elif _STACK_GROWS_UP
+#elif defined _STACK_GROWS_UP
void *stack = pd->stackblock;
size_t len = (uintptr_t) pd - pd->guardsize - (uintptr_t) pd->stackblock;
#else
@@ -443,7 +443,7 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
void *mem = 0;
const int prot = (PROT_READ | PROT_WRITE);
-#if COLORING_INCREMENT != 0
+#if defined COLORING_INCREMENT && COLORING_INCREMENT != 0
/* Add one more page for stack coloring. Don't do it for stacks
with 16 times pagesize or larger. This might just cause
unnecessary misalignment. */
@@ -474,7 +474,7 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
adjust the allocated stack size if necessary. This way
allocations directly following each other will not have
aliasing problems. */
-#if MULTI_PAGE_ALIASING != 0
+#if defined MULTI_PAGE_ALIASING && MULTI_PAGE_ALIASING != 0
if ((size % MULTI_PAGE_ALIASING) == 0)
size += pagesize_m1 + 1;
#endif
@@ -494,7 +494,7 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
So we can never get a null pointer back from mmap. */
assert (mem != NULL);
-#if COLORING_INCREMENT != 0
+#if defined COLORING_INCREMENT && COLORING_INCREMENT != 0
/* Atomically increment NCREATED. */
unsigned int ncreated = atomic_increment_val (&nptl_ncreated);
@@ -591,9 +591,9 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
{
#ifdef NEED_SEPARATE_REGISTER_STACK
char *guard = mem + (((size - guardsize) / 2) & ~pagesize_m1);
-#elif _STACK_GROWS_DOWN
+#elif defined _STACK_GROWS_DOWN
char *guard = mem;
-# elif _STACK_GROWS_UP
+#elif defined _STACK_GROWS_UP
char *guard = (char *) (((uintptr_t) pd - guardsize) & ~pagesize_m1);
#endif
if (mprotect (guard, guardsize, PROT_NONE) != 0)
@@ -641,11 +641,11 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
oldguard + pd->guardsize - guard - guardsize,
prot) != 0)
goto mprot_error;
-#elif _STACK_GROWS_DOWN
+#elif defined _STACK_GROWS_DOWN
if (mprotect ((char *) mem + guardsize, pd->guardsize - guardsize,
prot) != 0)
goto mprot_error;
-#elif _STACK_GROWS_UP
+#elif defined _STACK_GROWS_UP
if (mprotect ((char *) pd - pd->guardsize,
pd->guardsize - guardsize, prot) != 0)
goto mprot_error;
@@ -688,9 +688,9 @@ allocate_stack (const struct pthread_attr *attr, struct pthread **pdp,
#ifdef NEED_SEPARATE_REGISTER_STACK
*stack = pd->stackblock;
*stacksize = stacktop - *stack;
-#elif _STACK_GROWS_DOWN
+#elif defined _STACK_GROWS_DOWN
*stack = stacktop;
-#elif _STACK_GROWS_UP
+#elif defined _STACK_GROWS_UP
*stack = pd->stackblock;
assert (*stack > 0);
#endif
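
For context on the hunks that choose between _STACK_GROWS_DOWN and _STACK_GROWS_UP: they only decide where inside the mmap'ed block the PROT_NONE guard page sits and which part is handed back as the usable stack. A minimal standalone sketch of that idea, assuming a downward-growing stack, a single guard page and hypothetical sizes (this is not the allocator's real layout or error handling):

#define _DEFAULT_SOURCE
#include <assert.h>
#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

int main (void)
{
  size_t pagesize = (size_t) sysconf (_SC_PAGESIZE);
  size_t guardsize = pagesize;          /* one guard page, hypothetical choice */
  size_t size = 64 * 1024;              /* hypothetical stack block size */

  /* Reserve the whole block read/write, as the allocator does with mmap.  */
  void *mem = mmap (NULL, size, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert (mem != MAP_FAILED);

  /* Downward-growing stack: the guard page sits at the low end of the
     block, so running off the end of the stack faults instead of
     scribbling over whatever is mapped below.  (An upward-growing stack
     would protect the high end instead.)  */
  if (mprotect (mem, guardsize, PROT_NONE) != 0)
    return 1;

  /* The usable stack is everything above the guard.  */
  void *stack = (char *) mem + guardsize;
  size_t len = size - guardsize;
  (void) stack;
  (void) len;

  munmap (mem, size);
  return 0;
}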