path: root/libc/string/xtensa
Diffstat (limited to 'libc/string/xtensa')
 -rw-r--r--  libc/string/xtensa/memcpy.S  |  22
 -rw-r--r--  libc/string/xtensa/memset.S  |  12
 -rw-r--r--  libc/string/xtensa/strcmp.S  | 148
 -rw-r--r--  libc/string/xtensa/strcpy.S  |  72
 -rw-r--r--  libc/string/xtensa/strlen.S  |  56
 -rw-r--r--  libc/string/xtensa/strncpy.S | 150
 6 files changed, 230 insertions(+), 230 deletions(-)
diff --git a/libc/string/xtensa/memcpy.S b/libc/string/xtensa/memcpy.S
index 19f3a6818..fc04c023e 100644
--- a/libc/string/xtensa/memcpy.S
+++ b/libc/string/xtensa/memcpy.S
@@ -83,7 +83,7 @@ __memcpy_aux:
loopnez a4, 2f
#else
beqz a4, 2f
- add a7, a3, a4 // a7 = end address for source
+ add a7, a3, a4 /* a7 = end address for source */
#endif
1: l8ui a6, a3, 0
addi a3, a3, 1
@@ -98,7 +98,7 @@ __memcpy_aux:
/* Destination is unaligned. */
.align 4
-.Ldst1mod2: // dst is only byte aligned
+.Ldst1mod2: /* dst is only byte aligned */
/* Do short copies byte-by-byte. */
_bltui a4, 7, .Lbytecopy
@@ -113,7 +113,7 @@ __memcpy_aux:
/* Return to main algorithm if dst is now aligned. */
_bbci.l a5, 1, .Ldstaligned
-.Ldst2mod4: // dst has 16-bit alignment
+.Ldst2mod4: /* dst has 16-bit alignment */
/* Do short copies byte-by-byte. */
_bltui a4, 6, .Lbytecopy
@@ -134,7 +134,7 @@ __memcpy_aux:
ENTRY (memcpy)
/* a2 = dst, a3 = src, a4 = len */
- mov a5, a2 // copy dst so that a2 is return value
+ mov a5, a2 /* copy dst so that a2 is return value */
_bbsi.l a2, 0, .Ldst1mod2
_bbsi.l a2, 1, .Ldst2mod4
.Ldstaligned:
@@ -152,7 +152,7 @@ ENTRY (memcpy)
#else
beqz a7, 2f
slli a8, a7, 4
- add a8, a8, a3 // a8 = end of last 16B source chunk
+ add a8, a8, a3 /* a8 = end of last 16B source chunk */
#endif
1: l32i a6, a3, 0
l32i a7, a3, 4
@@ -218,18 +218,18 @@ ENTRY (memcpy)
/* Copy 16 bytes per iteration for word-aligned dst and
unaligned src. */
- ssa8 a3 // set shift amount from byte offset
+ ssa8 a3 /* set shift amount from byte offset */
#if UNALIGNED_ADDRESSES_CHECKED
- and a11, a3, a8 // save unalignment offset for below
- sub a3, a3, a11 // align a3
+ and a11, a3, a8 /* save unalignment offset for below */
+ sub a3, a3, a11 /* align a3 */
#endif
- l32i a6, a3, 0 // load first word
+ l32i a6, a3, 0 /* load first word */
#if XCHAL_HAVE_LOOPS
loopnez a7, 2f
#else
beqz a7, 2f
slli a10, a7, 4
- add a10, a10, a3 // a10 = end of last 16B source chunk
+ add a10, a10, a3 /* a10 = end of last 16B source chunk */
#endif
1: l32i a7, a3, 4
l32i a8, a3, 8
@@ -273,7 +273,7 @@ ENTRY (memcpy)
mov a6, a7
4:
#if UNALIGNED_ADDRESSES_CHECKED
- add a3, a3, a11 // readjust a3 with correct misalignment
+ add a3, a3, a11 /* readjust a3 with correct misalignment */
#endif
bbsi.l a4, 1, 5f
bbsi.l a4, 0, 6f
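
The memcpy changes above only touch comment style, but for orientation: once the destination is word-aligned, the routine copies 16 bytes per iteration and finishes the tail with 8/4/2/1-byte moves. A rough C sketch of that aligned fast path (illustration only; the helper name memcpy_aligned_sketch and the byte-at-a-time tail are inventions of this sketch, not uClibc's actual code):

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of the word-aligned path: 16 bytes per iteration, then a
       simple tail.  Assumes dst and src are both 4-byte aligned. */
    static void *memcpy_aligned_sketch(void *dst, const void *src, size_t len)
    {
        uint32_t *d = dst;
        const uint32_t *s = src;

        for (size_t chunks = len / 16; chunks != 0; chunks--) {
            d[0] = s[0];
            d[1] = s[1];
            d[2] = s[2];
            d[3] = s[3];
            d += 4;
            s += 4;
        }

        /* Tail: the assembly picks 8/4/2/1-byte moves with bbsi.l tests
           on the length bits; plain bytes are enough for a sketch. */
        unsigned char *db = (unsigned char *)d;
        const unsigned char *sb = (const unsigned char *)s;
        for (size_t i = 0; i < (len & 15); i++)
            db[i] = sb[i];

        return dst;
    }
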
diff --git a/libc/string/xtensa/memset.S b/libc/string/xtensa/memset.S
index c0928825d..076b8f001 100644
--- a/libc/string/xtensa/memset.S
+++ b/libc/string/xtensa/memset.S
@@ -29,7 +29,7 @@
The algorithm is as follows:
Create a word with c in all byte positions.
-
+
If the destination is aligned, set 16B chunks with a loop, and then
finish up with 8B, 4B, 2B, and 1B stores conditional on the length.
@@ -57,7 +57,7 @@ __memset_aux:
loopnez a4, 2f
#else
beqz a4, 2f
- add a6, a5, a4 // a6 = ending address
+ add a6, a5, a4 /* a6 = ending address */
#endif
1: s8i a3, a5, 0
addi a5, a5, 1
@@ -71,7 +71,7 @@ __memset_aux:
.align 4
-.Ldst1mod2: // dst is only byte aligned
+.Ldst1mod2: /* dst is only byte aligned */
/* Do short sizes byte-by-byte. */
bltui a4, 8, .Lbyteset
@@ -84,7 +84,7 @@ __memset_aux:
/* Now retest if dst is aligned. */
_bbci.l a5, 1, .Ldstaligned
-.Ldst2mod4: // dst has 16-bit alignment
+.Ldst2mod4: /* dst has 16-bit alignment */
/* Do short sizes byte-by-byte. */
bltui a4, 8, .Lbyteset
@@ -108,7 +108,7 @@ ENTRY (memset)
slli a7, a3, 16
or a3, a3, a7
- mov a5, a2 // copy dst so that a2 is return value
+ mov a5, a2 /* copy dst so that a2 is return value */
/* Check if dst is unaligned. */
_bbsi.l a2, 0, .Ldst1mod2
@@ -124,7 +124,7 @@ ENTRY (memset)
#else
beqz a7, 2f
slli a6, a7, 4
- add a6, a6, a5 // a6 = end of last 16B chunk
+ add a6, a6, a5 /* a6 = end of last 16B chunk */
#endif
/* Set 16 bytes per iteration. */
1: s32i a3, a5, 0
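
The memset header comment summarized above (replicate c into every byte of a word, then store 16-byte chunks for an aligned destination) maps to roughly this C sketch (names and structure invented for illustration; not the actual implementation):

    #include <stddef.h>
    #include <stdint.h>

    /* Sketch of the aligned path: build a word with c in all four byte
       positions, then store 16 bytes per iteration. */
    static void *memset_aligned_sketch(void *dst, int c, size_t n)
    {
        uint32_t w = (unsigned char)c;
        w |= w << 8;              /* c in bytes 0-1 */
        w |= w << 16;             /* c in bytes 0-3 */

        uint32_t *d = dst;
        for (size_t chunks = n / 16; chunks != 0; chunks--) {
            d[0] = w; d[1] = w; d[2] = w; d[3] = w;
            d += 4;
        }

        /* The remaining n % 16 bytes are finished with 8/4/2/1-byte
           stores in the assembly; a byte loop stands in for that here. */
        unsigned char *db = (unsigned char *)d;
        for (size_t i = 0; i < (n & 15); i++)
            db[i] = (unsigned char)c;

        return dst;
    }
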
diff --git a/libc/string/xtensa/strcmp.S b/libc/string/xtensa/strcmp.S
index 622bb27ed..ac058a2bf 100644
--- a/libc/string/xtensa/strcmp.S
+++ b/libc/string/xtensa/strcmp.S
@@ -45,35 +45,35 @@
ENTRY (strcmp)
/* a2 = s1, a3 = s2 */
- l8ui a8, a2, 0 // byte 0 from s1
- l8ui a9, a3, 0 // byte 0 from s2
- movi a10, 3 // mask
+ l8ui a8, a2, 0 /* byte 0 from s1 */
+ l8ui a9, a3, 0 /* byte 0 from s2 */
+ movi a10, 3 /* mask */
bne a8, a9, .Lretdiff
or a11, a2, a3
bnone a11, a10, .Laligned
- xor a11, a2, a3 // compare low two bits of s1 and s2
- bany a11, a10, .Lunaligned // if they have different alignment
+ xor a11, a2, a3 /* compare low two bits of s1 and s2 */
+ bany a11, a10, .Lunaligned /* if they have different alignment */
/* s1/s2 are not word-aligned. */
- addi a2, a2, 1 // advance s1
- beqz a8, .Leq // bytes equal, if zero, strings are equal
- addi a3, a3, 1 // advance s2
- bnone a2, a10, .Laligned // if s1/s2 now aligned
- l8ui a8, a2, 0 // byte 1 from s1
- l8ui a9, a3, 0 // byte 1 from s2
- addi a2, a2, 1 // advance s1
- bne a8, a9, .Lretdiff // if different, return difference
- beqz a8, .Leq // bytes equal, if zero, strings are equal
- addi a3, a3, 1 // advance s2
- bnone a2, a10, .Laligned // if s1/s2 now aligned
- l8ui a8, a2, 0 // byte 2 from s1
- l8ui a9, a3, 0 // byte 2 from s2
- addi a2, a2, 1 // advance s1
- bne a8, a9, .Lretdiff // if different, return difference
- beqz a8, .Leq // bytes equal, if zero, strings are equal
- addi a3, a3, 1 // advance s2
+ addi a2, a2, 1 /* advance s1 */
+ beqz a8, .Leq /* bytes equal, if zero, strings are equal */
+ addi a3, a3, 1 /* advance s2 */
+ bnone a2, a10, .Laligned /* if s1/s2 now aligned */
+ l8ui a8, a2, 0 /* byte 1 from s1 */
+ l8ui a9, a3, 0 /* byte 1 from s2 */
+ addi a2, a2, 1 /* advance s1 */
+ bne a8, a9, .Lretdiff /* if different, return difference */
+ beqz a8, .Leq /* bytes equal, if zero, strings are equal */
+ addi a3, a3, 1 /* advance s2 */
+ bnone a2, a10, .Laligned /* if s1/s2 now aligned */
+ l8ui a8, a2, 0 /* byte 2 from s1 */
+ l8ui a9, a3, 0 /* byte 2 from s2 */
+ addi a2, a2, 1 /* advance s1 */
+ bne a8, a9, .Lretdiff /* if different, return difference */
+ beqz a8, .Leq /* bytes equal, if zero, strings are equal */
+ addi a3, a3, 1 /* advance s2 */
j .Laligned
/* s1 and s2 have different alignment.
@@ -92,8 +92,8 @@ ENTRY (strcmp)
/* (2 mod 4) alignment for loop instruction */
.Lunaligned:
#if XCHAL_HAVE_LOOPS
- _movi.n a8, 0 // set up for the maximum loop count
- loop a8, .Lretdiff // loop forever (almost anyway)
+ _movi.n a8, 0 /* set up for the maximum loop count */
+ loop a8, .Lretdiff /* loop forever (almost anyway) */
#endif
.Lnextbyte:
l8ui a8, a2, 0
@@ -131,32 +131,32 @@ ENTRY (strcmp)
#if XCHAL_HAVE_LOOPS
.Laligned:
.begin no-transform
- l32r a4, .Lmask0 // mask for byte 0
+ l32r a4, .Lmask0 /* mask for byte 0 */
l32r a7, .Lmask4
/* Loop forever. (a4 is more than the maximum number
of iterations) */
loop a4, .Laligned_done
/* First unrolled loop body. */
- l32i a8, a2, 0 // get word from s1
- l32i a9, a3, 0 // get word from s2
+ l32i a8, a2, 0 /* get word from s1 */
+ l32i a9, a3, 0 /* get word from s2 */
slli a5, a8, 1
bne a8, a9, .Lwne2
or a9, a8, a5
bnall a9, a7, .Lprobeq
/* Second unrolled loop body. */
- l32i a8, a2, 4 // get word from s1+4
- l32i a9, a3, 4 // get word from s2+4
+ l32i a8, a2, 4 /* get word from s1+4 */
+ l32i a9, a3, 4 /* get word from s2+4 */
slli a5, a8, 1
bne a8, a9, .Lwne2
or a9, a8, a5
bnall a9, a7, .Lprobeq2
- addi a2, a2, 8 // advance s1 pointer
- addi a3, a3, 8 // advance s2 pointer
+ addi a2, a2, 8 /* advance s1 pointer */
+ addi a3, a3, 8 /* advance s2 pointer */
.Laligned_done:
- or a1, a1, a1 // nop
+ or a1, a1, a1 /* nop */
.Lprobeq2:
/* Adjust pointers to account for the loop unrolling. */
@@ -166,15 +166,15 @@ ENTRY (strcmp)
#else /* !XCHAL_HAVE_LOOPS */
.Laligned:
- movi a4, MASK0 // mask for byte 0
+ movi a4, MASK0 /* mask for byte 0 */
movi a7, MASK4
j .Lfirstword
.Lnextword:
- addi a2, a2, 4 // advance s1 pointer
- addi a3, a3, 4 // advance s2 pointer
+ addi a2, a2, 4 /* advance s1 pointer */
+ addi a3, a3, 4 /* advance s2 pointer */
.Lfirstword:
- l32i a8, a2, 0 // get word from s1
- l32i a9, a3, 0 // get word from s2
+ l32i a8, a2, 0 /* get word from s1 */
+ l32i a9, a3, 0 /* get word from s2 */
slli a5, a8, 1
bne a8, a9, .Lwne2
or a9, a8, a5
@@ -186,49 +186,49 @@ ENTRY (strcmp)
/* Words are probably equal, but check for sure.
If not, loop over the rest of string using normal algorithm. */
- bnone a8, a4, .Leq // if byte 0 is zero
- l32r a5, .Lmask1 // mask for byte 1
- l32r a6, .Lmask2 // mask for byte 2
- bnone a8, a5, .Leq // if byte 1 is zero
- l32r a7, .Lmask3 // mask for byte 3
- bnone a8, a6, .Leq // if byte 2 is zero
- bnone a8, a7, .Leq // if byte 3 is zero
- addi.n a2, a2, 4 // advance s1 pointer
- addi.n a3, a3, 4 // advance s2 pointer
+ bnone a8, a4, .Leq /* if byte 0 is zero */
+ l32r a5, .Lmask1 /* mask for byte 1 */
+ l32r a6, .Lmask2 /* mask for byte 2 */
+ bnone a8, a5, .Leq /* if byte 1 is zero */
+ l32r a7, .Lmask3 /* mask for byte 3 */
+ bnone a8, a6, .Leq /* if byte 2 is zero */
+ bnone a8, a7, .Leq /* if byte 3 is zero */
+ addi.n a2, a2, 4 /* advance s1 pointer */
+ addi.n a3, a3, 4 /* advance s2 pointer */
#if XCHAL_HAVE_LOOPS
/* align (1 mod 4) */
- loop a4, .Leq // loop forever (a4 is bigger than max iters)
+ loop a4, .Leq /* loop forever (a4 is bigger than max iters) */
.end no-transform
- l32i a8, a2, 0 // get word from s1
- l32i a9, a3, 0 // get word from s2
- addi a2, a2, 4 // advance s1 pointer
+ l32i a8, a2, 0 /* get word from s1 */
+ l32i a9, a3, 0 /* get word from s2 */
+ addi a2, a2, 4 /* advance s1 pointer */
bne a8, a9, .Lwne
- bnone a8, a4, .Leq // if byte 0 is zero
- bnone a8, a5, .Leq // if byte 1 is zero
- bnone a8, a6, .Leq // if byte 2 is zero
- bnone a8, a7, .Leq // if byte 3 is zero
- addi a3, a3, 4 // advance s2 pointer
+ bnone a8, a4, .Leq /* if byte 0 is zero */
+ bnone a8, a5, .Leq /* if byte 1 is zero */
+ bnone a8, a6, .Leq /* if byte 2 is zero */
+ bnone a8, a7, .Leq /* if byte 3 is zero */
+ addi a3, a3, 4 /* advance s2 pointer */
#else /* !XCHAL_HAVE_LOOPS */
j .Lfirstword2
.Lnextword2:
- addi a3, a3, 4 // advance s2 pointer
+ addi a3, a3, 4 /* advance s2 pointer */
.Lfirstword2:
- l32i a8, a2, 0 // get word from s1
- l32i a9, a3, 0 // get word from s2
- addi a2, a2, 4 // advance s1 pointer
+ l32i a8, a2, 0 /* get word from s1 */
+ l32i a9, a3, 0 /* get word from s2 */
+ addi a2, a2, 4 /* advance s1 pointer */
bne a8, a9, .Lwne
- bnone a8, a4, .Leq // if byte 0 is zero
- bnone a8, a5, .Leq // if byte 1 is zero
- bnone a8, a6, .Leq // if byte 2 is zero
- bany a8, a7, .Lnextword2 // if byte 3 is zero
+ bnone a8, a4, .Leq /* if byte 0 is zero */
+ bnone a8, a5, .Leq /* if byte 1 is zero */
+ bnone a8, a6, .Leq /* if byte 2 is zero */
+ bany a8, a7, .Lnextword2 /* loop if byte 3 is nonzero */
#endif /* !XCHAL_HAVE_LOOPS */
/* Words are equal; some byte is zero. */
-.Leq: movi a2, 0 // return equal
+.Leq: movi a2, 0 /* return equal */
retw
.Lwne2: /* Words are not equal. On big-endian processors, if none of the
@@ -243,18 +243,18 @@ ENTRY (strcmp)
.Lposreturn:
movi a2, 1
retw
-.Lsomezero: // There is probably some zero byte.
+.Lsomezero: /* There is probably some zero byte. */
#endif /* __XTENSA_EB__ */
.Lwne: /* Words are not equal. */
- xor a2, a8, a9 // get word with nonzero in byte that differs
- bany a2, a4, .Ldiff0 // if byte 0 differs
- movi a5, MASK1 // mask for byte 1
- bnone a8, a4, .Leq // if byte 0 is zero
- bany a2, a5, .Ldiff1 // if byte 1 differs
- movi a6, MASK2 // mask for byte 2
- bnone a8, a5, .Leq // if byte 1 is zero
- bany a2, a6, .Ldiff2 // if byte 2 differs
- bnone a8, a6, .Leq // if byte 2 is zero
+ xor a2, a8, a9 /* get word with nonzero in byte that differs */
+ bany a2, a4, .Ldiff0 /* if byte 0 differs */
+ movi a5, MASK1 /* mask for byte 1 */
+ bnone a8, a4, .Leq /* if byte 0 is zero */
+ bany a2, a5, .Ldiff1 /* if byte 1 differs */
+ movi a6, MASK2 /* mask for byte 2 */
+ bnone a8, a5, .Leq /* if byte 1 is zero */
+ bany a2, a6, .Ldiff2 /* if byte 2 differs */
+ bnone a8, a6, .Leq /* if byte 2 is zero */
#ifdef __XTENSA_EB__
.Ldiff3:
.Ldiff2:
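
As context for the mask-heavy code above: the aligned strcmp path loads a word from each string, and the per-byte masks (MASK0 through MASK3, one byte lane each) let bnone/bany spot a terminating NUL or the first differing byte without a byte loop. A simplified C sketch of that idea (function name and constants invented here; the real mask values and byte order depend on __XTENSA_EB__):

    #include <stdint.h>

    /* Word-at-a-time compare sketch: assumes both strings are word-aligned
       and the CPU is little-endian.  Returns -1/0/1 rather than a byte
       difference, which is enough for an illustration. */
    static int strcmp_word_sketch(const char *s1, const char *s2)
    {
        static const uint32_t mask[4] = { 0x000000ffu, 0x0000ff00u,
                                          0x00ff0000u, 0xff000000u };
        const uint32_t *p1 = (const uint32_t *)s1;
        const uint32_t *p2 = (const uint32_t *)s2;

        for (;;) {
            uint32_t w1 = *p1++, w2 = *p2++;

            for (int i = 0; i < 4; i++) {
                if ((w1 ^ w2) & mask[i])        /* first differing byte */
                    return (w1 & mask[i]) < (w2 & mask[i]) ? -1 : 1;
                if (!(w1 & mask[i]))            /* NUL before any difference */
                    return 0;
            }
        }
    }
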
diff --git a/libc/string/xtensa/strcpy.S b/libc/string/xtensa/strcpy.S
index 108070384..dc0a15175 100644
--- a/libc/string/xtensa/strcpy.S
+++ b/libc/string/xtensa/strcpy.S
@@ -36,7 +36,7 @@
ENTRY (strcpy)
/* a2 = dst, a3 = src */
- mov a10, a2 // leave dst in return value register
+ mov a10, a2 /* leave dst in return value register */
movi a4, MASK0
movi a5, MASK1
movi a6, MASK2
@@ -51,23 +51,23 @@ ENTRY (strcpy)
j .Ldstunaligned
-.Lsrc1mod2: // src address is odd
- l8ui a8, a3, 0 // get byte 0
- addi a3, a3, 1 // advance src pointer
- s8i a8, a10, 0 // store byte 0
- beqz a8, 1f // if byte 0 is zero
- addi a10, a10, 1 // advance dst pointer
- bbci.l a3, 1, .Lsrcaligned // if src is now word-aligned
+.Lsrc1mod2: /* src address is odd */
+ l8ui a8, a3, 0 /* get byte 0 */
+ addi a3, a3, 1 /* advance src pointer */
+ s8i a8, a10, 0 /* store byte 0 */
+ beqz a8, 1f /* if byte 0 is zero */
+ addi a10, a10, 1 /* advance dst pointer */
+ bbci.l a3, 1, .Lsrcaligned /* if src is now word-aligned */
-.Lsrc2mod4: // src address is 2 mod 4
- l8ui a8, a3, 0 // get byte 0
+.Lsrc2mod4: /* src address is 2 mod 4 */
+ l8ui a8, a3, 0 /* get byte 0 */
/* 1-cycle interlock */
- s8i a8, a10, 0 // store byte 0
- beqz a8, 1f // if byte 0 is zero
- l8ui a8, a3, 1 // get byte 0
- addi a3, a3, 2 // advance src pointer
- s8i a8, a10, 1 // store byte 0
- addi a10, a10, 2 // advance dst pointer
+ s8i a8, a10, 0 /* store byte 0 */
+ beqz a8, 1f /* if byte 0 is zero */
+ l8ui a8, a3, 1 /* get byte 1 */
+ addi a3, a3, 2 /* advance src pointer */
+ s8i a8, a10, 1 /* store byte 1 */
+ addi a10, a10, 2 /* advance dst pointer */
bnez a8, .Lsrcaligned
1: retw
@@ -78,28 +78,28 @@ ENTRY (strcpy)
#if XCHAL_HAVE_LOOPS
/* (2 mod 4) alignment for loop instruction */
.Laligned:
- _movi.n a8, 0 // set up for the maximum loop count
- loop a8, .Lz3 // loop forever (almost anyway)
- l32i a8, a3, 0 // get word from src
- addi a3, a3, 4 // advance src pointer
- bnone a8, a4, .Lz0 // if byte 0 is zero
- bnone a8, a5, .Lz1 // if byte 1 is zero
- bnone a8, a6, .Lz2 // if byte 2 is zero
- s32i a8, a10, 0 // store word to dst
- bnone a8, a7, .Lz3 // if byte 3 is zero
- addi a10, a10, 4 // advance dst pointer
+ _movi.n a8, 0 /* set up for the maximum loop count */
+ loop a8, .Lz3 /* loop forever (almost anyway) */
+ l32i a8, a3, 0 /* get word from src */
+ addi a3, a3, 4 /* advance src pointer */
+ bnone a8, a4, .Lz0 /* if byte 0 is zero */
+ bnone a8, a5, .Lz1 /* if byte 1 is zero */
+ bnone a8, a6, .Lz2 /* if byte 2 is zero */
+ s32i a8, a10, 0 /* store word to dst */
+ bnone a8, a7, .Lz3 /* if byte 3 is zero */
+ addi a10, a10, 4 /* advance dst pointer */
#else /* !XCHAL_HAVE_LOOPS */
-1: addi a10, a10, 4 // advance dst pointer
+1: addi a10, a10, 4 /* advance dst pointer */
.Laligned:
- l32i a8, a3, 0 // get word from src
- addi a3, a3, 4 // advance src pointer
- bnone a8, a4, .Lz0 // if byte 0 is zero
- bnone a8, a5, .Lz1 // if byte 1 is zero
- bnone a8, a6, .Lz2 // if byte 2 is zero
- s32i a8, a10, 0 // store word to dst
- bany a8, a7, 1b // if byte 3 is zero
+ l32i a8, a3, 0 /* get word from src */
+ addi a3, a3, 4 /* advance src pointer */
+ bnone a8, a4, .Lz0 /* if byte 0 is zero */
+ bnone a8, a5, .Lz1 /* if byte 1 is zero */
+ bnone a8, a6, .Lz2 /* if byte 2 is zero */
+ s32i a8, a10, 0 /* store word to dst */
+ bany a8, a7, 1b /* loop if byte 3 is nonzero */
#endif /* !XCHAL_HAVE_LOOPS */
.Lz3: /* Byte 3 is zero. */
@@ -133,8 +133,8 @@ ENTRY (strcpy)
.Ldstunaligned:
#if XCHAL_HAVE_LOOPS
- _movi.n a8, 0 // set up for the maximum loop count
- loop a8, 2f // loop forever (almost anyway)
+ _movi.n a8, 0 /* set up for the maximum loop count */
+ loop a8, 2f /* loop forever (almost anyway) */
#endif
1: l8ui a8, a3, 0
addi a3, a3, 1
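
The strcpy loop above is the same zero-byte-mask trick applied to copying: read a word of src, drop to byte handling when any byte lane holds the NUL, otherwise store the whole word. A minimal C sketch under the same assumptions (aligned pointers, little-endian byte order; names invented for illustration):

    #include <stdint.h>

    static char *strcpy_aligned_sketch(char *dst, const char *src)
    {
        char *ret = dst;
        uint32_t *d = (uint32_t *)dst;
        const uint32_t *s = (const uint32_t *)src;

        for (;;) {
            uint32_t w = *s;
            if (!(w & 0x000000ffu) || !(w & 0x0000ff00u) ||
                !(w & 0x00ff0000u) || !(w & 0xff000000u))
                break;                  /* word contains the terminating NUL */
            *d++ = w;
            s++;
        }

        /* Copy the final partial word byte by byte, including the NUL. */
        char *cd = (char *)d;
        const char *cs = (const char *)s;
        while ((*cd++ = *cs++) != '\0')
            ;
        return ret;
    }
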
diff --git a/libc/string/xtensa/strlen.S b/libc/string/xtensa/strlen.S
index dd72c16fa..9ee4995f4 100644
--- a/libc/string/xtensa/strlen.S
+++ b/libc/string/xtensa/strlen.S
@@ -36,7 +36,7 @@
ENTRY (strlen)
/* a2 = s */
- addi a3, a2, -4 // because we overincrement at the end
+ addi a3, a2, -4 /* because we overincrement at the end */
movi a4, MASK0
movi a5, MASK1
movi a6, MASK2
@@ -45,21 +45,21 @@ ENTRY (strlen)
bbsi.l a2, 1, .L2mod4
j .Laligned
-.L1mod2: // address is odd
- l8ui a8, a3, 4 // get byte 0
- addi a3, a3, 1 // advance string pointer
- beqz a8, .Lz3 // if byte 0 is zero
- bbci.l a3, 1, .Laligned // if string pointer is now word-aligned
+.L1mod2: /* address is odd */
+ l8ui a8, a3, 4 /* get byte 0 */
+ addi a3, a3, 1 /* advance string pointer */
+ beqz a8, .Lz3 /* if byte 0 is zero */
+ bbci.l a3, 1, .Laligned /* if string pointer is now word-aligned */
-.L2mod4: // address is 2 mod 4
- addi a3, a3, 2 // advance ptr for aligned access
- l32i a8, a3, 0 // get word with first two bytes of string
- bnone a8, a6, .Lz2 // if byte 2 (of word, not string) is zero
- bany a8, a7, .Laligned // if byte 3 (of word, not string) is nonzero
+.L2mod4: /* address is 2 mod 4 */
+ addi a3, a3, 2 /* advance ptr for aligned access */
+ l32i a8, a3, 0 /* get word with first two bytes of string */
+ bnone a8, a6, .Lz2 /* if byte 2 (of word, not string) is zero */
+ bany a8, a7, .Laligned /* if byte 3 (of word, not string) is nonzero */
/* Byte 3 is zero. */
- addi a3, a3, 3 // point to zero byte
- sub a2, a3, a2 // subtract to get length
+ addi a3, a3, 3 /* point to zero byte */
+ sub a2, a3, a2 /* subtract to get length */
retw
@@ -69,36 +69,36 @@ ENTRY (strlen)
/* (2 mod 4) alignment for loop instruction */
.Laligned:
#if XCHAL_HAVE_LOOPS
- _movi.n a8, 0 // set up for the maximum loop count
- loop a8, .Lz3 // loop forever (almost anyway)
+ _movi.n a8, 0 /* set up for the maximum loop count */
+ loop a8, .Lz3 /* loop forever (almost anyway) */
#endif
-1: l32i a8, a3, 4 // get next word of string
- addi a3, a3, 4 // advance string pointer
- bnone a8, a4, .Lz0 // if byte 0 is zero
- bnone a8, a5, .Lz1 // if byte 1 is zero
- bnone a8, a6, .Lz2 // if byte 2 is zero
+1: l32i a8, a3, 4 /* get next word of string */
+ addi a3, a3, 4 /* advance string pointer */
+ bnone a8, a4, .Lz0 /* if byte 0 is zero */
+ bnone a8, a5, .Lz1 /* if byte 1 is zero */
+ bnone a8, a6, .Lz2 /* if byte 2 is zero */
#if XCHAL_HAVE_LOOPS
- bnone a8, a7, .Lz3 // if byte 3 is zero
+ bnone a8, a7, .Lz3 /* if byte 3 is zero */
#else
- bany a8, a7, 1b // repeat if byte 3 is non-zero
+ bany a8, a7, 1b /* repeat if byte 3 is non-zero */
#endif
.Lz3: /* Byte 3 is zero. */
- addi a3, a3, 3 // point to zero byte
+ addi a3, a3, 3 /* point to zero byte */
/* Fall through.... */
.Lz0: /* Byte 0 is zero. */
- sub a2, a3, a2 // subtract to get length
+ sub a2, a3, a2 /* subtract to get length */
retw
.Lz1: /* Byte 1 is zero. */
- addi a3, a3, 1 // point to zero byte
- sub a2, a3, a2 // subtract to get length
+ addi a3, a3, 1 /* point to zero byte */
+ sub a2, a3, a2 /* subtract to get length */
retw
.Lz2: /* Byte 2 is zero. */
- addi a3, a3, 2 // point to zero byte
- sub a2, a3, a2 // subtract to get length
+ addi a3, a3, 2 /* point to zero byte */
+ sub a2, a3, a2 /* subtract to get length */
retw
libc_hidden_def (strlen)
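
The strlen code starts its scan pointer at s - 4 because each iteration pre-increments before testing, and the .Lz0 through .Lz3 labels add the offset of whichever byte lane held the NUL. In C, the same word-at-a-time scan looks roughly like this (aligned, little-endian sketch; not the actual implementation):

    #include <stddef.h>
    #include <stdint.h>

    static size_t strlen_aligned_sketch(const char *s)
    {
        const uint32_t *p = (const uint32_t *)s;

        for (;; p++) {
            uint32_t w = *p;
            if (!(w & 0x000000ffu)) return (const char *)p - s;      /* .Lz0 */
            if (!(w & 0x0000ff00u)) return (const char *)p - s + 1;  /* .Lz1 */
            if (!(w & 0x00ff0000u)) return (const char *)p - s + 2;  /* .Lz2 */
            if (!(w & 0xff000000u)) return (const char *)p - s + 3;  /* .Lz3 */
        }
    }
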
diff --git a/libc/string/xtensa/strncpy.S b/libc/string/xtensa/strncpy.S
index 7ba2ef77d..fe3ec894c 100644
--- a/libc/string/xtensa/strncpy.S
+++ b/libc/string/xtensa/strncpy.S
@@ -41,29 +41,29 @@
.literal_position
__strncpy_aux:
-.Lsrc1mod2: // src address is odd
- l8ui a8, a3, 0 // get byte 0
- addi a3, a3, 1 // advance src pointer
- s8i a8, a10, 0 // store byte 0
- addi a4, a4, -1 // decrement n
- beqz a4, .Lret // if n is zero
- addi a10, a10, 1 // advance dst pointer
- beqz a8, .Lfill // if byte 0 is zero
- bbci.l a3, 1, .Lsrcaligned // if src is now word-aligned
-
-.Lsrc2mod4: // src address is 2 mod 4
- l8ui a8, a3, 0 // get byte 0
- addi a4, a4, -1 // decrement n
- s8i a8, a10, 0 // store byte 0
- beqz a4, .Lret // if n is zero
- addi a10, a10, 1 // advance dst pointer
- beqz a8, .Lfill // if byte 0 is zero
- l8ui a8, a3, 1 // get byte 0
- addi a3, a3, 2 // advance src pointer
- s8i a8, a10, 0 // store byte 0
- addi a4, a4, -1 // decrement n
- beqz a4, .Lret // if n is zero
- addi a10, a10, 1 // advance dst pointer
+.Lsrc1mod2: /* src address is odd */
+ l8ui a8, a3, 0 /* get byte 0 */
+ addi a3, a3, 1 /* advance src pointer */
+ s8i a8, a10, 0 /* store byte 0 */
+ addi a4, a4, -1 /* decrement n */
+ beqz a4, .Lret /* if n is zero */
+ addi a10, a10, 1 /* advance dst pointer */
+ beqz a8, .Lfill /* if byte 0 is zero */
+ bbci.l a3, 1, .Lsrcaligned /* if src is now word-aligned */
+
+.Lsrc2mod4: /* src address is 2 mod 4 */
+ l8ui a8, a3, 0 /* get byte 0 */
+ addi a4, a4, -1 /* decrement n */
+ s8i a8, a10, 0 /* store byte 0 */
+ beqz a4, .Lret /* if n is zero */
+ addi a10, a10, 1 /* advance dst pointer */
+ beqz a8, .Lfill /* if byte 0 is zero */
+ l8ui a8, a3, 1 /* get byte 1 */
+ addi a3, a3, 2 /* advance src pointer */
+ s8i a8, a10, 0 /* store byte 1 */
+ addi a4, a4, -1 /* decrement n */
+ beqz a4, .Lret /* if n is zero */
+ addi a10, a10, 1 /* advance dst pointer */
bnez a8, .Lsrcaligned
j .Lfill
@@ -74,8 +74,8 @@ __strncpy_aux:
ENTRY (strncpy)
/* a2 = dst, a3 = src */
- mov a10, a2 // leave dst in return value register
- beqz a4, .Lret // if n is zero
+ mov a10, a2 /* leave dst in return value register */
+ beqz a4, .Lret /* if n is zero */
movi a11, MASK0
movi a5, MASK1
@@ -125,28 +125,28 @@ ENTRY (strncpy)
.Lfillcleanup:
/* Fill leftover (1 to 3) bytes with zero. */
- s8i a9, a10, 0 // store byte 0
- addi a4, a4, -1 // decrement n
+ s8i a9, a10, 0 /* store byte 0 */
+ addi a4, a4, -1 /* decrement n */
addi a10, a10, 1
- bnez a4, .Lfillcleanup
+ bnez a4, .Lfillcleanup
2: retw
-
-.Lfill1mod2: // dst address is odd
- s8i a9, a10, 0 // store byte 0
- addi a4, a4, -1 // decrement n
- beqz a4, 2b // if n is zero
- addi a10, a10, 1 // advance dst pointer
- bbci.l a10, 1, .Lfillaligned // if dst is now word-aligned
-
-.Lfill2mod4: // dst address is 2 mod 4
- s8i a9, a10, 0 // store byte 0
- addi a4, a4, -1 // decrement n
- beqz a4, 2b // if n is zero
- s8i a9, a10, 1 // store byte 1
- addi a4, a4, -1 // decrement n
- beqz a4, 2b // if n is zero
- addi a10, a10, 2 // advance dst pointer
+
+.Lfill1mod2: /* dst address is odd */
+ s8i a9, a10, 0 /* store byte 0 */
+ addi a4, a4, -1 /* decrement n */
+ beqz a4, 2b /* if n is zero */
+ addi a10, a10, 1 /* advance dst pointer */
+ bbci.l a10, 1, .Lfillaligned /* if dst is now word-aligned */
+
+.Lfill2mod4: /* dst address is 2 mod 4 */
+ s8i a9, a10, 0 /* store byte 0 */
+ addi a4, a4, -1 /* decrement n */
+ beqz a4, 2b /* if n is zero */
+ s8i a9, a10, 1 /* store byte 1 */
+ addi a4, a4, -1 /* decrement n */
+ beqz a4, 2b /* if n is zero */
+ addi a10, a10, 2 /* advance dst pointer */
j .Lfillaligned
@@ -156,32 +156,32 @@ ENTRY (strncpy)
/* (2 mod 4) alignment for loop instruction */
.Laligned:
#if XCHAL_HAVE_LOOPS
- _movi.n a8, 0 // set up for the maximum loop count
- loop a8, 1f // loop forever (almost anyway)
- blti a4, 5, .Ldstunaligned // n is near limit; do one at a time
- l32i a8, a3, 0 // get word from src
- addi a3, a3, 4 // advance src pointer
- bnone a8, a11, .Lz0 // if byte 0 is zero
- bnone a8, a5, .Lz1 // if byte 1 is zero
- bnone a8, a6, .Lz2 // if byte 2 is zero
- s32i a8, a10, 0 // store word to dst
- addi a4, a4, -4 // decrement n
- addi a10, a10, 4 // advance dst pointer
- bnone a8, a7, .Lfill // if byte 3 is zero
-1:
+ _movi.n a8, 0 /* set up for the maximum loop count */
+ loop a8, 1f /* loop forever (almost anyway) */
+ blti a4, 5, .Ldstunaligned /* n is near limit; do one at a time */
+ l32i a8, a3, 0 /* get word from src */
+ addi a3, a3, 4 /* advance src pointer */
+ bnone a8, a11, .Lz0 /* if byte 0 is zero */
+ bnone a8, a5, .Lz1 /* if byte 1 is zero */
+ bnone a8, a6, .Lz2 /* if byte 2 is zero */
+ s32i a8, a10, 0 /* store word to dst */
+ addi a4, a4, -4 /* decrement n */
+ addi a10, a10, 4 /* advance dst pointer */
+ bnone a8, a7, .Lfill /* if byte 3 is zero */
+1:
#else /* !XCHAL_HAVE_LOOPS */
-1: blti a4, 5, .Ldstunaligned // n is near limit; do one at a time
- l32i a8, a3, 0 // get word from src
- addi a3, a3, 4 // advance src pointer
- bnone a8, a11, .Lz0 // if byte 0 is zero
- bnone a8, a5, .Lz1 // if byte 1 is zero
- bnone a8, a6, .Lz2 // if byte 2 is zero
- s32i a8, a10, 0 // store word to dst
- addi a4, a4, -4 // decrement n
- addi a10, a10, 4 // advance dst pointer
- bany a8, a7, 1b // no zeroes
+1: blti a4, 5, .Ldstunaligned /* n is near limit; do one at a time */
+ l32i a8, a3, 0 /* get word from src */
+ addi a3, a3, 4 /* advance src pointer */
+ bnone a8, a11, .Lz0 /* if byte 0 is zero */
+ bnone a8, a5, .Lz1 /* if byte 1 is zero */
+ bnone a8, a6, .Lz2 /* if byte 2 is zero */
+ s32i a8, a10, 0 /* store word to dst */
+ addi a4, a4, -4 /* decrement n */
+ addi a10, a10, 4 /* advance dst pointer */
+ bany a8, a7, 1b /* no zeroes */
#endif /* !XCHAL_HAVE_LOOPS */
j .Lfill
@@ -191,8 +191,8 @@ ENTRY (strncpy)
movi a8, 0
#endif
s8i a8, a10, 0
- addi a4, a4, -1 // decrement n
- addi a10, a10, 1 // advance dst pointer
+ addi a4, a4, -1 /* decrement n */
+ addi a10, a10, 1 /* advance dst pointer */
j .Lfill
.Lz1: /* Byte 1 is zero. */
@@ -200,8 +200,8 @@ ENTRY (strncpy)
extui a8, a8, 16, 16
#endif
s16i a8, a10, 0
- addi a4, a4, -2 // decrement n
- addi a10, a10, 2 // advance dst pointer
+ addi a4, a4, -2 /* decrement n */
+ addi a10, a10, 2 /* advance dst pointer */
j .Lfill
.Lz2: /* Byte 2 is zero. */
@@ -211,8 +211,8 @@ ENTRY (strncpy)
s16i a8, a10, 0
movi a8, 0
s8i a8, a10, 2
- addi a4, a4, -3 // decrement n
- addi a10, a10, 3 // advance dst pointer
+ addi a4, a4, -3 /* decrement n */
+ addi a10, a10, 3 /* advance dst pointer */
j .Lfill
.align 4
@@ -220,8 +220,8 @@ ENTRY (strncpy)
.Ldstunaligned:
#if XCHAL_HAVE_LOOPS
- _movi.n a8, 0 // set up for the maximum loop count
- loop a8, 2f // loop forever (almost anyway)
+ _movi.n a8, 0 /* set up for the maximum loop count */
+ loop a8, 2f /* loop forever (almost anyway) */
#endif
1: l8ui a8, a3, 0
addi a3, a3, 1
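
Taken together, the strncpy paths above copy at most n bytes, stop early at the source NUL, and then the .Lfill code zero-pads the destination out to n bytes, as the C standard requires for strncpy. A byte-at-a-time C sketch of that overall structure (illustration only; the real routine does the copy and fill word-at-a-time):

    #include <stddef.h>

    static char *strncpy_sketch(char *dst, const char *src, size_t n)
    {
        size_t i = 0;

        while (i < n && src[i] != '\0') {   /* copy up to n bytes or the NUL */
            dst[i] = src[i];
            i++;
        }
        while (i < n)                       /* zero-fill the remainder */
            dst[i++] = '\0';

        return dst;
    }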