
cygwin.com/git/newlib-cygwin.git
author     Richard Earnshaw <rearnsha@arm.com>  2015-01-06 12:57:55 +0300
committer  Richard Earnshaw <rearnsha@arm.com>  2015-01-06 12:57:55 +0300
commit     6a35dbf342834de0b8b590b7dcfaccba108acc09 (patch)
tree       6544e2cf582f6af622460a42cbead979ba5b6552 /newlib/libc
parent     ba913653a6613ba70b7c5341ce167fc3b0f09c89 (diff)
* libc/machine/aarch64/strcpy.S (strcpy): Further performance
  improvements.  Adjust to allow building as stpcpy.
* libc/machine/aarch64/stpcpy.S: New file.
* libc/machine/aarch64/stpcpy-stub.c: New file.
* libc/machine/aarch64/Makefile.am (lib_a_SOURCES): Build stpcpy.
* libc/machine/aarch64/Makefile.in: Regenerated.
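
Context for readers: stpcpy differs from strcpy only in its return value.
It returns a pointer to the terminating NUL it wrote rather than to the
start of the destination, which makes chained concatenation linear-time.
A minimal usage sketch (assumes a POSIX.1-2008 libc; not part of this patch):

#define _POSIX_C_SOURCE 200809L  /* stpcpy is POSIX.1-2008 */
#include <stdio.h>
#include <string.h>

int main(void)
{
    char buf[32];
    char *p = buf;
    /* Each call returns the end of what it copied, so the next copy
       starts there: O(total) instead of strcat's repeated rescans. */
    p = stpcpy(p, "hello");
    p = stpcpy(p, ", ");
    p = stpcpy(p, "world");
    printf("%s (%zu bytes)\n", buf, (size_t)(p - buf));
    return 0;
}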
Diffstat (limited to 'newlib/libc')
-rw-r--r--  newlib/libc/machine/aarch64/Makefile.am    |   2
-rw-r--r--  newlib/libc/machine/aarch64/Makefile.in    |  23
-rw-r--r--  newlib/libc/machine/aarch64/stpcpy-stub.c  |  31
-rw-r--r--  newlib/libc/machine/aarch64/stpcpy.S       |  34
-rw-r--r--  newlib/libc/machine/aarch64/strcpy.S       | 285
5 files changed, 250 insertions(+), 125 deletions(-)
diff --git a/newlib/libc/machine/aarch64/Makefile.am b/newlib/libc/machine/aarch64/Makefile.am
index 19175afdd..e2a577c5a 100644
--- a/newlib/libc/machine/aarch64/Makefile.am
+++ b/newlib/libc/machine/aarch64/Makefile.am
@@ -20,6 +20,8 @@ lib_a_SOURCES += memmove.S
lib_a_SOURCES += memset-stub.c
lib_a_SOURCES += memset.S
lib_a_SOURCES += setjmp.S
+lib_a_SOURCES += stpcpy-stub.c
+lib_a_SOURCES += stpcpy.S
lib_a_SOURCES += strchr-stub.c
lib_a_SOURCES += strchr.S
lib_a_SOURCES += strchrnul-stub.c
diff --git a/newlib/libc/machine/aarch64/Makefile.in b/newlib/libc/machine/aarch64/Makefile.in
index fa89c42a9..eb7087ffe 100644
--- a/newlib/libc/machine/aarch64/Makefile.in
+++ b/newlib/libc/machine/aarch64/Makefile.in
@@ -74,7 +74,8 @@ am_lib_a_OBJECTS = lib_a-memchr-stub.$(OBJEXT) lib_a-memchr.$(OBJEXT) \
lib_a-memcpy-stub.$(OBJEXT) lib_a-memcpy.$(OBJEXT) \
lib_a-memmove-stub.$(OBJEXT) lib_a-memmove.$(OBJEXT) \
lib_a-memset-stub.$(OBJEXT) lib_a-memset.$(OBJEXT) \
- lib_a-setjmp.$(OBJEXT) lib_a-strchr-stub.$(OBJEXT) \
+ lib_a-setjmp.$(OBJEXT) lib_a-stpcpy-stub.$(OBJEXT) \
+ lib_a-stpcpy.$(OBJEXT) lib_a-strchr-stub.$(OBJEXT) \
lib_a-strchr.$(OBJEXT) lib_a-strchrnul-stub.$(OBJEXT) \
lib_a-strchrnul.$(OBJEXT) lib_a-strcmp-stub.$(OBJEXT) \
lib_a-strcmp.$(OBJEXT) lib_a-strcpy-stub.$(OBJEXT) \
@@ -210,10 +211,10 @@ AM_CCASFLAGS = $(INCLUDES)
noinst_LIBRARIES = lib.a
lib_a_SOURCES = memchr-stub.c memchr.S memcmp-stub.c memcmp.S \
memcpy-stub.c memcpy.S memmove-stub.c memmove.S memset-stub.c \
- memset.S setjmp.S strchr-stub.c strchr.S strchrnul-stub.c \
- strchrnul.S strcmp-stub.c strcmp.S strcpy-stub.c strcpy.S \
- strlen-stub.c strlen.S strncmp-stub.c strncmp.S strnlen-stub.c \
- strnlen.S strrchr-stub.c strrchr.S
+ memset.S setjmp.S stpcpy-stub.c stpcpy.S strchr-stub.c \
+ strchr.S strchrnul-stub.c strchrnul.S strcmp-stub.c strcmp.S \
+ strcpy-stub.c strcpy.S strlen-stub.c strlen.S strncmp-stub.c \
+ strncmp.S strnlen-stub.c strnlen.S strrchr-stub.c strrchr.S
lib_a_CCASFLAGS = $(AM_CCASFLAGS)
lib_a_CFLAGS = $(AM_CFLAGS)
ACLOCAL_AMFLAGS = -I ../../.. -I ../../../..
@@ -312,6 +313,12 @@ lib_a-setjmp.o: setjmp.S
lib_a-setjmp.obj: setjmp.S
$(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CCASFLAGS) $(CCASFLAGS) -c -o lib_a-setjmp.obj `if test -f 'setjmp.S'; then $(CYGPATH_W) 'setjmp.S'; else $(CYGPATH_W) '$(srcdir)/setjmp.S'; fi`
+lib_a-stpcpy.o: stpcpy.S
+ $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CCASFLAGS) $(CCASFLAGS) -c -o lib_a-stpcpy.o `test -f 'stpcpy.S' || echo '$(srcdir)/'`stpcpy.S
+
+lib_a-stpcpy.obj: stpcpy.S
+ $(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CCASFLAGS) $(CCASFLAGS) -c -o lib_a-stpcpy.obj `if test -f 'stpcpy.S'; then $(CYGPATH_W) 'stpcpy.S'; else $(CYGPATH_W) '$(srcdir)/stpcpy.S'; fi`
+
lib_a-strchr.o: strchr.S
$(CCAS) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CCASFLAGS) $(CCASFLAGS) -c -o lib_a-strchr.o `test -f 'strchr.S' || echo '$(srcdir)/'`strchr.S
@@ -396,6 +403,12 @@ lib_a-memset-stub.o: memset-stub.c
lib_a-memset-stub.obj: memset-stub.c
$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CFLAGS) $(CFLAGS) -c -o lib_a-memset-stub.obj `if test -f 'memset-stub.c'; then $(CYGPATH_W) 'memset-stub.c'; else $(CYGPATH_W) '$(srcdir)/memset-stub.c'; fi`
+lib_a-stpcpy-stub.o: stpcpy-stub.c
+ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CFLAGS) $(CFLAGS) -c -o lib_a-stpcpy-stub.o `test -f 'stpcpy-stub.c' || echo '$(srcdir)/'`stpcpy-stub.c
+
+lib_a-stpcpy-stub.obj: stpcpy-stub.c
+ $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CFLAGS) $(CFLAGS) -c -o lib_a-stpcpy-stub.obj `if test -f 'stpcpy-stub.c'; then $(CYGPATH_W) 'stpcpy-stub.c'; else $(CYGPATH_W) '$(srcdir)/stpcpy-stub.c'; fi`
+
lib_a-strchr-stub.o: strchr-stub.c
$(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(lib_a_CFLAGS) $(CFLAGS) -c -o lib_a-strchr-stub.o `test -f 'strchr-stub.c' || echo '$(srcdir)/'`strchr-stub.c
diff --git a/newlib/libc/machine/aarch64/stpcpy-stub.c b/newlib/libc/machine/aarch64/stpcpy-stub.c
new file mode 100644
index 000000000..bfaba5cf8
--- /dev/null
+++ b/newlib/libc/machine/aarch64/stpcpy-stub.c
@@ -0,0 +1,31 @@
+/* Copyright (c) 2015, ARM Limited
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the company nor the names of its contributors
+ may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
+
+#if (defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED))
+# include "../../string/stpcpy.c"
+#else
+/* See stpcpy.S */
+#endif
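
As with the other aarch64 string routines, the stub picks the portable C
implementation from newlib/libc/string when the build prefers size, and
otherwise compiles to nothing so the assembly file below provides the
symbol. For reference, the contract that generic fallback implements is
roughly the following (an illustrative sketch, not newlib's actual
stpcpy.c):

char *stpcpy(char *dst, const char *src)
{
    /* Copy bytes up to and including the NUL terminator. */
    while ((*dst = *src++) != '\0')
        dst++;
    return dst;  /* points at the terminating NUL, per POSIX */
}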
diff --git a/newlib/libc/machine/aarch64/stpcpy.S b/newlib/libc/machine/aarch64/stpcpy.S
new file mode 100644
index 000000000..696b45889
--- /dev/null
+++ b/newlib/libc/machine/aarch64/stpcpy.S
@@ -0,0 +1,34 @@
+/*
+ stpcpy - copy a string returning pointer to end.
+
+ Copyright (c) 2015 ARM Ltd.
+ All Rights Reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the company nor the names of its contributors
+ may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
+
+/* This is just a wrapper that uses strcpy code with appropriate
+ pre-defines. */
+
+#define BUILD_STPCPY
+#include "strcpy.S"
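
The strcpy.S rework below is built around the classic
eight-bytes-at-a-time NUL test: with zeroones = REP8_01 (0x0101...01)
and the mask REP8_7f (0x7f7f...7f), the syndrome
(x - 0x01...01) & ~(x | 0x7f...7f) is nonzero exactly when some byte of
x is zero, and the rev/clz pairs in the diff then convert it into the
byte offset of the first NUL. A C rendering of the test that the
sub/orr/bic(s) sequences compute (sketch for reference only):

#include <stdint.h>

#define REP8_01 0x0101010101010101ULL
#define REP8_7f 0x7f7f7f7f7f7f7f7fULL

/* Nonzero iff the 64-bit word x contains a zero byte; each zero byte
   sets 0x80 in the corresponding lane of the returned syndrome. */
static inline uint64_t has_zero_byte(uint64_t x)
{
    uint64_t tmp1 = x - REP8_01;   /* sub  tmp1, data1, zeroones */
    uint64_t tmp2 = x | REP8_7f;   /* orr  tmp2, data1, #REP8_7f */
    return tmp1 & ~tmp2;           /* bic  has_nul, tmp1, tmp2   */
}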
diff --git a/newlib/libc/machine/aarch64/strcpy.S b/newlib/libc/machine/aarch64/strcpy.S
index 7c40bc5f5..e5405f253 100644
--- a/newlib/libc/machine/aarch64/strcpy.S
+++ b/newlib/libc/machine/aarch64/strcpy.S
@@ -1,7 +1,7 @@
/*
- strcpy - copy a string.
+ strcpy/stpcpy - copy a string returning pointer to start/end.
- Copyright (c) 2013, 2014 ARM Ltd.
+ Copyright (c) 2013, 2014, 2015 ARM Ltd.
All Rights Reserved.
Redistribution and use in source and binary forms, with or without
@@ -36,7 +36,9 @@
* ARMv8-a, AArch64, unaligned accesses, min page size 4k.
*/
-/* To test the page crossing code path more thoroughly, compile with
+/* To build as stpcpy, define BUILD_STPCPY before compiling this file.
+
+ To test the page crossing code path more thoroughly, compile with
-DSTRCPY_TEST_PAGE_CROSS - this will force all copies through the slower
entry path. This option is not intended for production use. */
@@ -64,6 +66,12 @@
#define len x16
#define to_align x17
+#ifdef BUILD_STPCPY
+#define STRCPY stpcpy
+#else
+#define STRCPY strcpy
+#endif
+
.macro def_fn f p2align=0
.text
.p2align \p2align
@@ -94,10 +102,16 @@
misaligned, crosses a page boundary - after that we move to aligned
fetches for the remainder of the string. */
+#ifdef STRCPY_TEST_PAGE_CROSS
+ /* Make everything that isn't Qword aligned look like a page cross. */
+#define MIN_PAGE_P2 4
+#else
#define MIN_PAGE_P2 12
+#endif
+
#define MIN_PAGE_SIZE (1 << MIN_PAGE_P2)
-def_fn strcpy p2align=6
+def_fn STRCPY p2align=6
/* For moderately short strings, the fastest way to do the copy is to
calculate the length of the string in the same way as strlen, then
essentially do a memcpy of the result. This avoids the need for
@@ -108,80 +122,112 @@ def_fn strcpy p2align=6
always be difficult - we mitigate against this by preferring
conditional select operations over branches whenever this is
feasible. */
- add tmp2, srcin, #15
+ and tmp2, srcin, #(MIN_PAGE_SIZE - 1)
mov zeroones, #REP8_01
and to_align, srcin, #15
- eor tmp2, tmp2, srcin
- mov dst, dstin
+ cmp tmp2, #(MIN_PAGE_SIZE - 16)
neg tmp1, to_align
-#ifdef STRCPY_TEST_PAGE_CROSS
- b .Lpage_cross
-#else
/* The first fetch will straddle a (possible) page boundary iff
srcin + 15 causes bit[MIN_PAGE_P2] to change value. A 16-byte
aligned string will never fail the page align check, so will
always take the fast path. */
- tbnz tmp2, #MIN_PAGE_P2, .Lpage_cross
-#endif
+ b.gt .Lpage_cross
+
+.Lpage_cross_ok:
ldp data1, data2, [srcin]
- add src, srcin, #16
+#ifdef __AARCH64EB__
+ /* Because we expect the end to be found within 16 characters
+ (profiling shows this is the most common case), it's worth
+ swapping the bytes now to save having to recalculate the
+ termination syndrome later. We preserve data1 and data2
+ so that we can re-use the values later on. */
+ rev tmp2, data1
+ sub tmp1, tmp2, zeroones
+ orr tmp2, tmp2, #REP8_7f
+ bics has_nul1, tmp1, tmp2
+ b.ne .Lfp_le8
+ rev tmp4, data2
+ sub tmp3, tmp4, zeroones
+ orr tmp4, tmp4, #REP8_7f
+#else
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
+ bics has_nul1, tmp1, tmp2
+ b.ne .Lfp_le8
sub tmp3, data2, zeroones
orr tmp4, data2, #REP8_7f
- bic has_nul1, tmp1, tmp2
+#endif
bics has_nul2, tmp3, tmp4
- ccmp has_nul1, #0, #0, eq /* NZCV = 0000 */
- b.ne .Learly_end_found
- stp data1, data2, [dst], #16
- sub src, src, to_align
- sub dst, dst, to_align
- b .Lentry_no_page_cross
+ b.eq .Lbulk_entry
-.Lpage_cross:
- bic src, srcin, #15
- /* Start by loading two words at [srcin & ~15], then forcing the
- bytes that precede srcin to 0xff. This means they never look
- like termination bytes. */
- ldp data1, data2, [src], #16
- lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */
- tst to_align, #7
- csetm tmp2, ne
+ /* The string is short (<=16 bytes). We don't know exactly how
+ short though, yet. Work out the exact length so that we can
+ quickly select the optimal copy strategy. */
+.Lfp_gt8:
+ rev has_nul2, has_nul2
+ clz pos, has_nul2
+ mov tmp2, #56
+ add dst, dstin, pos, lsr #3 /* Bits to bytes. */
+ sub pos, tmp2, pos
#ifdef __AARCH64EB__
- lsl tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */
+ lsr data2, data2, pos
#else
- lsr tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */
+ lsl data2, data2, pos
#endif
- orr data1, data1, tmp2
- orr data2a, data2, tmp2
- cmp to_align, #8
- csinv data1, data1, xzr, lt
- csel data2, data2, data2a, lt
- sub tmp1, data1, zeroones
- orr tmp2, data1, #REP8_7f
- sub tmp3, data2, zeroones
- orr tmp4, data2, #REP8_7f
- bic has_nul1, tmp1, tmp2
- bics has_nul2, tmp3, tmp4
- ccmp has_nul1, #0, #0, eq /* NZCV = 0000 */
- b.ne .Learly_end_found
- ldp data1, data2, [src], #16
- sub tmp1, data1, zeroones
- orr tmp2, data1, #REP8_7f
- sub tmp3, data2, zeroones
- orr tmp4, data2, #REP8_7f
- bic has_nul1, tmp1, tmp2
- bics has_nul2, tmp3, tmp4
- ccmp has_nul1, #0, #0, eq /* NZCV = 0000 */
- b.ne .Learly_end_found
- /* We've now checked between 16 and 32 bytes, but not found a null,
- so we can safely start bulk copying. Start by refetching the
- first 16 bytes of the real string; we know this can't trap now. */
- ldp data1a, data2a, [srcin]
- stp data1a, data2a, [dst], #16
- sub dst, dst, to_align
- /* Everything is now set up, so we can just fall into the bulk
- copy loop. */
+ str data2, [dst, #1]
+ str data1, [dstin]
+#ifdef BUILD_STPCPY
+ add dstin, dst, #8
+#endif
+ ret
+
+.Lfp_le8:
+ rev has_nul1, has_nul1
+ clz pos, has_nul1
+ add dst, dstin, pos, lsr #3 /* Bits to bytes. */
+ subs tmp2, pos, #24 /* Pos in bits. */
+ b.lt .Lfp_lt4
+#ifdef __AARCH64EB__
+ mov tmp2, #56
+ sub pos, tmp2, pos
+ lsr data2, data1, pos
+ lsr data1, data1, #32
+#else
+ lsr data2, data1, tmp2
+#endif
+ /* 4->7 bytes to copy. */
+ str data2w, [dst, #-3]
+ str data1w, [dstin]
+#ifdef BUILD_STPCPY
+ mov dstin, dst
+#endif
+ ret
+.Lfp_lt4:
+ cbz pos, .Lfp_lt2
+ /* 2->3 bytes to copy. */
+#ifdef __AARCH64EB__
+ lsr data1, data1, #48
+#endif
+ strh data1w, [dstin]
+ /* Fall-through, one byte (max) to go. */
+.Lfp_lt2:
+ /* Null-terminated string. Last character must be zero! */
+ strb wzr, [dst]
+#ifdef BUILD_STPCPY
+ mov dstin, dst
+#endif
+ ret
+
+ .p2align 6
+ /* Aligning here ensures that the entry code and main loop all lies
+ within one 64-byte cache line. */
+.Lbulk_entry:
+ sub to_align, to_align, #16
+ stp data1, data2, [dstin]
+ sub src, srcin, to_align
+ sub dst, dstin, to_align
+ b .Lentry_no_page_cross
+
/* The inner loop deals with two Dwords at a time. This has a
slightly higher start-up cost, but we should win quite quickly,
especially on cores with a high number of issue slots per
@@ -225,72 +271,71 @@ def_fn strcpy p2align=6
add dst, dst, pos, lsr #3
ldp data1, data2, [src, #-32]
stp data1, data2, [dst, #-16]
+#ifdef BUILD_STPCPY
+ sub dstin, dst, #1
+#endif
ret
- /* The string is short (<32 bytes). We don't know exactly how
- short though, yet. Work out the exact length so that we can
- quickly select the optimal copy strategy. */
-.Learly_end_found:
- cmp has_nul1, #0
+.Lpage_cross:
+ bic src, srcin, #15
+ /* Start by loading two words at [srcin & ~15], then forcing the
+ bytes that precede srcin to 0xff. This means they never look
+ like termination bytes. */
+ ldp data1, data2, [src]
+ lsl tmp1, tmp1, #3 /* Bytes beyond alignment -> bits. */
+ tst to_align, #7
+ csetm tmp2, ne
#ifdef __AARCH64EB__
- /* For big-endian, carry propagation (if the final byte in the
- string is 0x01) means we cannot use has_nul directly. The
- easiest way to get the correct byte is to byte-swap the data
- and calculate the syndrome a second time. */
- csel data1, data1, data2, ne
- rev data1, data1
+ lsl tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */
+#else
+ lsr tmp2, tmp2, tmp1 /* Shift (tmp1 & 63). */
+#endif
+ orr data1, data1, tmp2
+ orr data2a, data2, tmp2
+ cmp to_align, #8
+ csinv data1, data1, xzr, lt
+ csel data2, data2, data2a, lt
sub tmp1, data1, zeroones
orr tmp2, data1, #REP8_7f
+ sub tmp3, data2, zeroones
+ orr tmp4, data2, #REP8_7f
bic has_nul1, tmp1, tmp2
+ bics has_nul2, tmp3, tmp4
+ ccmp has_nul1, #0, #0, eq /* NZCV = 0000 */
+ b.eq .Lpage_cross_ok
+ /* We now need to make data1 and data2 look like they've been
+ loaded directly from srcin. Do a rotate on the 128-bit value. */
+ lsl tmp1, to_align, #3 /* Bytes->bits. */
+ neg tmp2, to_align, lsl #3
+#ifdef __AARCH64EB__
+ lsl data1a, data1, tmp1
+ lsr tmp4, data2, tmp2
+ lsl data2, data2, tmp1
+ orr tmp4, tmp4, data1a
+ cmp to_align, #8
+ csel data1, tmp4, data2, lt
+ rev tmp2, data1
+ rev tmp4, data2
+ sub tmp1, tmp2, zeroones
+ orr tmp2, tmp2, #REP8_7f
+ sub tmp3, tmp4, zeroones
+ orr tmp4, tmp4, #REP8_7f
#else
- csel has_nul1, has_nul1, has_nul2, ne
+ lsr data1a, data1, tmp1
+ lsl tmp4, data2, tmp2
+ lsr data2, data2, tmp1
+ orr tmp4, tmp4, data1a
+ cmp to_align, #8
+ csel data1, tmp4, data2, lt
+ sub tmp1, data1, zeroones
+ orr tmp2, data1, #REP8_7f
+ sub tmp3, data2, zeroones
+ orr tmp4, data2, #REP8_7f
#endif
- rev has_nul1, has_nul1
- sub tmp1, src, #7
- sub src, src, #15
- clz pos, has_nul1
- csel src, src, tmp1, ne
- sub dst, dstin, srcin
- add src, src, pos, lsr #3 /* Bits to bytes. */
- add dst, dst, src
- sub len, src, srcin
- cmp len, #8
- b.lt .Llt8
- cmp len, #16
- b.lt .Llt16
- /* 16->32 bytes to copy. */
- ldp data1, data2, [srcin]
- ldp data1a, data2a, [src, #-16]
- stp data1, data2, [dstin]
- stp data1a, data2a, [dst, #-16]
- ret
-.Llt16:
- /* 8->15 bytes to copy. */
- ldr data1, [srcin]
- ldr data2, [src, #-8]
- str data1, [dstin]
- str data2, [dst, #-8]
- ret
-.Llt8:
- cmp len, #4
- b.lt .Llt4
- /* 4->7 bytes to copy. */
- ldr data1w, [srcin]
- ldr data2w, [src, #-4]
- str data1w, [dstin]
- str data2w, [dst, #-4]
- ret
-.Llt4:
- cmp len, #2
- b.lt .Llt2
- /* 2->3 bytes to copy. */
- ldrh data1w, [srcin]
- strh data1w, [dstin]
- /* Fall-through, one byte (max) to go. */
-.Llt2:
- /* Null-terminated string. Last character must be zero! */
- strb wzr, [dst, #-1]
- ret
+ bic has_nul1, tmp1, tmp2
+ cbnz has_nul1, .Lfp_le8
+ bic has_nul2, tmp3, tmp4
+ b .Lfp_gt8
- .size strcpy, . - strcpy
+ .size STRCPY, . - STRCPY
#endif
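
One detail worth pulling out of the new entry sequence: the old code
tested whether adding 15 to srcin flipped bit MIN_PAGE_P2; the new code
compares the page offset directly, taking the slow path only when fewer
than 16 bytes remain before a minimum-size page boundary, so the
unconditional 16-byte ldp cannot fault. The predicate in C (a sketch of
the check only, using the constants from the diff):

#include <stdint.h>

#define MIN_PAGE_P2   12                    /* min page size 4 KiB */
#define MIN_PAGE_SIZE (1u << MIN_PAGE_P2)

/* True when a 16-byte load at srcin could run past the current
   minimum-size page, i.e. the offset is within 15 bytes of the end. */
static inline int may_cross_page(uintptr_t srcin)
{
    /* and tmp2, srcin, #(MIN_PAGE_SIZE - 1)
       cmp tmp2, #(MIN_PAGE_SIZE - 16); b.gt .Lpage_cross */
    return (srcin & (MIN_PAGE_SIZE - 1)) > (MIN_PAGE_SIZE - 16);
}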