Welcome to the mirror list, hosted at ThFree Co, Russian Federation.

cygwin.com/git/newlib-cygwin.git - Unnamed repository; edit this file 'description' to name the repository.
summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorSebastian Huber <sebastian.huber@embedded-brains.de>2023-09-12 11:33:14 +0300
committerSebastian Huber <sebastian.huber@embedded-brains.de>2023-10-05 15:16:59 +0300
commitfe5886a500e66cddf0f57eea3049d25d5f8765e9 (patch)
treeefff297f01521679f7e2e9e9d5bbc687c7339f66 /newlib/libc
parent96ec8f868e1a0f5a75badfe4627a41f12cce742d (diff)
aarch64: Import memrchr.S
Import memrchr.S for AArch64 from https://github.com/ARM-software/optimized-routines, at commit 0cf84f26b6b8dcad8287fe30a4dcc1fdabd06560 ("string: Fix corrupt GNU_PROPERTY_TYPE (5) size — for ELF32 the notes alignment is 4 and not 8"; author Sebastian Huber <sebastian.huber@embedded-brains.de>, Thu Jul 27 17:14:57 2023 +0200).
Diffstat (limited to 'newlib/libc')
-rw-r--r--newlib/libc/machine/aarch64/Makefile.inc2
-rw-r--r--newlib/libc/machine/aarch64/memrchr-stub.c11
-rw-r--r--newlib/libc/machine/aarch64/memrchr.S115
3 files changed, 128 insertions, 0 deletions
diff --git a/newlib/libc/machine/aarch64/Makefile.inc b/newlib/libc/machine/aarch64/Makefile.inc
index 063a2a84a..c749b0d57 100644
--- a/newlib/libc/machine/aarch64/Makefile.inc
+++ b/newlib/libc/machine/aarch64/Makefile.inc
@@ -7,6 +7,8 @@ libc_a_SOURCES += \
%D%/memcpy.S \
%D%/memmove-stub.c \
%D%/memmove.S \
+ %D%/memrchr-stub.c \
+ %D%/memrchr.S \
%D%/memset-stub.c \
%D%/memset.S \
%D%/rawmemchr.S \
diff --git a/newlib/libc/machine/aarch64/memrchr-stub.c b/newlib/libc/machine/aarch64/memrchr-stub.c
new file mode 100644
index 000000000..48f13bedc
--- /dev/null
+++ b/newlib/libc/machine/aarch64/memrchr-stub.c
@@ -0,0 +1,11 @@
+/*
+ * SPDX-License-Identifier: BSD-2-Clause
+ *
+ * Copyright (C) 2023 embedded brains GmbH & Co. KG
+ */
+
+#if defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED)
+#include "../../string/memrchr.c"
+#else
+/* See memrchr.S */
+#endif
diff --git a/newlib/libc/machine/aarch64/memrchr.S b/newlib/libc/machine/aarch64/memrchr.S
new file mode 100644
index 000000000..ba9915cc3
--- /dev/null
+++ b/newlib/libc/machine/aarch64/memrchr.S
@@ -0,0 +1,115 @@
+/*
+ * memrchr - find last character in a memory zone.
+ *
+ * Copyright (c) 2020-2022, Arm Limited.
+ * SPDX-License-Identifier: MIT OR Apache-2.0 WITH LLVM-exception
+ */
+
+/* Assumptions:
+ *
+ * ARMv8-a, AArch64, Advanced SIMD.
+ * MTE compatible.
+ */
+
+#if defined (__OPTIMIZE_SIZE__) || defined (PREFER_SIZE_OVER_SPEED)
+/* See memrchr-stub.c */
+#else
+#include "asmdefs.h"
+
+#define srcin x0
+#define chrin w1
+#define cntin x2
+#define result x0
+
+#define src x3
+#define cntrem x4
+#define synd x5
+#define shift x6
+#define tmp x7
+#define end x8
+#define endm1 x9
+
+#define vrepchr v0
+#define qdata q1
+#define vdata v1
+#define vhas_chr v2
+#define vend v3
+#define dend d3
+
+/*
+ Core algorithm:
+ For each 16-byte chunk we calculate a 64-bit nibble mask value with four bits
+ per byte. We take 4 bits of every comparison byte with shift right and narrow
+ by 4 instruction. Since the bits in the nibble mask reflect the order in
+ which things occur in the original string, counting leading zeros identifies
+ exactly which byte matched. */
+
+ENTRY (memrchr)
+ PTR_ARG (0)
+ add end, srcin, cntin
+ sub endm1, end, 1
+ bic src, endm1, 15
+ cbz cntin, L(nomatch)
+ ld1 {vdata.16b}, [src]
+ dup vrepchr.16b, chrin
+ cmeq vhas_chr.16b, vdata.16b, vrepchr.16b
+ neg shift, end, lsl 2
+ shrn vend.8b, vhas_chr.8h, 4 /* 128->64 */
+ fmov synd, dend
+ lsl synd, synd, shift
+ cbz synd, L(start_loop)
+
+ clz synd, synd
+ sub result, endm1, synd, lsr 2
+ cmp cntin, synd, lsr 2
+ csel result, result, xzr, hi
+ ret
+
+ nop
+L(start_loop):
+ subs cntrem, src, srcin
+ b.ls L(nomatch)
+
+ /* Make sure that it won't overread by a 16-byte chunk */
+ sub cntrem, cntrem, 1
+ tbz cntrem, 4, L(loop32_2)
+ add src, src, 16
+
+ .p2align 5
+L(loop32):
+ ldr qdata, [src, -32]!
+ cmeq vhas_chr.16b, vdata.16b, vrepchr.16b
+ umaxp vend.16b, vhas_chr.16b, vhas_chr.16b /* 128->64 */
+ fmov synd, dend
+ cbnz synd, L(end)
+
+L(loop32_2):
+ ldr qdata, [src, -16]
+ subs cntrem, cntrem, 32
+ cmeq vhas_chr.16b, vdata.16b, vrepchr.16b
+ b.lo L(end_2)
+ umaxp vend.16b, vhas_chr.16b, vhas_chr.16b /* 128->64 */
+ fmov synd, dend
+ cbz synd, L(loop32)
+L(end_2):
+ sub src, src, 16
+L(end):
+ shrn vend.8b, vhas_chr.8h, 4 /* 128->64 */
+ fmov synd, dend
+
+ add tmp, src, 15
+#ifdef __AARCH64EB__
+ rbit synd, synd
+#endif
+ clz synd, synd
+ sub tmp, tmp, synd, lsr 2
+ cmp tmp, srcin
+ csel result, tmp, xzr, hs
+ ret
+
+L(nomatch):
+ mov result, 0
+ ret
+
+END (memrchr)
+#endif