Welcome to mirror list, hosted at ThFree Co, Russian Federation.

cygwin.com/git/newlib-cygwin.git - Unnamed repository; edit this file 'description' to name the repository.
summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorAlexey Lapshin <alexey.lapshin@espressif.com>2023-08-17 01:05:53 +0300
committerJeff Johnston <jjohnstn@redhat.com>2023-08-18 01:14:15 +0300
commit7ba103eb1a95703c7803e4ca48e6ba5c8e1f3d56 (patch)
tree36b6a578131e28363728a2ed685d66348c747d78 /newlib/libc
parent65d34484e9097fd9036dd577028423355cb5f5bc (diff)
newlib: add Xtensa port
Diffstat (limited to 'newlib/libc')
-rw-r--r--newlib/libc/acinclude.m44
-rw-r--r--newlib/libc/include/machine/ieeefp.h4
-rw-r--r--newlib/libc/include/machine/setjmp.h29
-rw-r--r--newlib/libc/machine/Makefile.inc3
-rw-r--r--newlib/libc/machine/xtensa/Makefile.inc8
-rw-r--r--newlib/libc/machine/xtensa/acinclude.m41
-rw-r--r--newlib/libc/machine/xtensa/memcpy.S343
-rw-r--r--newlib/libc/machine/xtensa/memset.S193
-rw-r--r--newlib/libc/machine/xtensa/setjmp.S252
-rw-r--r--newlib/libc/machine/xtensa/strcmp.S353
-rw-r--r--newlib/libc/machine/xtensa/strcpy.S274
-rw-r--r--newlib/libc/machine/xtensa/strlen.S115
-rw-r--r--newlib/libc/machine/xtensa/strncpy.S274
-rw-r--r--newlib/libc/machine/xtensa/xtensa-asm.h72
-rw-r--r--newlib/libc/machine/xtensa/xtensa.tex72
-rw-r--r--newlib/libc/sys/Makefile.inc3
-rw-r--r--newlib/libc/sys/xtensa/Makefile.inc1
-rw-r--r--newlib/libc/sys/xtensa/clibrary_init.c42
-rw-r--r--newlib/libc/sys/xtensa/creat.c9
-rw-r--r--newlib/libc/sys/xtensa/crt0.c16
-rw-r--r--newlib/libc/sys/xtensa/include/fenv.h88
-rw-r--r--newlib/libc/sys/xtensa/include/unistd.h13
-rw-r--r--newlib/libc/sys/xtensa/include/xtensa/config/core-isa.h115
-rw-r--r--newlib/libc/sys/xtensa/isatty.c18
-rw-r--r--newlib/libc/sys/xtensa/sys/file.h33
25 files changed, 2334 insertions, 1 deletions
diff --git a/newlib/libc/acinclude.m4 b/newlib/libc/acinclude.m4
index 42662669f..24148b13f 100644
--- a/newlib/libc/acinclude.m4
+++ b/newlib/libc/acinclude.m4
@@ -23,6 +23,7 @@ m4_foreach_w([SYS_DIR], [
sh sysmec sysnec810 sysnecv850 sysvi386 sysvnecv70
tic80 tirtos
w65
+ xtensa
z8ksim
], [AM_CONDITIONAL([HAVE_LIBC_SYS_]m4_toupper(SYS_DIR)[_DIR], test "${sys_dir}" = SYS_DIR)])
@@ -38,6 +39,7 @@ m4_include([libc/machine/nds32/acinclude.m4])
m4_include([libc/machine/powerpc/acinclude.m4])
m4_include([libc/machine/sh/acinclude.m4])
m4_include([libc/machine/spu/acinclude.m4])
+m4_include([libc/machine/xtensa/acinclude.m4])
m4_foreach_w([MACHINE], [
aarch64 amdgcn arc arm
@@ -58,7 +60,7 @@ m4_foreach_w([MACHINE], [
tic4x tic6x tic80
v850 visium
w65
- x86_64 xc16x xstormy16
+ x86_64 xc16x xstormy16 xtensa
z8k
], [AM_CONDITIONAL([HAVE_LIBC_MACHINE_]m4_toupper(MACHINE), test "${machine_dir}" = MACHINE)])
diff --git a/newlib/libc/include/machine/ieeefp.h b/newlib/libc/include/machine/ieeefp.h
index abadf520b..ede75e1aa 100644
--- a/newlib/libc/include/machine/ieeefp.h
+++ b/newlib/libc/include/machine/ieeefp.h
@@ -506,6 +506,10 @@
#define __IEEE_LITTLE_ENDIAN
#endif
+#ifdef __XTENSA_EB__
+#define __IEEE_BIG_ENDIAN
+#endif
+
#ifdef __CYGWIN__
#define __OBSOLETE_MATH_DEFAULT 0
#endif
diff --git a/newlib/libc/include/machine/setjmp.h b/newlib/libc/include/machine/setjmp.h
index 29b76cec1..e3534e18d 100644
--- a/newlib/libc/include/machine/setjmp.h
+++ b/newlib/libc/include/machine/setjmp.h
@@ -304,6 +304,35 @@ _BEGIN_STD_C
#define _JBLEN 8
#endif
+#ifdef __XTENSA__
+#if __XTENSA_WINDOWED_ABI__
+
+/* The jmp_buf structure for Xtensa windowed ABI holds the following
+ (where "proc" is the procedure that calls setjmp): 4-12 registers
+ from the window of proc, the 4 words from the save area at proc's $sp
+ (in case a subsequent alloca in proc moves $sp), and the return
+ address within proc. Everything else is saved on the stack in the
+ normal save areas. The jmp_buf structure is:
+
+ struct jmp_buf {
+ int regs[12];
+ int save[4];
+ void *return_address;
+ }
+
+ See the setjmp code for details. */
+
+/* sizeof(struct jmp_buf) */
+#define _JBLEN 17
+
+#else /* __XTENSA_CALL0_ABI__ */
+
+/* a0, a1, a12, a13, a14, a15 */
+#define _JBLEN 6
+
+#endif /* __XTENSA_CALL0_ABI__ */
+#endif /* __XTENSA__ */
+
#ifdef __mep__
/* 16 GPRs, pc, hi, lo */
#define _JBLEN 19
diff --git a/newlib/libc/machine/Makefile.inc b/newlib/libc/machine/Makefile.inc
index 8aae2c52c..a53cf9c10 100644
--- a/newlib/libc/machine/Makefile.inc
+++ b/newlib/libc/machine/Makefile.inc
@@ -169,6 +169,9 @@ endif
if HAVE_LIBC_MACHINE_XSTORMY16
include %D%/xstormy16/Makefile.inc
endif
+if HAVE_LIBC_MACHINE_XTENSA
+include %D%/xtensa/Makefile.inc
+endif
if HAVE_LIBC_MACHINE_Z8K
include %D%/z8k/Makefile.inc
endif
diff --git a/newlib/libc/machine/xtensa/Makefile.inc b/newlib/libc/machine/xtensa/Makefile.inc
new file mode 100644
index 000000000..158c2befe
--- /dev/null
+++ b/newlib/libc/machine/xtensa/Makefile.inc
@@ -0,0 +1,8 @@
+libc_a_SOURCES += \
+ %D%/memcpy.S %D%/memset.S %D%/setjmp.S %D%/strcmp.S %D%/strcpy.S \
+ %D%/strlen.S %D%/strncpy.S
+
+if XTENSA_ESP32_PSRAM_CACHE_FIX
+libc_a_CPPFLAGS_%C% = \
+ -DXTENSA_ESP32_PSRAM_CACHE_FIX
+endif
diff --git a/newlib/libc/machine/xtensa/acinclude.m4 b/newlib/libc/machine/xtensa/acinclude.m4
new file mode 100644
index 000000000..863d7507d
--- /dev/null
+++ b/newlib/libc/machine/xtensa/acinclude.m4
@@ -0,0 +1 @@
+AM_CONDITIONAL([XTENSA_ESP32_PSRAM_CACHE_FIX], [echo $CC | grep mfix-esp32-psram-cache-issue >/dev/null 2>&1])
diff --git a/newlib/libc/machine/xtensa/memcpy.S b/newlib/libc/machine/xtensa/memcpy.S
new file mode 100644
index 000000000..77b026a45
--- /dev/null
+++ b/newlib/libc/machine/xtensa/memcpy.S
@@ -0,0 +1,343 @@
+/* ANSI C standard library function memcpy.
+
+ Copyright (c) 2002-2008 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "xtensa-asm.h"
+
+/* If the Xtensa Unaligned Load Exception option is not used, this
+ code can run a few cycles faster by relying on the low address bits
+ being ignored. However, if the code is then run with an Xtensa ISS
+ client that checks for unaligned accesses, it will produce a lot of
+ warning messages. Set this flag to disable the use of unaligned
+ accesses and keep the ISS happy. */
+
+/* #define UNALIGNED_ADDRESSES_CHECKED XCHAL_UNALIGNED_LOAD_EXCEPTION */
+#define UNALIGNED_ADDRESSES_CHECKED 1
+
+
+/* void *memcpy (void *dst, const void *src, size_t len)
+
+ The algorithm is as follows:
+
+ If the destination is unaligned, align it by conditionally
+ copying 1- and/or 2-byte pieces.
+
+ If the source is aligned, copy 16 bytes with a loop, and then finish up
+ with 8, 4, 2, and 1-byte copies conditional on the length.
+
+ Else (if source is unaligned), do the same, but use SRC to align the
+ source data.
+
+ This code tries to use fall-through branches for the common
+ case of aligned source and destination and multiple of 4 (or 8) length. */
+
+
+/* Byte by byte copy. */
+
+ .text
+ .begin schedule
+ .align XCHAL_INST_FETCH_WIDTH
+ .literal_position
+__memcpy_aux:
+
+ /* Skip bytes to get proper alignment for three-byte loop */
+.skip XCHAL_INST_FETCH_WIDTH - 3
+
+.Lbytecopy:
+#if XCHAL_HAVE_LOOPS
+ loopnez a4, 2f
+#else
+ beqz a4, 2f
+ add a7, a3, a4 // a7 = end address for source
+#endif
+1: l8ui a6, a3, 0
+ addi a3, a3, 1
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ nop
+ nop
+ nop
+#endif
+ s8i a6, a5, 0
+ addi a5, a5, 1
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+#if !XCHAL_HAVE_LOOPS
+ bltu a3, a7, 1b
+#endif
+2: leaf_return
+
+
+/* Destination is unaligned. */
+
+ .align 4
+.Ldst1mod2: // dst is only byte aligned
+
+ /* Do short copies byte-by-byte. */
+ bltui a4, 7, .Lbytecopy
+
+ /* Copy 1 byte. */
+ l8ui a6, a3, 0
+ addi a3, a3, 1
+ addi a4, a4, -1
+ s8i a6, a5, 0
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+ addi a5, a5, 1
+
+ /* Return to main algorithm if dst is now aligned. */
+ bbci.l a5, 1, .Ldstaligned
+
+.Ldst2mod4: // dst has 16-bit alignment
+
+ /* Do short copies byte-by-byte. */
+ bltui a4, 6, .Lbytecopy
+
+ /* Copy 2 bytes. */
+ l8ui a6, a3, 0
+ l8ui a7, a3, 1
+ addi a3, a3, 2
+ addi a4, a4, -2
+ s8i a6, a5, 0
+ s8i a7, a5, 1
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+ addi a5, a5, 2
+
+ /* dst is now aligned; return to main algorithm. */
+ j .Ldstaligned
+
+
+ .align 4
+ .global memcpy
+ .type memcpy, @function
+memcpy:
+ leaf_entry sp, 16
+ /* a2 = dst, a3 = src, a4 = len */
+
+ mov a5, a2 // copy dst so that a2 is return value
+ bbsi.l a2, 0, .Ldst1mod2
+ bbsi.l a2, 1, .Ldst2mod4
+.Ldstaligned:
+
+ /* Get number of loop iterations with 16B per iteration. */
+ srli a7, a4, 4
+
+ /* Check if source is aligned. */
+ slli a8, a3, 30
+ bnez a8, .Lsrcunaligned
+
+ /* Destination and source are word-aligned, use word copy. */
+#if XCHAL_HAVE_LOOPS
+ loopnez a7, 2f
+#else
+ beqz a7, 2f
+ slli a8, a7, 4
+ add a8, a8, a3 // a8 = end of last 16B source chunk
+#endif
+
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+
+1: l32i a6, a3, 0
+ l32i a7, a3, 4
+ s32i a6, a5, 0
+ s32i a7, a5, 4
+ memw
+ l32i a6, a3, 8
+ l32i a7, a3, 12
+ s32i a6, a5, 8
+ s32i a7, a5, 12
+ memw
+
+ addi a3, a3, 16
+ addi a5, a5, 16
+
+#else
+
+1: l32i a6, a3, 0
+ l32i a7, a3, 4
+ s32i a6, a5, 0
+ l32i a6, a3, 8
+ s32i a7, a5, 4
+ l32i a7, a3, 12
+ s32i a6, a5, 8
+ addi a3, a3, 16
+ s32i a7, a5, 12
+ addi a5, a5, 16
+
+#endif
+
+
+#if !XCHAL_HAVE_LOOPS
+ bltu a3, a8, 1b
+#endif
+
+ /* Copy any leftover pieces smaller than 16B. */
+2: bbci.l a4, 3, 3f
+
+ /* Copy 8 bytes. */
+ l32i a6, a3, 0
+ l32i a7, a3, 4
+ addi a3, a3, 8
+ s32i a6, a5, 0
+ s32i a7, a5, 4
+ addi a5, a5, 8
+
+3: bbsi.l a4, 2, 4f
+ bbsi.l a4, 1, 5f
+ bbsi.l a4, 0, 6f
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+ leaf_return
+
+ .align 4
+ /* Copy 4 bytes. */
+4: l32i a6, a3, 0
+ addi a3, a3, 4
+ s32i a6, a5, 0
+ addi a5, a5, 4
+ bbsi.l a4, 1, 5f
+ bbsi.l a4, 0, 6f
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+ leaf_return
+
+ /* Copy 2 bytes. */
+5: l16ui a6, a3, 0
+ addi a3, a3, 2
+ s16i a6, a5, 0
+ addi a5, a5, 2
+ bbsi.l a4, 0, 6f
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+ leaf_return
+
+ /* Copy 1 byte. */
+6: l8ui a6, a3, 0
+ s8i a6, a5, 0
+
+.Ldone:
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+ leaf_return
+
+
+/* Destination is aligned; source is unaligned. */
+
+ .align 4
+.Lsrcunaligned:
+ /* Avoid loading anything for zero-length copies. */
+ beqz a4, .Ldone
+
+ /* Copy 16 bytes per iteration for word-aligned dst and
+ unaligned src. */
+ ssa8 a3 // set shift amount from byte offset
+#if UNALIGNED_ADDRESSES_CHECKED
+ srli a11, a8, 30 // save unalignment offset for below
+ sub a3, a3, a11 // align a3
+#endif
+ l32i a6, a3, 0 // load first word
+#if XCHAL_HAVE_LOOPS
+ loopnez a7, 2f
+#else
+ beqz a7, 2f
+ slli a10, a7, 4
+ add a10, a10, a3 // a10 = end of last 16B source chunk
+#endif
+1: l32i a7, a3, 4
+ l32i a8, a3, 8
+ src_b a6, a6, a7
+ s32i a6, a5, 0
+ l32i a9, a3, 12
+ src_b a7, a7, a8
+ s32i a7, a5, 4
+ l32i a6, a3, 16
+ src_b a8, a8, a9
+ s32i a8, a5, 8
+ addi a3, a3, 16
+ src_b a9, a9, a6
+ s32i a9, a5, 12
+ addi a5, a5, 16
+#if !XCHAL_HAVE_LOOPS
+ bltu a3, a10, 1b
+#endif
+
+2: bbci.l a4, 3, 3f
+
+ /* Copy 8 bytes. */
+ l32i a7, a3, 4
+ l32i a8, a3, 8
+ src_b a6, a6, a7
+ s32i a6, a5, 0
+ addi a3, a3, 8
+ src_b a7, a7, a8
+ s32i a7, a5, 4
+ addi a5, a5, 8
+ mov a6, a8
+
+3: bbci.l a4, 2, 4f
+
+ /* Copy 4 bytes. */
+ l32i a7, a3, 4
+ addi a3, a3, 4
+ src_b a6, a6, a7
+ s32i a6, a5, 0
+ addi a5, a5, 4
+ mov a6, a7
+4:
+#if UNALIGNED_ADDRESSES_CHECKED
+ add a3, a3, a11 // readjust a3 with correct misalignment
+#endif
+ bbsi.l a4, 1, 5f
+ bbsi.l a4, 0, 6f
+ leaf_return
+
+ /* Copy 2 bytes. */
+5: l8ui a6, a3, 0
+ l8ui a7, a3, 1
+ addi a3, a3, 2
+ s8i a6, a5, 0
+ s8i a7, a5, 1
+ addi a5, a5, 2
+ bbsi.l a4, 0, 6f
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+ leaf_return
+
+ /* Copy 1 byte. */
+6: l8ui a6, a3, 0
+ s8i a6, a5, 0
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+ leaf_return
+
+ .end schedule
+
+ .size memcpy, . - memcpy
diff --git a/newlib/libc/machine/xtensa/memset.S b/newlib/libc/machine/xtensa/memset.S
new file mode 100644
index 000000000..48b5829d7
--- /dev/null
+++ b/newlib/libc/machine/xtensa/memset.S
@@ -0,0 +1,193 @@
+/* ANSI C standard library function memset.
+
+ Copyright (c) 2001-2008 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "xtensa-asm.h"
+
+/* void *memset (void *dst, int c, size_t length)
+
+ The algorithm is as follows:
+
+ Create a word with c in all byte positions.
+
+ If the destination is aligned, set 16B chunks with a loop, and then
+ finish up with 8B, 4B, 2B, and 1B stores conditional on the length.
+
+ If the destination is unaligned, align it by conditionally
+ setting 1B and/or 2B and then go to aligned case.
+
+ This code tries to use fall-through branches for the common
+ case of an aligned destination (except for the branches to
+ the alignment labels). */
+
+
+/* Byte-by-byte set. */
+
+ .text
+ .begin schedule
+ .align XCHAL_INST_FETCH_WIDTH
+ .literal_position
+__memset_aux:
+
+ /* Skip bytes to get proper alignment for three-byte loop */
+.skip XCHAL_INST_FETCH_WIDTH - 3
+
+.Lbyteset:
+#if XCHAL_HAVE_LOOPS
+ loopnez a4, 2f
+#else
+ beqz a4, 2f
+ add a6, a5, a4 // a6 = ending address
+#endif
+1: s8i a3, a5, 0
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+ addi a5, a5, 1
+#if !XCHAL_HAVE_LOOPS
+ bltu a5, a6, 1b
+#endif
+2: leaf_return
+
+
+/* Destination is unaligned. */
+
+ .align 4
+
+.Ldst1mod2: // dst is only byte aligned
+
+ /* Do short sizes byte-by-byte. */
+ bltui a4, 8, .Lbyteset
+
+ /* Set 1 byte. */
+ s8i a3, a5, 0
+ addi a5, a5, 1
+ addi a4, a4, -1
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+
+ /* Now retest if dst is aligned. */
+ _bbci.l a5, 1, .Ldstaligned
+
+.Ldst2mod4: // dst has 16-bit alignment
+
+ /* Do short sizes byte-by-byte. */
+ bltui a4, 8, .Lbyteset
+
+ /* Set 2 bytes. */
+ s16i a3, a5, 0
+ addi a5, a5, 2
+ addi a4, a4, -2
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+
+ /* dst is now aligned; return to main algorithm */
+ j .Ldstaligned
+
+
+ .align 4
+ .global memset
+ .type memset, @function
+memset:
+ leaf_entry sp, 16
+ /* a2 = dst, a3 = c, a4 = length */
+
+ /* Duplicate character into all bytes of word. */
+ extui a3, a3, 0, 8
+ slli a7, a3, 8
+ or a3, a3, a7
+ slli a7, a3, 16
+ or a3, a3, a7
+
+ mov a5, a2 // copy dst so that a2 is return value
+
+ /* Check if dst is unaligned. */
+ _bbsi.l a2, 0, .Ldst1mod2
+ _bbsi.l a2, 1, .Ldst2mod4
+.Ldstaligned:
+
+ /* Get number of loop iterations with 16B per iteration. */
+ srli a7, a4, 4
+
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ //do not do this if we have less than one iteration to do
+ beqz a7, 2f
+ //this seems to work to prefetch the cache line
+ s32i a3, a5, 0
+ nop
+#endif
+
+ /* Destination is word-aligned. */
+#if XCHAL_HAVE_LOOPS
+ loopnez a7, 2f
+#else
+ beqz a7, 2f
+ slli a6, a7, 4
+ add a6, a6, a5 // a6 = end of last 16B chunk
+#endif
+ /* Set 16 bytes per iteration. */
+1: s32i a3, a5, 0
+ s32i a3, a5, 4
+ s32i a3, a5, 8
+ s32i a3, a5, 12
+ addi a5, a5, 16
+#if !XCHAL_HAVE_LOOPS
+ bltu a5, a6, 1b
+#endif
+
+ /* Set any leftover pieces smaller than 16B. */
+2: bbci.l a4, 3, 3f
+
+ /* Set 8 bytes. */
+ s32i a3, a5, 0
+ s32i a3, a5, 4
+ addi a5, a5, 8
+
+3: bbci.l a4, 2, 4f
+
+ /* Set 4 bytes. */
+ s32i a3, a5, 0
+ addi a5, a5, 4
+
+4: bbci.l a4, 1, 5f
+
+ /* Set 2 bytes. */
+ s16i a3, a5, 0
+ addi a5, a5, 2
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+
+5: bbci.l a4, 0, 6f
+
+ /* Set 1 byte. */
+ s8i a3, a5, 0
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+6: leaf_return
+
+ .end schedule
+
+ .size memset, . - memset
diff --git a/newlib/libc/machine/xtensa/setjmp.S b/newlib/libc/machine/xtensa/setjmp.S
new file mode 100644
index 000000000..c32d443f2
--- /dev/null
+++ b/newlib/libc/machine/xtensa/setjmp.S
@@ -0,0 +1,252 @@
+/* setjmp/longjmp functions for Xtensa.
+
+ Copyright (c) 2001-2006 by Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+/* Windowed ABI:
+
+ This implementation relies heavily on the Xtensa register window
+ mechanism. Setjmp flushes all the windows except its own to the
+ stack and then copies registers from the save areas on the stack
+ into the jmp_buf structure, along with the return address of the call
+ to setjmp. Longjmp invalidates all the windows except its own, and
+ then sets things up so that it will return to the right place,
+ using a window underflow to automatically restore the registers.
+
+ Note that it would probably be sufficient to only copy the
+ registers from setjmp's caller into jmp_buf. However, we also copy
+ the save area located at the stack pointer of setjmp's caller.
+ This save area will typically remain intact until the longjmp call.
+ The one exception is when there is an intervening alloca in
+ setjmp's caller. This is certainly an unusual situation and is
+ likely to cause problems in any case (the storage allocated on the
+ stack cannot be safely accessed following the longjmp). As bad as
+ it is, on most systems this situation would not necessarily lead to
+ a catastrophic failure. If we did not preserve the extra save area
+ on Xtensa, however, it would. When setjmp's caller returns after a
+ longjmp, there will be a window underflow; an invalid return
+ address or stack pointer in the save area will almost certainly
+ lead to a crash. Keeping a copy of the extra save area in the
+ jmp_buf avoids this with only a small additional cost. If setjmp
+ and longjmp are ever time-critical, this could be removed.
+
+
+ Call0 ABI:
+
+ Much like other ABIs, this version just saves the necessary registers
+ to the stack and restores them later. Much less needs to be done. */
+
+#include "xtensa-asm.h"
+
+#define SYS_nop 0
+
+
+#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
+
+/* int setjmp (jmp_buf env) */
+
+ .text
+ .align 4
+ .literal_position
+ .global setjmp
+ .type setjmp, @function
+setjmp:
+ entry sp, 16
+
+ /* Flush registers. */
+ mov a4, a2 // save a2 (jmp_buf)
+ movi a2, SYS_nop
+ syscall
+ mov a2, a4 // restore a2
+
+ /* Copy the register save area at (sp - 16). */
+ addi a5, a1, -16
+ l32i a3, a5, 0
+ l32i a4, a5, 4
+ s32i a3, a2, 0
+ s32i a4, a2, 4
+ l32i a3, a5, 8
+ l32i a4, a5, 12
+ s32i a3, a2, 8
+ s32i a4, a2, 12
+
+ /* Copy 0-8 words from the register overflow area. */
+ extui a3, a0, 30, 2
+ blti a3, 2, .Lendsj
+ l32i a7, a1, 4
+ slli a4, a3, 4
+ sub a5, a7, a4
+ addi a6, a2, 16
+ addi a7, a7, -16 // a7 = end of register overflow area
+.Lsjloop:
+ l32i a3, a5, 0
+ l32i a4, a5, 4
+ s32i a3, a6, 0
+ s32i a4, a6, 4
+ l32i a3, a5, 8
+ l32i a4, a5, 12
+ s32i a3, a6, 8
+ s32i a4, a6, 12
+ addi a5, a5, 16
+ addi a6, a6, 16
+ blt a5, a7, .Lsjloop
+.Lendsj:
+
+ /* Copy the register save area at sp. */
+ l32i a3, a1, 0
+ l32i a4, a1, 4
+ s32i a3, a2, 48
+ s32i a4, a2, 52
+ l32i a3, a1, 8
+ l32i a4, a1, 12
+ s32i a3, a2, 56
+ s32i a4, a2, 60
+
+ /* Save the return address, including the window size bits. */
+ s32i a0, a2, 64
+
+ movi a2, 0
+ retw
+ .size setjmp, . - setjmp
+
+
+/* void longjmp (jmp_buf env, int val) */
+
+ .align 4
+ .literal_position
+ .global longjmp
+ .type longjmp, @function
+longjmp:
+ entry sp, 16
+ /* a2 == &env, a3 == val */
+
+ /* Invalidate all but the current window;
+ set WindowStart to (1 << WindowBase). */
+ rsr a5, WINDOWBASE
+ movi a4, 1
+ ssl a5
+ sll a4, a4
+ wsr a4, WINDOWSTART
+ rsync
+
+ /* Return to the return address of the setjmp, using the
+ window size bits from the setjmp call so that the caller
+ will be able to find the return value that we put in a2. */
+
+ l32i a0, a2, 64
+
+ /* Copy the first 4 saved registers from jmp_buf into the save area
+ at the current sp so that the values will be restored to registers
+ when longjmp returns. */
+
+ addi a7, a1, -16
+ l32i a4, a2, 0
+ l32i a5, a2, 4
+ s32i a4, a7, 0
+ s32i a5, a7, 4
+ l32i a4, a2, 8
+ l32i a5, a2, 12
+ s32i a4, a7, 8
+ s32i a5, a7, 12
+
+ /* Copy the remaining 0-8 saved registers. */
+ extui a7, a0, 30, 2
+ blti a7, 2, .Lendlj
+ l32i a8, a2, 52
+ slli a4, a7, 4
+ sub a6, a8, a4
+ addi a5, a2, 16
+ addi a8, a8, -16 // a8 = end of register overflow area
+.Lljloop:
+ l32i a7, a5, 0
+ l32i a4, a5, 4
+ s32i a7, a6, 0
+ s32i a4, a6, 4
+ l32i a7, a5, 8
+ l32i a4, a5, 12
+ s32i a7, a6, 8
+ s32i a4, a6, 12
+ addi a5, a5, 16
+ addi a6, a6, 16
+ blt a6, a8, .Lljloop
+.Lendlj:
+
+ /* The 4 words saved from the register save area at the target's
+ sp are copied back to the target procedure's save area. The
+ only point of this is to prevent a catastrophic failure in
+ case the contents were moved by an alloca after calling
+ setjmp. This is a bit paranoid but it doesn't cost much. */
+
+ l32i a7, a2, 4 // load the target stack pointer
+ addi a7, a7, -16 // find the destination save area
+ l32i a4, a2, 48
+ l32i a5, a2, 52
+ s32i a4, a7, 0
+ s32i a5, a7, 4
+ l32i a4, a2, 56
+ l32i a5, a2, 60
+ s32i a4, a7, 8
+ s32i a5, a7, 12
+
+ /* Return val ? val : 1. */
+ movi a2, 1
+ movnez a2, a3, a3
+
+ retw
+ .size longjmp, . - longjmp
+
+#else /* CALL0 ABI */
+
+ .text
+ .align 4
+ .literal_position
+ .global setjmp
+ .type setjmp, @function
+setjmp:
+ s32i a0, a2, 0
+ s32i a1, a2, 4
+ s32i a12, a2, 8
+ s32i a13, a2, 12
+ s32i a14, a2, 16
+ s32i a15, a2, 20
+ movi a2, 0
+ ret
+ .size setjmp, . - setjmp
+
+ .align 4
+ .literal_position
+ .global longjmp
+ .type longjmp, @function
+longjmp:
+ l32i a0, a2, 0
+ l32i a12, a2, 8
+ l32i a13, a2, 12
+ l32i a14, a2, 16
+ l32i a15, a2, 20
+ l32i a1, a2, 4
+ /* Return val ? val : 1. */
+ movi a2, 1
+ movnez a2, a3, a3
+
+ ret
+ .size longjmp, .-longjmp
+
+#endif /* CALL0 ABI */
diff --git a/newlib/libc/machine/xtensa/strcmp.S b/newlib/libc/machine/xtensa/strcmp.S
new file mode 100644
index 000000000..49cb80581
--- /dev/null
+++ b/newlib/libc/machine/xtensa/strcmp.S
@@ -0,0 +1,353 @@
+/* ANSI C standard library function strcmp.
+
+   Copyright (c) 2001-2012 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "xtensa-asm.h"
+
+#define MASK4 0x40404040
+
+
+#if XCHAL_HAVE_L32R
+ .literal .Lmask0, MASK0
+ .literal .Lmask1, MASK1
+ .literal .Lmask2, MASK2
+ .literal .Lmask3, MASK3
+ .literal .Lmask4, MASK4
+#endif /* XCHAL_HAVE_L32R */
+
+ .text
+ .align 4
+ .literal_position
+ .global strcmp
+ .type strcmp, @function
+strcmp:
+
+ leaf_entry sp, 16
+ /* a2 = s1, a3 = s2 */
+
+ l8ui a8, a2, 0 // byte 0 from s1
+ l8ui a9, a3, 0 // byte 0 from s2
+ movi a10, 3 // mask
+ bne a8, a9, .Lretdiff
+
+ or a11, a2, a3
+ bnone a11, a10, .Laligned
+
+ xor a11, a2, a3 // compare low two bits of s1 and s2
+ bany a11, a10, .Lunaligned // if they have different alignment
+
+ /* s1/s2 are not word-aligned. */
+ addi a2, a2, 1 // advance s1
+ beqz a8, .Leq // bytes equal, if zero, strings are equal
+ addi a3, a3, 1 // advance s2
+ bnone a2, a10, .Laligned // if s1/s2 now aligned
+ l8ui a8, a2, 0 // byte 1 from s1
+ l8ui a9, a3, 0 // byte 1 from s2
+ addi a2, a2, 1 // advance s1
+ bne a8, a9, .Lretdiff // if different, return difference
+ beqz a8, .Leq // bytes equal, if zero, strings are equal
+ addi a3, a3, 1 // advance s2
+ bnone a2, a10, .Laligned // if s1/s2 now aligned
+ l8ui a8, a2, 0 // byte 2 from s1
+ l8ui a9, a3, 0 // byte 2 from s2
+ addi a2, a2, 1 // advance s1
+ bne a8, a9, .Lretdiff // if different, return difference
+ beqz a8, .Leq // bytes equal, if zero, strings are equal
+ addi a3, a3, 1 // advance s2
+ j .Laligned
+
+/* s1 and s2 have different alignment.
+
+ If the zero-overhead loop option is available, use an (almost)
+ infinite zero-overhead loop with conditional exits so we only pay
+ for taken branches when exiting the loop.
+
+ Note: It is important for this unaligned case to come before the
+ code for aligned strings, because otherwise some of the branches
+ above cannot reach and have to be transformed to branches around
+ jumps. The unaligned code is smaller and the branches can reach
+ over it. */
+
+ .align 4
+#if XCHAL_HAVE_LOOPS
+#if XCHAL_HAVE_DENSITY
+ /* (2 mod 4) alignment for loop instruction */
+#else
+ /* (1 mod 4) alignment for loop instruction */
+ .byte 0
+ .byte 0
+#endif
+#endif
+.Lunaligned:
+#if XCHAL_HAVE_LOOPS
+#if XCHAL_HAVE_DENSITY
+ _movi.n a8, 0 // set up for the maximum loop count
+#else
+ _movi a8, 0 // set up for the maximum loop count
+#endif
+ loop a8, .Lretdiff // loop forever (almost anyway)
+#endif
+.Lnextbyte:
+ l8ui a8, a2, 0
+ l8ui a9, a3, 0
+ addi a2, a2, 1
+ bne a8, a9, .Lretdiff
+ addi a3, a3, 1
+#if XCHAL_HAVE_LOOPS
+ beqz a8, .Lretdiff
+#else
+ bnez a8, .Lnextbyte
+#endif
+.Lretdiff:
+ sub a2, a8, a9
+ leaf_return
+
+/* s1 is word-aligned; s2 is word-aligned.
+
+ If the zero-overhead loop option is available, use an (almost)
+ infinite zero-overhead loop with conditional exits so we only pay
+ for taken branches when exiting the loop. */
+
+/* New algorithm, relying on the fact that all normal ASCII is between
+ 32 and 127.
+
+ Rather than check all bytes for zero:
+ Take one word (4 bytes). Call it w1.
+ Shift w1 left by one into w1'.
+ Or w1 and w1'. For all normal ASCII bit 6 will be 1; for zero it won't.
+ Check that all 4 bit 6's (one for each byte) are one:
+ If they are, we are definitely not done.
+ If they are not, we are probably done, but need to check for zero. */
+
+ .align 4
+#if XCHAL_HAVE_LOOPS
+#if !XCHAL_HAVE_L32R
+ /* (2 mod 4) alignment for loop instruction */
+ .byte 0
+ .byte 0
+#endif
+.Laligned:
+#if XCHAL_HAVE_L32R
+ l32r a4, .Lmask0 // mask for byte 0
+ l32r a7, .Lmask4
+#else
+ const16 a4, MASK0@h
+ const16 a4, MASK0@l
+ const16 a7, MASK4@h
+ const16 a7, MASK4@l
+#endif
+ /* Loop forever */
+1:
+ loop a0, .Laligned_done
+
+ /* First unrolled loop body. */
+ l32i a8, a2, 0 // get word from s1
+ l32i a9, a3, 0 // get word from s2
+ slli a5, a8, 1
+ bne a8, a9, .Lwne2
+ or a9, a8, a5
+ bnall a9, a7, .Lprobeq
+
+ /* Second unrolled loop body. */
+ l32i a8, a2, 4 // get word from s1+4
+ l32i a9, a3, 4 // get word from s2+4
+ slli a5, a8, 1
+ bne a8, a9, .Lwne2
+ or a9, a8, a5
+ bnall a9, a7, .Lprobeq2
+
+ addi a2, a2, 8 // advance s1 pointer
+ addi a3, a3, 8 // advance s2 pointer
+.Laligned_done:
+ j 1b
+
+.Lprobeq2:
+ /* Adjust pointers to account for the loop unrolling. */
+ addi a2, a2, 4
+ addi a3, a3, 4
+
+#else /* !XCHAL_HAVE_LOOPS */
+
+.Laligned:
+ movi a4, MASK0 // mask for byte 0
+ movi a7, MASK4
+ j .Lfirstword
+.Lnextword:
+ addi a2, a2, 4 // advance s1 pointer
+ addi a3, a3, 4 // advance s2 pointer
+.Lfirstword:
+ l32i a8, a2, 0 // get word from s1
+ l32i a9, a3, 0 // get word from s2
+ slli a5, a8, 1
+ bne a8, a9, .Lwne2
+ or a9, a8, a5
+ ball a9, a7, .Lnextword
+#endif /* !XCHAL_HAVE_LOOPS */
+
+ /* align (0 mod 4) */
+.Lprobeq:
+ /* Words are probably equal, but check for sure.
+ If not, loop over the rest of string using normal algorithm. */
+
+ bnone a8, a4, .Leq // if byte 0 is zero
+#if XCHAL_HAVE_L32R
+ l32r a5, .Lmask1 // mask for byte 1
+ l32r a6, .Lmask2 // mask for byte 2
+ bnone a8, a5, .Leq // if byte 1 is zero
+ l32r a7, .Lmask3 // mask for byte 3
+ bnone a8, a6, .Leq // if byte 2 is zero
+ bnone a8, a7, .Leq // if byte 3 is zero
+ /* align (1 mod 4) */
+#else
+ const16 a5, MASK1@h // mask for byte 1
+ const16 a5, MASK1@l
+ bnone a8, a5, .Leq // if byte 1 is zero
+ const16 a6, MASK2@h // mask for byte 2
+ const16 a6, MASK2@l
+ bnone a8, a6, .Leq // if byte 2 is zero
+ const16 a7, MASK3@h // mask for byte 3
+ const16 a7, MASK3@l
+ bnone a8, a7, .Leq // if byte 3 is zero
+ /* align (2 mod 4) */
+#endif /* XCHAL_HAVE_L32R */
+#if XCHAL_HAVE_DENSITY
+ addi.n a2, a2, 4 // advance s1 pointer
+ addi.n a3, a3, 4 // advance s2 pointer
+ /* align (1 mod 4) or (2 mod 4) */
+#else
+ addi a2, a2, 4 // advance s1 pointer
+ addi a3, a3, 4 // advance s2 pointer
+ or a1, a1, a1 // nop
+#if !XCHAL_HAVE_L32R
+ or a1, a1, a1 // nop
+#endif
+ /* align (2 mod 4) */
+#endif /* XCHAL_HAVE_DENSITY */
+#if XCHAL_HAVE_LOOPS
+1:
+ loop a0, .Leq // loop forever (a4 is bigger than max iters)
+ l32i a8, a2, 0 // get word from s1
+ l32i a9, a3, 0 // get word from s2
+ addi a2, a2, 4 // advance s1 pointer
+ bne a8, a9, .Lwne
+ bnone a8, a4, .Leq // if byte 0 is zero
+ bnone a8, a5, .Leq // if byte 1 is zero
+ bnone a8, a6, .Leq // if byte 2 is zero
+ bnone a8, a7, .Leq // if byte 3 is zero
+ addi a3, a3, 4 // advance s2 pointer
+ j 1b
+#else /* !XCHAL_HAVE_LOOPS */
+
+ j .Lfirstword2
+.Lnextword2:
+ addi a3, a3, 4 // advance s2 pointer
+.Lfirstword2:
+ l32i a8, a2, 0 // get word from s1
+ l32i a9, a3, 0 // get word from s2
+ addi a2, a2, 4 // advance s1 pointer
+ bne a8, a9, .Lwne
+ bnone a8, a4, .Leq // if byte 0 is zero
+ bnone a8, a5, .Leq // if byte 1 is zero
+ bnone a8, a6, .Leq // if byte 2 is zero
+	bany	a8, a7, .Lnextword2	// loop while byte 3 is nonzero
+#endif /* !XCHAL_HAVE_LOOPS */
+
+ /* Words are equal; some byte is zero. */
+.Leq: movi a2, 0 // return equal
+ leaf_return
+
+.Lwne2: /* Words are not equal. On big-endian processors, if none of the
+ bytes are zero, the return value can be determined by a simple
+ comparison. */
+#ifdef __XTENSA_EB__
+ or a10, a8, a5
+ bnall a10, a7, .Lsomezero
+ bgeu a8, a9, .Lposreturn
+ movi a2, -1
+ leaf_return
+.Lposreturn:
+ movi a2, 1
+ leaf_return
+.Lsomezero: // There is probably some zero byte.
+#endif /* __XTENSA_EB__ */
+.Lwne: /* Words are not equal. */
+ xor a2, a8, a9 // get word with nonzero in byte that differs
+ bany a2, a4, .Ldiff0 // if byte 0 differs
+ movi a5, MASK1 // mask for byte 1
+ bnone a8, a4, .Leq // if byte 0 is zero
+ bany a2, a5, .Ldiff1 // if byte 1 differs
+ movi a6, MASK2 // mask for byte 2
+ bnone a8, a5, .Leq // if byte 1 is zero
+ bany a2, a6, .Ldiff2 // if byte 2 differs
+ bnone a8, a6, .Leq // if byte 2 is zero
+#ifdef __XTENSA_EB__
+.Ldiff3:
+.Ldiff2:
+.Ldiff1:
+ /* Byte 0 is equal (at least) and there is a difference before a zero
+ byte. Just subtract words to get the return value.
+ The high order equal bytes cancel, leaving room for the sign. */
+ sub a2, a8, a9
+ leaf_return
+
+.Ldiff0:
+ /* Need to make room for the sign, so can't subtract whole words. */
+ extui a10, a8, 24, 8
+ extui a11, a9, 24, 8
+ sub a2, a10, a11
+ leaf_return
+
+#else /* !__XTENSA_EB__ */
+ /* Little-endian is a little more difficult because can't subtract
+ whole words. */
+.Ldiff3:
+ /* Bytes 0-2 are equal; byte 3 is different.
+ For little-endian need to have a sign bit for the difference. */
+ extui a10, a8, 24, 8
+ extui a11, a9, 24, 8
+ sub a2, a10, a11
+ leaf_return
+
+.Ldiff0:
+ /* Byte 0 is different. */
+ extui a10, a8, 0, 8
+ extui a11, a9, 0, 8
+ sub a2, a10, a11
+ leaf_return
+
+.Ldiff1:
+ /* Byte 0 is equal; byte 1 is different. */
+ extui a10, a8, 8, 8
+ extui a11, a9, 8, 8
+ sub a2, a10, a11
+ leaf_return
+
+.Ldiff2:
+ /* Bytes 0-1 are equal; byte 2 is different. */
+ extui a10, a8, 16, 8
+ extui a11, a9, 16, 8
+ sub a2, a10, a11
+ leaf_return
+
+#endif /* !__XTENSA_EB__ */
+
+ .size strcmp, . - strcmp
diff --git a/newlib/libc/machine/xtensa/strcpy.S b/newlib/libc/machine/xtensa/strcpy.S
new file mode 100644
index 000000000..9ab624b73
--- /dev/null
+++ b/newlib/libc/machine/xtensa/strcpy.S
@@ -0,0 +1,274 @@
+/* ANSI C standard library function strcpy.
+
+ Copyright (c) 2001-2008 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "xtensa-asm.h"
+
+ .text
+ .begin schedule
+ .align 4
+ .literal_position
+ .global strcpy
+ .type strcpy, @function
+strcpy:
+ leaf_entry sp, 16
+ /* a2 = dst, a3 = src */
+
+ mov a10, a2 // leave dst in return value register
+ movi a4, MASK0
+ movi a5, MASK1
+ movi a6, MASK2
+ movi a7, MASK3
+ bbsi.l a3, 0, .Lsrc1mod2
+ bbsi.l a3, 1, .Lsrc2mod4
+.Lsrcaligned:
+
+ /* Check if the destination is aligned. */
+ movi a8, 3
+ bnone a10, a8, .Laligned
+
+ j .Ldstunaligned
+
+.Lsrc1mod2: // src address is odd
+ l8ui a8, a3, 0 // get byte 0
+ addi a3, a3, 1 // advance src pointer
+ s8i a8, a10, 0 // store byte 0
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+ beqz a8, 1f // if byte 0 is zero
+ addi a10, a10, 1 // advance dst pointer
+ bbci.l a3, 1, .Lsrcaligned // if src is now word-aligned
+
+.Lsrc2mod4: // src address is 2 mod 4
+ l8ui a8, a3, 0 // get byte 0
+ /* 1-cycle interlock */
+ s8i a8, a10, 0 // store byte 0
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+ beqz a8, 1f // if byte 0 is zero
+	l8ui	a8, a3, 1	// get byte 1
+	addi	a3, a3, 2	// advance src pointer
+	s8i	a8, a10, 1	// store byte 1
+ addi a10, a10, 2 // advance dst pointer
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+ bnez a8, .Lsrcaligned
+1: leaf_return
+
+
+/* dst is word-aligned; src is word-aligned. */
+
+ .align 4
+#if XCHAL_HAVE_LOOPS
+#if XCHAL_HAVE_DENSITY
+ /* (2 mod 4) alignment for loop instruction */
+#else
+ /* (1 mod 4) alignment for loop instruction */
+ .byte 0
+ .byte 0
+#endif
+.Laligned:
+#if XCHAL_HAVE_DENSITY
+ _movi.n a8, 0 // set up for the maximum loop count
+#else
+ _movi a8, 0 // set up for the maximum loop count
+#endif
+ loop a8, .Lz3 // loop forever (almost anyway)
+ l32i a8, a3, 0 // get word from src
+ addi a3, a3, 4 // advance src pointer
+ bnone a8, a4, .Lz0 // if byte 0 is zero
+ bnone a8, a5, .Lz1 // if byte 1 is zero
+ bnone a8, a6, .Lz2 // if byte 2 is zero
+ s32i a8, a10, 0 // store word to dst
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ l32i a8, a10, 0
+ s32i a8, a10, 0
+#endif
+ bnone a8, a7, .Lz3 // if byte 3 is zero
+ addi a10, a10, 4 // advance dst pointer
+
+#else /* !XCHAL_HAVE_LOOPS */
+
+1: addi a10, a10, 4 // advance dst pointer
+.Laligned:
+ l32i a8, a3, 0 // get word from src
+ addi a3, a3, 4 // advance src pointer
+ bnone a8, a4, .Lz0 // if byte 0 is zero
+ bnone a8, a5, .Lz1 // if byte 1 is zero
+ bnone a8, a6, .Lz2 // if byte 2 is zero
+ s32i a8, a10, 0 // store word to dst
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ l32i a8, a10, 0
+ s32i a8, a10, 0
+#endif
+
+	bany	a8, a7, 1b	// loop if byte 3 is nonzero
+#endif /* !XCHAL_HAVE_LOOPS */
+
+.Lz3: /* Byte 3 is zero. */
+ leaf_return
+
+.Lz0: /* Byte 0 is zero. */
+#ifdef __XTENSA_EB__
+ movi a8, 0
+#endif
+ s8i a8, a10, 0
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+ leaf_return
+
+.Lz1: /* Byte 1 is zero. */
+#ifdef __XTENSA_EB__
+ extui a8, a8, 16, 16
+#endif
+ s16i a8, a10, 0
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+ leaf_return
+
+.Lz2: /* Byte 2 is zero. */
+#ifdef __XTENSA_EB__
+ extui a8, a8, 16, 16
+#endif
+ s16i a8, a10, 0
+ movi a8, 0
+ s8i a8, a10, 2
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+ leaf_return
+
+#if 1
+/* For now just use byte copy loop for the unaligned destination case. */
+
+ .align 4
+#if XCHAL_HAVE_LOOPS
+#if XCHAL_HAVE_DENSITY
+ /* (2 mod 4) alignment for loop instruction */
+#else
+ /* (1 mod 4) alignment for loop instruction */
+ .byte 0
+ .byte 0
+#endif
+#endif
+.Ldstunaligned:
+
+#if XCHAL_HAVE_LOOPS
+#if XCHAL_HAVE_DENSITY
+ _movi.n a8, 0 // set up for the maximum loop count
+#else
+ _movi a8, 0 // set up for the maximum loop count
+#endif
+ loop a8, 2f // loop forever (almost anyway)
+#endif
+1: l8ui a8, a3, 0
+ addi a3, a3, 1
+ s8i a8, a10, 0
+ addi a10, a10, 1
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+#if XCHAL_HAVE_LOOPS
+ beqz a8, 2f
+#else
+ bnez a8, 1b
+#endif
+2: leaf_return
+
+#else /* 0 */
+
+/* This code is not functional yet. */
+
+.Ldstunaligned:
+ l32i a9, a2, 0 // load word from dst
+#ifdef __XTENSA_EB__
+ ssa8b a9 // rotate by dst alignment so that
+ src a9, a9, a9 // shift in loop will put back in place
+ ssa8l a9 // shift left by byte*8
+#else
+ ssa8l a9 // rotate by dst alignment so that
+ src a9, a9, a9 // shift in loop will put back in place
+ ssa8b a9 // shift left by 32-byte*8
+#endif
+
+/* dst is word-aligned; src is unaligned. */
+
+.Ldstunalignedloop:
+ l32i a8, a3, 0 // get word from src
+ /* 1-cycle interlock */
+ bnone a8, a4, .Lu0 // if byte 0 is zero
+ bnone a8, a5, .Lu1 // if byte 1 is zero
+ bnone a8, a6, .Lu2 // if byte 2 is zero
+ src a9, a8, a9 // combine last word and this word
+ s32i a9, a10, 0 // store word to dst
+ bnone a8, a7, .Lu3 // if byte 3 is nonzero, iterate
+ l32i a9, a3, 4 // get word from src
+ addi a3, a3, 8 // advance src pointer
+ bnone a9, a4, .Lu4 // if byte 0 is zero
+ bnone a9, a5, .Lu5 // if byte 1 is zero
+ bnone a9, a6, .Lu6 // if byte 2 is zero
+ src a8, a9, a8 // combine last word and this word
+ s32i a8, a10, 4 // store word to dst
+ addi a10, a10, 8 // advance dst pointer
+ bany a8, a7, .Ldstunalignedloop // if byte 3 is nonzero, iterate
+
+ /* Byte 7 is zero. */
+.Lu7: leaf_return
+
+.Lu0: /* Byte 0 is zero. */
+#ifdef __XTENSA_EB__
+ movi a8, 0
+#endif
+ s8i a8, a10, 0
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+ leaf_return
+
+.Lu1: /* Byte 1 is zero. */
+#ifdef __XTENSA_EB__
+ extui a8, a8, 16, 16
+#endif
+ s16i a8, a10, 0
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+ leaf_return
+
+.Lu2: /* Byte 2 is zero. */
+ s16i a8, a10, 0
+ movi a8, 0
+ s8i a8, a10, 2
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+ leaf_return
+
+#endif /* 0 */
+ .end schedule
+
+ .size strcpy, . - strcpy
diff --git a/newlib/libc/machine/xtensa/strlen.S b/newlib/libc/machine/xtensa/strlen.S
new file mode 100644
index 000000000..6560a3185
--- /dev/null
+++ b/newlib/libc/machine/xtensa/strlen.S
@@ -0,0 +1,115 @@
+/* ANSI C standard library function strlen.
+
+ Copyright (c) 2001-2008 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "xtensa-asm.h"
+
+ .text
+ .begin schedule
+ .align 4
+ .literal_position
+ .global strlen
+ .type strlen, @function
+strlen:
+ leaf_entry sp, 16
+ /* a2 = s */
+
+ addi a3, a2, -4 // because we overincrement at the end
+ movi a4, MASK0
+ movi a5, MASK1
+ movi a6, MASK2
+ movi a7, MASK3
+ bbsi.l a2, 0, .L1mod2
+ bbsi.l a2, 1, .L2mod4
+ j .Laligned
+
+.L1mod2: // address is odd
+ l8ui a8, a3, 4 // get byte 0
+ addi a3, a3, 1 // advance string pointer
+ beqz a8, .Lz3 // if byte 0 is zero
+ bbci.l a3, 1, .Laligned // if string pointer is now word-aligned
+
+.L2mod4: // address is 2 mod 4
+ addi a3, a3, 2 // advance ptr for aligned access
+ l32i a8, a3, 0 // get word with first two bytes of string
+ bnone a8, a6, .Lz2 // if byte 2 (of word, not string) is zero
+ bany a8, a7, .Laligned // if byte 3 (of word, not string) is nonzero
+
+ /* Byte 3 is zero. */
+ addi a3, a3, 3 // point to zero byte
+ sub a2, a3, a2 // subtract to get length
+ leaf_return
+
+
+/* String is word-aligned. */
+
+ .align 4
+#if XCHAL_HAVE_LOOPS
+#if XCHAL_HAVE_DENSITY
+ /* (2 mod 4) alignment for loop instruction */
+#else
+ /* (1 mod 4) alignment for loop instruction */
+ .byte 0
+ .byte 0
+#endif
+#endif
+.Laligned:
+#if XCHAL_HAVE_LOOPS
+#if XCHAL_HAVE_DENSITY
+ _movi.n a8, 0 // set up for the maximum loop count
+#else
+ _movi a8, 0 // set up for the maximum loop count
+#endif
+ loop a8, .Lz3 // loop forever (almost anyway)
+#endif
+1: l32i a8, a3, 4 // get next word of string
+ addi a3, a3, 4 // advance string pointer
+ bnone a8, a4, .Lz0 // if byte 0 is zero
+ bnone a8, a5, .Lz1 // if byte 1 is zero
+ bnone a8, a6, .Lz2 // if byte 2 is zero
+#if XCHAL_HAVE_LOOPS
+ bnone a8, a7, .Lz3 // if byte 3 is zero
+#else
+ bany a8, a7, 1b // repeat if byte 3 is non-zero
+#endif
+
+.Lz3: /* Byte 3 is zero. */
+ addi a3, a3, 3 // point to zero byte
+ /* Fall through.... */
+
+.Lz0: /* Byte 0 is zero. */
+ sub a2, a3, a2 // subtract to get length
+ leaf_return
+
+.Lz1: /* Byte 1 is zero. */
+ addi a3, a3, 1 // point to zero byte
+ sub a2, a3, a2 // subtract to get length
+ leaf_return
+
+.Lz2: /* Byte 2 is zero. */
+ addi a3, a3, 2 // point to zero byte
+ sub a2, a3, a2 // subtract to get length
+ leaf_return
+
+ .end schedule
+
+ .size strlen, . - strlen
diff --git a/newlib/libc/machine/xtensa/strncpy.S b/newlib/libc/machine/xtensa/strncpy.S
new file mode 100644
index 000000000..faa7c7b25
--- /dev/null
+++ b/newlib/libc/machine/xtensa/strncpy.S
@@ -0,0 +1,274 @@
+/* ANSI C standard library function strncpy.
+
+ Copyright (c) 2001-2008 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#include "xtensa-asm.h"
+
+ .text
+.begin schedule
+ .align 4
+ .literal_position
+__strncpy_aux:
+
+.Lsrc1mod2: // src address is odd
+ l8ui a8, a3, 0 // get byte 0
+ addi a3, a3, 1 // advance src pointer
+ s8i a8, a10, 0 // store byte 0
+ addi a4, a4, -1 // decrement n
+ beqz a4, .Lret // if n is zero
+ addi a10, a10, 1 // advance dst pointer
+ beqz a8, .Lfill // if byte 0 is zero
+ bbci.l a3, 1, .Lsrcaligned // if src is now word-aligned
+
+.Lsrc2mod4: // src address is 2 mod 4
+ l8ui a8, a3, 0 // get byte 0
+ addi a4, a4, -1 // decrement n
+ s8i a8, a10, 0 // store byte 0
+ beqz a4, .Lret // if n is zero
+ addi a10, a10, 1 // advance dst pointer
+ beqz a8, .Lfill // if byte 0 is zero
+	l8ui	a8, a3, 1	// get byte 1
+	addi	a3, a3, 2	// advance src pointer
+	s8i	a8, a10, 0	// store byte 1
+ addi a4, a4, -1 // decrement n
+ beqz a4, .Lret // if n is zero
+ addi a10, a10, 1 // advance dst pointer
+ bnez a8, .Lsrcaligned
+ j .Lfill
+
+.Lret:
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+ leaf_return
+
+
+ .align 4
+ .global strncpy
+ .type strncpy, @function
+strncpy:
+ leaf_entry sp, 16
+ /* a2 = dst, a3 = src */
+
+ mov a10, a2 // leave dst in return value register
+ beqz a4, .Lret // if n is zero
+
+ movi a11, MASK0
+ movi a5, MASK1
+ movi a6, MASK2
+ movi a7, MASK3
+ bbsi.l a3, 0, .Lsrc1mod2
+ bbsi.l a3, 1, .Lsrc2mod4
+.Lsrcaligned:
+
+ /* Check if the destination is aligned. */
+ movi a8, 3
+ bnone a10, a8, .Laligned
+
+ j .Ldstunaligned
+
+
+/* Fill the dst with zeros -- n is at least 1. */
+
+.Lfill:
+ movi a9, 0
+ bbsi.l a10, 0, .Lfill1mod2
+ bbsi.l a10, 1, .Lfill2mod4
+.Lfillaligned:
+ blti a4, 4, .Lfillcleanup
+
+ /* Loop filling complete words with zero. */
+#if XCHAL_HAVE_LOOPS
+
+ srai a8, a4, 2
+ loop a8, 1f
+ s32i a9, a10, 0
+ addi a10, a10, 4
+
+1: slli a8, a8, 2
+ sub a4, a4, a8
+
+#else /* !XCHAL_HAVE_LOOPS */
+
+1: s32i a9, a10, 0
+ addi a10, a10, 4
+ addi a4, a4, -4
+ bgei a4, 4, 1b
+
+#endif /* !XCHAL_HAVE_LOOPS */
+
+ beqz a4, 2f
+
+.Lfillcleanup:
+ /* Fill leftover (1 to 3) bytes with zero. */
+ s8i a9, a10, 0 // store byte 0
+ addi a4, a4, -1 // decrement n
+ addi a10, a10, 1
+ bnez a4, .Lfillcleanup
+
+2:
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+ leaf_return
+
+.Lfill1mod2: // dst address is odd
+ s8i a9, a10, 0 // store byte 0
+ addi a4, a4, -1 // decrement n
+ beqz a4, 2b // if n is zero
+ addi a10, a10, 1 // advance dst pointer
+ bbci.l a10, 1, .Lfillaligned // if dst is now word-aligned
+
+.Lfill2mod4: // dst address is 2 mod 4
+ s8i a9, a10, 0 // store byte 0
+ addi a4, a4, -1 // decrement n
+ beqz a4, 2b // if n is zero
+ s8i a9, a10, 1 // store byte 1
+ addi a4, a4, -1 // decrement n
+ beqz a4, 2b // if n is zero
+ addi a10, a10, 2 // advance dst pointer
+ j .Lfillaligned
+
+
+/* dst is word-aligned; src is word-aligned; n is at least 1. */
+
+ .align 4
+#if XCHAL_HAVE_LOOPS
+#if XCHAL_HAVE_DENSITY
+ /* (2 mod 4) alignment for loop instruction */
+#else
+ /* (1 mod 4) alignment for loop instruction */
+ .byte 0
+ .byte 0
+#endif
+#endif
+.Laligned:
+#if XCHAL_HAVE_LOOPS
+#if XCHAL_HAVE_DENSITY
+ _movi.n a8, 0 // set up for the maximum loop count
+#else
+ _movi a8, 0 // set up for the maximum loop count
+#endif
+ loop a8, 1f // loop forever (almost anyway)
+ blti a4, 5, .Ldstunaligned // n is near limit; do one at a time
+ l32i a8, a3, 0 // get word from src
+ addi a3, a3, 4 // advance src pointer
+ bnone a8, a11, .Lz0 // if byte 0 is zero
+ bnone a8, a5, .Lz1 // if byte 1 is zero
+ bnone a8, a6, .Lz2 // if byte 2 is zero
+ s32i a8, a10, 0 // store word to dst
+ addi a4, a4, -4 // decrement n
+ addi a10, a10, 4 // advance dst pointer
+ bnone a8, a7, .Lfill // if byte 3 is zero
+1:
+
+#else /* !XCHAL_HAVE_LOOPS */
+
+1: blti a4, 5, .Ldstunaligned // n is near limit; do one at a time
+ l32i a8, a3, 0 // get word from src
+ addi a3, a3, 4 // advance src pointer
+ bnone a8, a11, .Lz0 // if byte 0 is zero
+ bnone a8, a5, .Lz1 // if byte 1 is zero
+ bnone a8, a6, .Lz2 // if byte 2 is zero
+ s32i a8, a10, 0 // store word to dst
+ addi a4, a4, -4 // decrement n
+ addi a10, a10, 4 // advance dst pointer
+ bany a8, a7, 1b // no zeroes
+#endif /* !XCHAL_HAVE_LOOPS */
+
+ j .Lfill
+
+.Lz0: /* Byte 0 is zero. */
+#ifdef __XTENSA_EB__
+ movi a8, 0
+#endif
+ s8i a8, a10, 0
+ addi a4, a4, -1 // decrement n
+ addi a10, a10, 1 // advance dst pointer
+ j .Lfill
+
+.Lz1: /* Byte 1 is zero. */
+#ifdef __XTENSA_EB__
+ extui a8, a8, 16, 16
+#endif
+ s16i a8, a10, 0
+ addi a4, a4, -2 // decrement n
+ addi a10, a10, 2 // advance dst pointer
+ j .Lfill
+
+.Lz2: /* Byte 2 is zero. */
+#ifdef __XTENSA_EB__
+ extui a8, a8, 16, 16
+#endif
+ s16i a8, a10, 0
+ movi a8, 0
+ s8i a8, a10, 2
+ addi a4, a4, -3 // decrement n
+ addi a10, a10, 3 // advance dst pointer
+ j .Lfill
+
+ .align 4
+#if XCHAL_HAVE_LOOPS
+#if XCHAL_HAVE_DENSITY
+ /* (2 mod 4) alignment for loop instruction */
+#else
+ /* (1 mod 4) alignment for loop instruction */
+ .byte 0
+ .byte 0
+#endif
+#endif
+.Ldstunaligned:
+
+#if XCHAL_HAVE_LOOPS
+#if XCHAL_HAVE_DENSITY
+ _movi.n a8, 0 // set up for the maximum loop count
+#else
+ _movi a8, 0 // set up for the maximum loop count
+#endif
+ loop a8, 2f // loop forever (almost anyway)
+#endif
+1: l8ui a8, a3, 0
+ addi a3, a3, 1
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ nop
+ nop
+ nop
+#endif
+ s8i a8, a10, 0
+ addi a4, a4, -1
+ beqz a4, 3f
+ addi a10, a10, 1
+#if XCHAL_HAVE_LOOPS
+ beqz a8, 2f
+#else
+ bnez a8, 1b
+#endif
+2: j .Lfill
+
+3:
+#if XTENSA_ESP32_PSRAM_CACHE_FIX
+ memw
+#endif
+ leaf_return
+.end schedule
+
+ .size strncpy, . - strncpy
diff --git a/newlib/libc/machine/xtensa/xtensa-asm.h b/newlib/libc/machine/xtensa/xtensa-asm.h
new file mode 100644
index 000000000..926f3e499
--- /dev/null
+++ b/newlib/libc/machine/xtensa/xtensa-asm.h
@@ -0,0 +1,72 @@
+/* Copyright (c) 2006 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+/* Define macros for leaf function entry and return, supporting either the
+ * standard register windowed ABI or the non-windowed call0 ABI. These
+ * macros do not allocate any extra stack space, so they only work for
+ * leaf functions that do not need to spill anything to the stack. */
+
+#include <xtensa/config/core-isa.h>
+
+ .macro leaf_entry reg, size
+#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
+ entry \reg, \size
+#else
+ /* do nothing */
+#endif
+ .endm
+
+ .macro leaf_return
+#if XCHAL_HAVE_WINDOWED && !__XTENSA_CALL0_ABI__
+ retw
+#else
+ ret
+#endif
+ .endm
+
+ .macro src_b r, w0, w1
+#ifdef __XTENSA_EB__
+ src \r, \w0, \w1
+#else
+ src \r, \w1, \w0
+#endif
+ .endm
+
+ .macro ssa8 r
+#ifdef __XTENSA_EB__
+ ssa8b \r
+#else
+ ssa8l \r
+#endif
+ .endm
+
+#if XCHAL_HAVE_BE
+#define MASK0 0xff000000
+#define MASK1 0x00ff0000
+#define MASK2 0x0000ff00
+#define MASK3 0x000000ff
+#else
+#define MASK0 0x000000ff
+#define MASK1 0x0000ff00
+#define MASK2 0x00ff0000
+#define MASK3 0xff000000
+#endif
+
diff --git a/newlib/libc/machine/xtensa/xtensa.tex b/newlib/libc/machine/xtensa/xtensa.tex
new file mode 100644
index 000000000..1a5bf65e7
--- /dev/null
+++ b/newlib/libc/machine/xtensa/xtensa.tex
@@ -0,0 +1,72 @@
+@node Xtensa
+@chapter Functions for Xtensa Processors
+
+This chapter describes machine-dependent functions that are included
+in the C library when it is built for Xtensa processors.
+
+@menu
+* setjmp:: Save stack environment
+* longjmp:: Non-local goto
+@end menu
+
+@page
+@node setjmp
+@section @code{setjmp}---save stack environment
+@findex setjmp
+@strong{Synopsis}
+@example
+#include <setjmp.h>
+int setjmp(jmp_buf env);
+
+@end example
+@strong{Description}@*
+@code{setjmp} and @code{longjmp} are useful for dealing with errors
+and interrupts encountered in a low-level subroutine of a program.
+@code{setjmp} saves the stack context/environment in @code{env} for
+later use by @code{longjmp}. The stack context will be invalidated if
+the function which called @code{setjmp} returns.
+
+@*
+@strong{Returns}@*
+@code{setjmp} returns 0 if returning directly, and non-zero when
+returning from @code{longjmp} using the saved context.
+
+@*
+@strong{Portability}@*
+@code{setjmp} is ANSI C and POSIX.1.
+
+setjmp requires no supporting OS subroutines.
+
+@*
+@page
+@node longjmp
+@section @code{longjmp}---non-local goto
+@findex longjmp
+@strong{Synopsis}
+@example
+#include <setjmp.h>
+void longjmp(jmp_buf env, int val);
+
+@end example
+@strong{Description}@*
+@code{longjmp} and @code{setjmp} are useful for dealing with errors
+and interrupts encountered in a low-level subroutine of a program.
+@code{longjmp} restores the environment saved by the last call of
+@code{setjmp} with the corresponding @code{env} argument. After
+@code{longjmp} is completed, program execution continues as if the
+corresponding call of @code{setjmp} had just returned the value
+@code{val}. @code{longjmp} cannot cause 0 to be returned. If
+@code{longjmp} is invoked with a second argument of 0, 1 will be
+returned instead.
+
+@*
+@strong{Returns}@*
+This function never returns.
+
+@*
+@strong{Portability}@*
+@code{longjmp} is ANSI C and POSIX.1.
+
+longjmp requires no supporting OS subroutines.
+
+@*
diff --git a/newlib/libc/sys/Makefile.inc b/newlib/libc/sys/Makefile.inc
index 9f8758934..24d8407d5 100644
--- a/newlib/libc/sys/Makefile.inc
+++ b/newlib/libc/sys/Makefile.inc
@@ -64,6 +64,9 @@ endif
if HAVE_LIBC_SYS_W65_DIR
include %D%/w65/Makefile.inc
endif
+if HAVE_LIBC_SYS_XTENSA_DIR
+include %D%/xtensa/Makefile.inc
+endif
if HAVE_LIBC_SYS_Z8KSIM_DIR
include %D%/z8ksim/Makefile.inc
endif
diff --git a/newlib/libc/sys/xtensa/Makefile.inc b/newlib/libc/sys/xtensa/Makefile.inc
new file mode 100644
index 000000000..ddee58ae1
--- /dev/null
+++ b/newlib/libc/sys/xtensa/Makefile.inc
@@ -0,0 +1 @@
+libc_a_SOURCES += %D%/creat.c %D%/isatty.c %D%/clibrary_init.c
diff --git a/newlib/libc/sys/xtensa/clibrary_init.c b/newlib/libc/sys/xtensa/clibrary_init.c
new file mode 100644
index 000000000..24d4cf94c
--- /dev/null
+++ b/newlib/libc/sys/xtensa/clibrary_init.c
@@ -0,0 +1,42 @@
+/* Copyright (c) 2003-2006 Tensilica Inc. ALL RIGHTS RESERVED.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL TENSILICA
+ INCORPORATED BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
+
+#include <stdlib.h>
+#include <unistd.h>
+
+const char * __progname = 0;
+
+void
+__clibrary_init (int argc, char **argv, char **envp,
+ void (*init)(void),
+ void (*fini)(void))
+{
+ if (argv != 0)
+ __progname = *argv;
+ environ = envp;
+ if (init)
+ init ();
+ if (fini)
+ atexit (fini);
+}
diff --git a/newlib/libc/sys/xtensa/creat.c b/newlib/libc/sys/xtensa/creat.c
new file mode 100644
index 000000000..21e6a1874
--- /dev/null
+++ b/newlib/libc/sys/xtensa/creat.c
@@ -0,0 +1,9 @@
+/* creat() "system call" (copied from libc/posix/creat.c) */
+
+#include <fcntl.h>
+
+int
+creat (const char *path, mode_t mode)
+{
+ return open (path, O_WRONLY | O_CREAT | O_TRUNC, mode);
+}
diff --git a/newlib/libc/sys/xtensa/crt0.c b/newlib/libc/sys/xtensa/crt0.c
new file mode 100644
index 000000000..11725dca5
--- /dev/null
+++ b/newlib/libc/sys/xtensa/crt0.c
@@ -0,0 +1,16 @@
+/* Dummy crt0 code. */
+
+/* Copyright (c) 2003 by Tensilica Inc. ALL RIGHTS RESERVED.
+ These coded instructions, statements, and computer programs are the
+ copyrighted works and confidential proprietary information of Tensilica Inc.
+ They may not be modified, copied, reproduced, distributed, or disclosed to
+ third parties in any manner, medium, or form, in whole or in part, without
+ the prior written consent of Tensilica Inc. */
+
+/* Xtensa systems normally use a crt1 file associated with a particular
+ linker support package (LSP). There is no need for this crt0 file,
+ except that the newlib makefiles require it to exist if there is a
+ sys/xtensa directory. The directory exists only to hold the header
+ files for the Xtensa ISS semihosting "platform". */
+
+void crt0_unused (void) {}
diff --git a/newlib/libc/sys/xtensa/include/fenv.h b/newlib/libc/sys/xtensa/include/fenv.h
new file mode 100644
index 000000000..2fa76f758
--- /dev/null
+++ b/newlib/libc/sys/xtensa/include/fenv.h
@@ -0,0 +1,88 @@
+/* Copyright (c) 2011 Tensilica Inc. ALL RIGHTS RESERVED.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
+ TENSILICA INCORPORATED BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+ OF THE POSSIBILITY OF SUCH DAMAGE. */
+
+
+#ifndef _FENV_H
+#define _FENV_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef unsigned long fenv_t;
+typedef unsigned long fexcept_t;
+
+#define FE_DIVBYZERO 0x08
+#define FE_INEXACT 0x01
+#define FE_INVALID 0x10
+#define FE_OVERFLOW 0x04
+#define FE_UNDERFLOW 0x02
+
+#define FE_ALL_EXCEPT \
+ (FE_DIVBYZERO | \
+ FE_INEXACT | \
+ FE_INVALID | \
+ FE_OVERFLOW | \
+ FE_UNDERFLOW)
+
+#define FE_DOWNWARD 0x3
+#define FE_TONEAREST 0x0
+#define FE_TOWARDZERO 0x1
+#define FE_UPWARD 0x2
+
+#define FE_DFL_ENV ((const fenv_t *) 0)
+
+int feclearexcept(int);
+int fegetexceptflag(fexcept_t *, int);
+int feraiseexcept(int);
+int fesetexceptflag(const fexcept_t *, int);
+int fetestexcept(int);
+int fegetround(void);
+int fesetround(int);
+int fegetenv(fenv_t *);
+int feholdexcept(fenv_t *);
+int fesetenv(const fenv_t *);
+int feupdateenv(const fenv_t *);
+
+/* glibc extensions */
+int feenableexcept(int excepts);
+int fedisableexcept(int excepts);
+int fegetexcept(void);
+
+#define _FE_EXCEPTION_FLAGS_OFFSET 7
+#define _FE_EXCEPTION_FLAG_MASK (FE_ALL_EXCEPT << _FE_EXCEPTION_FLAGS_OFFSET)
+#define _FE_EXCEPTION_ENABLE_OFFSET 2
+#define _FE_EXCEPTION_ENABLE_MASK (FE_ALL_EXCEPT << _FE_EXCEPTION_ENABLE_OFFSET)
+#define _FE_ROUND_MODE_OFFSET 0
+#define _FE_ROUND_MODE_MASK (0x3 << _FE_ROUND_MODE_OFFSET)
+#define _FE_FLOATING_ENV_MASK (_FE_EXCEPTION_FLAG_MASK | _FE_EXCEPTION_ENABLE_MASK | _FE_ROUND_MODE_MASK)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/newlib/libc/sys/xtensa/include/unistd.h b/newlib/libc/sys/xtensa/include/unistd.h
new file mode 100644
index 000000000..4f6fd29a4
--- /dev/null
+++ b/newlib/libc/sys/xtensa/include/unistd.h
@@ -0,0 +1,13 @@
+#ifndef _UNISTD_H_
+#define _UNISTD_H_
+
+# include <sys/unistd.h>
+
+#ifndef L_SET
+/* Old BSD names for the same constants; just for compatibility. */
+#define L_SET SEEK_SET
+#define L_INCR SEEK_CUR
+#define L_XTND SEEK_END
+#endif
+
+#endif /* _UNISTD_H_ */
diff --git a/newlib/libc/sys/xtensa/include/xtensa/config/core-isa.h b/newlib/libc/sys/xtensa/include/xtensa/config/core-isa.h
new file mode 100644
index 000000000..2accd411f
--- /dev/null
+++ b/newlib/libc/sys/xtensa/include/xtensa/config/core-isa.h
@@ -0,0 +1,115 @@
+/*
+ * xtensa/config/core-isa.h -- minimum required HAL definitions that are
+ * dependent on Xtensa processor CORE configuration
+ *
+ * See <xtensa/config/core.h>, which includes this file, for more details.
+ */
+
+/* Xtensa processor core configuration information.
+
+ Copyright (c) 1999-2023 Tensilica Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal in the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. */
+
+#ifndef _XTENSA_CORE_CONFIGURATION_H
+#define _XTENSA_CORE_CONFIGURATION_H
+
+#if defined(_LIBC) || defined(_LIBM) || defined(_LIBGLOSS)
+
+/* Macros used to build newlib and libgloss */
+
+#undef XCHAL_HAVE_BE
+#ifdef __XCHAL_HAVE_BE
+#define XCHAL_HAVE_BE __XCHAL_HAVE_BE
+#else
+#define XCHAL_HAVE_BE 0 /* big-endian byte ordering */
+#endif
+
+#undef XCHAL_HAVE_WINDOWED
+#ifdef __XCHAL_HAVE_WINDOWED
+#define XCHAL_HAVE_WINDOWED __XCHAL_HAVE_WINDOWED
+#else
+#define XCHAL_HAVE_WINDOWED 1 /* windowed registers option */
+#endif
+
+#undef XCHAL_NUM_AREGS
+#ifdef __XCHAL_NUM_AREGS
+#define XCHAL_NUM_AREGS __XCHAL_NUM_AREGS
+#else
+#define XCHAL_NUM_AREGS 64 /* num of physical addr regs */
+#endif
+
+#undef XCHAL_HAVE_DENSITY
+#ifdef __XCHAL_HAVE_DENSITY
+#define XCHAL_HAVE_DENSITY __XCHAL_HAVE_DENSITY
+#else
+#define XCHAL_HAVE_DENSITY 1 /* 16-bit instructions */
+#endif
+
+#undef XCHAL_HAVE_LOOPS
+#ifdef __XCHAL_HAVE_LOOPS
+#define XCHAL_HAVE_LOOPS __XCHAL_HAVE_LOOPS
+#else
+#define XCHAL_HAVE_LOOPS 1 /* zero-overhead loops */
+#endif
+
+#undef XCHAL_HAVE_L32R
+#ifdef __XCHAL_HAVE_L32R
+#define XCHAL_HAVE_L32R __XCHAL_HAVE_L32R
+#else
+#define XCHAL_HAVE_L32R 1 /* L32R instruction */
+#endif
+
+#undef XCHAL_HAVE_FP
+#ifdef __XCHAL_HAVE_FP
+#define XCHAL_HAVE_FP __XCHAL_HAVE_FP
+#else
+#define XCHAL_HAVE_FP 1 /* single prec floating point */
+#endif
+
+#undef XCHAL_HAVE_FP_SQRT
+#ifdef __XCHAL_HAVE_FP_SQRT
+#define XCHAL_HAVE_FP_SQRT __XCHAL_HAVE_FP_SQRT
+#else
+#define XCHAL_HAVE_FP_SQRT 1 /* FP with SQRT instructions */
+#endif
+
+#undef XCHAL_HAVE_DFP
+#ifdef __XCHAL_HAVE_DFP
+#define XCHAL_HAVE_DFP __XCHAL_HAVE_DFP
+#else
+#define XCHAL_HAVE_DFP 0 /* double precision FP pkg */
+#endif
+
+#undef XCHAL_INST_FETCH_WIDTH
+#ifdef __XCHAL_INST_FETCH_WIDTH
+#define XCHAL_INST_FETCH_WIDTH __XCHAL_INST_FETCH_WIDTH
+#else
+#define XCHAL_INST_FETCH_WIDTH 4 /* instr-fetch width in bytes */
+#endif
+
+#else /* defined(_LIBC) || defined(_LIBM) || defined(_LIBGLOSS) */
+
+/* Expect that core-isa.h exists in OS/baremetal port */
+#include_next <xtensa/config/core-isa.h>
+
+#endif /* defined(_LIBC) || defined(_LIBM) || defined(_LIBGLOSS) */
+
+#endif /* _XTENSA_CORE_CONFIGURATION_H */
diff --git a/newlib/libc/sys/xtensa/isatty.c b/newlib/libc/sys/xtensa/isatty.c
new file mode 100644
index 000000000..fe64209b9
--- /dev/null
+++ b/newlib/libc/sys/xtensa/isatty.c
@@ -0,0 +1,18 @@
+/* isatty.c */
+
+/* Minimal implementation: a descriptor counts as a tty iff it stats
+   as a character device.  Enough for programs to at least run. */
+
+#include <sys/stat.h>
+#include <reent.h>
+
+int
+_isatty_r (struct _reent *ptr, int fd)
+{
+  struct stat buf;			/* filled in by _fstat_r below */
+
+  if (_fstat_r (ptr, fd, &buf) < 0)	/* stat failed: report "not a tty" */
+    return 0;
+  if (S_ISCHR (buf.st_mode))		/* character device => treat as tty */
+    return 1;
+  return 0;				/* anything else is not a tty */
+}
diff --git a/newlib/libc/sys/xtensa/sys/file.h b/newlib/libc/sys/xtensa/sys/file.h
new file mode 100644
index 000000000..48a2ca06d
--- /dev/null
+++ b/newlib/libc/sys/xtensa/sys/file.h
@@ -0,0 +1,33 @@
+/* Copyright (c) 2005-2006 Tensilica Inc. ALL RIGHTS RESERVED.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+ IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL TENSILICA
+ INCORPORATED BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */
+
+#include <sys/fcntl.h>
+
+/* Alternate names for values for the WHENCE argument to `lseek'.
+ These are the same as SEEK_SET, SEEK_CUR, and SEEK_END, respectively. */
+#ifndef L_SET
+#define L_SET 0 /* Seek from beginning of file. */
+#define L_INCR 1 /* Seek from current position. */
+#define L_XTND 2 /* Seek from end of file. */
+#endif