
cygwin.com/git/newlib-cygwin.git
author     Joern Rennecke <joern.rennecke@embecosm.com>   2002-03-06 13:43:18 +0300
committer  Joern Rennecke <joern.rennecke@embecosm.com>   2002-03-06 13:43:18 +0300
commit     55f26f11945e4cbacfdaff197ff5220ee7417e9f (patch)
tree       6167bc80a49aa118e2594c34a5b1c4d072209453 /newlib/libc/machine/sh
parent     6f6aa16f75b106a2efb470be06c6ccb0a09265cd (diff)
* Makefile.am (lib_a_SOURCES, SH64 case): Add memcpy.S, memset.S
and strcpy.S.
* Makefile.in: Regenerate.
* asm.h (_ENTRY): Set SH5 alignment to 8 bytes.
(SHHI, SHLO): Define.
* memcpy.S: Add code for SH5.
* memset.S: Likewise.
* strcpy.S: Likewise.
Diffstat (limited to 'newlib/libc/machine/sh')
-rw-r--r--   newlib/libc/machine/sh/Makefile.am     2
-rw-r--r--   newlib/libc/machine/sh/Makefile.in     8
-rw-r--r--   newlib/libc/machine/sh/asm.h          10
-rw-r--r--   newlib/libc/machine/sh/memcpy.S      377
-rw-r--r--   newlib/libc/machine/sh/memset.S      112
-rw-r--r--   newlib/libc/machine/sh/strcpy.S      132
6 files changed, 491 insertions(+), 150 deletions(-)
diff --git a/newlib/libc/machine/sh/Makefile.am b/newlib/libc/machine/sh/Makefile.am
index 9cb529347..3cf7f799f 100644
--- a/newlib/libc/machine/sh/Makefile.am
+++ b/newlib/libc/machine/sh/Makefile.am
@@ -7,7 +7,7 @@ INCLUDES = $(NEWLIB_CFLAGS) $(CROSS_CFLAGS) $(TARGET_CFLAGS)
noinst_LIBRARIES = lib.a
if SH64
-lib_a_SOURCES = setjmp.S
+lib_a_SOURCES = memcpy.S memset.S setjmp.S strcpy.S
else
lib_a_SOURCES = memcpy.S memset.S setjmp.S strcpy.S strcmp.S
endif
diff --git a/newlib/libc/machine/sh/Makefile.in b/newlib/libc/machine/sh/Makefile.in
index e6998feec..56dd28f90 100644
--- a/newlib/libc/machine/sh/Makefile.in
+++ b/newlib/libc/machine/sh/Makefile.in
@@ -1,6 +1,6 @@
-# Makefile.in generated automatically by automake 1.4 from Makefile.am
+# Makefile.in generated automatically by automake 1.4-p5 from Makefile.am
-# Copyright (C) 1994, 1995-8, 1999 Free Software Foundation, Inc.
+# Copyright (C) 1994, 1995-8, 1999, 2001 Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
@@ -83,7 +83,7 @@ AUTOMAKE_OPTIONS = cygnus
INCLUDES = $(NEWLIB_CFLAGS) $(CROSS_CFLAGS) $(TARGET_CFLAGS)
noinst_LIBRARIES = lib.a
-@SH64_TRUE@lib_a_SOURCES = @SH64_TRUE@setjmp.S
+@SH64_TRUE@lib_a_SOURCES = @SH64_TRUE@memcpy.S memset.S setjmp.S strcpy.S
@SH64_FALSE@lib_a_SOURCES = @SH64_FALSE@memcpy.S memset.S setjmp.S strcpy.S strcmp.S
ACLOCAL_AMFLAGS = -I ../../..
@@ -98,7 +98,7 @@ DEFS = @DEFS@ -I. -I$(srcdir)
CPPFLAGS = @CPPFLAGS@
LIBS = @LIBS@
lib_a_LIBADD =
-@SH64_TRUE@lib_a_OBJECTS = setjmp.o
+@SH64_TRUE@lib_a_OBJECTS = memcpy.o memset.o setjmp.o strcpy.o
@SH64_FALSE@lib_a_OBJECTS = memcpy.o memset.o setjmp.o strcpy.o \
@SH64_FALSE@strcmp.o
CFLAGS = @CFLAGS@
diff --git a/newlib/libc/machine/sh/asm.h b/newlib/libc/machine/sh/asm.h
index cfdaa812b..2f809afa2 100644
--- a/newlib/libc/machine/sh/asm.h
+++ b/newlib/libc/machine/sh/asm.h
@@ -13,7 +13,7 @@
# endif
# define _ENTRY(name) \
- TEXT; .align 4; .globl name; name:
+ TEXT; .balign 8; .globl name; name:
#else
#define _ENTRY(name) \
.text; .align 2; .globl name; name:
@@ -31,3 +31,11 @@
#define SL(branch, dest, in_slot, in_slot_arg2) \
in_slot, in_slot_arg2; branch dest
#endif
+
+#ifdef __LITTLE_ENDIAN__
+#define SHHI shlld
+#define SHLO shlrd
+#else
+#define SHHI shlrd
+#define SHLO shlld
+#endif
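
The SHHI/SHLO macros added above select the shift direction (shlld vs. shlrd) so that data moves toward the high or low memory-order end of a register regardless of byte order. A rough C equivalent of that selection, assuming 8-byte registers and the compiler-provided __LITTLE_ENDIAN__ macro, purely as an illustration:

    #include <stdint.h>

    /* Shift toward the high/low memory end of an 8-byte register.
       On a little-endian target the byte at the highest address sits in
       the most significant bits, so "toward high memory" is a left shift;
       on big endian it is a right shift.  Mirrors the SHHI/SHLO choice. */
    #ifdef __LITTLE_ENDIAN__
    #define SHHI(x, bits) ((uint64_t)(x) << (bits))
    #define SHLO(x, bits) ((uint64_t)(x) >> (bits))
    #else
    #define SHHI(x, bits) ((uint64_t)(x) >> (bits))
    #define SHLO(x, bits) ((uint64_t)(x) << (bits))
    #endif

The memset and strcpy code further down uses these macros to mask off the bytes that fall outside the region of interest.
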
diff --git a/newlib/libc/machine/sh/memcpy.S b/newlib/libc/machine/sh/memcpy.S
index 4df72e371..791bc8b10 100644
--- a/newlib/libc/machine/sh/memcpy.S
+++ b/newlib/libc/machine/sh/memcpy.S
@@ -2,14 +2,15 @@
! Fast SH memcpy
!
! by Toshiyasu Morita (tm@netcom.com)
-! hacked by J"orn Rernnecke (amylaar@cygnus.co.uk) ("o for o-umlaut)
+! hacked by J"orn Rernnecke (joern.rennecke@superh.com) ("o for o-umlaut)
+! SH5 code Copyright 2002 SuperH Ltd.
!
-! Entry: r4: destination pointer
-! r5: source pointer
-! r6: byte count
+! Entry: ARG0: destination pointer
+! ARG1: source pointer
+! ARG3: byte count
!
-! Exit: r0: destination pointer
-! r1-r7: trashed
+! Exit: RESULT: destination pointer
+! any other registers in the range r0-r7: trashed
!
! Notes: Usually one wants to do small reads and write a longword, but
! unfortunately it is difficult in some cases to concatanate bytes
@@ -23,199 +24,371 @@
! be copied. This could be easily swapped for a signed comparison,
! but the algorithm used needs some comparison.
!
-! 2.: When there are two or three bytes in the last word of an 11-or-bore
+! 2.: When there are two or three bytes in the last word of an 11-or-more
! bytes memory chunk to b copied, the rest of the word can be read
-! without size effects.
+! without side effects.
! This could be easily changed by increasing the minumum size of
! a fast memcpy and the amount subtracted from r7 before L_2l_loop be 2,
! however, this would cost a few extra cyles on average.
+! For SHmedia, the assumption is that any quadword can be read in its
+! enirety if at least one byte is included in the copy.
!
#include "asm.h"
ENTRY(memcpy)
+
+#if __SHMEDIA__
+
+#define LDUAQ(P,O,D0,D1) ldlo.q P,O,D0; ldhi.q P,O+7,D1
+#define STUAQ(P,O,D0,D1) stlo.q P,O,D0; sthi.q P,O+7,D1
+#define LDUAL(P,O,D0,D1) ldlo.l P,O,D0; ldhi.l P,O+3,D1
+#define STUAL(P,O,D0,D1) stlo.l P,O,D0; sthi.l P,O+3,D1
+
+ ld.b r3,0,r63
+ pta/l Large,tr0
+ movi 25,r0
+ bgeu/u r4,r0,tr0
+ nsb r4,r0
+ shlli r0,5,r0
+ movi (L1-L0+63*32 + 1) & 0xffff,r1
+ sub r1, r0, r0
+L0: ptrel r0,tr0
+ add r2,r4,r5
+ ptabs r18,tr1
+ add r3,r4,r6
+ blink tr0,r63
+
+ .balign 8
+L1:
+ /* 0 byte memcpy */
+ blink tr1,r63
+
+L4_7: /* 4..7 byte memcpy cntd. */
+ stlo.l r2, 0, r0
+ or r6, r7, r6
+ sthi.l r5, -1, r6
+ stlo.l r5, -4, r6
+ blink tr1,r63
+
+L2_3: /* 2 or 3 byte memcpy cntd. */
+ st.b r5,-1,r6
+ blink tr1,r63
+
+ /* 1 byte memcpy */
+ ld.b r3,0,r0
+ st.b r2,0,r0
+ blink tr1,r63
+
+L8_15: /* 8..15 byte memcpy cntd. */
+ stlo.q r2, 0, r0
+ or r6, r7, r6
+ sthi.q r5, -1, r6
+ stlo.q r5, -8, r6
+ blink tr1,r63
+
+ /* 2 or 3 byte memcpy */
+ ld.b r3,0,r0
+ ld.b r2,0,r63
+ ld.b r3,1,r1
+ st.b r2,0,r0
+ pta/l L2_3,tr0
+ ld.b r6,-1,r6
+ st.b r2,1,r1
+ blink tr0, r63
+
+ /* 4 .. 7 byte memcpy */
+ LDUAL (r3, 0, r0, r1)
+ pta L4_7, tr0
+ ldlo.l r6, -4, r7
+ or r0, r1, r0
+ sthi.l r2, 3, r0
+ ldhi.l r6, -1, r6
+ blink tr0, r63
+
+ /* 8 .. 15 byte memcpy */
+ LDUAQ (r3, 0, r0, r1)
+ pta L8_15, tr0
+ ldlo.q r6, -8, r7
+ or r0, r1, r0
+ sthi.q r2, 7, r0
+ ldhi.q r6, -1, r6
+ blink tr0, r63
+
+ /* 16 .. 24 byte memcpy */
+ LDUAQ (r3, 0, r0, r1)
+ LDUAQ (r3, 8, r8, r9)
+ or r0, r1, r0
+ sthi.q r2, 7, r0
+ or r8, r9, r8
+ sthi.q r2, 15, r8
+ ldlo.q r6, -8, r7
+ ldhi.q r6, -1, r6
+ stlo.q r2, 8, r8
+ stlo.q r2, 0, r0
+ or r6, r7, r6
+ sthi.q r5, -1, r6
+ stlo.q r5, -8, r6
+ blink tr1,r63
+
+Large:
+ ld.b r2, 0, r63
+ pta/l Loop_ua, tr1
+ andi r3, 7, r7
+ sub r2, r7, r22
+ sub r3, r2, r6
+ add r2, r4, r5
+ ldlo.q r3, 0, r0
+ addi r5, -16, r5
+ movi 64+8, r27 // could subtract r7 from that.
+ stlo.q r2, 0, r0
+ ldx.q r22, r6, r0
+ bgtu/l r27, r4, tr1
+
+ addi r5, -48, r27
+ pta/l Loop_line, tr0
+ addi r6, 64, r36
+ addi r6, -24, r19
+ addi r6, -16, r20
+ addi r6, -8, r21
+
+Loop_line:
+ ldx.q r22, r36, r63
+ alloco r22, 32
+ addi r22, 32, r22
+ ldx.q r22, r19, r23
+ sthi.q r22, -25, r0
+ ldx.q r22, r20, r24
+ ldx.q r22, r21, r25
+ stlo.q r22, -32, r0
+ ldx.q r22, r6, r0
+ sthi.q r22, -17, r23
+ sthi.q r22, -9, r24
+ sthi.q r22, -1, r25
+ stlo.q r22, -24, r23
+ stlo.q r22, -16, r24
+ stlo.q r22, -8, r25
+ bgeu r27, r22, tr0
+
+Loop_ua:
+ addi r22, 8, r22
+ sthi.q r22, -1, r0
+ stlo.q r22, -8, r0
+ ldx.q r22, r6, r0
+ bgtu/l r5, r22, tr1
+
+ add r3, r4, r7
+ ldlo.q r7, -8, r1
+ sthi.q r22, 7, r0
+ ldhi.q r7, -1, r7
+ ptabs r18,tr1
+ stlo.q r22, 0, r0
+ or r1, r7, r1
+ sthi.q r5, 15, r1
+ stlo.q r5, 8, r1
+ blink tr1, r63
+
+#else /* ! SHMEDIA, i.e. SH1 .. SH4 / SHcompact */
+
+#ifdef __SH5__
+#define DST r2
+#define SRC r3
+#define COUNT r4
+#define TMP0 r5
+#define TMP1 r6
+#define RESULT r2
+#else
+#define DST r4
+#define SRC r5
+#define COUNT r6
+#define TMP0 r2
+#define TMP1 r3
+#define RESULT r0
+#endif
+
#ifdef __LITTLE_ENDIAN__
! Little endian version copies with increasing addresses.
- mov r4,r3 ! Save return value
+ mov DST,TMP1 ! Save return value
mov #11,r0 ! Check if small number of bytes
- cmp/hs r0,r6
- ! r6 becomes src end address
- SL(bf, L_small, add r5,r6)
+ cmp/hs r0,COUNT
+ ! COUNT becomes src end address
+ SL(bf, L_small, add SRC,COUNT)
mov #1,r1
- tst r1,r5 ! check if source even
- SL(bt, L_even, mov r6,r7)
- mov.b @r5+,r0 ! no, make it even.
- mov.b r0,@r4
- add #1,r4
-L_even: tst r1,r4 ! check if destination is even
+ tst r1,SRC ! check if source even
+ SL(bt, L_even, mov COUNT,r7)
+ mov.b @SRC+,r0 ! no, make it even.
+ mov.b r0,@DST
+ add #1,DST
+L_even: tst r1,DST ! check if destination is even
add #-3,r7
SL(bf, L_odddst, mov #2,r1)
- tst r1,r4 ! check if destination is 4-byte aligned
- mov r4,r0
- SL(bt, L_al4dst, sub r5,r0)
- mov.w @r5+,r2
- mov.w r2,@r4
- ! add #2,r4 r4 is dead here.
+ tst r1,DST ! check if destination is 4-byte aligned
+ mov DST,r0
+ SL(bt, L_al4dst, sub SRC,r0)
+ mov.w @SRC+,TMP0
+ mov.w TMP0,@DST
+ ! add #2,DST DST is dead here.
L_al4dst:
- tst r1,r5
+ tst r1,SRC
bt L_al4both
- mov.w @r5+,r1
+ mov.w @SRC+,r1
swap.w r1,r1
add #-6,r0
add #-6,r7 ! r7 := src end address minus 9.
.align 2
L_2l_loop:
- mov.l @r5+,r2 ! Read & write two longwords per iteration
- xtrct r2,r1
- mov.l r1,@(r0,r5)
- cmp/hs r7,r5
- mov.l @r5+,r1
- xtrct r1,r2
- mov.l r2,@(r0,r5)
+ mov.l @SRC+,TMP0 ! Read & write two longwords per iteration
+ xtrct TMP0,r1
+ mov.l r1,@(r0,SRC)
+ cmp/hs r7,SRC
+ mov.l @SRC+,r1
+ xtrct r1,TMP0
+ mov.l TMP0,@(r0,SRC)
bf L_2l_loop
- add #-2,r5
+ add #-2,SRC
bra L_cleanup
add #5,r0
L_al4both:
add #-4,r0
.align 2
L_al4both_loop:
- mov.l @r5+,r4 ! Read longword, write longword per iteration
- cmp/hs r7,r5
- SL(bf, L_al4both_loop, mov.l r4,@(r0,r5))
+ mov.l @SRC+,DST ! Read longword, write longword per iteration
+ cmp/hs r7,SRC
+ SL(bf, L_al4both_loop, mov.l DST,@(r0,SRC))
bra L_cleanup
add #3,r0
L_odddst:
- tst r1,r5
- SL(bt, L_al4src, add #-1,r4)
- mov.w @r5+,r0
- mov.b r0,@(1,r4)
+ tst r1,SRC
+ SL(bt, L_al4src, add #-1,DST)
+ mov.w @SRC+,r0
+ mov.b r0,@(1,DST)
shlr8 r0
- mov.b r0,@(2,r4)
- add #2,r4
+ mov.b r0,@(2,DST)
+ add #2,DST
L_al4src:
.align 2
L_odd_loop:
- mov.l @r5+,r0 ! Read longword, write byte, word, byte per iteration
- cmp/hs r7,r5
- mov.b r0,@(1,r4)
+ mov.l @SRC+,r0 ! Read longword, write byte, word, byte per iteration
+ cmp/hs r7,SRC
+ mov.b r0,@(1,DST)
shlr8 r0
- mov.w r0,@(2,r4)
+ mov.w r0,@(2,DST)
shlr16 r0
- mov.b r0,@(4,r4)
- SL(bf, L_odd_loop, add #4,r4)
+ mov.b r0,@(4,DST)
+ SL(bf, L_odd_loop, add #4,DST)
.align 2 ! avoid nop in more frequently executed code.
L_cleanup2:
- mov r4,r0
- sub r5,r0
+ mov DST,r0
+ sub SRC,r0
L_cleanup:
- cmp/eq r6,r5
+ cmp/eq COUNT,SRC
bt L_ready
.align 2
L_cleanup_loop:
- mov.b @r5+,r1
- cmp/eq r6,r5
- mov.b r1,@(r0,r5)
+ mov.b @SRC+,r1
+ cmp/eq COUNT,SRC
+ mov.b r1,@(r0,SRC)
bf L_cleanup_loop
L_ready:
rts
- mov r3,r0
+ mov TMP1,RESULT
L_small:
bra L_cleanup2
- add #-1,r4
-#else
+ add #-1,DST
+#else /* ! __LITTLE_ENDIAN__ */
! Big endian version copies with decreasing addresses.
- mov r4,r0
- add r6,r0
- sub r4,r5
+ mov DST,r0
+ add COUNT,r0
+ sub DST,SRC
mov #11,r1
- cmp/hs r1,r6
- SL(bf, L_small, add #-1,r5)
- mov r5,r3
- add r0,r3
- shlr r3
+ cmp/hs r1,COUNT
+ SL(bf, L_small, add #-1,SRC)
+ mov SRC,TMP1
+ add r0,TMP1
+ shlr TMP1
SL(bt, L_even,
- mov r4,r7)
- mov.b @(r0,r5),r2
- add #-1,r3
- mov.b r2,@-r0
+ mov DST,r7)
+ mov.b @(r0,SRC),TMP0
+ add #-1,TMP1
+ mov.b TMP0,@-r0
L_even:
tst #1,r0
- add #-1,r5
+ add #-1,SRC
SL(bf, L_odddst, add #8,r7)
tst #2,r0
bt L_al4dst
- add #-1,r3
- mov.w @(r0,r5),r1
+ add #-1,TMP1
+ mov.w @(r0,SRC),r1
mov.w r1,@-r0
L_al4dst:
- shlr r3
+ shlr TMP1
bt L_al4both
- mov.w @(r0,r5),r1
+ mov.w @(r0,SRC),r1
swap.w r1,r1
add #4,r7
- add #-4,r5
+ add #-4,SRC
.align 2
L_2l_loop:
- mov.l @(r0,r5),r2
- xtrct r2,r1
+ mov.l @(r0,SRC),TMP0
+ xtrct TMP0,r1
mov.l r1,@-r0
cmp/hs r7,r0
- mov.l @(r0,r5),r1
- xtrct r1,r2
- mov.l r2,@-r0
+ mov.l @(r0,SRC),r1
+ xtrct r1,TMP0
+ mov.l TMP0,@-r0
bt L_2l_loop
bra L_cleanup
- add #5,r5
+ add #5,SRC
nop ! avoid nop in executed code.
L_al4both:
- add #-2,r5
+ add #-2,SRC
.align 2
L_al4both_loop:
- mov.l @(r0,r5),r1
+ mov.l @(r0,SRC),r1
cmp/hs r7,r0
SL(bt, L_al4both_loop,
mov.l r1,@-r0)
bra L_cleanup
- add #3,r5
+ add #3,SRC
nop ! avoid nop in executed code.
L_odddst:
- shlr r3
+ shlr TMP1
bt L_al4src
- mov.w @(r0,r5),r1
+ mov.w @(r0,SRC),r1
mov.b r1,@-r0
shlr8 r1
mov.b r1,@-r0
L_al4src:
- add #-2,r5
+ add #-2,SRC
.align 2
L_odd_loop:
- mov.l @(r0,r5),r2
+ mov.l @(r0,SRC),TMP0
cmp/hs r7,r0
- mov.b r2,@-r0
- shlr8 r2
- mov.w r2,@-r0
- shlr16 r2
- mov.b r2,@-r0
+ mov.b TMP0,@-r0
+ shlr8 TMP0
+ mov.w TMP0,@-r0
+ shlr16 TMP0
+ mov.b TMP0,@-r0
bt L_odd_loop
- add #3,r5
+ add #3,SRC
L_cleanup:
L_small:
- cmp/eq r4,r0
+ cmp/eq DST,r0
bt L_ready
- add #1,r4
+ add #1,DST
.align 2
L_cleanup_loop:
- mov.b @(r0,r5),r2
- cmp/eq r4,r0
- mov.b r2,@-r0
+ mov.b @(r0,SRC),TMP0
+ cmp/eq DST,r0
+ mov.b TMP0,@-r0
bf L_cleanup_loop
L_ready:
rts
- nop
-#endif
+ mov r0,RESULT
+#endif /* ! __LITTLE_ENDIAN__ */
+#endif /* ! SHMEDIA */
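
A note on the unaligned handling in the memcpy code above: the SHmedia path pairs ldlo.q/ldhi.q (see the LDUAQ/STUAQ macros) to fetch the two aligned quadwords that straddle an unaligned address, while the SHcompact path does the equivalent for longwords with xtrct in L_2l_loop. A minimal C sketch of the underlying merge, assuming little-endian byte order, 32-bit words, and that both aligned loads are permitted (the function name is only for illustration):

    #include <stdint.h>

    /* Reconstruct one 32-bit value that starts "off" bytes (0..3) into
       the aligned word "lo"; "hi" is the next aligned word.  This is the
       shift-based equivalent of what ldlo/ldhi or xtrct provide. */
    static uint32_t merge_unaligned(uint32_t lo, uint32_t hi, unsigned off)
    {
        if (off == 0)
            return lo;                            /* already aligned */
        return (lo >> (8 * off)) | (hi << (32 - 8 * off));
    }

On SHmedia the ldlo.q/ldhi.q pair already deposits the bytes in the right lanes, so the merge collapses to a single or; the sketch only spells out what that saves.
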
diff --git a/newlib/libc/machine/sh/memset.S b/newlib/libc/machine/sh/memset.S
index 7352b4141..6f1f396d7 100644
--- a/newlib/libc/machine/sh/memset.S
+++ b/newlib/libc/machine/sh/memset.S
@@ -3,6 +3,71 @@
!
! by Toshiyasu Morita (tm@netcom.com)
!
+! SH5 code by J"orn Rennecke (joern.rennecke@superh.com)
+! Copyright 2002 SuperH Ltd.
+!
+
+#include "asm.h"
+
+ENTRY(memset)
+#if __SHMEDIA__
+ pta/l multiquad, tr0
+ ptabs r18, tr2
+ mshflo.b r3,r3,r3
+ mperm.w r3, r63, r3
+
+ andi r2, 7, r22
+ add r4, r22, r23
+ shlri r23, 3, r24
+ bnei/u r24, 0, tr0
+
+ ldlo.q r2, 0, r7
+ shlli r4, 3, r4
+ movi -1, r8
+ SHHI r8, r4, r8
+ mcmv r7, r8, r22
+ stlo.q r2, 0, r22
+ blink tr2, r63
+
+multiquad:
+ pta/l lastquad, tr0
+ stlo.q r2, 0, r3
+ add r2, r4, r5
+ beqi/u r24, 1, tr0 // lastquad
+ pta/l loop, tr1
+ sub r2, r22, r25
+ andi r5, -8, r20 // calculate end address and
+ addi r20, -7*8, r8 // loop end address; This might overflow, so we need
+ movi 8, r9 // to use a different test before we start the loop
+ bge/u r24, r9, tr1 // loop
+ st.q r25, 8, r3
+ st.q r20, -8, r3
+ shlri r24, 1, r24
+ beqi/u r24, 1, tr0 // lastquad
+ st.q r25, 16, r3
+ st.q r20, -16, r3
+ beqi/u r24, 2, tr0 // lastquad
+ st.q r25, 24, r3
+ st.q r20, -24, r3
+lastquad:
+ sthi.q r5, -1, r3
+ blink tr2,r63
+
+loop:
+ alloco r25, 32
+ st.q r25, 8, r3
+ st.q r25, 16, r3
+ st.q r25, 24, r3
+ st.q r25, 32, r3
+ addi r25, 32, r25
+ bgeu/l r8, r25, tr1
+
+ st.q r20, -24, r3
+ st.q r20, -16, r3
+ st.q r20, -8, r3
+ sthi.q r5, -1, r3
+ blink tr2,r63
+#else /* ! SHMEDIA, i.e. SH1 .. SH4 / SHcompact */
! Entry: r4: destination pointer
! r5: fill value
! r6: byte count
@@ -14,51 +79,60 @@
! reserved - usually by the linker script. Otherwise, we would had to check
! for the case of objects of the size 12..15 at address 0..3 .
-#include "asm.h"
+#ifdef __SH5__
+#define DST r2
+#define VAL r3
+#define CNT r4
+#define TMP r5
+#else
+#define DST r4
+#define VAL r5
+#define CNT r6
+#define TMP r2
+#endif
-ENTRY(memset)
mov #12,r0 ! Check for small number of bytes
- cmp/gt r6,r0
- mov r4,r0
- SL(bt, L_store_byte_loop_check0, add r4,r6)
+ cmp/gt CNT,r0
+ mov DST,r0
+ SL(bt, L_store_byte_loop_check0, add DST,CNT)
tst #3,r0 ! Align destination
SL(bt, L_dup_bytes, extu.b r5,r5)
.balignw 4,0x0009
L_align_loop:
- mov.b r5,@r0
+ mov.b VAL,@r0
add #1,r0
tst #3,r0
bf L_align_loop
L_dup_bytes:
- swap.b r5,r2 ! Duplicate bytes across longword
- or r2,r5
- swap.w r5,r2
- or r2,r5
+ swap.b VAL,TMP ! Duplicate bytes across longword
+ or TMP,VAL
+ swap.w VAL,TMP
+ or TMP,VAL
- add #-16,r6
+ add #-16,CNT
.balignw 4,0x0009
L_store_long_loop:
- mov.l r5,@r0 ! Store double longs to memory
- cmp/hs r6,r0
- mov.l r5,@(4,r0)
+ mov.l VAL,@r0 ! Store double longs to memory
+ cmp/hs CNT,r0
+ mov.l VAL,@(4,r0)
SL(bf, L_store_long_loop, add #8,r0)
- add #16,r6
+ add #16,CNT
L_store_byte_loop_check0:
- cmp/eq r6,r0
+ cmp/eq CNT,r0
bt L_exit
.balignw 4,0x0009
L_store_byte_loop:
- mov.b r5,@r0 ! Store bytes to memory
+ mov.b VAL,@r0 ! Store bytes to memory
add #1,r0
- cmp/eq r6,r0
+ cmp/eq CNT,r0
bf L_store_byte_loop
L_exit:
rts
mov r4,r0
-
+#endif /* ! SHMEDIA */
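
The memset code above first replicates the fill byte across a whole register (mshflo.b/mperm.w on SHmedia, the swap.b/swap.w pair in the SHcompact L_dup_bytes sequence) so the inner loops can issue full-width stores. A short C sketch of that replication step for a 32-bit word, with a purely illustrative function name:

    #include <stdint.h>

    /* Spread the byte c into every byte of a 32-bit word, the effect the
       swap.b/swap.w sequence achieves (SHmedia does the same for 64 bits
       with mshflo.b/mperm.w). */
    static uint32_t spread_byte(unsigned char c)
    {
        uint32_t v = c;      /* 0x000000cc */
        v |= v << 8;         /* 0x0000cccc */
        v |= v << 16;        /* 0xcccccccc */
        return v;
    }

The ends of the region are then finished with byte stores on SHcompact, while the SHmedia path uses stlo.q/sthi.q partial stores and so avoids the byte-by-byte alignment loop the SHcompact version needs.
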
diff --git a/newlib/libc/machine/sh/strcpy.S b/newlib/libc/machine/sh/strcpy.S
index 63aa7e75b..aab421007 100644
--- a/newlib/libc/machine/sh/strcpy.S
+++ b/newlib/libc/machine/sh/strcpy.S
@@ -1,41 +1,127 @@
-! Entry: r4: destination
-! r5: source
-! Exit: r0: destination
-! r1-r2,r5: clobbered
+! Entry: arg0: destination
+! arg1: source
+! Exit: result: destination
+!
+! SH5 code Copyright 2002 SuperH Ltd.
#include "asm.h"
ENTRY(strcpy)
- mov r4,r0
- or r5,r0
+
+#if __SHMEDIA__
+
+ pta/l shortstring,tr1
+ ldlo.q r3,0,r4
+ ptabs r18,tr4
+ shlli r3,3,r7
+ add r2,r63,r0
+ mcmpeq.b r4,r63,r6
+ SHHI r6,r7,r6
+ bnei/u r6,0,tr1 // shortstring
+ pta/l no_lddst, tr2
+ ori r3,-8,r23
+ sub r2, r23, r0
+ sub r3, r2, r21
+ addi r21, 8, r20
+ ldx.q r0, r21, r5
+ pta/l loop, tr0
+ ori r2,-8,r22
+ mcmpeq.b r5, r63, r6
+ bge/u r22, r23, tr2 // no_lddst
+
+ // r22 < r23 ; Need to do a load from the destination.
+ ldlo.q r2, 0, r9
+ movi -1, r8
+ SHLO r8, r7, r8
+ mcmv r4, r8, r9
+ stlo.q r2, 0, r9
+ beqi/l r6, 0, tr0 // loop
+
+ add r5, r63, r4
+ blink tr1, r63 // shortstring
+no_lddst:
+ stlo.q r2, 0, r4
+ SHHI r4, r7, r4
+ sthi.q r0, -1, r4
+ beqi/l r6, 0, tr0 // loop
+
+ add r5, r63, r4
+shortstring:
+#ifndef LITTLE_ENDIAN
+ pta/l shortstring2,tr1
+ byterev r4,r4
+#endif
+shortstring2:
+ st.b r0,0,r4
+ andi r4,0xff,r5
+ shlri r4,8,r4
+ addi r0,1,r0
+ bnei/l r5,0,tr1
+ blink tr4,r63 // return
+
+ .balign 8
+loop:
+ stlo.q r0, 0, r5
+ ldx.q r0, r20, r4
+ addi r0, 16, r0
+ sthi.q r0, -9, r5
+ mcmpeq.b r4, r63, r6
+ bnei/u r6, 0, tr1 // shortstring
+ ldx.q r0, r21, r5
+ stlo.q r0, -8, r4
+ sthi.q r0, -1, r4
+ mcmpeq.b r5, r63, r6
+ beqi/l r6, 0, tr0 // loop
+
+ add r5, r63, r4
+ blink tr1, r63 // shortstring
+
+#else /* ! __SHMEDIA__, i.e. SH 1..4 / SHcompact */
+
+#ifdef __SH5__
+#define DST r2
+#define SRC r3
+#define TMP r4
+#define RESULT R2
+! r0,r1,r3,r4: clobbered
+#else
+#define DST r4
+#define SRC r5
+#define TMP r2
+#define RESULT r0
+! r1-r2,r5: clobbered
+#endif
+ mov DST,r0
+ or SRC,r0
tst #3,r0
- SL(bf, L_setup_char_loop, mov r4,r0)
- mov.l @r5+,r1
- mov #0,r2
- cmp/str r2,r1
- SL(bt, Longword_loop_end, sub r5,r0)
+ SL(bf, L_setup_char_loop, mov DST,r0)
+ mov.l @SRC+,r1
+ mov #0,TMP
+ cmp/str TMP,r1
+ SL(bt, Longword_loop_end, sub SRC,r0)
.align 2
Longword_loop:
- mov.l r1,@(r0,r5)
- mov.l @r5+,r1
- cmp/str r2,r1
+ mov.l r1,@(r0,SRC)
+ mov.l @SRC+,r1
+ cmp/str TMP,r1
bt Longword_loop_end
- mov.l r1,@(r0,r5)
- mov.l @r5+,r1
- cmp/str r2,r1
+ mov.l r1,@(r0,SRC)
+ mov.l @SRC+,r1
+ cmp/str TMP,r1
bf Longword_loop
Longword_loop_end:
- add #-4,r5
+ add #-4,SRC
add #3,r0
.align 2
L_char_loop:
- mov.b @r5+,r1
+ mov.b @SRC+,r1
L_char_loop_start:
tst r1,r1
- SL(bf, L_char_loop, mov.b r1,@(r0,r5))
+ SL(bf, L_char_loop, mov.b r1,@(r0,SRC))
rts
- mov r4,r0
+ mov DST,RESULT
L_setup_char_loop:
- mov.b @r5+,r1
+ mov.b @SRC+,r1
bra L_char_loop_start
- sub r5,r0
+ sub SRC,r0
+#endif /* ! __SHMEDIA__ */
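
Both strcpy variants above rely on detecting a zero byte inside a full register in one step: cmp/str in the SHcompact longword loop sets T as soon as any byte of its two operands is equal (here, equal to zero), and mcmpeq.b against r63 on SHmedia yields a per-byte match mask. A C sketch of the well-known word-at-a-time test with the same effect, for 32-bit words; the helper name and constants are illustrative only:

    #include <stdint.h>

    /* Nonzero iff some byte of v is 0x00.  A zero byte underflows when
       0x01 is subtracted, setting its top bit; & ~v discards bytes whose
       top bit was already set in the original value. */
    static int has_zero_byte(uint32_t v)
    {
        return ((v - 0x01010101u) & ~v & 0x80808080u) != 0;
    }

Once a longword (or quadword) is known to contain the terminator, both versions fall back to a byte loop (L_char_loop / shortstring) to copy the tail exactly up to the NUL.
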