cygwin.com/git/newlib-cygwin.git
author	Steve Ellcey <sellcey@mips.com>	2015-03-03 22:49:00 +0300
committer	Steve Ellcey <sellcey@mips.com>	2015-03-03 22:49:00 +0300
commit	bc860c9894f8b686581b40e9bc08e8bff1417c3f (patch)
tree	4d0cd8036a394836a463376f53ab277386cef119 /newlib
parent	b8cd02f65af462ca209f90f18e6c07abf33d807e (diff)
* libc/machine/mips/memset.S: Add support for mips32r6/mips64r6.
Diffstat (limited to 'newlib')
-rw-r--r--	newlib/ChangeLog	4
-rw-r--r--	newlib/libc/machine/mips/memset.S	58
2 files changed, 62 insertions, 0 deletions
diff --git a/newlib/ChangeLog b/newlib/ChangeLog
index b499904f7..b23b1ea9e 100644
--- a/newlib/ChangeLog
+++ b/newlib/ChangeLog
@@ -1,5 +1,9 @@
2015-03-03 Steve Ellcey <sellcey@imgtec.com>
+ * libc/machine/mips/memset.S: Add support for mips32r6/mips64r6.
+
+2015-03-03 Steve Ellcey <sellcey@imgtec.com>
+
* libc/machine/mips/memcpy.S: Add support for mips32r6/mips64r6.
2015-02-26 Steve Ellcey <sellcey@imgtec.com>
diff --git a/newlib/libc/machine/mips/memset.S b/newlib/libc/machine/mips/memset.S
index bef872b03..565fc7e30 100644
--- a/newlib/libc/machine/mips/memset.S
+++ b/newlib/libc/machine/mips/memset.S
@@ -84,6 +84,15 @@
# endif
#endif
+/* New R6 instructions that may not be in asm.h. */
+#ifndef PTR_LSA
+# if _MIPS_SIM == _ABI64
+# define PTR_LSA dlsa
+# else
+# define PTR_LSA lsa
+# endif
+#endif
+
/* Using PREFETCH_HINT_PREPAREFORSTORE instead of PREFETCH_STORE
or PREFETCH_STORE_STREAMED offers a large performance advantage
but PREPAREFORSTORE has some special restrictions to consider.
@@ -154,6 +163,14 @@
# define PREFETCH_FOR_STORE(offset, reg)
#endif
+#if __mips_isa_rev > 5
+# if (PREFETCH_STORE_HINT == PREFETCH_HINT_PREPAREFORSTORE)
+# undef PREFETCH_STORE_HINT
+# define PREFETCH_STORE_HINT PREFETCH_HINT_STORE_STREAMED
+# endif
+# define R6_CODE
+#endif
+
/* Allow the routine to be named something else if desired. */
#ifndef MEMSET_NAME
# define MEMSET_NAME memset
@@ -243,11 +260,48 @@ LEAF(MEMSET_NAME)
/* If the destination address is not aligned do a partial store to get it
aligned. If it is already aligned just jump to L(aligned). */
L(set0):
+#ifndef R6_CODE
andi t2,a3,(NSIZE-1) /* word-unaligned address? */
beq t2,zero,L(aligned) /* t2 is the unalignment count */
PTR_SUBU a2,a2,t2
C_STHI a1,0(a0)
PTR_ADDU a0,a0,t2
+#else /* R6_CODE */
+ andi t2,a0,(NSIZE-1)
+ lapc t9,L(atable)
+ PTR_LSA t9,t2,t9,2
+ jrc t9
+L(atable):
+ bc L(aligned)
+# ifdef USE_DOUBLE
+ bc L(lb7)
+ bc L(lb6)
+ bc L(lb5)
+ bc L(lb4)
+# endif
+ bc L(lb3)
+ bc L(lb2)
+ bc L(lb1)
+L(lb7):
+ sb a1,6(a0)
+L(lb6):
+ sb a1,5(a0)
+L(lb5):
+ sb a1,4(a0)
+L(lb4):
+ sb a1,3(a0)
+L(lb3):
+ sb a1,2(a0)
+L(lb2):
+ sb a1,1(a0)
+L(lb1):
+ sb a1,0(a0)
+
+ li t9,NSIZE
+ subu t2,t9,t2
+ PTR_SUBU a2,a2,t2
+ PTR_ADDU a0,a0,t2
+#endif /* R6_CODE */
L(aligned):
/* If USE_DOUBLE is not set we may still want to align the data on a 16
@@ -298,8 +352,12 @@ L(loop16w):
bgtz v1,L(skip_pref)
nop
#endif
+#ifndef R6_CODE
PREFETCH_FOR_STORE (4, a0)
PREFETCH_FOR_STORE (5, a0)
+#else
+ PREFETCH_FOR_STORE (2, a0)
+#endif
L(skip_pref):
C_ST a1,UNIT(0)(a0)
C_ST a1,UNIT(1)(a0)
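
Note (not part of the commit): the R6 path above cannot use the pre-R6 unaligned-store instructions (C_STHI expands to swl/sdl, which were removed in MIPS R6), so it aligns the destination through a table of compact branches instead: lapc loads the PC-relative address of L(atable), PTR_LSA (lsa/dlsa, rd = (rs << sa) + rt) indexes the table by the misalignment t2 scaled by 4, and jrc jumps into the fall-through chain of sb stores, which writes exactly NSIZE - t2 prefix bytes before the count and pointer are adjusted. The C sketch below shows the same align-then-wide-store idea at a high level; the function name, types, and structure are hypothetical illustrations, not the newlib implementation.

/* Illustrative sketch, not newlib code: a C-level view of the align-then-fill
   strategy used by the assembly above.  NSIZE mirrors the assembly macro
   (4 bytes, or 8 when USE_DOUBLE); memset_sketch is a hypothetical name.  */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define NSIZE sizeof(uintptr_t)

void *memset_sketch(void *dst, int c, size_t n)
{
    unsigned char *p = dst;
    uintptr_t pattern = (unsigned char)c;
    size_t misalign = (uintptr_t)p & (NSIZE - 1);

    /* Replicate the fill byte across a full word, as the assembly builds a1.  */
    for (size_t i = 1; i < NSIZE; i <<= 1)
        pattern |= pattern << (8 * i);

    /* Prefix: store just enough bytes to reach an NSIZE boundary.  The R6
       assembly reaches the equivalent sb chain through the L(atable) branch
       table (lapc / PTR_LSA / jrc) instead of using swl/sdl.  */
    if (misalign != 0 && n >= NSIZE) {
        size_t prefix = NSIZE - misalign;
        n -= prefix;
        while (prefix--)
            *p++ = (unsigned char)c;
    }

    /* Aligned body: full-width stores (heavily unrolled in the assembly).  */
    while (n >= NSIZE) {
        memcpy(p, &pattern, NSIZE);   /* stands in for C_ST a1,UNIT(n)(a0) */
        p += NSIZE;
        n -= NSIZE;
    }

    /* Remaining tail bytes.  */
    while (n--)
        *p++ = (unsigned char)c;

    return dst;
}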