cygwin.com/git/newlib-cygwin.git
author     Sebastian Huber <sebastian.huber@embedded-brains.de>  2022-09-22 10:19:57 +0300
committer  Sebastian Huber <sebastian.huber@embedded-brains.de>  2022-09-24 09:39:29 +0300
commit     d9dc88048aa1ba41b4ebb003f3ed7b8e042d3175
tree       d55ee3d4840e707e7e31a0908a16e2e262cfc4f0 /newlib/libc/machine
parent     5230eb7f8c6b43c71d7e38d138935c48de930b76
powerpc/setjmp: Add 64-bit support
Use 64-bit store/load instructions to save/restore the general-purpose registers.
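For reference, stdu/ldu are update-form instructions: "stdu rS,D(r3)" stores rS at r3 + D and then writes that effective address back into r3, so in the patch below the first save uses a displacement of 16 and every later one a displacement of 8. The following host-side C sketch (illustrative only, not part of the patch) reproduces the offsets noted in the "# offset" comments of the 64-bit path:

#include <assert.h>
#include <stdio.h>

int main(void)
{
    /* The registers saved by the __powerpc64__ path, in store order. */
    int regs[] = { 1, 2, 13, 14, 15, 16, 17, 18, 19, 20, 21,
                   22, 23, 24, 25, 26, 27, 28, 29, 30, 31 };
    unsigned r3 = 0;  /* r3 points at the jmp_buf on entry */

    for (int i = 0; i < 21; i++) {
        r3 += (i == 0) ? 16 : 8;   /* update-form displacement */
        assert(r3 % 8 == 0);       /* every GPR slot is 8-byte aligned */
        printf("r%-2d saved at offset %u\n", regs[i], r3);
    }
    assert(r3 == 176);  /* matches "# offset 176" for r31 */
    return 0;
}

Starting at 16 rather than 8 costs a few bytes but, as the patch comments note, lays out the save area so the later floating-point and Altivec slots can meet their 8- and 16-byte alignment requirements.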
Diffstat (limited to 'newlib/libc/machine')
-rw-r--r--  newlib/libc/machine/powerpc/setjmp.S | 79
1 file changed, 70 insertions(+), 9 deletions(-)
diff --git a/newlib/libc/machine/powerpc/setjmp.S b/newlib/libc/machine/powerpc/setjmp.S
index f4ccd1bb5..dc8b239a9 100644
--- a/newlib/libc/machine/powerpc/setjmp.S
+++ b/newlib/libc/machine/powerpc/setjmp.S
@@ -1,6 +1,7 @@
/* This is a simple version of setjmp and longjmp for the PowerPC.
Ian Lance Taylor, Cygnus Support, 9 Feb 1994.
- Modified by Jeff Johnston, Red Hat Inc. 2 Oct 2001. */
+ Modified by Jeff Johnston, Red Hat Inc. 2 Oct 2001.
+ Modified by Sebastian Huber, embedded brains GmbH. 22 Sep 2022. */
#include "ppc-asm.h"
@@ -40,6 +41,31 @@ FUNC_START(setjmp)
stored. Note that we are not adding 168 because the next
store instruction uses an offset of 4. */
addi 3,3,164
+#elif __powerpc64__
+ /* In the first store, add 16 to r3 so that the subsequent floating
+ point stores are aligned on an 8 byte boundary and the Altivec
+ stores are aligned on a 16 byte boundary. */
+ stdu 1,16(3) # offset 16
+ stdu 2,8(3) # offset 24
+ stdu 13,8(3) # offset 32
+ stdu 14,8(3) # offset 40
+ stdu 15,8(3) # offset 48
+ stdu 16,8(3) # offset 56
+ stdu 17,8(3) # offset 64
+ stdu 18,8(3) # offset 72
+ stdu 19,8(3) # offset 80
+ stdu 20,8(3) # offset 88
+ stdu 21,8(3) # offset 96
+ stdu 22,8(3) # offset 104
+ stdu 23,8(3) # offset 112
+ stdu 24,8(3) # offset 120
+ stdu 25,8(3) # offset 128
+ stdu 26,8(3) # offset 136
+ stdu 27,8(3) # offset 144
+ stdu 28,8(3) # offset 152
+ stdu 29,8(3) # offset 160
+ stdu 30,8(3) # offset 168
+ stdu 31,8(3) # offset 176
#else
stw 1,0(3) # offset 0
stwu 2,4(3) # offset 4
@@ -64,10 +90,15 @@ FUNC_START(setjmp)
stwu 31,4(3) # offset 80
#endif
- /* From this point on until the end of this function, add 84
- to the offset shown if __SPE__. This difference comes from
- the fact that we save 21 64-bit registers instead of 21
- 32-bit registers above. */
+ /* If __SPE__, then add 84 to the offset shown from this point on until
+ the end of this function. This difference comes from the fact that
+ we save 21 64-bit registers instead of 21 32-bit registers above.
+
+ If __powerpc64__, then add 96 to the offset shown from this point on until
+ the end of this function. This difference comes from the fact that
+ we save 21 64-bit registers instead of 21 32-bit registers above and
+ we take alignment requirements of floating point and Altivec stores
+ into account. */
mflr 4
stwu 4,4(3) # offset 84
mfcr 4
@@ -188,6 +219,31 @@ FUNC_START(longjmp)
loaded. Note that we are not adding 168 because the next
load instruction uses an offset of 4. */
addi 3,3,164
+#elif __powerpc64__
+ /* In the first load, add 16 to r3 so that the subsequent floating
+ point loads are aligned on an 8 byte boundary and the Altivec
+ loads are aligned on a 16 byte boundary. */
+ ldu 1,16(3) # offset 16
+ ldu 2,8(3) # offset 24
+ ldu 13,8(3) # offset 32
+ ldu 14,8(3) # offset 40
+ ldu 15,8(3) # offset 48
+ ldu 16,8(3) # offset 56
+ ldu 17,8(3) # offset 64
+ ldu 18,8(3) # offset 72
+ ldu 19,8(3) # offset 80
+ ldu 20,8(3) # offset 88
+ ldu 21,8(3) # offset 96
+ ldu 22,8(3) # offset 104
+ ldu 23,8(3) # offset 112
+ ldu 24,8(3) # offset 120
+ ldu 25,8(3) # offset 128
+ ldu 26,8(3) # offset 136
+ ldu 27,8(3) # offset 144
+ ldu 28,8(3) # offset 152
+ ldu 29,8(3) # offset 160
+ ldu 30,8(3) # offset 168
+ ldu 31,8(3) # offset 176
#else
lwz 1,0(3) # offset 0
lwzu 2,4(3) # offset 4
@@ -211,10 +267,15 @@ FUNC_START(longjmp)
lwzu 30,4(3) # offset 76
lwzu 31,4(3) # offset 80
#endif
- /* From this point on until the end of this function, add 84
- to the offset shown if __SPE__. This difference comes from
- the fact that we restore 21 64-bit registers instead of 21
- 32-bit registers above. */
+ /* If __SPE__, then add 84 to the offset shown from this point on until
+ the end of this function. This difference comes from the fact that
+ we restore 21 64-bit registers instead of 21 32-bit registers above.
+
+ If __powerpc64__, then add 96 to the offset shown from this point on until
+ the end of this function. This difference comes from the fact that
+ we restore 21 64-bit registers instead of 21 32-bit registers above and
+ we take alignment requirements of floating point and Altivec loads
+ into account. */
lwzu 5,4(3) # offset 84
mtlr 5
lwzu 5,4(3) # offset 88
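For context, the contract these routines implement is the standard C one: setjmp records the non-volatile state (r1, r2, r13-r31, LR, and CR, plus floating-point and Altivec registers where configured) and a later longjmp makes that setjmp call return a second time with a nonzero value. A minimal caller, illustrative only and not part of the patch:

#include <setjmp.h>
#include <stdio.h>

static jmp_buf env;

static void fail(void)
{
    /* Transfers control back to the setjmp call site; on powerpc64 the
       ldu sequence above reloads the saved 64-bit registers on the way. */
    longjmp(env, 1);
}

int main(void)
{
    if (setjmp(env) == 0) {   /* direct return: registers just saved */
        puts("setjmp returned 0: taking the failing path");
        fail();
    }
    puts("setjmp returned nonzero: state restored by longjmp");
    return 0;
}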