/*
 * FreeRTOS Kernel <DEVELOPMENT BRANCH>
 * Copyright (C) 2015-2019 Cadence Design Systems, Inc.
 * Copyright (C) 2021 Amazon.com, Inc. or its affiliates.  All Rights Reserved.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy of
 * this software and associated documentation files (the "Software"), to deal in
 * the Software without restriction, including without limitation the rights to
 * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
 * the Software, and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
 * FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
 * COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
 * IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * https://www.FreeRTOS.org
 * https://github.com/FreeRTOS
 *
 */

/*
 * XTENSA CONTEXT SAVE AND RESTORE ROUTINES
 *
 * Low-level Call0 functions for handling generic context save and restore of
 * registers not specifically addressed by the interrupt vectors and handlers.
 * Those registers (not handled by these functions) are PC, PS, A0, A1 (SP).
 * Except for the calls to RTOS functions, this code is generic to Xtensa.
 *
 * Note that in Call0 ABI, interrupt handlers are expected to preserve the callee-
 * save regs (A12-A15), which is always the case if the handlers are coded in C.
 * However A12, A13 are made available as scratch registers for interrupt dispatch
 * code, so are presumed saved anyway, and are always restored even in Call0 ABI.
 * Only A14, A15 are truly handled as callee-save regs.
 *
 * Because Xtensa is a configurable architecture, this port supports all user
 * generated configurations (except restrictions stated in the release notes).
 * This is accomplished by conditional compilation using macros and functions
 * defined in the Xtensa HAL (hardware adaptation layer) for your configuration.
 * Only the processor state included in your configuration is saved and restored,
 * including any processor state added by user configuration options or TIE.
 */
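
/*
 * Illustrative sketch only (condensed from the companion vector code in
 * xtensa_vectors.S; the authoritative sequences live there, not here):
 * an interrupt dispatcher brackets these routines roughly as follows.
 *
 *     mov     a0,  sp                      a0 = pre-interrupt SP
 *     addi    sp,  sp, -XT_STK_FRMSZ       allocate interrupt stack frame
 *     s32i    a0,  sp, XT_STK_A1           save SP; likewise PC, PS, A0
 *     ...
 *     s32i    a12, sp, XT_STK_A12          save a12, a13 (dispatch scratch)
 *     s32i    a13, sp, XT_STK_A13
 *     call0   _xt_context_save             save everything else
 *     ...                                  handle the interrupt
 *     call0   _xt_context_restore          restore everything else
 *     ...                                  restore PC, PS, A0, SP; return
 */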

/*  Warn nicely if this file gets named with a lowercase .s instead of .S:  */
#define NOERROR #
NOERROR: .error "C preprocessor needed for this file: make sure its filename\
 ends in uppercase .S, or use xt-xcc's -x assembler-with-cpp option."


#include "xtensa_rtos.h"

#ifdef XT_USE_OVLY
#include <xtensa/overlay_os_asm.h>
#endif

    .text
    .literal_position

/*******************************************************************************

_xt_context_save

    !! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!

Saves all Xtensa processor state except PC, PS, A0, A1 (SP), A12, A13, in the
interrupt stack frame defined in xtensa_rtos.h.
Its counterpart is _xt_context_restore (which also restores A12, A13).

Caller is expected to have saved PC, PS, A0, A1 (SP), A12, A13 in the frame.
This function preserves A12 & A13 in order to provide the caller with 2 scratch
regs that need not be saved over the call to this function. The choice of which
2 regs to provide is governed by xthal_window_spill_nw and xthal_save_extra_nw,
to avoid moving data more than necessary. Caller can assign regs accordingly.

Entry Conditions:
    A0  = Return address in caller.
    A1  = Stack pointer of interrupted thread or handler ("interruptee").
    Original A12, A13 have already been saved in the interrupt stack frame.
    Other processor state except PC, PS, A0, A1 (SP), A12, A13, is as at the
    point of interruption.
    If windowed ABI, PS.EXCM = 1 (exceptions disabled).

Exit conditions:
    A0  = Return address in caller.
    A1  = Stack pointer of interrupted thread or handler ("interruptee").
    A12, A13 as at entry (preserved).
    If windowed ABI, PS.EXCM = 1 (exceptions disabled).

*******************************************************************************/
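
/*
 * For illustration only (not code from this port): once the originals are
 * in the frame, the dispatcher may use a12/a13 freely before the call,
 * e.g. (the particular use shown here is hypothetical):
 *
 *     s32i    a12, sp, XT_STK_A12          originals into the frame first
 *     s32i    a13, sp, XT_STK_A13
 *     rsr     a12, EPC_1                   a12 now free as scratch
 *     s32i    a12, sp, XT_STK_PC
 *     call0   _xt_context_save             a12, a13 preserved across this
 */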

    .global _xt_context_save
    .type   _xt_context_save,@function
    .align  4
_xt_context_save:

    s32i    a2,  sp, XT_STK_A2
    s32i    a3,  sp, XT_STK_A3
    s32i    a4,  sp, XT_STK_A4
    s32i    a5,  sp, XT_STK_A5
    s32i    a6,  sp, XT_STK_A6
    s32i    a7,  sp, XT_STK_A7
    s32i    a8,  sp, XT_STK_A8
    s32i    a9,  sp, XT_STK_A9
    s32i    a10, sp, XT_STK_A10
    s32i    a11, sp, XT_STK_A11

    /*
    Call0 ABI callee-saved regs a12-15 do not need to be saved here.
    a12-13 are the caller's responsibility so it can use them as scratch.
    So only need to save a14-a15 here for Windowed ABI (not Call0).
    */
    #ifndef __XTENSA_CALL0_ABI__
    s32i    a14, sp, XT_STK_A14
    s32i    a15, sp, XT_STK_A15
    #endif

    rsr     a3,  SAR
    s32i    a3,  sp, XT_STK_SAR

    #if XCHAL_HAVE_LOOPS
    rsr     a3,  LBEG
    s32i    a3,  sp, XT_STK_LBEG
    rsr     a3,  LEND
    s32i    a3,  sp, XT_STK_LEND
    rsr     a3,  LCOUNT
    s32i    a3,  sp, XT_STK_LCOUNT
    #endif

    #if XT_USE_SWPRI
    /* Save virtual priority mask */
    movi    a3,  _xt_vpri_mask
    l32i    a3,  a3, 0
    s32i    a3,  sp, XT_STK_VPRI
    #endif

    #if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
    mov     a9,  a0                     /* preserve ret addr */
    #endif

    #ifndef __XTENSA_CALL0_ABI__
    /*
    To spill the reg windows, temp. need pre-interrupt stack ptr and a4-15.
    Need to save a9,12,13 temporarily (in frame temps) and recover originals.
    Interrupts need to be disabled below XCHAL_EXCM_LEVEL and window overflow
    and underflow exceptions disabled (assured by PS.EXCM == 1).
    */
    s32i    a12, sp, XT_STK_TMP0        /* temp. save stuff in stack frame */
    s32i    a13, sp, XT_STK_TMP1
    s32i    a9,  sp, XT_STK_TMP2

    /*
    Save the overlay state if we are supporting overlays. Since we just saved
    three registers, we can conveniently use them here. Note that as of now,
    overlays only work for windowed calling ABI.
    */
    #ifdef XT_USE_OVLY
    l32i    a9,  sp, XT_STK_PC          /* recover saved PC */
    _xt_overlay_get_state    a9, a12, a13
    s32i    a9,  sp, XT_STK_OVLY        /* save overlay state */
    #endif

    l32i    a12, sp, XT_STK_A12         /* recover original a9,12,13 */
    l32i    a13, sp, XT_STK_A13
    l32i    a9,  sp, XT_STK_A9
    addi    sp,  sp, XT_STK_FRMSZ       /* restore the interruptee's SP */
    call0   xthal_window_spill_nw       /* preserves only a4,5,8,9,12,13 */
    addi    sp,  sp, -XT_STK_FRMSZ
    l32i    a12, sp, XT_STK_TMP0        /* recover stuff from stack frame */
    l32i    a13, sp, XT_STK_TMP1
    l32i    a9,  sp, XT_STK_TMP2
    #endif

    #if XCHAL_EXTRA_SA_SIZE > 0
    /*
    NOTE: Normally the xthal_save_extra_nw macro only affects address
    registers a2-a5. It is theoretically possible for Xtensa processor
    designers to write TIE that causes more address registers to be
    affected, but it is generally unlikely. If that ever happens,
    more registers need to be saved/restored around this macro invocation.
    Here we assume a9,12,13 are preserved.
    Future Xtensa tools releases might limit the regs that can be affected.
    */
    addi    a2,  sp, XT_STK_EXTRA       /* where to save it */
    # if XCHAL_EXTRA_SA_ALIGN > 16
    movi    a3, -XCHAL_EXTRA_SA_ALIGN
    and     a2, a2, a3                  /* align dynamically >16 bytes */
    # endif
    call0   xthal_save_extra_nw         /* destroys a0,2,3,4,5 */
    #endif

    #if XCHAL_EXTRA_SA_SIZE > 0 || !defined(__XTENSA_CALL0_ABI__)
    mov     a0, a9                      /* retrieve ret addr */
    #endif

    ret

/*******************************************************************************

_xt_context_restore

    !! MUST BE CALLED ONLY BY 'CALL0' INSTRUCTION !!

Restores all Xtensa processor state except PC, PS, A0, A1 (SP) (and in Call0
ABI, A14, A15 which are preserved by all interrupt handlers) from an interrupt
stack frame defined in xtensa_rtos.h.
Its counterpart is _xt_context_save (whose caller saved A12, A13).

Caller is responsible to restore PC, PS, A0, A1 (SP).

Entry Conditions:
    A0  = Return address in caller.
    A1  = Stack pointer of interrupted thread or handler ("interruptee").

Exit conditions:
    A0  = Return address in caller.
    A1  = Stack pointer of interrupted thread or handler ("interruptee").
    Other processor state except PC, PS, A0, A1 (SP), is as at the point
    of interruption.

*******************************************************************************/
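
/*
 * Illustrative sketch of the caller's side (condensed from the level-1
 * exit path in xtensa_vectors.S; treat it as a sketch, not a spec):
 *
 *     call0   _xt_context_restore          everything except PC, PS, A0, SP
 *     l32i    a0,  sp, XT_STK_PS           retrieve interruptee's PS
 *     wsr     a0,  PS
 *     l32i    a0,  sp, XT_STK_PC           retrieve interruptee's PC
 *     wsr     a0,  EPC_1
 *     l32i    a0,  sp, XT_STK_A0           retrieve interruptee's A0
 *     l32i    sp,  sp, XT_STK_A1           remove interrupt stack frame
 *     rsync                                ensure PS and EPC written
 *     rfe                                  PS.EXCM is cleared
 */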

    .global _xt_context_restore
    .type   _xt_context_restore,@function
    .align  4
_xt_context_restore:

    #if XCHAL_EXTRA_SA_SIZE > 0
    /*
    NOTE: Normally the xthal_restore_extra_nw macro only affects address
    registers a2-a5. It is theoretically possible for Xtensa processor
    designers to write TIE that causes more address registers to be
    affected, but it is generally unlikely. If that ever happens,
    more registers need to be saved/restored around this macro invocation.
    Here we only assume a13 is preserved.
    Future Xtensa tools releases might limit the regs that can be affected.
    */
    mov     a13, a0                     /* preserve ret addr */
    addi    a2,  sp, XT_STK_EXTRA       /* where to find it */
    # if XCHAL_EXTRA_SA_ALIGN > 16
    movi    a3, -XCHAL_EXTRA_SA_ALIGN
    and     a2, a2, a3                  /* align dynamically >16 bytes */
    # endif
    call0   xthal_restore_extra_nw      /* destroys a0,2,3,4,5 */
    mov     a0,  a13                    /* retrieve ret addr */
    #endif

    #if XCHAL_HAVE_LOOPS
    l32i    a2,  sp, XT_STK_LBEG
    l32i    a3,  sp, XT_STK_LEND
    wsr     a2,  LBEG
    l32i    a2,  sp, XT_STK_LCOUNT
    wsr     a3,  LEND
    wsr     a2,  LCOUNT
    #endif

    #ifdef XT_USE_OVLY
    /*
    If we are using overlays, this is a good spot to check if we need
    to restore an overlay for the incoming task. Here we have a bunch
    of registers to spare. Note that this step is going to use a few
    bytes of storage below SP (SP-20 to SP-32) if an overlay is going
    to be restored.
    */
    l32i    a2,  sp, XT_STK_PC          /* retrieve PC */
    l32i    a3,  sp, XT_STK_PS          /* retrieve PS */
    l32i    a4,  sp, XT_STK_OVLY        /* retrieve overlay state */
    l32i    a5,  sp, XT_STK_A1          /* retrieve stack ptr */
    _xt_overlay_check_map    a2, a3, a4, a5, a6
    s32i    a2,  sp, XT_STK_PC          /* save updated PC */
    s32i    a3,  sp, XT_STK_PS          /* save updated PS */
    #endif

    #ifdef XT_USE_SWPRI
    /* Restore virtual interrupt priority and interrupt enable */
    movi    a3,  _xt_intdata
    l32i    a4,  a3, 0                  /* a4 = _xt_intenable */
    l32i    a5,  sp, XT_STK_VPRI        /* a5 = saved _xt_vpri_mask */
    and     a4,  a4, a5
    wsr     a4,  INTENABLE              /* update INTENABLE */
    s32i    a5,  a3, 4                  /* restore _xt_vpri_mask */
    #endif

    l32i    a3,  sp, XT_STK_SAR
    l32i    a2,  sp, XT_STK_A2
    wsr     a3,  SAR
    l32i    a3,  sp, XT_STK_A3
    l32i    a4,  sp, XT_STK_A4
    l32i    a5,  sp, XT_STK_A5
    l32i    a6,  sp, XT_STK_A6
    l32i    a7,  sp, XT_STK_A7
    l32i    a8,  sp, XT_STK_A8
    l32i    a9,  sp, XT_STK_A9
    l32i    a10, sp, XT_STK_A10
    l32i    a11, sp, XT_STK_A11

    /*
    Call0 ABI callee-saved regs a12-15 do not need to be restored here.
    However a12-13 were saved for scratch before XT_RTOS_INT_ENTER(),
    so need to be restored anyway, despite being callee-saved in Call0.
    */
    l32i    a12, sp, XT_STK_A12
    l32i    a13, sp, XT_STK_A13
    #ifndef __XTENSA_CALL0_ABI__
    l32i    a14, sp, XT_STK_A14
    l32i    a15, sp, XT_STK_A15
    #endif

    ret


/*******************************************************************************

_xt_coproc_init

Initializes global co-processor management data, setting all co-processors
to "unowned". Leaves CPENABLE as it found it (does NOT clear it).

Called during initialization of the RTOS, before any threads run.

This may be called from normal Xtensa single-threaded application code which
might use co-processors. The Xtensa run-time initialization enables all
co-processors. They must remain enabled here, else a co-processor exception
might occur outside of a thread, which the exception handler doesn't expect.

Entry Conditions:
    Xtensa single-threaded run-time environment is in effect.
    No thread is yet running.

Exit conditions:
    None.

Obeys ABI conventions per prototype:
    void _xt_coproc_init(void)

*******************************************************************************/
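
/*
 * Illustrative C-side usage per the prototype above (the exact call site
 * is RTOS-specific and not defined in this file):
 *
 *     _xt_coproc_init();                   all co-processors now unowned
 */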

#if XCHAL_CP_NUM > 0

    .global _xt_coproc_init
    .type   _xt_coproc_init,@function
    .align  4
_xt_coproc_init:
    ENTRY0

    /* Initialize thread co-processor ownerships to 0 (unowned). */
    movi    a2, _xt_coproc_owner_sa         /* a2 = base of owner array */
    addi    a3, a2, XCHAL_CP_MAX << 2       /* a3 = top+1 of owner array */
    movi    a4, 0                           /* a4 = 0 (unowned) */
1:  s32i    a4, a2, 0
    addi    a2, a2, 4
    bltu    a2, a3, 1b

    RET0

#endif


/*******************************************************************************

_xt_coproc_release

Releases any and all co-processors owned by a given thread. The thread is
identified by its co-processor state save area defined in xtensa_context.h.

Must be called before a thread's co-proc save area is deleted, to avoid
memory corruption when the exception handler later tries to save state
into freed memory. It may also be called when a thread terminates or
completes without deleting its save area, to spare the exception handler
from saving the dead thread's co-proc state before another thread can
use the co-processor (an optimization).

Entry Conditions:
    A2  = Pointer to base of co-processor state save area.

Exit conditions:
    None.

Obeys ABI conventions per prototype:
    void _xt_coproc_release(void * coproc_sa_base)

*******************************************************************************/
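
/*
 * Illustrative C-side usage per the prototype above (the structure and
 * field names here are hypothetical, for example only):
 *
 *     _xt_coproc_release( thread->cp_save_area );   e.g. on task delete
 */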

#if XCHAL_CP_NUM > 0

    .global _xt_coproc_release
    .type   _xt_coproc_release,@function
    .align  4
_xt_coproc_release:
    ENTRY0                                  /* a2 = base of save area */

    movi    a3, _xt_coproc_owner_sa         /* a3 = base of owner array */
    addi    a4, a3, XCHAL_CP_MAX << 2       /* a4 = top+1 of owner array */
    movi    a5, 0                           /* a5 = 0 (unowned) */

    rsil    a6, XCHAL_EXCM_LEVEL            /* lock interrupts */

1:  l32i    a7, a3, 0                       /* a7 = owner at a3 */
    bne     a2, a7, 2f                      /* if (coproc_sa_base == owner) */
    s32i    a5, a3, 0                       /*   owner = unowned */
2:  addi    a3, a3, 1<<2                    /* a3 = next entry in owner array */
    bltu    a3, a4, 1b                      /* repeat until end of array */

3:  wsr     a6, PS                          /* restore interrupts */

    RET0

#endif


/*******************************************************************************
_xt_coproc_savecs

If there is a current thread and it has a coprocessor state save area, then
save all callee-saved state into this area. This function is called from the
solicited context switch handler. It calls a system-specific function to get
the coprocessor save area base address.

Entry conditions:
    - The thread being switched out is still the current thread.
    - CPENABLE state reflects which coprocessors are active.
    - Registers have been saved/spilled already.

Exit conditions:
    - All necessary CP callee-saved state has been saved.
    - Registers a2-a7, a13-a15 have been trashed.

Must be called from assembly code only, using CALL0.
*******************************************************************************/
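
/*
 * Illustrative only: the port's solicited context-switch assembly would
 * invoke this with CALL0 after the AR registers have been saved/spilled,
 * e.g. (a sketch, not a sequence from this file):
 *
 *     call0   _xt_coproc_savecs            trashes a2-a7, a13-a15
 */
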
#if XCHAL_CP_NUM > 0

    .extern     _xt_coproc_sa_offset   /* external reference */

    .global     _xt_coproc_savecs
    .type       _xt_coproc_savecs,@function
    .align      4
_xt_coproc_savecs:

    /* At entry, CPENABLE should be showing which CPs are enabled. */

    rsr     a2, CPENABLE                /* a2 = which CPs are enabled      */
    beqz    a2, .Ldone                  /* quick exit if none              */
    mov     a14, a0                     /* save return address             */
    call0   XT_RTOS_CP_STATE            /* get address of CP save area     */
    mov     a0, a14                     /* restore return address          */
    beqz    a15, .Ldone                 /* if none then nothing to do      */
    s16i    a2, a15, XT_CP_CS_ST        /* save mask of CPs being stored   */
    movi    a13, _xt_coproc_sa_offset   /* array of CP save offsets        */
    l32i    a15, a15, XT_CP_ASA         /* a15 = base of aligned save area */

#if XCHAL_CP0_SA_SIZE
    bbci.l  a2, 0, 2f                   /* CP 0 not enabled                */
    l32i    a14, a13, 0                 /* a14 = _xt_coproc_sa_offset[0]   */
    add     a3, a14, a15                /* a3 = save area for CP 0         */
    xchal_cp0_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP1_SA_SIZE
    bbci.l  a2, 1, 2f                   /* CP 1 not enabled                */
    l32i    a14, a13, 4                 /* a14 = _xt_coproc_sa_offset[1]   */
    add     a3, a14, a15                /* a3 = save area for CP 1         */
    xchal_cp1_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP2_SA_SIZE
    bbci.l  a2, 2, 2f
    l32i    a14, a13, 8
    add     a3, a14, a15
    xchal_cp2_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP3_SA_SIZE
    bbci.l  a2, 3, 2f
    l32i    a14, a13, 12
    add     a3, a14, a15
    xchal_cp3_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP4_SA_SIZE
    bbci.l  a2, 4, 2f
    l32i    a14, a13, 16
    add     a3, a14, a15
    xchal_cp4_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP5_SA_SIZE
    bbci.l  a2, 5, 2f
    l32i    a14, a13, 20
    add     a3, a14, a15
    xchal_cp5_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP6_SA_SIZE
    bbci.l  a2, 6, 2f
    l32i    a14, a13, 24
    add     a3, a14, a15
    xchal_cp6_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP7_SA_SIZE
    bbci.l  a2, 7, 2f
    l32i    a14, a13, 28
    add     a3, a14, a15
    xchal_cp7_store a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

.Ldone:
    ret
#endif


/*******************************************************************************
_xt_coproc_restorecs

Restore any callee-saved coprocessor state for the incoming thread.
This function is called from coprocessor exception handling, when giving
ownership to a thread that solicited a context switch earlier. It calls a
system-specific function to get the coprocessor save area base address.

Entry conditions:
    - The incoming thread is set as the current thread.
    - CPENABLE is set up correctly for all required coprocessors.
    - a2 = mask of coprocessors to be restored.

Exit conditions:
    - All necessary CP callee-saved state has been restored.
    - CPENABLE - unchanged.
    - Registers a2-a7, a13-a15 have been trashed.

Must be called from assembly code only, using CALL0.
*******************************************************************************/
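
/*
 * Illustrative only (a sketch, not a sequence from this file): the
 * co-processor exception handler sets a2 to the mask of CPs to restore
 * before the CALL0, e.g. for a single faulting co-processor n:
 *
 *     movi    a2, 1 << n                   n is hypothetical here
 *     call0   _xt_coproc_restorecs         trashes a2-a7, a13-a15
 */
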
#if XCHAL_CP_NUM > 0

    .global     _xt_coproc_restorecs
    .type       _xt_coproc_restorecs,@function
    .align      4
_xt_coproc_restorecs:

    mov     a14, a0                     /* save return address             */
    call0   XT_RTOS_CP_STATE            /* get address of CP save area     */
    mov     a0, a14                     /* restore return address          */
    beqz    a15, .Ldone2                /* if none then nothing to do      */
    l16ui   a3, a15, XT_CP_CS_ST        /* a3 = which CPs have been saved  */
    xor     a3, a3, a2                  /* clear the ones being restored   */
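    /* (assuming the restore mask is a subset of the saved mask, the XOR
       above clears exactly the bits for the CPs being restored) */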
    s32i    a3, a15, XT_CP_CS_ST        /* update saved CP mask            */
    movi    a13, _xt_coproc_sa_offset   /* array of CP save offsets        */
    l32i    a15, a15, XT_CP_ASA         /* a15 = base of aligned save area */

#if XCHAL_CP0_SA_SIZE
    bbci.l  a2, 0, 2f                   /* CP 0 not enabled                */
    l32i    a14, a13, 0                 /* a14 = _xt_coproc_sa_offset[0]   */
    add     a3, a14, a15                /* a3 = save area for CP 0         */
    xchal_cp0_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP1_SA_SIZE
    bbci.l  a2, 1, 2f                   /* CP 1 not enabled                */
    l32i    a14, a13, 4                 /* a14 = _xt_coproc_sa_offset[1]   */
    add     a3, a14, a15                /* a3 = save area for CP 1         */
    xchal_cp1_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP2_SA_SIZE
    bbci.l  a2, 2, 2f
    l32i    a14, a13, 8
    add     a3, a14, a15
    xchal_cp2_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP3_SA_SIZE
    bbci.l  a2, 3, 2f
    l32i    a14, a13, 12
    add     a3, a14, a15
    xchal_cp3_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP4_SA_SIZE
    bbci.l  a2, 4, 2f
    l32i    a14, a13, 16
    add     a3, a14, a15
    xchal_cp4_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP5_SA_SIZE
    bbci.l  a2, 5, 2f
    l32i    a14, a13, 20
    add     a3, a14, a15
    xchal_cp5_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP6_SA_SIZE
    bbci.l  a2, 6, 2f
    l32i    a14, a13, 24
    add     a3, a14, a15
    xchal_cp6_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

#if XCHAL_CP7_SA_SIZE
    bbci.l  a2, 7, 2f
    l32i    a14, a13, 28
    add     a3, a14, a15
    xchal_cp7_load a3, a4, a5, a6, a7 continue=0 ofs=-1 select=XTHAL_SAS_TIE|XTHAL_SAS_NOCC|XTHAL_SAS_CALE alloc=XTHAL_SAS_ALL
2:
#endif

.Ldone2:
    ret

#endif