This adds support for mapping AArch64 machine exceptions to POSIX
signals, which is necessary for running Ada applications.
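
As an editorial illustration (not part of the change itself), the effect
of the mapping as seen from C: a machine exception such as a data abort
is delivered as an ordinary POSIX signal, which the GNAT runtime then
converts into an Ada exception. The handler name and fault address below
are hypothetical.

    #include <signal.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Hypothetical demonstration: observe a data abort as SIGSEGV. */
    static void on_segv(int sig)
    {
      printf("machine exception delivered as signal %d\n", sig);
      exit(0);
    }

    static void demo(void)
    {
      signal(SIGSEGV, on_segv);
      /* A store to an unmapped address raises a data abort, which
       * AArch64_map_exception() turns into SIGSEGV. */
      *(volatile uint32_t *)0xfffffff0 = 1;
    }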
---
 bsps/aarch64/shared/exceptions.c              |  89 +++++++++++++++
 .../cpu/aarch64/aarch64-exception-default.S   |  81 ++-----------
 .../cpu/aarch64/aarch64-exception-interrupt.S | 106 +-----------------
 .../score/cpu/aarch64/aarch64-exception-map.S |  90 +++++++++++++++
 cpukit/score/cpu/aarch64/include/rtems/asm.h  |  97 ++++++++++++++++
 .../cpu/aarch64/include/rtems/score/cpu.h     |   5 +
 spec/build/bsps/aarch64/grp.yml               |   4 +-
 spec/build/bsps/aarch64/objexceptions.yml     |  14 +++
 spec/build/cpukit/cpuaarch64.yml              |   1 +
 9 files changed, 313 insertions(+), 174 deletions(-)
 create mode 100644 bsps/aarch64/shared/exceptions.c
 create mode 100644 cpukit/score/cpu/aarch64/aarch64-exception-map.S
 create mode 100644 spec/build/bsps/aarch64/objexceptions.yml

diff --git a/bsps/aarch64/shared/exceptions.c b/bsps/aarch64/shared/exceptions.c
new file mode 100644
index 0000000000..808a3bf62a
--- /dev/null
+++ b/bsps/aarch64/shared/exceptions.c
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSBSPsAArch64Shared
+ *
+ * @brief AArch64 machine exception to POSIX signal mapping.
+ */
+
+/*
+ * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
+ * Written by Kinsey Moore <kinsey.mo...@oarcorp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h> /* getpid */
+
+#include <bsp.h>
+#include <rtems/bspIo.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <rtems/exceptions.h>
+
+#include <bsp/utility.h>
+/*
+ * Synchronous trap handler. Map the exception class to SIGFPE, SIGSEGV,
+ * or SIGILL to generate the corresponding Ada exception.
+ */
+
+#define ESR_EC_GET(reg) BSP_FLD64GET(reg, 26, 31)
+
+void AArch64_map_exception( uint64_t syndrome )
+{
+  uint32_t signal;
+  uint64_t EC = ESR_EC_GET(syndrome);
+
+  switch (EC)
+    {
+    case 0x28:                 /* FPU exception */
+    case 0x2c:                 /* FPU exception */
+    case 0x0A:                 /* tagged arithmetic overflow */
+    case 0x82:                 /* divide by zero */
+      signal = SIGFPE;
+      break;
+    case 0x20:                 /* Instruction abort */
+    case 0x21:                 /* Instruction abort */
+    case 0x24:                 /* Data abort */
+    case 0x25:                 /* Data abort */
+      signal = SIGSEGV;
+      break;
+    default:                   /* Anything else ... */
+      /* Covers unknown and trapped instructions (including SVC), illegal
+       * execution state, PC or SP alignment faults, debugging exceptions,
+       * etc. */
+      signal = SIGILL;
+      break;
+    }
+  kill(getpid(), signal);
+}
+
+void
+_Exception_initialize_signal_mapping(void)
+{
+  AArch64_set_exception_handler(
+    AARCH64_EXCEPTION_SPx_SYNCHRONOUS,
+    _AArch64_Exception_synchronous_map
+  );
+}
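
[Editorial note, not part of the patch.] BSP_FLD64GET(reg, 26, 31)
extracts the Exception Class (EC) field, bits [31:26] of ESR_EL1. A
plain-C sketch of the same extraction, with a hypothetical helper name:

    #include <stdint.h>

    /* Illustrative stand-in for ESR_EC_GET: EC occupies bits [31:26] of
     * ESR_EL1, so the result is always a 6-bit value in 0x00-0x3f. */
    static inline uint64_t esr_exception_class(uint64_t esr)
    {
      return (esr >> 26) & 0x3f;
    }

Because EC is only six bits wide, the 0x82 case above can never match and
looks like a carry-over from another port's trap table; the SIGFPE mapping
is effectively selected by the 0x28, 0x2c, and 0x0A cases.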
diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-default.S b/cpukit/score/cpu/aarch64/aarch64-exception-default.S
index d139fdc6a4..047eab984c 100644
--- a/cpukit/score/cpu/aarch64/aarch64-exception-default.S
+++ b/cpukit/score/cpu/aarch64/aarch64-exception-default.S
@@ -49,6 +49,8 @@
 .globl bsp_start_vector_table_end
 .globl bsp_start_vector_table_size
 .globl bsp_vector_table_size
+.globl _AArch64_push_exception_context_start
+.globl _AArch64_push_exception_context_finish
 
 .section ".text"
 
@@ -337,13 +339,13 @@ bsp_start_vector_table_end:
 /* Save LR */
        str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET]
 /* Push the start of the context */
-       bl .push_exception_context_start
-/* Save original sp in x0 for .push_exception_context_finish */
+       bl _AArch64_push_exception_context_start
+/* Save original sp in x0 for _AArch64_push_exception_context_finish */
        msr spsel, #1
        mov x0, sp
        msr spsel, #0
 /* Push the remainder of the context */
-       bl .push_exception_context_finish
+       bl _AArch64_push_exception_context_finish
 /* Save sp into x0 for handler */
        mov x0, sp
 /* Jump into the handler */
@@ -368,11 +370,11 @@ bsp_start_vector_table_end:
 /* Save LR */
        str lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET]
 /* Push the start of the context */
-       bl .push_exception_context_start
-/* Save original sp in x0 for .push_exception_context_finish */
+       bl _AArch64_push_exception_context_start
+/* Save original sp in x0 for _AArch64_push_exception_context_finish */
        add x0, sp, #(AARCH64_EXCEPTION_FRAME_SIZE + 0x10)
 /* Push the remainder of the context */
-       bl .push_exception_context_finish
+       bl _AArch64_push_exception_context_finish
 /* Save sp (exception frame) into x0 for handler */
        mov x0, sp
 /* Jump into the handler */
@@ -383,7 +385,7 @@ twiddle:
        b       twiddle
 
 /* Assumes SP is at the base of the context and LR has already been pushed */
-.push_exception_context_start:
+_AArch64_push_exception_context_start:
 /* Push x0-x29(fp) */
        stp x0,  x1,  [sp, #0x00]
        stp x2,  x3,  [sp, #0x10]
@@ -403,7 +405,7 @@ twiddle:
        ret
 
 /* Expects original SP to be stored in x0 */
-.push_exception_context_finish:
+_AArch64_push_exception_context_finish:
 /* Get exception LR for PC */
        mrs x1, ELR_EL1
 /* Push sp and pc */
@@ -443,67 +445,4 @@ twiddle:
 /* Done, return to exception handler */
        ret
 
-/*
- * Apply the exception frame to the current register status, SP points to the EF
- */
-.pop_exception_context_and_ret:
-/* Pop daif and spsr */
-       ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_DAIF_OFFSET]
-/* Restore daif and spsr */
-       msr DAIF, x2
-       msr SPSR_EL1, x3
-/* Pop FAR and ESR */
-       ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SYNDROME_OFFSET]
-/* Restore ESR and FAR */
-       msr ESR_EL1, x2
-       msr FAR_EL1, x3
-/* Pop fpcr and fpsr */
-       ldp x2, x3, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_FPSR_OFFSET]
-/* Restore fpcr and fpsr */
-       msr FPSR, x2
-       msr FPCR, x3
-/* Restore LR */
-       ldr lr, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_LR_OFFSET]
-/* Pop VFP registers */
-       ldp q0,  q1,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x000)]
-       ldp q2,  q3,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x020)]
-       ldp q4,  q5,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x040)]
-       ldp q6,  q7,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x060)]
-       ldp q8,  q9,  [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x080)]
-       ldp q10, q11, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0a0)]
-       ldp q12, q13, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0c0)]
-       ldp q14, q15, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x0e0)]
-       ldp q16, q17, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x100)]
-       ldp q18, q19, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x120)]
-       ldp q20, q21, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x140)]
-       ldp q22, q23, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x160)]
-       ldp q24, q25, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x180)]
-       ldp q26, q27, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1a0)]
-       ldp q28, q29, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1c0)]
-       ldp q30, q31, [sp, #(AARCH64_EXCEPTION_FRAME_REGISTER_Q0_OFFSET + 0x1e0)]
-/* Pop x0-x29(fp) */
-       ldp x2,  x3,  [sp, #0x10]
-       ldp x4,  x5,  [sp, #0x20]
-       ldp x6,  x7,  [sp, #0x30]
-       ldp x8,  x9,  [sp, #0x40]
-       ldp x10, x11, [sp, #0x50]
-       ldp x12, x13, [sp, #0x60]
-       ldp x14, x15, [sp, #0x70]
-       ldp x16, x17, [sp, #0x80]
-       ldp x18, x19, [sp, #0x90]
-       ldp x20, x21, [sp, #0xa0]
-       ldp x22, x23, [sp, #0xb0]
-       ldp x24, x25, [sp, #0xc0]
-       ldp x26, x27, [sp, #0xd0]
-       ldp x28, x29, [sp, #0xe0]
-/* Pop sp (ignored since sp should be shortly restored anyway) and ELR */
-       ldp x0, x1, [sp, #AARCH64_EXCEPTION_FRAME_REGISTER_SP_OFFSET]
-/* Restore exception LR */
-       msr ELR_EL1, x1
-       ldp x0,  x1,  [sp, #0x00]
-       add sp, sp, #AARCH64_EXCEPTION_FRAME_SIZE
 
-/* We must clear reservations to ensure consistency with atomic operations */
-       clrex
-
-       ret
diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S b/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
index cb0954a29b..2def18bef7 100644
--- a/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
+++ b/cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
@@ -45,6 +45,7 @@
 
 .globl _AArch64_Exception_interrupt_no_nest
 .globl _AArch64_Exception_interrupt_nest
+.globl _AArch64_Perform_Thread_Dispatch
 
 #ifdef AARCH64_MULTILIB_ARCH_V8_ILP32
   #define SELF_CPU_CONTROL_GET_REG w19
@@ -106,7 +107,7 @@
 /* NOTE: This function does not follow the AArch64 procedure call specification
  * because all relevant state is known to be saved in the interrupt context,
  * hence the blind usage of x19, x20, and x21 */
-.AArch64_Perform_Thread_Dispatch:
+_AArch64_Perform_Thread_Dispatch:
 /* Get per-CPU control of current processor */
        GET_SELF_CPU_CONTROL    SELF_CPU_CONTROL_GET_REG
 
@@ -125,9 +126,7 @@
 
 /* Call _Thread_Do_dispatch(), this function will enable interrupts */
        mov     x0, SELF_CPU_CONTROL
-       mov     x1, NON_VOLATILE_SCRATCH
-       mov     x2, #0x80
-       bic     x1, x1, x2
+       mov     x1, #0x0
        bl      _Thread_Do_dispatch
 
 /* Restore LR */
@@ -152,103 +151,6 @@
 /* Return from thread dispatch */
        ret
 
-/*
- * Must save corruptible registers and non-corruptible registers expected to be
- * used, x0 and lr expected to be already saved on the stack
- */
-.macro push_interrupt_context
-/*
- * Push x1-x21 on to the stack, need 19-21 because they're modified without
- * obeying PCS
- */
-       stp lr,         x1,     [sp, #-0x10]!
-       stp x2,         x3,     [sp, #-0x10]!
-       stp x4,         x5,     [sp, #-0x10]!
-       stp x6,         x7,     [sp, #-0x10]!
-       stp x8,         x9,     [sp, #-0x10]!
-       stp x10,        x11,    [sp, #-0x10]!
-       stp x12,        x13,    [sp, #-0x10]!
-       stp x14,        x15,    [sp, #-0x10]!
-       stp x16,        x17,    [sp, #-0x10]!
-       stp x18,        x19,    [sp, #-0x10]!
-       stp x20,        x21,    [sp, #-0x10]!
-/*
- * Push q0-q31 on to the stack, need everything because parts of every register
- * are volatile/corruptible
- */
-       stp q0,         q1,     [sp, #-0x20]!
-       stp q2,         q3,     [sp, #-0x20]!
-       stp q4,         q5,     [sp, #-0x20]!
-       stp q6,         q7,     [sp, #-0x20]!
-       stp q8,         q9,     [sp, #-0x20]!
-       stp q10,        q11,    [sp, #-0x20]!
-       stp q12,        q13,    [sp, #-0x20]!
-       stp q14,        q15,    [sp, #-0x20]!
-       stp q16,        q17,    [sp, #-0x20]!
-       stp q18,        q19,    [sp, #-0x20]!
-       stp q20,        q21,    [sp, #-0x20]!
-       stp q22,        q23,    [sp, #-0x20]!
-       stp q24,        q25,    [sp, #-0x20]!
-       stp q26,        q27,    [sp, #-0x20]!
-       stp q28,        q29,    [sp, #-0x20]!
-       stp q30,        q31,    [sp, #-0x20]!
-/* Get exception LR for PC and spsr */
-       mrs x0, ELR_EL1
-       mrs x1, SPSR_EL1
-/* Push pc and spsr */
-       stp x0,         x1,     [sp, #-0x10]!
-/* Get fpcr and fpsr */
-       mrs x0, FPSR
-       mrs x1, FPCR
-/* Push fpcr and fpsr */
-       stp x0,         x1,     [sp, #-0x10]!
-.endm
-
-/* Must match inverse order of .push_interrupt_context */
-.macro pop_interrupt_context
-/* Pop fpcr and fpsr */
-       ldp x0,         x1,     [sp], #0x10
-/* Restore fpcr and fpsr */
-       msr FPCR, x1
-       msr FPSR, x0
-/* Pop pc and spsr */
-       ldp x0,         x1,     [sp], #0x10
-/* Restore exception LR for PC and spsr */
-       msr SPSR_EL1, x1
-       msr ELR_EL1, x0
-/* Pop q0-q31 */
-       ldp q30,        q31,    [sp], #0x20
-       ldp q28,        q29,    [sp], #0x20
-       ldp q26,        q27,    [sp], #0x20
-       ldp q24,        q25,    [sp], #0x20
-       ldp q22,        q23,    [sp], #0x20
-       ldp q20,        q21,    [sp], #0x20
-       ldp q18,        q19,    [sp], #0x20
-       ldp q16,        q17,    [sp], #0x20
-       ldp q14,        q15,    [sp], #0x20
-       ldp q12,        q13,    [sp], #0x20
-       ldp q10,        q11,    [sp], #0x20
-       ldp q8,         q9,     [sp], #0x20
-       ldp q6,         q7,     [sp], #0x20
-       ldp q4,         q5,     [sp], #0x20
-       ldp q2,         q3,     [sp], #0x20
-       ldp q0,         q1,     [sp], #0x20
-/* Pop x1-x21 */
-       ldp x20,        x21,    [sp], #0x10
-       ldp x18,        x19,    [sp], #0x10
-       ldp x16,        x17,    [sp], #0x10
-       ldp x14,        x15,    [sp], #0x10
-       ldp x12,        x13,    [sp], #0x10
-       ldp x10,        x11,    [sp], #0x10
-       ldp x8,         x9,     [sp], #0x10
-       ldp x6,         x7,     [sp], #0x10
-       ldp x4,         x5,     [sp], #0x10
-       ldp x2,         x3,     [sp], #0x10
-       ldp lr,         x1,     [sp], #0x10
-/* Must clear reservations here to ensure consistency with atomic operations */
-       clrex
-.endm
-
 _AArch64_Exception_interrupt_nest:
 
 /* Execution template:
@@ -309,7 +211,7 @@ Return to embedded exception vector code
  */
        cmp     x0, #0
        bne     .Lno_need_thread_dispatch
-       bl .AArch64_Perform_Thread_Dispatch
+       bl _AArch64_Perform_Thread_Dispatch
 
 .Lno_need_thread_dispatch:
 /*
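
[Editorial note, not part of the patch.] The mov x1, #0x0 change above
hard-codes the ISR level passed to _Thread_Do_dispatch() instead of
deriving it from the saved DAIF scratch value. For context, the score
declares (paraphrased from <rtems/score/threaddispatch.h>):

    #include <rtems/score/threaddispatch.h>

    /* The second argument is the ISR level in effect while dispatching.
     * On this port the level holds DAIF bits, so 0 means all exception
     * masks clear, i.e. interrupts enabled -- the same value the removed
     * bic sequence computed by clearing the I bit (0x80). */
    void _Thread_Do_dispatch( Per_CPU_Control *cpu_self, ISR_Level level );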
diff --git a/cpukit/score/cpu/aarch64/aarch64-exception-map.S b/cpukit/score/cpu/aarch64/aarch64-exception-map.S
new file mode 100644
index 0000000000..da79e703c9
--- /dev/null
+++ b/cpukit/score/cpu/aarch64/aarch64-exception-map.S
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+
+/**
+ * @file
+ *
+ * @ingroup RTEMSScoreCPUAArch64
+ *
+ * @brief Implementation of AArch64 exception map handler.
+ *
+ * This file implements the AArch64 exception map handler which maps machine
+ * exceptions to POSIX signals. For more information, see documentation for
+ * CONFIGURE_APPLICATION_NEEDS_EXCEPTION_TO_SIGNAL_MAPPING.
+ */
+
+/*
+ * Copyright (C) 2020 On-Line Applications Research Corporation (OAR)
+ * Written by Kinsey Moore <kinsey.mo...@oarcorp.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <rtems/asm.h>
+
+.globl _AArch64_Exception_synchronous_map
+
+_AArch64_Exception_synchronous_map:
+/* Execution template:
+Save volatile registers on thread stack (some x, all q, ELR, etc.)
+Execute map handler
+Perform thread dispatch
+Restore volatile registers from thread stack
+Return to embedded exception vector code
+*/
+
+/* Push interrupt context */
+       push_interrupt_context
+
+/*
+ * Thread dispatch can occur inside the exception map and the available code
+ * paths assume that exceptions are enabled.
+ */
+       msr     DAIFClr, #0x2
+
+/* Set x0 and jump into the handler */
+       mrs     x0, ESR_EL1
+       bl AArch64_map_exception
+
+/*
+ * Thread dispatch via the call below assumes that exceptions are disabled upon
+ * entry.
+ */
+       msr     DAIFSet, #0x2
+
+/*
+ * Perform thread dispatch. A synchronous exception occurred and there is no
+ * other way to resolve it.
+ */
+       bl _AArch64_Perform_Thread_Dispatch
+
+/*
+ * SP should be where it was pre-handler (pointing at the exception frame)
+ * or something has leaked stack space
+ */
+/* Pop interrupt context */
+       pop_interrupt_context
+/* Return to vector for final cleanup */
+       ret
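
[Editorial note, not part of the patch.] The DAIFClr/DAIFSet writes above
toggle only bit 1 of the DAIF immediate, the IRQ mask. A C-like outline
of the wrapper's control flow, with hypothetical helper names standing in
for the instructions shown above:

    /* Outline only; the authoritative code is the assembly above. */
    void synchronous_map_outline(void)
    {
      push_interrupt_context();           /* save volatile x/q regs, ELR, SPSR */
      enable_irq();                       /* msr DAIFClr, #0x2 */
      AArch64_map_exception(read_esr());  /* mrs x0, ESR_EL1; kill(getpid(), sig) */
      disable_irq();                      /* msr DAIFSet, #0x2 */
      _AArch64_Perform_Thread_Dispatch(); /* let the signal's handler run */
      pop_interrupt_context();            /* restore and ret to the vector */
    }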
diff --git a/cpukit/score/cpu/aarch64/include/rtems/asm.h b/cpukit/score/cpu/aarch64/include/rtems/asm.h
index 35bf533c8a..fa30e45dcb 100644
--- a/cpukit/score/cpu/aarch64/include/rtems/asm.h
+++ b/cpukit/score/cpu/aarch64/include/rtems/asm.h
@@ -84,6 +84,103 @@
        ldr     \REG, =_Per_CPU_Information
 .endm
 
+/*
+ * Must save corruptible registers and non-corruptible registers expected to be
+ * used, x0 and lr expected to be already saved on the stack
+ */
+.macro push_interrupt_context
+/*
+ * Push x1-x21 on to the stack, need 19-21 because they're modified without
+ * obeying PCS
+ */
+       stp lr,         x1,     [sp, #-0x10]!
+       stp x2,         x3,     [sp, #-0x10]!
+       stp x4,         x5,     [sp, #-0x10]!
+       stp x6,         x7,     [sp, #-0x10]!
+       stp x8,         x9,     [sp, #-0x10]!
+       stp x10,        x11,    [sp, #-0x10]!
+       stp x12,        x13,    [sp, #-0x10]!
+       stp x14,        x15,    [sp, #-0x10]!
+       stp x16,        x17,    [sp, #-0x10]!
+       stp x18,        x19,    [sp, #-0x10]!
+       stp x20,        x21,    [sp, #-0x10]!
+/*
+ * Push q0-q31 on to the stack, need everything because parts of every register
+ * are volatile/corruptible
+ */
+       stp q0,         q1,     [sp, #-0x20]!
+       stp q2,         q3,     [sp, #-0x20]!
+       stp q4,         q5,     [sp, #-0x20]!
+       stp q6,         q7,     [sp, #-0x20]!
+       stp q8,         q9,     [sp, #-0x20]!
+       stp q10,        q11,    [sp, #-0x20]!
+       stp q12,        q13,    [sp, #-0x20]!
+       stp q14,        q15,    [sp, #-0x20]!
+       stp q16,        q17,    [sp, #-0x20]!
+       stp q18,        q19,    [sp, #-0x20]!
+       stp q20,        q21,    [sp, #-0x20]!
+       stp q22,        q23,    [sp, #-0x20]!
+       stp q24,        q25,    [sp, #-0x20]!
+       stp q26,        q27,    [sp, #-0x20]!
+       stp q28,        q29,    [sp, #-0x20]!
+       stp q30,        q31,    [sp, #-0x20]!
+/* Get exception LR for PC and spsr */
+       mrs x0, ELR_EL1
+       mrs x1, SPSR_EL1
+/* Push pc and spsr */
+       stp x0,         x1,     [sp, #-0x10]!
+/* Get fpcr and fpsr */
+       mrs x0, FPSR
+       mrs x1, FPCR
+/* Push fpcr and fpsr */
+       stp x0,         x1,     [sp, #-0x10]!
+.endm
+
+/* Must match inverse order of push_interrupt_context */
+.macro pop_interrupt_context
+/* Pop fpcr and fpsr */
+       ldp x0,         x1,     [sp], #0x10
+/* Restore fpcr and fpsr */
+       msr FPCR, x1
+       msr FPSR, x0
+/* Pop pc and spsr */
+       ldp x0,         x1,     [sp], #0x10
+/* Restore exception LR for PC and spsr */
+       msr SPSR_EL1, x1
+       msr ELR_EL1, x0
+/* Pop q0-q31 */
+       ldp q30,        q31,    [sp], #0x20
+       ldp q28,        q29,    [sp], #0x20
+       ldp q26,        q27,    [sp], #0x20
+       ldp q24,        q25,    [sp], #0x20
+       ldp q22,        q23,    [sp], #0x20
+       ldp q20,        q21,    [sp], #0x20
+       ldp q18,        q19,    [sp], #0x20
+       ldp q16,        q17,    [sp], #0x20
+       ldp q14,        q15,    [sp], #0x20
+       ldp q12,        q13,    [sp], #0x20
+       ldp q10,        q11,    [sp], #0x20
+       ldp q8,         q9,     [sp], #0x20
+       ldp q6,         q7,     [sp], #0x20
+       ldp q4,         q5,     [sp], #0x20
+       ldp q2,         q3,     [sp], #0x20
+       ldp q0,         q1,     [sp], #0x20
+/* Pop x1-x21 */
+       ldp x20,        x21,    [sp], #0x10
+       ldp x18,        x19,    [sp], #0x10
+       ldp x16,        x17,    [sp], #0x10
+       ldp x14,        x15,    [sp], #0x10
+       ldp x12,        x13,    [sp], #0x10
+       ldp x10,        x11,    [sp], #0x10
+       ldp x8,         x9,     [sp], #0x10
+       ldp x6,         x7,     [sp], #0x10
+       ldp x4,         x5,     [sp], #0x10
+       ldp x2,         x3,     [sp], #0x10
+       ldp lr,         x1,     [sp], #0x10
+/* Must clear reservations here to ensure consistency with atomic operations */
+       clrex
+.endm
+
 /** @} */
 
 #endif /* _RTEMS_ASM_H */
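
[Editorial note, not part of the patch.] For readers tracing offsets, a
struct that mirrors the frame built by push_interrupt_context, ascending
from SP; the type is illustrative only and is not used anywhere:

    #include <stdint.h>

    typedef struct {
      uint64_t fpsr, fpcr;   /* SP + 0x000: pushed last, popped first */
      uint64_t elr, spsr;    /* SP + 0x010: return PC and PSTATE */
      uint8_t  q[32][16];    /* SP + 0x020: q0-q31, 0x200 bytes */
      uint64_t x[22];        /* SP + 0x220: lr plus x1-x21, 0xb0 bytes */
    } interrupt_context_sketch; /* sizeof == 0x2d0 */

Within the q and x regions the pairs sit in the reverse of the push order
shown in the macro, since each stp pre-decrements SP.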
diff --git a/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h b/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
index 316079a6cd..eb2206ffc7 100644
--- a/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
+++ b/cpukit/score/cpu/aarch64/include/rtems/score/cpu.h
@@ -433,6 +433,10 @@ typedef enum {
 #define VECTOR_ENTRY_SIZE 0x80
 void _AArch64_Exception_interrupt_no_nest( void );
 void _AArch64_Exception_interrupt_nest( void );
+void _AArch64_Perform_Thread_Dispatch( void );
+void _AArch64_Exception_synchronous_map( void );
+void _AArch64_push_exception_context_start( void );
+void _AArch64_push_exception_context_finish( void );
 static inline void* AArch64_set_exception_handler(
   AArch64_symbolic_exception_name exception,
   void (*handler)(void)
@@ -545,6 +549,7 @@ typedef struct {
 void _CPU_Exception_frame_print( const CPU_Exception_frame *frame );
 
 void _AArch64_Exception_default( CPU_Exception_frame *frame );
+void AArch64_map_exception( uint64_t syndrome );
 
 /** Type that can store a 32-bit integer or a pointer. */
 typedef uintptr_t CPU_Uint32ptr;
diff --git a/spec/build/bsps/aarch64/grp.yml b/spec/build/bsps/aarch64/grp.yml
index a6bc0d5a31..24967d4841 100644
--- a/spec/build/bsps/aarch64/grp.yml
+++ b/spec/build/bsps/aarch64/grp.yml
@@ -23,7 +23,9 @@ install:
   source:
   - bsps/aarch64/shared/start/linkcmds.base
 ldflags: []
-links: []
+links:
+- role: build-dependency
+  uid: objexceptions
 type: build
 use-after: []
 use-before: []
diff --git a/spec/build/bsps/aarch64/objexceptions.yml b/spec/build/bsps/aarch64/objexceptions.yml
new file mode 100644
index 0000000000..859d837271
--- /dev/null
+++ b/spec/build/bsps/aarch64/objexceptions.yml
@@ -0,0 +1,14 @@
+SPDX-License-Identifier: CC-BY-SA-4.0 OR BSD-2-Clause
+build-type: objects
+cflags: []
+copyrights:
+- Copyright (C) 2021 On-Line Applications Research (OAR)
+cppflags: []
+cxxflags: []
+enabled-by: true
+includes: []
+install: []
+links: []
+source:
+- bsps/aarch64/shared/exceptions.c
+type: build
diff --git a/spec/build/cpukit/cpuaarch64.yml b/spec/build/cpukit/cpuaarch64.yml
index 80562756ba..a984b3ee62 100644
--- a/spec/build/cpukit/cpuaarch64.yml
+++ b/spec/build/cpukit/cpuaarch64.yml
@@ -30,6 +30,7 @@ source:
 - cpukit/score/cpu/aarch64/aarch64-exception-default.c
 - cpukit/score/cpu/aarch64/aarch64-exception-frame-print.c
 - cpukit/score/cpu/aarch64/aarch64-exception-interrupt.S
+- cpukit/score/cpu/aarch64/aarch64-exception-map.S
 - cpukit/score/cpu/aarch64/aarch64-thread-idle.c
 - cpukit/score/cpu/aarch64/cpu.c
 - cpukit/score/cpu/aarch64/cpu_asm.S
-- 
2.20.1
