---
 freebsd/sys/arm64/arm64/in_cksum.c            | 243 +++++++
 freebsd/sys/arm64/include/machine/armreg.h    | 665 ++++++++++++++++++
 freebsd/sys/arm64/include/machine/cpu.h       | 203 ++++++
 freebsd/sys/arm64/include/machine/cpufunc.h   | 153 ++++
 freebsd/sys/arm64/include/machine/in_cksum.h  |  83 +++
 .../ck/include/gcc/aarch64/ck_pr_llsc.h       | 352 +++++++++
 .../ck/include/gcc/aarch64/ck_pr_lse.h        | 298 ++++++++
 libbsd.py                                     |  21 +
 rtemsbsd/include/machine/frame.h              |   1 +
 9 files changed, 2019 insertions(+)
 create mode 100644 freebsd/sys/arm64/arm64/in_cksum.c
 create mode 100644 freebsd/sys/arm64/include/machine/armreg.h
 create mode 100644 freebsd/sys/arm64/include/machine/cpu.h
 create mode 100644 freebsd/sys/arm64/include/machine/cpufunc.h
 create mode 100644 freebsd/sys/arm64/include/machine/in_cksum.h
 create mode 100644 freebsd/sys/contrib/ck/include/gcc/aarch64/ck_pr_llsc.h
 create mode 100644 freebsd/sys/contrib/ck/include/gcc/aarch64/ck_pr_lse.h
 create mode 100644 rtemsbsd/include/machine/frame.h

diff --git a/freebsd/sys/arm64/arm64/in_cksum.c b/freebsd/sys/arm64/arm64/in_cksum.c
new file mode 100644
index 00000000..9f92f2ff
--- /dev/null
+++ b/freebsd/sys/arm64/arm64/in_cksum.c
@@ -0,0 +1,243 @@
+#include <machine/rtems-bsd-kernel-space.h>
+
+/* $NetBSD: in_cksum.c,v 1.7 1997/09/02 13:18:15 thorpej Exp $ */
+
+/*-
+ * Copyright (c) 1988, 1992, 1993
+ *     The Regents of the University of California.  All rights reserved.
+ * Copyright (c) 1996
+ *     Matt Thomas <m...@3am-software.com>
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. All advertising materials mentioning features or use of this software
+ *    must display the following acknowledgement:
+ *     This product includes software developed by the University of
+ *     California, Berkeley and its contributors.
+ * 4. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *     @(#)in_cksum.c  8.1 (Berkeley) 6/10/93
+ */
+
+#include <sys/cdefs.h>                 /* RCS ID & Copyright macro defns */
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/mbuf.h>
+#include <sys/systm.h>
+#include <netinet/in_systm.h>
+#include <netinet/in.h>
+#include <netinet/ip.h>
+#include <machine/in_cksum.h>
+
+/*
+ * Checksum routine for Internet Protocol family headers
+ *    (Portable Alpha version).
+ *
+ * This routine is very heavily used in the network
+ * code and should be modified for each CPU to be as fast as possible.
+ */
+
+#define ADDCARRY(x)  (x > 65535 ? x -= 65535 : x)
+#define REDUCE32                                                         \
+    {                                                                    \
+       q_util.q = sum;                                                   \
+       sum = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3];      \
+    }
+#define REDUCE16                                                         \
+    {                                                                    \
+       q_util.q = sum;                                                   \
+       l_util.l = q_util.s[0] + q_util.s[1] + q_util.s[2] + q_util.s[3]; \
+       sum = l_util.s[0] + l_util.s[1];                                  \
+       ADDCARRY(sum);                                                    \
+    }
+
+static const u_int32_t in_masks[] = {
+       /*0 bytes*/ /*1 byte*/  /*2 bytes*/ /*3 bytes*/
+       0x00000000, 0x000000FF, 0x0000FFFF, 0x00FFFFFF, /* offset 0 */
+       0x00000000, 0x0000FF00, 0x00FFFF00, 0xFFFFFF00, /* offset 1 */
+       0x00000000, 0x00FF0000, 0xFFFF0000, 0xFFFF0000, /* offset 2 */
+       0x00000000, 0xFF000000, 0xFF000000, 0xFF000000, /* offset 3 */
+};
+
+union l_util {
+       u_int16_t s[2];
+       u_int32_t l;
+};
+union q_util {
+       u_int16_t s[4];
+       u_int32_t l[2];
+       u_int64_t q;
+};
+
+static u_int64_t
+in_cksumdata(const void *buf, int len)
+{
+       const u_int32_t *lw = (const u_int32_t *) buf;
+       u_int64_t sum = 0;
+       u_int64_t prefilled;
+       int offset;
+       union q_util q_util;
+
+       if ((3 & (long) lw) == 0 && len == 20) {
+            sum = (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3] + lw[4];
+            REDUCE32;
+            return sum;
+       }
+
+       if ((offset = 3 & (long) lw) != 0) {
+               const u_int32_t *masks = in_masks + (offset << 2);
+               lw = (u_int32_t *) (((long) lw) - offset);
+               sum = *lw++ & masks[len >= 3 ? 3 : len];
+               len -= 4 - offset;
+               if (len <= 0) {
+                       REDUCE32;
+                       return sum;
+               }
+       }
+#if 0
+       /*
+        * Force to cache line boundary.
+        */
+       offset = 32 - (0x1f & (long) lw);
+       if (offset < 32 && len > offset) {
+               len -= offset;
+               if (4 & offset) {
+                       sum += (u_int64_t) lw[0];
+                       lw += 1;
+               }
+               if (8 & offset) {
+                       sum += (u_int64_t) lw[0] + lw[1];
+                       lw += 2;
+               }
+               if (16 & offset) {
+                       sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+                       lw += 4;
+               }
+       }
+#endif
+       /*
+        * Access the prefill to start the load of the next cache line,
+        * then add the current cache line.  Save the result of the
+        * prefill for the next loop iteration.
+        */
+       prefilled = lw[0];
+       while ((len -= 32) >= 4) {
+               u_int64_t prefilling = lw[8];
+               sum += prefilled + lw[1] + lw[2] + lw[3]
+                       + lw[4] + lw[5] + lw[6] + lw[7];
+               lw += 8;
+               prefilled = prefilling;
+       }
+       if (len >= 0) {
+               sum += prefilled + lw[1] + lw[2] + lw[3]
+                       + lw[4] + lw[5] + lw[6] + lw[7];
+               lw += 8;
+       } else {
+               len += 32;
+       }
+       while ((len -= 16) >= 0) {
+               sum += (u_int64_t) lw[0] + lw[1] + lw[2] + lw[3];
+               lw += 4;
+       }
+       len += 16;
+       while ((len -= 4) >= 0) {
+               sum += (u_int64_t) *lw++;
+       }
+       len += 4;
+       if (len > 0)
+               sum += (u_int64_t) (in_masks[len] & *lw);
+       REDUCE32;
+       return sum;
+}
+
+u_short
+in_addword(u_short a, u_short b)
+{
+       u_int64_t sum = a + b;
+
+       ADDCARRY(sum);
+       return (sum);
+}
+
+u_short
+in_pseudo(u_int32_t a, u_int32_t b, u_int32_t c)
+{
+       u_int64_t sum;
+       union q_util q_util;
+       union l_util l_util;
+                   
+       sum = (u_int64_t) a + b + c;
+       REDUCE16;
+       return (sum);
+}
+
+u_short
+in_cksum_skip(struct mbuf *m, int len, int skip)
+{
+       u_int64_t sum = 0;
+       int mlen = 0;
+       int clen = 0;
+       caddr_t addr;
+       union q_util q_util;
+       union l_util l_util;
+
+        len -= skip;
+        for (; skip && m; m = m->m_next) {
+                if (m->m_len > skip) {
+                        mlen = m->m_len - skip;
+                       addr = mtod(m, caddr_t) + skip;
+                        goto skip_start;
+                } else {
+                        skip -= m->m_len;
+                }
+        }
+
+       for (; m && len; m = m->m_next) {
+               if (m->m_len == 0)
+                       continue;
+               mlen = m->m_len;
+               addr = mtod(m, caddr_t);
+skip_start:
+               if (len < mlen)
+                       mlen = len;
+               if ((clen ^ (long) addr) & 1)
+                   sum += in_cksumdata(addr, mlen) << 8;
+               else
+                   sum += in_cksumdata(addr, mlen);
+
+               clen += mlen;
+               len -= mlen;
+       }
+       REDUCE16;
+       return (~sum & 0xffff);
+}
+
+u_int in_cksum_hdr(const struct ip *ip)
+{
+    u_int64_t sum = in_cksumdata(ip, sizeof(struct ip));
+    union q_util q_util;
+    union l_util l_util;
+    REDUCE16;
+    return (~sum & 0xffff);
+}
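
For reviewers: the REDUCE32/REDUCE16 macros above implement deferred-carry
folding -- 32-bit words are summed into a 64-bit accumulator, and the carries
that pile up in the high half are folded back in afterwards (ones'-complement
end-around carry). A minimal standalone sketch of the same idea, using a
hypothetical aligned buffer (not part of the patch):

    #include <stdint.h>
    #include <stddef.h>

    static uint16_t
    cksum_sketch(const uint32_t *words, size_t nwords)
    {
            uint64_t sum = 0;
            size_t i;

            /* Sum 32-bit words; carries accumulate in the high half. */
            for (i = 0; i < nwords; i++)
                    sum += words[i];

            /* Fold 64 -> 32 bits, as REDUCE32 does via union q_util. */
            sum = (sum >> 32) + (sum & 0xffffffff);
            sum = (sum >> 32) + (sum & 0xffffffff);

            /* Fold 32 -> 16 bits with end-around carry (REDUCE16/ADDCARRY). */
            while (sum >> 16)
                    sum = (sum >> 16) + (sum & 0xffff);

            return ((uint16_t)~sum);
    }

The (clen ^ (long) addr) & 1 test in in_cksum_skip() handles mbuf chunks that
start at an odd byte offset: the partial sum of such a chunk is shifted left
by 8 bits, which keeps the ones'-complement sum byte-order consistent across
chunk boundaries.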
diff --git a/freebsd/sys/arm64/include/machine/armreg.h b/freebsd/sys/arm64/include/machine/armreg.h
new file mode 100644
index 00000000..d528f1af
--- /dev/null
+++ b/freebsd/sys/arm64/include/machine/armreg.h
@@ -0,0 +1,665 @@
+/*-
+ * Copyright (c) 2013, 2014 Andrew Turner
+ * Copyright (c) 2015 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_ARMREG_H_
+#define        _MACHINE_ARMREG_H_
+
+#define        INSN_SIZE               4
+
+#define        READ_SPECIALREG(reg)                                            \
+({     uint64_t val;                                                   \
+       __asm __volatile("mrs   %0, " __STRING(reg) : "=&r" (val));     \
+       val;                                                            \
+})
+#define        WRITE_SPECIALREG(reg, val)                                      \
+       __asm __volatile("msr   " __STRING(reg) ", %0" : : "r"((uint64_t)val))
+
+#define        UL(x)   UINT64_C(x)
+
+/* CNTHCTL_EL2 - Counter-timer Hypervisor Control register */
+#define        CNTHCTL_EVNTI_MASK      (0xf << 4) /* Bit to trigger event stream */
+#define        CNTHCTL_EVNTDIR         (1 << 3) /* Control transition trigger bit */
+#define        CNTHCTL_EVNTEN          (1 << 2) /* Enable event stream */
+#define        CNTHCTL_EL1PCEN         (1 << 1) /* Allow EL0/1 physical timer access */
+#define        CNTHCTL_EL1PCTEN        (1 << 0) /* Allow EL0/1 physical counter access */
+
+/* CPACR_EL1 */
+#define        CPACR_FPEN_MASK         (0x3 << 20)
+#define         CPACR_FPEN_TRAP_ALL1   (0x0 << 20) /* Traps from EL0 and EL1 */
+#define         CPACR_FPEN_TRAP_EL0    (0x1 << 20) /* Traps from EL0 */
+#define         CPACR_FPEN_TRAP_ALL2   (0x2 << 20) /* Traps from EL0 and EL1 */
+#define         CPACR_FPEN_TRAP_NONE   (0x3 << 20) /* No traps */
+#define        CPACR_TTA               (0x1 << 28)
+
+/* CTR_EL0 - Cache Type Register */
+#define        CTR_DLINE_SHIFT         16
+#define        CTR_DLINE_MASK          (0xf << CTR_DLINE_SHIFT)
+#define        CTR_DLINE_SIZE(reg)     (((reg) & CTR_DLINE_MASK) >> CTR_DLINE_SHIFT)
+#define        CTR_ILINE_SHIFT         0
+#define        CTR_ILINE_MASK          (0xf << CTR_ILINE_SHIFT)
+#define        CTR_ILINE_SIZE(reg)     (((reg) & CTR_ILINE_MASK) >> CTR_ILINE_SHIFT)
+
+/* DAIF - Interrupt Mask Bits */
+#define        DAIF_D_MASKED           (1 << 9)
+#define        DAIF_A_MASKED           (1 << 8)
+#define        DAIF_I_MASKED           (1 << 7)
+#define        DAIF_F_MASKED           (1 << 6)
+
+/* DCZID_EL0 - Data Cache Zero ID register */
+#define DCZID_DZP              (1 << 4) /* DC ZVA prohibited if non-0 */
+#define DCZID_BS_SHIFT         0
+#define DCZID_BS_MASK          (0xf << DCZID_BS_SHIFT)
+#define        DCZID_BS_SIZE(reg)      (((reg) & DCZID_BS_MASK) >> DCZID_BS_SHIFT)
+
+/* ESR_ELx */
+#define        ESR_ELx_ISS_MASK        0x00ffffff
+#define         ISS_INSN_FnV           (0x01 << 10)
+#define         ISS_INSN_EA            (0x01 << 9)
+#define         ISS_INSN_S1PTW         (0x01 << 7)
+#define         ISS_INSN_IFSC_MASK     (0x1f << 0)
+#define         ISS_DATA_ISV           (0x01 << 24)
+#define         ISS_DATA_SAS_MASK      (0x03 << 22)
+#define         ISS_DATA_SSE           (0x01 << 21)
+#define         ISS_DATA_SRT_MASK      (0x1f << 16)
+#define         ISS_DATA_SF            (0x01 << 15)
+#define         ISS_DATA_AR            (0x01 << 14)
+#define         ISS_DATA_FnV           (0x01 << 10)
+#define         ISS_DATA_EA            (0x01 << 9)
+#define         ISS_DATA_CM            (0x01 << 8)
+#define         ISS_DATA_S1PTW         (0x01 << 7)
+#define         ISS_DATA_WnR           (0x01 << 6)
+#define         ISS_DATA_DFSC_MASK     (0x3f << 0)
+#define         ISS_DATA_DFSC_ASF_L0   (0x00 << 0)
+#define         ISS_DATA_DFSC_ASF_L1   (0x01 << 0)
+#define         ISS_DATA_DFSC_ASF_L2   (0x02 << 0)
+#define         ISS_DATA_DFSC_ASF_L3   (0x03 << 0)
+#define         ISS_DATA_DFSC_TF_L0    (0x04 << 0)
+#define         ISS_DATA_DFSC_TF_L1    (0x05 << 0)
+#define         ISS_DATA_DFSC_TF_L2    (0x06 << 0)
+#define         ISS_DATA_DFSC_TF_L3    (0x07 << 0)
+#define         ISS_DATA_DFSC_AFF_L1   (0x09 << 0)
+#define         ISS_DATA_DFSC_AFF_L2   (0x0a << 0)
+#define         ISS_DATA_DFSC_AFF_L3   (0x0b << 0)
+#define         ISS_DATA_DFSC_PF_L1    (0x0d << 0)
+#define         ISS_DATA_DFSC_PF_L2    (0x0e << 0)
+#define         ISS_DATA_DFSC_PF_L3    (0x0f << 0)
+#define         ISS_DATA_DFSC_EXT      (0x10 << 0)
+#define         ISS_DATA_DFSC_EXT_L0   (0x14 << 0)
+#define         ISS_DATA_DFSC_EXT_L1   (0x15 << 0)
+#define         ISS_DATA_DFSC_EXT_L2   (0x16 << 0)
+#define         ISS_DATA_DFSC_EXT_L3   (0x17 << 0)
+#define         ISS_DATA_DFSC_ECC      (0x18 << 0)
+#define         ISS_DATA_DFSC_ECC_L0   (0x1c << 0)
+#define         ISS_DATA_DFSC_ECC_L1   (0x1d << 0)
+#define         ISS_DATA_DFSC_ECC_L2   (0x1e << 0)
+#define         ISS_DATA_DFSC_ECC_L3   (0x1f << 0)
+#define         ISS_DATA_DFSC_ALIGN    (0x21 << 0)
+#define         ISS_DATA_DFSC_TLB_CONFLICT (0x30 << 0)
+#define        ESR_ELx_IL              (0x01 << 25)
+#define        ESR_ELx_EC_SHIFT        26
+#define        ESR_ELx_EC_MASK         (0x3f << 26)
+#define        ESR_ELx_EXCEPTION(esr)  (((esr) & ESR_ELx_EC_MASK) >> ESR_ELx_EC_SHIFT)
+#define         EXCP_UNKNOWN           0x00    /* Unknown exception */
+#define         EXCP_FP_SIMD           0x07    /* VFP/SIMD trap */
+#define         EXCP_ILL_STATE         0x0e    /* Illegal execution state */
+#define         EXCP_SVC32             0x11    /* SVC trap for AArch32 */
+#define         EXCP_SVC64             0x15    /* SVC trap for AArch64 */
+#define         EXCP_MSR               0x18    /* MSR/MRS trap */
+#define         EXCP_INSN_ABORT_L      0x20    /* Instruction abort, from lower EL */
+#define         EXCP_INSN_ABORT        0x21    /* Instruction abort, from same EL */
+#define         EXCP_PC_ALIGN          0x22    /* PC alignment fault */
+#define         EXCP_DATA_ABORT_L      0x24    /* Data abort, from lower EL */
+#define         EXCP_DATA_ABORT        0x25    /* Data abort, from same EL */ 
+#define         EXCP_SP_ALIGN          0x26    /* SP alignment fault */
+#define         EXCP_TRAP_FP           0x2c    /* Trapped FP exception */
+#define         EXCP_SERROR            0x2f    /* SError interrupt */
+#define         EXCP_SOFTSTP_EL0       0x32    /* Software Step, from lower EL */
+#define         EXCP_SOFTSTP_EL1       0x33    /* Software Step, from same EL */
+#define         EXCP_WATCHPT_EL1       0x35    /* Watchpoint, from same EL */
+#define         EXCP_BRK               0x3c    /* Breakpoint */
+
+/* ICC_CTLR_EL1 */
+#define        ICC_CTLR_EL1_EOIMODE    (1U << 1)
+
+/* ICC_IAR1_EL1 */
+#define        ICC_IAR1_EL1_SPUR       (0x03ff)
+
+/* ICC_IGRPEN0_EL1 */
+#define        ICC_IGRPEN0_EL1_EN      (1U << 0)
+
+/* ICC_PMR_EL1 */
+#define        ICC_PMR_EL1_PRIO_MASK   (0xFFUL)
+
+/* ICC_SGI1R_EL1 */
+#define        ICC_SGI1R_EL1_TL_MASK           0xffffUL
+#define        ICC_SGI1R_EL1_AFF1_SHIFT        16
+#define        ICC_SGI1R_EL1_SGIID_SHIFT       24
+#define        ICC_SGI1R_EL1_AFF2_SHIFT        32
+#define        ICC_SGI1R_EL1_AFF3_SHIFT        48
+#define        ICC_SGI1R_EL1_SGIID_MASK        0xfUL
+#define        ICC_SGI1R_EL1_IRM               (0x1UL << 40)
+
+/* ICC_SRE_EL1 */
+#define        ICC_SRE_EL1_SRE         (1U << 0)
+
+/* ICC_SRE_EL2 */
+#define        ICC_SRE_EL2_SRE         (1U << 0)
+#define        ICC_SRE_EL2_EN          (1U << 3)
+
+/* ID_AA64DFR0_EL1 */
+#define        ID_AA64DFR0_MASK                UL(0x0000000ff0f0ffff)
+#define        ID_AA64DFR0_DebugVer_SHIFT      0
+#define        ID_AA64DFR0_DebugVer_MASK       (UL(0xf) << ID_AA64DFR0_DebugVer_SHIFT)
+#define        ID_AA64DFR0_DebugVer(x)         ((x) & ID_AA64DFR0_DebugVer_MASK)
+#define         ID_AA64DFR0_DebugVer_8         (UL(0x6) << ID_AA64DFR0_DebugVer_SHIFT)
+#define         ID_AA64DFR0_DebugVer_8_VHE     (UL(0x7) << ID_AA64DFR0_DebugVer_SHIFT)
+#define         ID_AA64DFR0_DebugVer_8_2       (UL(0x8) << ID_AA64DFR0_DebugVer_SHIFT)
+#define        ID_AA64DFR0_TraceVer_SHIFT      4
+#define        ID_AA64DFR0_TraceVer_MASK       (UL(0xf) << ID_AA64DFR0_TraceVer_SHIFT)
+#define        ID_AA64DFR0_TraceVer(x)         ((x) & ID_AA64DFR0_TraceVer_MASK)
+#define         ID_AA64DFR0_TraceVer_NONE      (UL(0x0) << ID_AA64DFR0_TraceVer_SHIFT)
+#define         ID_AA64DFR0_TraceVer_IMPL      (UL(0x1) << ID_AA64DFR0_TraceVer_SHIFT)
+#define        ID_AA64DFR0_PMUVer_SHIFT        8
+#define        ID_AA64DFR0_PMUVer_MASK         (UL(0xf) << ID_AA64DFR0_PMUVer_SHIFT)
+#define        ID_AA64DFR0_PMUVer(x)           ((x) & ID_AA64DFR0_PMUVer_MASK)
+#define         ID_AA64DFR0_PMUVer_NONE        (UL(0x0) << ID_AA64DFR0_PMUVer_SHIFT)
+#define         ID_AA64DFR0_PMUVer_3           (UL(0x1) << ID_AA64DFR0_PMUVer_SHIFT)
+#define         ID_AA64DFR0_PMUVer_3_1         (UL(0x4) << ID_AA64DFR0_PMUVer_SHIFT)
+#define         ID_AA64DFR0_PMUVer_IMPL        (UL(0xf) << ID_AA64DFR0_PMUVer_SHIFT)
+#define        ID_AA64DFR0_BRPs_SHIFT          12
+#define        ID_AA64DFR0_BRPs_MASK           (UL(0xf) << ID_AA64DFR0_BRPs_SHIFT)
+#define        ID_AA64DFR0_BRPs(x)             \
+    ((((x) >> ID_AA64DFR0_BRPs_SHIFT) & 0xf) + 1)
+#define        ID_AA64DFR0_WRPs_SHIFT          20
+#define        ID_AA64DFR0_WRPs_MASK           (UL(0xf) << ID_AA64DFR0_WRPs_SHIFT)
+#define        ID_AA64DFR0_WRPs(x)             \
+    ((((x) >> ID_AA64DFR0_WRPs_SHIFT) & 0xf) + 1)
+#define        ID_AA64DFR0_CTX_CMPs_SHIFT      28
+#define        ID_AA64DFR0_CTX_CMPs_MASK       (UL(0xf) << ID_AA64DFR0_CTX_CMPs_SHIFT)
+#define        ID_AA64DFR0_CTX_CMPs(x)         \
+    ((((x) >> ID_AA64DFR0_CTX_CMPs_SHIFT) & 0xf) + 1)
+#define        ID_AA64DFR0_PMSVer_SHIFT        32
+#define        ID_AA64DFR0_PMSVer_MASK         (UL(0xf) << ID_AA64DFR0_PMSVer_SHIFT)
+#define        ID_AA64DFR0_PMSVer(x)           ((x) & ID_AA64DFR0_PMSVer_MASK)
+#define         ID_AA64DFR0_PMSVer_NONE        (UL(0x0) << ID_AA64DFR0_PMSVer_SHIFT)
+#define         ID_AA64DFR0_PMSVer_V1          (UL(0x1) << ID_AA64DFR0_PMSVer_SHIFT)
+
+/* ID_AA64ISAR0_EL1 */
+#define        ID_AA64ISAR0_MASK               UL(0x0000fffff0fffff0)
+#define        ID_AA64ISAR0_AES_SHIFT          4
+#define        ID_AA64ISAR0_AES_MASK           (UL(0xf) << ID_AA64ISAR0_AES_SHIFT)
+#define        ID_AA64ISAR0_AES(x)             ((x) & ID_AA64ISAR0_AES_MASK)
+#define         ID_AA64ISAR0_AES_NONE          (UL(0x0) << ID_AA64ISAR0_AES_SHIFT)
+#define         ID_AA64ISAR0_AES_BASE          (UL(0x1) << ID_AA64ISAR0_AES_SHIFT)
+#define         ID_AA64ISAR0_AES_PMULL         (UL(0x2) << ID_AA64ISAR0_AES_SHIFT)
+#define        ID_AA64ISAR0_SHA1_SHIFT         8
+#define        ID_AA64ISAR0_SHA1_MASK          (UL(0xf) << ID_AA64ISAR0_SHA1_SHIFT)
+#define        ID_AA64ISAR0_SHA1(x)            ((x) & ID_AA64ISAR0_SHA1_MASK)
+#define         ID_AA64ISAR0_SHA1_NONE         (UL(0x0) << ID_AA64ISAR0_SHA1_SHIFT)
+#define         ID_AA64ISAR0_SHA1_BASE         (UL(0x1) << ID_AA64ISAR0_SHA1_SHIFT)
+#define        ID_AA64ISAR0_SHA2_SHIFT         12
+#define        ID_AA64ISAR0_SHA2_MASK          (UL(0xf) << ID_AA64ISAR0_SHA2_SHIFT)
+#define        ID_AA64ISAR0_SHA2(x)            ((x) & ID_AA64ISAR0_SHA2_MASK)
+#define         ID_AA64ISAR0_SHA2_NONE         (UL(0x0) << ID_AA64ISAR0_SHA2_SHIFT)
+#define         ID_AA64ISAR0_SHA2_BASE         (UL(0x1) << ID_AA64ISAR0_SHA2_SHIFT)
+#define         ID_AA64ISAR0_SHA2_512          (UL(0x2) << ID_AA64ISAR0_SHA2_SHIFT)
+#define        ID_AA64ISAR0_CRC32_SHIFT        16
+#define        ID_AA64ISAR0_CRC32_MASK         (UL(0xf) << ID_AA64ISAR0_CRC32_SHIFT)
+#define        ID_AA64ISAR0_CRC32(x)           ((x) & ID_AA64ISAR0_CRC32_MASK)
+#define         ID_AA64ISAR0_CRC32_NONE        (UL(0x0) << ID_AA64ISAR0_CRC32_SHIFT)
+#define         ID_AA64ISAR0_CRC32_BASE        (UL(0x1) << ID_AA64ISAR0_CRC32_SHIFT)
+#define        ID_AA64ISAR0_Atomic_SHIFT       20
+#define        ID_AA64ISAR0_Atomic_MASK        (UL(0xf) << ID_AA64ISAR0_Atomic_SHIFT)
+#define        ID_AA64ISAR0_Atomic(x)          ((x) & ID_AA64ISAR0_Atomic_MASK)
+#define         ID_AA64ISAR0_Atomic_NONE       (UL(0x0) << ID_AA64ISAR0_Atomic_SHIFT)
+#define         ID_AA64ISAR0_Atomic_IMPL       (UL(0x2) << ID_AA64ISAR0_Atomic_SHIFT)
+#define        ID_AA64ISAR0_RDM_SHIFT          28
+#define        ID_AA64ISAR0_RDM_MASK           (UL(0xf) << ID_AA64ISAR0_RDM_SHIFT)
+#define        ID_AA64ISAR0_RDM(x)             ((x) & ID_AA64ISAR0_RDM_MASK)
+#define         ID_AA64ISAR0_RDM_NONE          (UL(0x0) << ID_AA64ISAR0_RDM_SHIFT)
+#define         ID_AA64ISAR0_RDM_IMPL          (UL(0x1) << ID_AA64ISAR0_RDM_SHIFT)
+#define        ID_AA64ISAR0_SHA3_SHIFT         32
+#define        ID_AA64ISAR0_SHA3_MASK          (UL(0xf) << ID_AA64ISAR0_SHA3_SHIFT)
+#define        ID_AA64ISAR0_SHA3(x)            ((x) & ID_AA64ISAR0_SHA3_MASK)
+#define         ID_AA64ISAR0_SHA3_NONE         (UL(0x0) << ID_AA64ISAR0_SHA3_SHIFT)
+#define         ID_AA64ISAR0_SHA3_IMPL         (UL(0x1) << ID_AA64ISAR0_SHA3_SHIFT)
+#define        ID_AA64ISAR0_SM3_SHIFT          36
+#define        ID_AA64ISAR0_SM3_MASK           (UL(0xf) << ID_AA64ISAR0_SM3_SHIFT)
+#define        ID_AA64ISAR0_SM3(x)             ((x) & ID_AA64ISAR0_SM3_MASK)
+#define         ID_AA64ISAR0_SM3_NONE          (UL(0x0) << ID_AA64ISAR0_SM3_SHIFT)
+#define         ID_AA64ISAR0_SM3_IMPL          (UL(0x1) << ID_AA64ISAR0_SM3_SHIFT)
+#define        ID_AA64ISAR0_SM4_SHIFT          40
+#define        ID_AA64ISAR0_SM4_MASK           (UL(0xf) << ID_AA64ISAR0_SM4_SHIFT)
+#define        ID_AA64ISAR0_SM4(x)             ((x) & ID_AA64ISAR0_SM4_MASK)
+#define         ID_AA64ISAR0_SM4_NONE          (UL(0x0) << ID_AA64ISAR0_SM4_SHIFT)
+#define         ID_AA64ISAR0_SM4_IMPL          (UL(0x1) << ID_AA64ISAR0_SM4_SHIFT)
+#define        ID_AA64ISAR0_DP_SHIFT           44
+#define        ID_AA64ISAR0_DP_MASK            (UL(0xf) << ID_AA64ISAR0_DP_SHIFT)
+#define        ID_AA64ISAR0_DP(x)              ((x) & ID_AA64ISAR0_DP_MASK)
+#define         ID_AA64ISAR0_DP_NONE           (UL(0x0) << ID_AA64ISAR0_DP_SHIFT)
+#define         ID_AA64ISAR0_DP_IMPL           (UL(0x1) << ID_AA64ISAR0_DP_SHIFT)
+
+/* ID_AA64ISAR1_EL1 */
+#define        ID_AA64ISAR1_MASK               UL(0x00000000ffffffff)
+#define        ID_AA64ISAR1_DPB_SHIFT          0
+#define        ID_AA64ISAR1_DPB_MASK           (UL(0xf) << ID_AA64ISAR1_DPB_SHIFT)
+#define        ID_AA64ISAR1_DPB(x)             ((x) & ID_AA64ISAR1_DPB_MASK)
+#define         ID_AA64ISAR1_DPB_NONE          (UL(0x0) << ID_AA64ISAR1_DPB_SHIFT)
+#define         ID_AA64ISAR1_DPB_IMPL          (UL(0x1) << ID_AA64ISAR1_DPB_SHIFT)
+#define        ID_AA64ISAR1_APA_SHIFT          4
+#define        ID_AA64ISAR1_APA_MASK           (UL(0xf) << ID_AA64ISAR1_APA_SHIFT)
+#define        ID_AA64ISAR1_APA(x)             ((x) & ID_AA64ISAR1_APA_MASK)
+#define         ID_AA64ISAR1_APA_NONE          (UL(0x0) << ID_AA64ISAR1_APA_SHIFT)
+#define         ID_AA64ISAR1_APA_IMPL          (UL(0x1) << ID_AA64ISAR1_APA_SHIFT)
+#define        ID_AA64ISAR1_API_SHIFT          8
+#define        ID_AA64ISAR1_API_MASK           (UL(0xf) << ID_AA64ISAR1_API_SHIFT)
+#define        ID_AA64ISAR1_API(x)             ((x) & ID_AA64ISAR1_API_MASK)
+#define         ID_AA64ISAR1_API_NONE          (UL(0x0) << ID_AA64ISAR1_API_SHIFT)
+#define         ID_AA64ISAR1_API_IMPL          (UL(0x1) << ID_AA64ISAR1_API_SHIFT)
+#define        ID_AA64ISAR1_JSCVT_SHIFT        12
+#define        ID_AA64ISAR1_JSCVT_MASK         (UL(0xf) << ID_AA64ISAR1_JSCVT_SHIFT)
+#define        ID_AA64ISAR1_JSCVT(x)           ((x) & ID_AA64ISAR1_JSCVT_MASK)
+#define         ID_AA64ISAR1_JSCVT_NONE        (UL(0x0) << ID_AA64ISAR1_JSCVT_SHIFT)
+#define         ID_AA64ISAR1_JSCVT_IMPL        (UL(0x1) << ID_AA64ISAR1_JSCVT_SHIFT)
+#define        ID_AA64ISAR1_FCMA_SHIFT         16
+#define        ID_AA64ISAR1_FCMA_MASK          (UL(0xf) << ID_AA64ISAR1_FCMA_SHIFT)
+#define        ID_AA64ISAR1_FCMA(x)            ((x) & ID_AA64ISAR1_FCMA_MASK)
+#define         ID_AA64ISAR1_FCMA_NONE         (UL(0x0) << ID_AA64ISAR1_FCMA_SHIFT)
+#define         ID_AA64ISAR1_FCMA_IMPL         (UL(0x1) << ID_AA64ISAR1_FCMA_SHIFT)
+#define        ID_AA64ISAR1_LRCPC_SHIFT        20
+#define        ID_AA64ISAR1_LRCPC_MASK         (UL(0xf) << ID_AA64ISAR1_LRCPC_SHIFT)
+#define        ID_AA64ISAR1_LRCPC(x)           ((x) & ID_AA64ISAR1_LRCPC_MASK)
+#define         ID_AA64ISAR1_LRCPC_NONE        (UL(0x0) << ID_AA64ISAR1_LRCPC_SHIFT)
+#define         ID_AA64ISAR1_LRCPC_IMPL        (UL(0x1) << ID_AA64ISAR1_LRCPC_SHIFT)
+#define        ID_AA64ISAR1_GPA_SHIFT          24
+#define        ID_AA64ISAR1_GPA_MASK           (UL(0xf) << ID_AA64ISAR1_GPA_SHIFT)
+#define        ID_AA64ISAR1_GPA(x)             ((x) & ID_AA64ISAR1_GPA_MASK)
+#define         ID_AA64ISAR1_GPA_NONE          (UL(0x0) << ID_AA64ISAR1_GPA_SHIFT)
+#define         ID_AA64ISAR1_GPA_IMPL          (UL(0x1) << ID_AA64ISAR1_GPA_SHIFT)
+#define        ID_AA64ISAR1_GPI_SHIFT          28
+#define        ID_AA64ISAR1_GPI_MASK           (UL(0xf) << ID_AA64ISAR1_GPI_SHIFT)
+#define        ID_AA64ISAR1_GPI(x)             ((x) & ID_AA64ISAR1_GPI_MASK)
+#define         ID_AA64ISAR1_GPI_NONE          (UL(0x0) << ID_AA64ISAR1_GPI_SHIFT)
+#define         ID_AA64ISAR1_GPI_IMPL          (UL(0x1) << ID_AA64ISAR1_GPI_SHIFT)
+
+/* ID_AA64MMFR0_EL1 */
+#define        ID_AA64MMFR0_MASK               UL(0x00000000ffffffff)
+#define        ID_AA64MMFR0_PARange_SHIFT      0
+#define        ID_AA64MMFR0_PARange_MASK       (UL(0xf) << ID_AA64MMFR0_PARange_SHIFT)
+#define        ID_AA64MMFR0_PARange(x)         ((x) & ID_AA64MMFR0_PARange_MASK)
+#define         ID_AA64MMFR0_PARange_4G        (UL(0x0) << ID_AA64MMFR0_PARange_SHIFT)
+#define         ID_AA64MMFR0_PARange_64G       (UL(0x1) << ID_AA64MMFR0_PARange_SHIFT)
+#define         ID_AA64MMFR0_PARange_1T        (UL(0x2) << ID_AA64MMFR0_PARange_SHIFT)
+#define         ID_AA64MMFR0_PARange_4T        (UL(0x3) << ID_AA64MMFR0_PARange_SHIFT)
+#define         ID_AA64MMFR0_PARange_16T       (UL(0x4) << ID_AA64MMFR0_PARange_SHIFT)
+#define         ID_AA64MMFR0_PARange_256T      (UL(0x5) << ID_AA64MMFR0_PARange_SHIFT)
+#define         ID_AA64MMFR0_PARange_4P        (UL(0x6) << ID_AA64MMFR0_PARange_SHIFT)
+#define        ID_AA64MMFR0_ASIDBits_SHIFT     4
+#define        ID_AA64MMFR0_ASIDBits_MASK      (UL(0xf) << ID_AA64MMFR0_ASIDBits_SHIFT)
+#define        ID_AA64MMFR0_ASIDBits(x)        ((x) & ID_AA64MMFR0_ASIDBits_MASK)
+#define         ID_AA64MMFR0_ASIDBits_8        (UL(0x0) << ID_AA64MMFR0_ASIDBits_SHIFT)
+#define         ID_AA64MMFR0_ASIDBits_16       (UL(0x2) << ID_AA64MMFR0_ASIDBits_SHIFT)
+#define        ID_AA64MMFR0_BigEnd_SHIFT       8
+#define        ID_AA64MMFR0_BigEnd_MASK        (UL(0xf) << ID_AA64MMFR0_BigEnd_SHIFT)
+#define        ID_AA64MMFR0_BigEnd(x)          ((x) & ID_AA64MMFR0_BigEnd_MASK)
+#define         ID_AA64MMFR0_BigEnd_FIXED      (UL(0x0) << ID_AA64MMFR0_BigEnd_SHIFT)
+#define         ID_AA64MMFR0_BigEnd_MIXED      (UL(0x1) << ID_AA64MMFR0_BigEnd_SHIFT)
+#define        ID_AA64MMFR0_SNSMem_SHIFT       12
+#define        ID_AA64MMFR0_SNSMem_MASK        (UL(0xf) << ID_AA64MMFR0_SNSMem_SHIFT)
+#define        ID_AA64MMFR0_SNSMem(x)          ((x) & ID_AA64MMFR0_SNSMem_MASK)
+#define         ID_AA64MMFR0_SNSMem_NONE       (UL(0x0) << ID_AA64MMFR0_SNSMem_SHIFT)
+#define         ID_AA64MMFR0_SNSMem_DISTINCT   (UL(0x1) << ID_AA64MMFR0_SNSMem_SHIFT)
+#define        ID_AA64MMFR0_BigEndEL0_SHIFT    16
+#define        ID_AA64MMFR0_BigEndEL0_MASK     (UL(0xf) << ID_AA64MMFR0_BigEndEL0_SHIFT)
+#define        ID_AA64MMFR0_BigEndEL0(x)       ((x) & ID_AA64MMFR0_BigEndEL0_MASK)
+#define         ID_AA64MMFR0_BigEndEL0_FIXED   (UL(0x0) << ID_AA64MMFR0_BigEndEL0_SHIFT)
+#define         ID_AA64MMFR0_BigEndEL0_MIXED   (UL(0x1) << ID_AA64MMFR0_BigEndEL0_SHIFT)
+#define        ID_AA64MMFR0_TGran16_SHIFT      20
+#define        ID_AA64MMFR0_TGran16_MASK       (UL(0xf) << ID_AA64MMFR0_TGran16_SHIFT)
+#define        ID_AA64MMFR0_TGran16(x)         ((x) & ID_AA64MMFR0_TGran16_MASK)
+#define         ID_AA64MMFR0_TGran16_NONE      (UL(0x0) << ID_AA64MMFR0_TGran16_SHIFT)
+#define         ID_AA64MMFR0_TGran16_IMPL      (UL(0x1) << ID_AA64MMFR0_TGran16_SHIFT)
+#define        ID_AA64MMFR0_TGran64_SHIFT      24
+#define        ID_AA64MMFR0_TGran64_MASK       (UL(0xf) << ID_AA64MMFR0_TGran64_SHIFT)
+#define        ID_AA64MMFR0_TGran64(x)         ((x) & ID_AA64MMFR0_TGran64_MASK)
+#define         ID_AA64MMFR0_TGran64_IMPL      (UL(0x0) << ID_AA64MMFR0_TGran64_SHIFT)
+#define         ID_AA64MMFR0_TGran64_NONE      (UL(0xf) << ID_AA64MMFR0_TGran64_SHIFT)
+#define        ID_AA64MMFR0_TGran4_SHIFT       28
+#define        ID_AA64MMFR0_TGran4_MASK        (UL(0xf) << ID_AA64MMFR0_TGran4_SHIFT)
+#define        ID_AA64MMFR0_TGran4(x)          ((x) & ID_AA64MMFR0_TGran4_MASK)
+#define         ID_AA64MMFR0_TGran4_IMPL       (UL(0x0) << ID_AA64MMFR0_TGran4_SHIFT)
+#define         ID_AA64MMFR0_TGran4_NONE       (UL(0xf) << ID_AA64MMFR0_TGran4_SHIFT)
+
+/* ID_AA64MMFR1_EL1 */
+#define        ID_AA64MMFR1_MASK               UL(0x00000000ffffffff)
+#define        ID_AA64MMFR1_HAFDBS_SHIFT       0
+#define        ID_AA64MMFR1_HAFDBS_MASK        (UL(0xf) << ID_AA64MMFR1_HAFDBS_SHIFT)
+#define        ID_AA64MMFR1_HAFDBS(x)          ((x) & ID_AA64MMFR1_HAFDBS_MASK)
+#define         ID_AA64MMFR1_HAFDBS_NONE       (UL(0x0) << ID_AA64MMFR1_HAFDBS_SHIFT)
+#define         ID_AA64MMFR1_HAFDBS_AF         (UL(0x1) << ID_AA64MMFR1_HAFDBS_SHIFT)
+#define         ID_AA64MMFR1_HAFDBS_AF_DBS     (UL(0x2) << ID_AA64MMFR1_HAFDBS_SHIFT)
+#define        ID_AA64MMFR1_VMIDBits_SHIFT     4
+#define        ID_AA64MMFR1_VMIDBits_MASK      (UL(0xf) << ID_AA64MMFR1_VMIDBits_SHIFT)
+#define        ID_AA64MMFR1_VMIDBits(x)        ((x) & ID_AA64MMFR1_VMIDBits_MASK)
+#define         ID_AA64MMFR1_VMIDBits_8        (UL(0x0) << ID_AA64MMFR1_VMIDBits_SHIFT)
+#define         ID_AA64MMFR1_VMIDBits_16       (UL(0x2) << ID_AA64MMFR1_VMIDBits_SHIFT)
+#define        ID_AA64MMFR1_VH_SHIFT           8
+#define        ID_AA64MMFR1_VH_MASK            (UL(0xf) << ID_AA64MMFR1_VH_SHIFT)
+#define        ID_AA64MMFR1_VH(x)              ((x) & ID_AA64MMFR1_VH_MASK)
+#define         ID_AA64MMFR1_VH_NONE           (UL(0x0) << ID_AA64MMFR1_VH_SHIFT)
+#define         ID_AA64MMFR1_VH_IMPL           (UL(0x1) << ID_AA64MMFR1_VH_SHIFT)
+#define        ID_AA64MMFR1_HPDS_SHIFT         12
+#define        ID_AA64MMFR1_HPDS_MASK          (UL(0xf) << ID_AA64MMFR1_HPDS_SHIFT)
+#define        ID_AA64MMFR1_HPDS(x)            ((x) & ID_AA64MMFR1_HPDS_MASK)
+#define         ID_AA64MMFR1_HPDS_NONE         (UL(0x0) << ID_AA64MMFR1_HPDS_SHIFT)
+#define         ID_AA64MMFR1_HPDS_HPD          (UL(0x1) << ID_AA64MMFR1_HPDS_SHIFT)
+#define         ID_AA64MMFR1_HPDS_TTPBHA       (UL(0x2) << ID_AA64MMFR1_HPDS_SHIFT)
+#define        ID_AA64MMFR1_LO_SHIFT           16
+#define        ID_AA64MMFR1_LO_MASK            (UL(0xf) << ID_AA64MMFR1_LO_SHIFT)
+#define        ID_AA64MMFR1_LO(x)              ((x) & ID_AA64MMFR1_LO_MASK)
+#define         ID_AA64MMFR1_LO_NONE           (UL(0x0) << ID_AA64MMFR1_LO_SHIFT)
+#define         ID_AA64MMFR1_LO_IMPL           (UL(0x1) << ID_AA64MMFR1_LO_SHIFT)
+#define        ID_AA64MMFR1_PAN_SHIFT          20
+#define        ID_AA64MMFR1_PAN_MASK           (UL(0xf) << ID_AA64MMFR1_PAN_SHIFT)
+#define        ID_AA64MMFR1_PAN(x)             ((x) & ID_AA64MMFR1_PAN_MASK)
+#define         ID_AA64MMFR1_PAN_NONE          (UL(0x0) << ID_AA64MMFR1_PAN_SHIFT)
+#define         ID_AA64MMFR1_PAN_IMPL          (UL(0x1) << ID_AA64MMFR1_PAN_SHIFT)
+#define         ID_AA64MMFR1_PAN_ATS1E1        (UL(0x2) << ID_AA64MMFR1_PAN_SHIFT)
+#define        ID_AA64MMFR1_SpecSEI_SHIFT      24
+#define        ID_AA64MMFR1_SpecSEI_MASK       (UL(0xf) << ID_AA64MMFR1_SpecSEI_SHIFT)
+#define        ID_AA64MMFR1_SpecSEI(x)         ((x) & ID_AA64MMFR1_SpecSEI_MASK)
+#define         ID_AA64MMFR1_SpecSEI_NONE      (UL(0x0) << ID_AA64MMFR1_SpecSEI_SHIFT)
+#define         ID_AA64MMFR1_SpecSEI_IMPL      (UL(0x1) << ID_AA64MMFR1_SpecSEI_SHIFT)
+#define        ID_AA64MMFR1_XNX_SHIFT          28
+#define        ID_AA64MMFR1_XNX_MASK           (UL(0xf) << ID_AA64MMFR1_XNX_SHIFT)
+#define        ID_AA64MMFR1_XNX(x)             ((x) & ID_AA64MMFR1_XNX_MASK)
+#define         ID_AA64MMFR1_XNX_NONE          (UL(0x0) << ID_AA64MMFR1_XNX_SHIFT)
+#define         ID_AA64MMFR1_XNX_IMPL          (UL(0x1) << ID_AA64MMFR1_XNX_SHIFT)
+
+/* ID_AA64MMFR2_EL1 */
+#define        ID_AA64MMFR2_EL1                S3_0_C0_C7_2
+#define        ID_AA64MMFR2_MASK               UL(0x000000000fffffff)
+#define        ID_AA64MMFR2_CnP_SHIFT          0
+#define        ID_AA64MMFR2_CnP_MASK           (UL(0xf) << ID_AA64MMFR2_CnP_SHIFT)
+#define        ID_AA64MMFR2_CnP(x)             ((x) & ID_AA64MMFR2_CnP_MASK)
+#define         ID_AA64MMFR2_CnP_NONE          (UL(0x0) << ID_AA64MMFR2_CnP_SHIFT)
+#define         ID_AA64MMFR2_CnP_IMPL          (UL(0x1) << ID_AA64MMFR2_CnP_SHIFT)
+#define        ID_AA64MMFR2_UAO_SHIFT          4
+#define        ID_AA64MMFR2_UAO_MASK           (UL(0xf) << ID_AA64MMFR2_UAO_SHIFT)
+#define        ID_AA64MMFR2_UAO(x)             ((x) & ID_AA64MMFR2_UAO_MASK)
+#define         ID_AA64MMFR2_UAO_NONE          (UL(0x0) << ID_AA64MMFR2_UAO_SHIFT)
+#define         ID_AA64MMFR2_UAO_IMPL          (UL(0x1) << ID_AA64MMFR2_UAO_SHIFT)
+#define        ID_AA64MMFR2_LSM_SHIFT          8
+#define        ID_AA64MMFR2_LSM_MASK           (UL(0xf) << ID_AA64MMFR2_LSM_SHIFT)
+#define        ID_AA64MMFR2_LSM(x)             ((x) & ID_AA64MMFR2_LSM_MASK)
+#define         ID_AA64MMFR2_LSM_NONE          (UL(0x0) << ID_AA64MMFR2_LSM_SHIFT)
+#define         ID_AA64MMFR2_LSM_IMPL          (UL(0x1) << ID_AA64MMFR2_LSM_SHIFT)
+#define        ID_AA64MMFR2_IESB_SHIFT         12
+#define        ID_AA64MMFR2_IESB_MASK          (UL(0xf) << ID_AA64MMFR2_IESB_SHIFT)
+#define        ID_AA64MMFR2_IESB(x)            ((x) & ID_AA64MMFR2_IESB_MASK)
+#define         ID_AA64MMFR2_IESB_NONE         (UL(0x0) << ID_AA64MMFR2_IESB_SHIFT)
+#define         ID_AA64MMFR2_IESB_IMPL         (UL(0x1) << ID_AA64MMFR2_IESB_SHIFT)
+#define        ID_AA64MMFR2_VARange_SHIFT      16
+#define        ID_AA64MMFR2_VARange_MASK       (UL(0xf) << ID_AA64MMFR2_VARange_SHIFT)
+#define        ID_AA64MMFR2_VARange(x)         ((x) & ID_AA64MMFR2_VARange_MASK)
+#define         ID_AA64MMFR2_VARange_48        (UL(0x0) << ID_AA64MMFR2_VARange_SHIFT)
+#define         ID_AA64MMFR2_VARange_52        (UL(0x1) << ID_AA64MMFR2_VARange_SHIFT)
+#define        ID_AA64MMFR2_CCIDX_SHIFT        20
+#define        ID_AA64MMFR2_CCIDX_MASK         (UL(0xf) << ID_AA64MMFR2_CCIDX_SHIFT)
+#define        ID_AA64MMFR2_CCIDX(x)           ((x) & ID_AA64MMFR2_CCIDX_MASK)
+#define         ID_AA64MMFR2_CCIDX_32          (UL(0x0) << ID_AA64MMFR2_CCIDX_SHIFT)
+#define         ID_AA64MMFR2_CCIDX_64          (UL(0x1) << ID_AA64MMFR2_CCIDX_SHIFT)
+#define        ID_AA64MMFR2_NV_SHIFT           24
+#define        ID_AA64MMFR2_NV_MASK            (UL(0xf) << ID_AA64MMFR2_NV_SHIFT)
+#define        ID_AA64MMFR2_NV(x)              ((x) & ID_AA64MMFR2_NV_MASK)
+#define         ID_AA64MMFR2_NV_NONE           (UL(0x0) << ID_AA64MMFR2_NV_SHIFT)
+#define         ID_AA64MMFR2_NV_IMPL           (UL(0x1) << ID_AA64MMFR2_NV_SHIFT)
+
+/* ID_AA64PFR0_EL1 */
+#define        ID_AA64PFR0_MASK                UL(0x0000000fffffffff)
+#define        ID_AA64PFR0_EL0_SHIFT           0
+#define        ID_AA64PFR0_EL0_MASK            (UL(0xf) << ID_AA64PFR0_EL0_SHIFT)
+#define        ID_AA64PFR0_EL0(x)              ((x) & ID_AA64PFR0_EL0_MASK)
+#define         ID_AA64PFR0_EL0_64             (UL(0x1) << ID_AA64PFR0_EL0_SHIFT)
+#define         ID_AA64PFR0_EL0_64_32          (UL(0x2) << ID_AA64PFR0_EL0_SHIFT)
+#define        ID_AA64PFR0_EL1_SHIFT           4
+#define        ID_AA64PFR0_EL1_MASK            (UL(0xf) << ID_AA64PFR0_EL1_SHIFT)
+#define        ID_AA64PFR0_EL1(x)              ((x) & ID_AA64PFR0_EL1_MASK)
+#define         ID_AA64PFR0_EL1_64             (UL(0x1) << ID_AA64PFR0_EL1_SHIFT)
+#define         ID_AA64PFR0_EL1_64_32          (UL(0x2) << ID_AA64PFR0_EL1_SHIFT)
+#define        ID_AA64PFR0_EL2_SHIFT           8
+#define        ID_AA64PFR0_EL2_MASK            (UL(0xf) << ID_AA64PFR0_EL2_SHIFT)
+#define        ID_AA64PFR0_EL2(x)              ((x) & ID_AA64PFR0_EL2_MASK)
+#define         ID_AA64PFR0_EL2_NONE           (UL(0x0) << ID_AA64PFR0_EL2_SHIFT)
+#define         ID_AA64PFR0_EL2_64             (UL(0x1) << ID_AA64PFR0_EL2_SHIFT)
+#define         ID_AA64PFR0_EL2_64_32          (UL(0x2) << ID_AA64PFR0_EL2_SHIFT)
+#define        ID_AA64PFR0_EL3_SHIFT           12
+#define        ID_AA64PFR0_EL3_MASK            (UL(0xf) << ID_AA64PFR0_EL3_SHIFT)
+#define        ID_AA64PFR0_EL3(x)              ((x) & ID_AA64PFR0_EL3_MASK)
+#define         ID_AA64PFR0_EL3_NONE           (UL(0x0) << ID_AA64PFR0_EL3_SHIFT)
+#define         ID_AA64PFR0_EL3_64             (UL(0x1) << ID_AA64PFR0_EL3_SHIFT)
+#define         ID_AA64PFR0_EL3_64_32          (UL(0x2) << ID_AA64PFR0_EL3_SHIFT)
+#define        ID_AA64PFR0_FP_SHIFT            16
+#define        ID_AA64PFR0_FP_MASK             (UL(0xf) << ID_AA64PFR0_FP_SHIFT)
+#define        ID_AA64PFR0_FP(x)               ((x) & ID_AA64PFR0_FP_MASK)
+#define         ID_AA64PFR0_FP_IMPL            (UL(0x0) << ID_AA64PFR0_FP_SHIFT)
+#define         ID_AA64PFR0_FP_HP              (UL(0x1) << ID_AA64PFR0_FP_SHIFT)
+#define         ID_AA64PFR0_FP_NONE            (UL(0xf) << ID_AA64PFR0_FP_SHIFT)
+#define        ID_AA64PFR0_AdvSIMD_SHIFT       20
+#define        ID_AA64PFR0_AdvSIMD_MASK        (UL(0xf) << ID_AA64PFR0_AdvSIMD_SHIFT)
+#define        ID_AA64PFR0_AdvSIMD(x)          ((x) & ID_AA64PFR0_AdvSIMD_MASK)
+#define         ID_AA64PFR0_AdvSIMD_IMPL       (UL(0x0) << ID_AA64PFR0_AdvSIMD_SHIFT)
+#define         ID_AA64PFR0_AdvSIMD_HP         (UL(0x1) << ID_AA64PFR0_AdvSIMD_SHIFT)
+#define         ID_AA64PFR0_AdvSIMD_NONE       (UL(0xf) << ID_AA64PFR0_AdvSIMD_SHIFT)
+#define        ID_AA64PFR0_GIC_BITS            0x4 /* Number of bits in GIC field */
+#define        ID_AA64PFR0_GIC_SHIFT           24
+#define        ID_AA64PFR0_GIC_MASK            (UL(0xf) << ID_AA64PFR0_GIC_SHIFT)
+#define        ID_AA64PFR0_GIC(x)              ((x) & ID_AA64PFR0_GIC_MASK)
+#define         ID_AA64PFR0_GIC_CPUIF_NONE     (UL(0x0) << ID_AA64PFR0_GIC_SHIFT)
+#define         ID_AA64PFR0_GIC_CPUIF_EN       (UL(0x1) << ID_AA64PFR0_GIC_SHIFT)
+#define        ID_AA64PFR0_RAS_SHIFT           28
+#define        ID_AA64PFR0_RAS_MASK            (UL(0xf) << ID_AA64PFR0_RAS_SHIFT)
+#define        ID_AA64PFR0_RAS(x)              ((x) & ID_AA64PFR0_RAS_MASK)
+#define         ID_AA64PFR0_RAS_NONE           (UL(0x0) << ID_AA64PFR0_RAS_SHIFT)
+#define         ID_AA64PFR0_RAS_V1             (UL(0x1) << ID_AA64PFR0_RAS_SHIFT)
+#define        ID_AA64PFR0_SVE_SHIFT           32
+#define        ID_AA64PFR0_SVE_MASK            (UL(0xf) << ID_AA64PFR0_SVE_SHIFT)
+#define        ID_AA64PFR0_SVE(x)              ((x) & ID_AA64PFR0_SVE_MASK)
+#define         ID_AA64PFR0_SVE_NONE           (UL(0x0) << ID_AA64PFR0_SVE_SHIFT)
+#define         ID_AA64PFR0_SVE_IMPL           (UL(0x1) << ID_AA64PFR0_SVE_SHIFT)
+
+/* MAIR_EL1 - Memory Attribute Indirection Register */
+#define        MAIR_ATTR_MASK(idx)     (0xff << ((idx) * 8))
+#define        MAIR_ATTR(attr, idx) ((attr) << ((idx) * 8))
+#define         MAIR_DEVICE_nGnRnE     0x00
+#define         MAIR_NORMAL_NC         0x44
+#define         MAIR_NORMAL_WT         0xbb
+#define         MAIR_NORMAL_WB         0xff
+
+/* PAR_EL1 - Physical Address Register */
+#define        PAR_F_SHIFT             0
+#define        PAR_F                   (0x1 << PAR_F_SHIFT)
+#define        PAR_SUCCESS(x)          (((x) & PAR_F) == 0)
+/* When PAR_F == 0 (success) */
+#define        PAR_SH_SHIFT            7
+#define        PAR_SH_MASK             (0x3 << PAR_SH_SHIFT)
+#define        PAR_NS_SHIFT            9
+#define        PAR_NS_MASK             (0x3 << PAR_NS_SHIFT)
+#define        PAR_PA_SHIFT            12
+#define        PAR_PA_MASK             0x0000fffffffff000
+#define        PAR_ATTR_SHIFT          56
+#define        PAR_ATTR_MASK           (0xff << PAR_ATTR_SHIFT)
+/* When PAR_F == 1 (aborted) */
+#define        PAR_FST_SHIFT           1
+#define        PAR_FST_MASK            (0x3f << PAR_FST_SHIFT)
+#define        PAR_PTW_SHIFT           8
+#define        PAR_PTW_MASK            (0x1 << PAR_PTW_SHIFT)
+#define        PAR_S_SHIFT             9
+#define        PAR_S_MASK              (0x1 << PAR_S_SHIFT)
+
+/* SCTLR_EL1 - System Control Register */
+#define        SCTLR_RES0      0xc8222440      /* Reserved ARMv8.0, write 0 */
+#define        SCTLR_RES1      0x30d00800      /* Reserved ARMv8.0, write 1 */
+
+#define        SCTLR_M         0x00000001
+#define        SCTLR_A         0x00000002
+#define        SCTLR_C         0x00000004
+#define        SCTLR_SA        0x00000008
+#define        SCTLR_SA0       0x00000010
+#define        SCTLR_CP15BEN   0x00000020
+/* Bit 6 is reserved */
+#define        SCTLR_ITD       0x00000080
+#define        SCTLR_SED       0x00000100
+#define        SCTLR_UMA       0x00000200
+/* Bit 10 is reserved */
+/* Bit 11 is reserved */
+#define        SCTLR_I         0x00001000
+#define        SCTLR_EnDB      0x00002000 /* ARMv8.3 */
+#define        SCTLR_DZE       0x00004000
+#define        SCTLR_UCT       0x00008000
+#define        SCTLR_nTWI      0x00010000
+/* Bit 17 is reserved */
+#define        SCTLR_nTWE      0x00040000
+#define        SCTLR_WXN       0x00080000
+/* Bit 20 is reserved */
+#define        SCTLR_IESB      0x00200000 /* ARMv8.2 */
+/* Bit 22 is reserved */
+#define        SCTLR_SPAN      0x00800000 /* ARMv8.1 */
+#define        SCTLR_EOE       0x01000000
+#define        SCTLR_EE        0x02000000
+#define        SCTLR_UCI       0x04000000
+#define        SCTLR_EnDA      0x08000000 /* ARMv8.3 */
+#define        SCTLR_nTLSMD    0x10000000 /* ARMv8.2 */
+#define        SCTLR_LSMAOE    0x20000000 /* ARMv8.2 */
+#define        SCTLR_EnIB      0x40000000 /* ARMv8.3 */
+#define        SCTLR_EnIA      0x80000000 /* ARMv8.3 */
+
+/* SPSR_EL1 */
+/*
+ * When the exception is taken in AArch64:
+ * M[3:2] is the exception level
+ * M[1]   is unused
+ * M[0]   is the SP select:
+ *         0: always SP0
+ *         1: current EL's SP
+ */
+#define        PSR_M_EL0t      0x00000000
+#define        PSR_M_EL1t      0x00000004
+#define        PSR_M_EL1h      0x00000005
+#define        PSR_M_EL2t      0x00000008
+#define        PSR_M_EL2h      0x00000009
+#define        PSR_M_MASK      0x0000000f
+
+#define        PSR_AARCH32     0x00000010
+#define        PSR_F           0x00000040
+#define        PSR_I           0x00000080
+#define        PSR_A           0x00000100
+#define        PSR_D           0x00000200
+#define        PSR_IL          0x00100000
+#define        PSR_SS          0x00200000
+#define        PSR_V           0x10000000
+#define        PSR_C           0x20000000
+#define        PSR_Z           0x40000000
+#define        PSR_N           0x80000000
+#define        PSR_FLAGS       0xf0000000
+
+/* TCR_EL1 - Translation Control Register */
+#define        TCR_ASID_16     (1 << 36)
+
+#define        TCR_IPS_SHIFT   32
+#define        TCR_IPS_32BIT   (0 << TCR_IPS_SHIFT)
+#define        TCR_IPS_36BIT   (1 << TCR_IPS_SHIFT)
+#define        TCR_IPS_40BIT   (2 << TCR_IPS_SHIFT)
+#define        TCR_IPS_42BIT   (3 << TCR_IPS_SHIFT)
+#define        TCR_IPS_44BIT   (4 << TCR_IPS_SHIFT)
+#define        TCR_IPS_48BIT   (5 << TCR_IPS_SHIFT)
+
+#define        TCR_TG1_SHIFT   30
+#define        TCR_TG1_16K     (1 << TCR_TG1_SHIFT)
+#define        TCR_TG1_4K      (2 << TCR_TG1_SHIFT)
+#define        TCR_TG1_64K     (3 << TCR_TG1_SHIFT)
+
+#define        TCR_SH1_SHIFT   28
+#define        TCR_SH1_IS      (0x3UL << TCR_SH1_SHIFT)
+#define        TCR_ORGN1_SHIFT 26
+#define        TCR_ORGN1_WBWA  (0x1UL << TCR_ORGN1_SHIFT)
+#define        TCR_IRGN1_SHIFT 24
+#define        TCR_IRGN1_WBWA  (0x1UL << TCR_IRGN1_SHIFT)
+#define        TCR_SH0_SHIFT   12
+#define        TCR_SH0_IS      (0x3UL << TCR_SH0_SHIFT)
+#define        TCR_ORGN0_SHIFT 10
+#define        TCR_ORGN0_WBWA  (0x1UL << TCR_ORGN0_SHIFT)
+#define        TCR_IRGN0_SHIFT 8
+#define        TCR_IRGN0_WBWA  (0x1UL << TCR_IRGN0_SHIFT)
+
+#define        TCR_CACHE_ATTRS ((TCR_IRGN0_WBWA | TCR_IRGN1_WBWA) |\
+                               (TCR_ORGN0_WBWA | TCR_ORGN1_WBWA))
+
+#ifdef SMP
+#define        TCR_SMP_ATTRS   (TCR_SH0_IS | TCR_SH1_IS)
+#else
+#define        TCR_SMP_ATTRS   0
+#endif
+
+#define        TCR_T1SZ_SHIFT  16
+#define        TCR_T0SZ_SHIFT  0
+#define        TCR_T1SZ(x)     ((x) << TCR_T1SZ_SHIFT)
+#define        TCR_T0SZ(x)     ((x) << TCR_T0SZ_SHIFT)
+#define        TCR_TxSZ(x)     (TCR_T1SZ(x) | TCR_T0SZ(x))
+
+/* Saved Program Status Register */
+#define        DBG_SPSR_SS     (0x1 << 21)
+
+/* Monitor Debug System Control Register */
+#define        DBG_MDSCR_SS    (0x1 << 0)
+#define        DBG_MDSCR_KDE   (0x1 << 13)
+#define        DBG_MDSCR_MDE   (0x1 << 15)
+
+/* Performance Monitoring Counters */
+#define        PMCR_E          (1 << 0) /* Enable all counters */
+#define        PMCR_P          (1 << 1) /* Reset all counters */
+#define        PMCR_C          (1 << 2) /* Clock counter reset */
+#define        PMCR_D          (1 << 3) /* CNTR counts every 64 clk cycles */
+#define        PMCR_X          (1 << 4) /* Export to ext. monitoring (ETM) */
+#define        PMCR_DP         (1 << 5) /* Disable CCNT if non-invasive debug*/
+#define        PMCR_LC         (1 << 6) /* Long cycle count enable */
+#define        PMCR_IMP_SHIFT  24 /* Implementer code */
+#define        PMCR_IMP_MASK   (0xff << PMCR_IMP_SHIFT)
+#define        PMCR_IDCODE_SHIFT       16 /* Identification code */
+#define        PMCR_IDCODE_MASK        (0xff << PMCR_IDCODE_SHIFT)
+#define         PMCR_IDCODE_CORTEX_A57 0x01
+#define         PMCR_IDCODE_CORTEX_A72 0x02
+#define         PMCR_IDCODE_CORTEX_A53 0x03
+#define        PMCR_N_SHIFT    11       /* Number of counters implemented */
+#define        PMCR_N_MASK     (0x1f << PMCR_N_SHIFT)
+
+#endif /* !_MACHINE_ARMREG_H_ */
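
For reviewers: a short sketch (not part of the patch) of how the accessors
above compose. For example, detecting the ARMv8.1 LSE atomics that the new
ck_pr_lse.h relies on could look like this, assuming a context where the ID
registers are readable:

    uint64_t isar0;

    isar0 = READ_SPECIALREG(id_aa64isar0_el1);
    if (ID_AA64ISAR0_Atomic(isar0) == ID_AA64ISAR0_Atomic_IMPL) {
            /* FEAT_LSE present: CAS/SWP/LD<op> instructions available. */
    }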
diff --git a/freebsd/sys/arm64/include/machine/cpu.h b/freebsd/sys/arm64/include/machine/cpu.h
new file mode 100644
index 00000000..5663e50e
--- /dev/null
+++ b/freebsd/sys/arm64/include/machine/cpu.h
@@ -0,0 +1,203 @@
+/*-
+ * Copyright (c) 1990 The Regents of the University of California.
+ * Copyright (c) 2014-2016 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to Berkeley by
+ * William Jolitz.
+ *
+ * Portions of this software were developed by Andrew Turner
+ * under sponsorship from the FreeBSD Foundation
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *     from: @(#)cpu.h 5.4 (Berkeley) 5/9/91
+ *     from: FreeBSD: src/sys/i386/include/cpu.h,v 1.62 2001/06/29
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CPU_H_
+#define        _MACHINE_CPU_H_
+
+#include <machine/atomic.h>
+#include <machine/frame.h>
+#include <machine/armreg.h>
+
+#define        TRAPF_PC(tfp)           ((tfp)->tf_lr)
+#define        TRAPF_USERMODE(tfp)     (((tfp)->tf_spsr & PSR_M_MASK) == PSR_M_EL0t)
+
+#define        cpu_getstack(td)        ((td)->td_frame->tf_sp)
+#define        cpu_setstack(td, sp)    ((td)->td_frame->tf_sp = (sp))
+#define        cpu_spinwait()          __asm __volatile("yield" ::: "memory")
+#define        cpu_lock_delay()        DELAY(1)
+
+/* Extract CPU affinity levels 0-3 */
+#define        CPU_AFF0(mpidr) (u_int)(((mpidr) >> 0) & 0xff)
+#define        CPU_AFF1(mpidr) (u_int)(((mpidr) >> 8) & 0xff)
+#define        CPU_AFF2(mpidr) (u_int)(((mpidr) >> 16) & 0xff)
+#define        CPU_AFF3(mpidr) (u_int)(((mpidr) >> 32) & 0xff)
+#define        CPU_AFF0_MASK   0xffUL
+#define        CPU_AFF1_MASK   0xff00UL
+#define        CPU_AFF2_MASK   0xff0000UL
+#define        CPU_AFF3_MASK   0xff00000000UL
+#define        CPU_AFF_MASK    (CPU_AFF0_MASK | CPU_AFF1_MASK | \
+    CPU_AFF2_MASK| CPU_AFF3_MASK)      /* Mask affinity fields in MPIDR_EL1 */
+
+#ifdef _KERNEL
+
+#define        CPU_IMPL_ARM            0x41
+#define        CPU_IMPL_BROADCOM       0x42
+#define        CPU_IMPL_CAVIUM         0x43
+#define        CPU_IMPL_DEC            0x44
+#define        CPU_IMPL_INFINEON       0x49
+#define        CPU_IMPL_FREESCALE      0x4D
+#define        CPU_IMPL_NVIDIA         0x4E
+#define        CPU_IMPL_APM            0x50
+#define        CPU_IMPL_QUALCOMM       0x51
+#define        CPU_IMPL_MARVELL        0x56
+#define        CPU_IMPL_INTEL          0x69
+
+/* ARM Part numbers */
+#define        CPU_PART_FOUNDATION     0xD00
+#define        CPU_PART_CORTEX_A35     0xD04
+#define        CPU_PART_CORTEX_A53     0xD03
+#define        CPU_PART_CORTEX_A55     0xD05
+#define        CPU_PART_CORTEX_A57     0xD07
+#define        CPU_PART_CORTEX_A72     0xD08
+#define        CPU_PART_CORTEX_A73     0xD09
+#define        CPU_PART_CORTEX_A75     0xD0A
+
+/* Cavium Part numbers */
+#define        CPU_PART_THUNDERX       0x0A1
+#define        CPU_PART_THUNDERX_81XX  0x0A2
+#define        CPU_PART_THUNDERX_83XX  0x0A3
+#define        CPU_PART_THUNDERX2      0x0AF
+
+#define        CPU_REV_THUNDERX_1_0    0x00
+#define        CPU_REV_THUNDERX_1_1    0x01
+
+#define        CPU_REV_THUNDERX2_0     0x00
+
+/* APM / Ampere Part Number */
+#define CPU_PART_EMAG8180      0x000
+
+#define        CPU_IMPL(midr)  (((midr) >> 24) & 0xff)
+#define        CPU_PART(midr)  (((midr) >> 4) & 0xfff)
+#define        CPU_VAR(midr)   (((midr) >> 20) & 0xf)
+#define        CPU_REV(midr)   (((midr) >> 0) & 0xf)
+
+#define        CPU_IMPL_TO_MIDR(val)   (((val) & 0xff) << 24)
+#define        CPU_PART_TO_MIDR(val)   (((val) & 0xfff) << 4)
+#define        CPU_VAR_TO_MIDR(val)    (((val) & 0xf) << 20)
+#define        CPU_REV_TO_MIDR(val)    (((val) & 0xf) << 0)
+
+#define        CPU_IMPL_MASK   (0xff << 24)
+#define        CPU_PART_MASK   (0xfff << 4)
+#define        CPU_VAR_MASK    (0xf << 20)
+#define        CPU_REV_MASK    (0xf << 0)
+
+#define        CPU_ID_RAW(impl, part, var, rev)                \
+    (CPU_IMPL_TO_MIDR((impl)) |                                \
+    CPU_PART_TO_MIDR((part)) | CPU_VAR_TO_MIDR((var)) |        \
+    CPU_REV_TO_MIDR((rev)))
+
+#define        CPU_MATCH(mask, impl, part, var, rev)           \
+    (((mask) & PCPU_GET(midr)) ==                      \
+    ((mask) & CPU_ID_RAW((impl), (part), (var), (rev))))
+
+#define        CPU_MATCH_RAW(mask, devid)                      \
+    (((mask) & PCPU_GET(midr)) == ((mask) & (devid)))
+
+/*
+ * Chip-specific errata. This defines are intended to be
+ * booleans used within if statements. When an appropriate
+ * kernel option is disabled, these defines must be defined
+ * as 0 to allow the compiler to remove a dead code thus
+ * produce better optimized kernel image.
+ */
+/*
+ * Vendor:     Cavium
+ * Chip:       ThunderX
+ * Revision(s):        Pass 1.0, Pass 1.1
+ */
+#ifdef THUNDERX_PASS_1_1_ERRATA
+#define        CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1                            \
+    (CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK | CPU_REV_MASK,           \
+    CPU_IMPL_CAVIUM, CPU_PART_THUNDERX, 0, CPU_REV_THUNDERX_1_0) ||    \
+    CPU_MATCH(CPU_IMPL_MASK | CPU_PART_MASK | CPU_REV_MASK,            \
+    CPU_IMPL_CAVIUM, CPU_PART_THUNDERX, 0, CPU_REV_THUNDERX_1_1))
+#else
+#define        CPU_MATCH_ERRATA_CAVIUM_THUNDERX_1_1    0
+#endif
+
+
+extern char btext[];
+extern char etext[];
+
+extern uint64_t __cpu_affinity[];
+
+void   cpu_halt(void) __dead2;
+void   cpu_reset(void) __dead2;
+void   fork_trampoline(void);
+void   identify_cpu(void);
+void   install_cpu_errata(void);
+void   print_cpu_features(u_int);
+void   swi_vm(void *v);
+
+#define        CPU_AFFINITY(cpu)       __cpu_affinity[(cpu)]
+#define        CPU_CURRENT_SOCKET                              \
+    (CPU_AFF2(CPU_AFFINITY(PCPU_GET(cpuid))))
+
+static __inline uint64_t
+get_cyclecount(void)
+{
+       uint64_t ret;
+
+       ret = READ_SPECIALREG(cntvct_el0);
+
+       return (ret);
+}
+
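
Note that cntvct_el0 is the generic-timer virtual count, not a core
cycle counter; it ticks at the frequency readable from cntfrq_el0. A
hedged timing sketch (do_work() is a hypothetical workload):

	uint64_t t0, ticks;

	t0 = get_cyclecount();
	do_work();			/* hypothetical workload */
	ticks = get_cyclecount() - t0;
	/* seconds = ticks / (double)READ_SPECIALREG(cntfrq_el0) */
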
+#define        ADDRESS_TRANSLATE_FUNC(stage)                           \
+static inline uint64_t                                         \
+arm64_address_translate_ ##stage (uint64_t addr)               \
+{                                                              \
+       uint64_t ret;                                           \
+                                                               \
+       __asm __volatile(                                       \
+           "at " __STRING(stage) ", %1 \n"                                     
\
+           "mrs %0, par_el1" : "=r"(ret) : "r"(addr));         \
+                                                               \
+       return (ret);                                           \
+}
+
+ADDRESS_TRANSLATE_FUNC(s1e0r)
+ADDRESS_TRANSLATE_FUNC(s1e0w)
+ADDRESS_TRANSLATE_FUNC(s1e1r)
+ADDRESS_TRANSLATE_FUNC(s1e1w)
+
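
A sketch of consuming one of the generated translators (illustrative;
the masks are written out literally instead of using the PAR_*
constants from armreg.h). Bit 0 of PAR_EL1 is the fault flag, and on
success bits 47:12 carry the output physical page:

	uint64_t par, pa;

	par = arm64_address_translate_s1e1r(va);	/* va assumed */
	if ((par & 0x1) == 0)				/* PAR_EL1.F clear */
		pa = (par & 0x0000fffffffff000ULL) | (va & 0xfff);
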
+#endif
+
+#endif /* !_MACHINE_CPU_H_ */
diff --git a/freebsd/sys/arm64/include/machine/cpufunc.h b/freebsd/sys/arm64/include/machine/cpufunc.h
new file mode 100644
index 00000000..c3e2a36e
--- /dev/null
+++ b/freebsd/sys/arm64/include/machine/cpufunc.h
@@ -0,0 +1,153 @@
+/*-
+ * Copyright (c) 2014 Andrew Turner
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_CPUFUNC_H_
+#define        _MACHINE_CPUFUNC_H_
+
+static __inline void
+breakpoint(void)
+{
+
+       __asm("brk #0");
+}
+
+#ifdef _KERNEL
+
+#include <machine/armreg.h>
+
+void pan_enable(void);
+
+static __inline register_t
+dbg_disable(void)
+{
+       uint32_t ret;
+
+       __asm __volatile(
+           "mrs %x0, daif   \n"
+           "msr daifset, #8 \n"
+           : "=&r" (ret));
+
+       return (ret);
+}
+
+static __inline void
+dbg_enable(void)
+{
+
+       __asm __volatile("msr daifclr, #8");
+}
+
+static __inline register_t
+intr_disable(void)
+{
+       /* DAIF is a 32-bit register */
+       uint32_t ret;
+
+       __asm __volatile(
+           "mrs %x0, daif   \n"
+           "msr daifset, #2 \n"
+           : "=&r" (ret));
+
+       return (ret);
+}
+
+static __inline void
+intr_restore(register_t s)
+{
+
+       WRITE_SPECIALREG(daif, s);
+}
+
+static __inline void
+intr_enable(void)
+{
+
+       __asm __volatile("msr daifclr, #2");
+}
+
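
These pair up in the usual save/disable/restore pattern; a minimal
sketch:

	register_t s;

	s = intr_disable();
	/* critical section, interrupts masked */
	intr_restore(s);
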
+static __inline register_t
+get_midr(void)
+{
+       uint64_t midr;
+
+       midr = READ_SPECIALREG(midr_el1);
+
+       return (midr);
+}
+
+static __inline register_t
+get_mpidr(void)
+{
+       uint64_t mpidr;
+
+       mpidr = READ_SPECIALREG(mpidr_el1);
+
+       return (mpidr);
+}
+
+static __inline void
+clrex(void)
+{
+
+       /*
+        * Include a compiler barrier; otherwise the monitor clear
+        * might occur too late.
+        */
+       __asm __volatile("clrex" : : : "memory");
+}
+
+extern int64_t dcache_line_size;
+extern int64_t icache_line_size;
+extern int64_t idcache_line_size;
+extern int64_t dczva_line_size;
+
+#define        cpu_nullop()                    arm64_nullop()
+#define        cpufunc_nullop()                arm64_nullop()
+#define        cpu_setttb(a)                   arm64_setttb(a)
+
+#define        cpu_tlb_flushID()               arm64_tlb_flushID()
+
+#define        cpu_dcache_wbinv_range(a, s)    arm64_dcache_wbinv_range((a), (s))
+#define        cpu_dcache_inv_range(a, s)      arm64_dcache_inv_range((a), (s))
+#define        cpu_dcache_wb_range(a, s)       arm64_dcache_wb_range((a), (s))
+
+#define        cpu_idcache_wbinv_range(a, s)   arm64_idcache_wbinv_range((a), (s))
+#define        cpu_icache_sync_range(a, s)     arm64_icache_sync_range((a), (s))
+
+void arm64_nullop(void);
+void arm64_setttb(vm_offset_t);
+void arm64_tlb_flushID(void);
+void arm64_tlb_flushID_SE(vm_offset_t);
+void arm64_icache_sync_range(vm_offset_t, vm_size_t);
+void arm64_idcache_wbinv_range(vm_offset_t, vm_size_t);
+void arm64_dcache_wbinv_range(vm_offset_t, vm_size_t);
+void arm64_dcache_inv_range(vm_offset_t, vm_size_t);
+void arm64_dcache_wb_range(vm_offset_t, vm_size_t);
+
+#endif /* _KERNEL */
+#endif /* _MACHINE_CPUFUNC_H_ */
diff --git a/freebsd/sys/arm64/include/machine/in_cksum.h b/freebsd/sys/arm64/include/machine/in_cksum.h
new file mode 100644
index 00000000..d55b838b
--- /dev/null
+++ b/freebsd/sys/arm64/include/machine/in_cksum.h
@@ -0,0 +1,83 @@
+/*-
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * Copyright (c) 1990 The Regents of the University of California.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ *    may be used to endorse or promote products derived from this software
+ *    without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ *     from tahoe:     in_cksum.c      1.2     86/01/05
+ *     from:           @(#)in_cksum.c  1.3 (Berkeley) 1/19/91
+ *     from: Id: in_cksum.c,v 1.8 1995/12/03 18:35:19 bde Exp
+ *     from: src/sys/alpha/include/in_cksum.h,v 1.7 2005/03/02 21:33:20 joerg
+ * $FreeBSD$
+ */
+
+#ifndef _MACHINE_IN_CKSUM_H_
+#define        _MACHINE_IN_CKSUM_H_    1
+
+#include <sys/cdefs.h>
+
+#define        in_cksum(m, len)        in_cksum_skip(m, len, 0)
+
+#if defined(IPVERSION) && (IPVERSION == 4)
+/*
+ * It is useful to have an Internet checksum routine which is inlineable
+ * and optimized specifically for the task of computing IP header checksums
+ * in the normal case (where there are no options and the header length is
+ * therefore always exactly five 32-bit words).
+ */
+#ifdef __CC_SUPPORTS___INLINE
+
+static __inline void
+in_cksum_update(struct ip *ip)
+{
+       int __tmpsum;
+       __tmpsum = (int)ntohs(ip->ip_sum) + 256;
+       ip->ip_sum = htons(__tmpsum + (__tmpsum >> 16));
+}
+
+#else
+
+#define        in_cksum_update(ip)                                     \
+       do {                                                            \
+               int __tmpsum;                                           \
+               __tmpsum = (int)ntohs(ip->ip_sum) + 256;                \
+               ip->ip_sum = htons(__tmpsum + (__tmpsum >> 16));        \
+       } while(0)
+
+#endif
+#endif
+
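
For context: in_cksum_update() is the RFC 1141-style incremental
fix-up for the forwarding path, where ip_ttl (the high byte of its
16-bit header word) has just been decremented by one. That drops the
header sum by 0x0100, so adding 0x0100 to the stored one's-complement
checksum and folding the carry restores validity. A worked example
(not part of the patch):

	uint16_t sum = 0xffff;			/* old ip_sum, host order */
	int t = sum + 256;			/* 0x100ff */
	sum = (uint16_t)(t + (t >> 16));	/* fold carry -> 0x0100 */
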
+#ifdef _KERNEL
+#if defined(IPVERSION) && (IPVERSION == 4)
+u_int in_cksum_hdr(const struct ip *ip);
+#endif
+u_short in_addword(u_short sum, u_short b);
+u_short in_pseudo(u_int sum, u_int b, u_int c);
+u_short in_cksum_skip(struct mbuf *m, int len, int skip);
+#endif
+
+#endif /* _MACHINE_IN_CKSUM_H_ */
diff --git a/freebsd/sys/contrib/ck/include/gcc/aarch64/ck_pr_llsc.h b/freebsd/sys/contrib/ck/include/gcc/aarch64/ck_pr_llsc.h
new file mode 100644
index 00000000..aa4e3090
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/gcc/aarch64/ck_pr_llsc.h
@@ -0,0 +1,352 @@
+/*
+ * Copyright 2009-2016 Samy Al Bahra.
+ * Copyright 2013-2016 Olivier Houchard.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_PR_AARCH64_LLSC_H
+#define CK_PR_AARCH64_LLSC_H
+
+#ifndef CK_PR_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+CK_CC_INLINE static bool
+ck_pr_cas_64_2_value(uint64_t target[2], uint64_t compare[2], uint64_t set[2], uint64_t value[2])
+{
+        uint64_t tmp1, tmp2;
+
+        __asm__ __volatile__("1:"
+                             "ldxp %0, %1, [%4];"
+                             "mov %2, %0;"
+                             "mov %3, %1;"
+                             "eor %0, %0, %5;"
+                             "eor %1, %1, %6;"
+                             "orr %1, %0, %1;"
+                             "mov %w0, #0;"
+                             "cbnz %1, 2f;"
+                             "stxp %w0, %7, %8, [%4];"
+                             "cbnz %w0, 1b;"
+                             "mov %w0, #1;"
+                             "2:"
+                             : "=&r" (tmp1), "=&r" (tmp2), "=&r" (value[0]), 
"=&r" (value[1])
+                             : "r" (target), "r" (compare[0]), "r" 
(compare[1]), "r" (set[0]), "r" (set[1])
+                             : "cc", "memory");
+
+        return (tmp1);
+}
+
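
Usage sketch of the double-width (128-bit) compare-and-swap; on
failure value[] reports what was actually loaded, so callers can
recompute and retry:

	uint64_t slot[2]   = { 0, 0 };
	uint64_t expect[2] = { 0, 0 };
	uint64_t update[2] = { 1, 2 };
	uint64_t seen[2];

	if (ck_pr_cas_64_2_value(slot, expect, update, seen) == false) {
		/* seen[0]/seen[1] hold the observed contents */
	}
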
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_2_value(void *target, void *compare, void *set, void *value)
+{
+        return (ck_pr_cas_64_2_value(CK_CPP_CAST(uint64_t *, target),
+                                   CK_CPP_CAST(uint64_t *, compare),
+                                   CK_CPP_CAST(uint64_t *, set),
+                                   CK_CPP_CAST(uint64_t *, value)));
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_64_2(uint64_t target[2], uint64_t compare[2], uint64_t set[2])
+{
+        uint64_t tmp1, tmp2;
+
+        __asm__ __volatile__("1:"
+                             "ldxp %0, %1, [%2];"
+                             "eor %0, %0, %3;"
+                             "eor %1, %1, %4;"
+                             "orr %1, %0, %1;"
+                             "mov %w0, #0;"
+                             "cbnz %1, 2f;"
+                             "stxp %w0, %5, %6, [%2];"
+                             "cbnz %w0, 1b;"
+                             "mov %w0, #1;"
+                             "2:"
+                             : "=&r" (tmp1), "=&r" (tmp2)
+                             : "r" (target), "r" (compare[0]), "r" 
(compare[1]), "r" (set[0]), "r" (set[1])
+                             : "cc", "memory");
+
+        return (tmp1);
+}
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_2(void *target, void *compare, void *set)
+{
+        return (ck_pr_cas_64_2(CK_CPP_CAST(uint64_t *, target),
+                             CK_CPP_CAST(uint64_t *, compare),
+                             CK_CPP_CAST(uint64_t *, set)));
+}
+
+
+#define CK_PR_CAS(N, M, T, W, R)                                       \
+        CK_CC_INLINE static bool                                       \
+        ck_pr_cas_##N##_value(M *target, T compare, T set, M *value)   \
+        {                                                              \
+                T previous;                                            \
+                T tmp;                                                 \
+                __asm__ __volatile__("1:"                              \
+                                     "ldxr" W " %" R "0, [%2];"                
\
+                                     "cmp  %" R "0, %" R "4;"          \
+                                     "b.ne 2f;"                                
\
+                                     "stxr" W " %w1, %" R "3, [%2];"   \
+                                     "cbnz %w1, 1b;"                   \
+                                     "2:"                              \
+                    : "=&r" (previous),                                        
\
+                    "=&r" (tmp)                                                
\
+                    : "r"   (target),                                  \
+                    "r"   (set),                                       \
+                    "r"   (compare)                                    \
+                    : "memory", "cc");                                 \
+                *(T *)value = previous;                                        
\
+                return (previous == compare);                          \
+        }                                                              \
+        CK_CC_INLINE static bool                                       \
+        ck_pr_cas_##N(M *target, T compare, T set)                     \
+        {                                                              \
+                T previous;                                            \
+                T tmp;                                                 \
+                __asm__ __volatile__(                                  \
+                                     "1:"                              \
+                                     "ldxr" W " %" R "0, [%2];"                
\
+                                     "cmp  %" R "0, %" R "4;"          \
+                                     "b.ne 2f;"                                
\
+                                     "stxr" W " %w1, %" R "3, [%2];"   \
+                                     "cbnz %w1, 1b;"                   \
+                                     "2:"                              \
+                    : "=&r" (previous),                                        
\
+                    "=&r" (tmp)                                                
\
+                    : "r"   (target),                                  \
+                    "r"   (set),                                       \
+                    "r"   (compare)                                    \
+                    : "memory", "cc");                                 \
+                return (previous == compare);                          \
+        }
+
+CK_PR_CAS(ptr, void, void *, "", "")
+
+#define CK_PR_CAS_S(N, M, W, R)        CK_PR_CAS(N, M, M, W, R)
+CK_PR_CAS_S(64, uint64_t, "", "")
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_CAS_S(double, double, "", "")
+#endif
+CK_PR_CAS_S(32, uint32_t, "", "w")
+CK_PR_CAS_S(uint, unsigned int, "", "w")
+CK_PR_CAS_S(int, int, "", "w")
+CK_PR_CAS_S(16, uint16_t, "h", "w")
+CK_PR_CAS_S(8, uint8_t, "b", "w")
+CK_PR_CAS_S(short, short, "h", "w")
+CK_PR_CAS_S(char, char, "b", "w")
+
+
+#undef CK_PR_CAS_S
+#undef CK_PR_CAS
+
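
Sketch of one generated single-word variant used as a try-lock
(illustrative; a real lock would pair this with ck_pr acquire/release
fences around the critical section):

	static int lock;

	if (ck_pr_cas_int(&lock, 0, 1) == true) {
		/* acquired: 0 was observed and swapped to 1 */
	}
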
+#define CK_PR_FAS(N, M, T, W, R)                               \
+        CK_CC_INLINE static T                                  \
+        ck_pr_fas_##N(M *target, T v)                          \
+        {                                                      \
+                T previous;                                    \
+                T tmp;                                         \
+                __asm__ __volatile__("1:"                      \
+                                     "ldxr" W " %" R "0, [%2];"        \
+                                     "stxr" W " %w1, %" R "3, [%2];"\
+                                     "cbnz %w1, 1b;"           \
+                                        : "=&r" (previous),    \
+                                          "=&r" (tmp)          \
+                                        : "r"   (target),      \
+                                          "r"   (v)            \
+                                        : "memory", "cc");     \
+                return (previous);                             \
+        }
+
+CK_PR_FAS(64, uint64_t, uint64_t, "", "")
+CK_PR_FAS(32, uint32_t, uint32_t, "", "w")
+CK_PR_FAS(ptr, void, void *, "", "")
+CK_PR_FAS(int, int, int, "", "w")
+CK_PR_FAS(uint, unsigned int, unsigned int, "", "w")
+CK_PR_FAS(16, uint16_t, uint16_t, "h", "w")
+CK_PR_FAS(8, uint8_t, uint8_t, "b", "w")
+CK_PR_FAS(short, short, short, "h", "w")
+CK_PR_FAS(char, char, char, "b", "w")
+
+
+#undef CK_PR_FAS
+
+#define CK_PR_UNARY(O, N, M, T, I, W, R)                       \
+        CK_CC_INLINE static void                               \
+        ck_pr_##O##_##N(M *target)                             \
+        {                                                      \
+                T previous = 0;                                        \
+                T tmp = 0;                                     \
+                __asm__ __volatile__("1:"                      \
+                                     "ldxr" W " %" R "0, [%2];"        \
+                                      I ";"                    \
+                                     "stxr" W " %w1, %" R "0, [%2];"   \
+                                     "cbnz %w1, 1b;"           \
+                                        : "=&r" (previous),    \
+                                          "=&r" (tmp)          \
+                                        : "r"   (target)       \
+                                        : "memory", "cc");     \
+                return;                                                \
+        }
+
+CK_PR_UNARY(inc, ptr, void, void *, "add %0, %0, #1", "", "")
+CK_PR_UNARY(dec, ptr, void, void *, "sub %0, %0, #1", "", "")
+CK_PR_UNARY(not, ptr, void, void *, "mvn %0, %0", "", "")
+CK_PR_UNARY(inc, 64, uint64_t, uint64_t, "add %0, %0, #1", "", "")
+CK_PR_UNARY(dec, 64, uint64_t, uint64_t, "sub %0, %0, #1", "", "")
+CK_PR_UNARY(not, 64, uint64_t, uint64_t, "mvn %0, %0", "", "")
+
+#define CK_PR_UNARY_S(S, T, W)                                 \
+        CK_PR_UNARY(inc, S, T, T, "add %w0, %w0, #1", W, "w")  \
+        CK_PR_UNARY(dec, S, T, T, "sub %w0, %w0, #1", W, "w")  \
+        CK_PR_UNARY(not, S, T, T, "mvn %w0, %w0", W, "w")      \
+
+CK_PR_UNARY_S(32, uint32_t, "")
+CK_PR_UNARY_S(uint, unsigned int, "")
+CK_PR_UNARY_S(int, int, "")
+CK_PR_UNARY_S(16, uint16_t, "h")
+CK_PR_UNARY_S(8, uint8_t, "b")
+CK_PR_UNARY_S(short, short, "h")
+CK_PR_UNARY_S(char, char, "b")
+
+#undef CK_PR_UNARY_S
+#undef CK_PR_UNARY
+
+#define CK_PR_BINARY(O, N, M, T, I, W, R)                      \
+        CK_CC_INLINE static void                               \
+        ck_pr_##O##_##N(M *target, T delta)                    \
+        {                                                      \
+                T previous;                                    \
+                T tmp;                                         \
+                __asm__ __volatile__("1:"                      \
+                                     "ldxr" W " %" R "0, [%2];"\
+                                      I " %" R "0, %" R "0, %" R "3;"  \
+                                     "stxr" W " %w1, %" R "0, [%2];"   \
+                                     "cbnz %w1, 1b;"           \
+                                        : "=&r" (previous),    \
+                                          "=&r" (tmp)          \
+                                        : "r"   (target),      \
+                                          "r"   (delta)                \
+                                        : "memory", "cc");     \
+                return;                                                \
+        }
+
+CK_PR_BINARY(and, ptr, void, uintptr_t, "and", "", "")
+CK_PR_BINARY(add, ptr, void, uintptr_t, "add", "", "")
+CK_PR_BINARY(or, ptr, void, uintptr_t, "orr", "", "")
+CK_PR_BINARY(sub, ptr, void, uintptr_t, "sub", "", "")
+CK_PR_BINARY(xor, ptr, void, uintptr_t, "eor", "", "")
+CK_PR_BINARY(and, 64, uint64_t, uint64_t, "and", "", "")
+CK_PR_BINARY(add, 64, uint64_t, uint64_t, "add", "", "")
+CK_PR_BINARY(or, 64, uint64_t, uint64_t, "orr", "", "")
+CK_PR_BINARY(sub, 64, uint64_t, uint64_t, "sub", "", "")
+CK_PR_BINARY(xor, 64, uint64_t, uint64_t, "eor", "", "")
+
+#define CK_PR_BINARY_S(S, T, W)                                \
+        CK_PR_BINARY(and, S, T, T, "and", W, "w")      \
+        CK_PR_BINARY(add, S, T, T, "add", W, "w")      \
+        CK_PR_BINARY(or, S, T, T, "orr", W, "w")       \
+        CK_PR_BINARY(sub, S, T, T, "sub", W, "w")      \
+        CK_PR_BINARY(xor, S, T, T, "eor", W, "w")
+
+CK_PR_BINARY_S(32, uint32_t, "")
+CK_PR_BINARY_S(uint, unsigned int, "")
+CK_PR_BINARY_S(int, int, "")
+CK_PR_BINARY_S(16, uint16_t, "h")
+CK_PR_BINARY_S(8, uint8_t, "b")
+CK_PR_BINARY_S(short, short, "h")
+CK_PR_BINARY_S(char, char, "b")
+
+#undef CK_PR_BINARY_S
+#undef CK_PR_BINARY
+
+CK_CC_INLINE static void *
+ck_pr_faa_ptr(void *target, uintptr_t delta)
+{
+        uintptr_t previous, r, tmp;
+
+        __asm__ __volatile__("1:"
+                             "ldxr %0, [%3];"
+                             "add %1, %4, %0;"
+                             "stxr %w2, %1, [%3];"
+                             "cbnz %w2, 1b;"
+                                : "=&r" (previous),
+                                  "=&r" (r),
+                                  "=&r" (tmp)
+                                : "r"   (target),
+                                  "r"   (delta)
+                                : "memory", "cc");
+
+        return (void *)(previous);
+}
+
+CK_CC_INLINE static uint64_t
+ck_pr_faa_64(uint64_t *target, uint64_t delta)
+{
+        uint64_t previous, r, tmp;
+
+        __asm__ __volatile__("1:"
+                             "ldxr %0, [%3];"
+                             "add %1, %4, %0;"
+                             "stxr %w2, %1, [%3];"
+                             "cbnz %w2, 1b;"
+                                : "=&r" (previous),
+                                  "=&r" (r),
+                                  "=&r" (tmp)
+                                : "r"   (target),
+                                  "r"   (delta)
+                                : "memory", "cc");
+
+        return (previous);
+}
+
+#define CK_PR_FAA(S, T, W)                                             \
+        CK_CC_INLINE static T                                          \
+        ck_pr_faa_##S(T *target, T delta)                              \
+        {                                                              \
+                T previous, r, tmp;                                    \
+                __asm__ __volatile__("1:"                              \
+                                     "ldxr" W " %w0, [%3];"            \
+                                     "add %w1, %w4, %w0;"              \
+                                     "stxr" W " %w2, %w1, [%3];"       \
+                                     "cbnz %w2, 1b;"                   \
+                                        : "=&r" (previous),            \
+                                          "=&r" (r),                   \
+                                          "=&r" (tmp)                  \
+                                        : "r"   (target),              \
+                                          "r"   (delta)                        
\
+                                        : "memory", "cc");             \
+                return (previous);                                     \
+        }
+
+CK_PR_FAA(32, uint32_t, "")
+CK_PR_FAA(uint, unsigned int, "")
+CK_PR_FAA(int, int, "")
+CK_PR_FAA(16, uint16_t, "h")
+CK_PR_FAA(8, uint8_t, "b")
+CK_PR_FAA(short, short, "h")
+CK_PR_FAA(char, char, "b")
+
+#undef CK_PR_FAA
+
+#endif /* CK_PR_AARCH64_LLSC_H */
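
Sketch of the fetch-and-add family as a ticket dispenser;
ck_pr_faa_*() returns the value seen before the addition:

	static unsigned int next_ticket;

	unsigned int mine = ck_pr_faa_uint(&next_ticket, 1);
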
diff --git a/freebsd/sys/contrib/ck/include/gcc/aarch64/ck_pr_lse.h b/freebsd/sys/contrib/ck/include/gcc/aarch64/ck_pr_lse.h
new file mode 100644
index 00000000..e2c9554c
--- /dev/null
+++ b/freebsd/sys/contrib/ck/include/gcc/aarch64/ck_pr_lse.h
@@ -0,0 +1,298 @@
+/*
+ * Copyright 2009-2016 Samy Al Bahra.
+ * Copyright 2013-2016 Olivier Houchard.
+ * Copyright 2016 Alexey Kopytov.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+#ifndef CK_PR_AARCH64_LSE_H
+#define CK_PR_AARCH64_LSE_H
+
+#ifndef CK_PR_H
+#error Do not include this file directly, use ck_pr.h
+#endif
+
+CK_CC_INLINE static bool
+ck_pr_cas_64_2_value(uint64_t target[2], uint64_t compare[2], uint64_t set[2], uint64_t value[2])
+{
+        uint64_t tmp1;
+        uint64_t tmp2;
+        register uint64_t x0 __asm__ ("x0") = compare[0];
+        register uint64_t x1 __asm__ ("x1") = compare[1];
+        register uint64_t x2 __asm__ ("x2") = set[0];
+        register uint64_t x3 __asm__ ("x3") = set[1];
+
+        __asm__ __volatile__("casp %0, %1, %4, %5, [%6];"
+                             "eor %2, %0, %7;"
+                             "eor %3, %1, %8;"
+                             "orr %2, %2, %3;"
+                             : "+&r" (x0), "+&r" (x1), "=&r" (tmp1), "=&r" 
(tmp2)
+                             : "r" (x2), "r" (x3), "r" (target), "r" 
(compare[0]), "r" (compare[1])
+                             : "memory");
+
+        value[0] = x0;
+        value[1] = x1;
+
+        return (tmp1 == 0);
+}
+
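
For context: casp requires each of its two operand pairs to live in
consecutive registers starting at an even-numbered one, which is why
the inputs are pinned to x0/x1 and x2/x3 with register ... __asm__
("x0") above. Its effect, sketched as C pseudo-semantics executed as
one atomic unit (the store happens only on a match, and the loaded
values always come back in x0/x1, which is why the eor/orr sequence
can derive success as a zero result):

	/* if (mem[0] == x0 && mem[1] == x1) { mem[0] = x2; mem[1] = x3; }
	 * x0 = old mem[0]; x1 = old mem[1];				   */
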
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_2_value(void *target, void *compare, void *set, void *value)
+{
+        return (ck_pr_cas_64_2_value(CK_CPP_CAST(uint64_t *, target),
+                                   CK_CPP_CAST(uint64_t *, compare),
+                                   CK_CPP_CAST(uint64_t *, set),
+                                   CK_CPP_CAST(uint64_t *, value)));
+}
+
+CK_CC_INLINE static bool
+ck_pr_cas_64_2(uint64_t target[2], uint64_t compare[2], uint64_t set[2])
+{
+        register uint64_t x0 __asm__ ("x0") = compare[0];
+        register uint64_t x1 __asm__ ("x1") = compare[1];
+        register uint64_t x2 __asm__ ("x2") = set[0];
+        register uint64_t x3 __asm__ ("x3") = set[1];
+
+        __asm__ __volatile__("casp %0, %1, %2, %3, [%4];"
+                             "eor %0, %0, %5;"
+                             "eor %1, %1, %6;"
+                             "orr %0, %0, %1;"
+                             : "+&r" (x0), "+&r" (x1)
+                             : "r" (x2), "r" (x3), "r" (target), "r" 
(compare[0]), "r" (compare[1])
+                             : "memory");
+
+        return (x0 == 0);
+}
+CK_CC_INLINE static bool
+ck_pr_cas_ptr_2(void *target, void *compare, void *set)
+{
+        return (ck_pr_cas_64_2(CK_CPP_CAST(uint64_t *, target),
+                             CK_CPP_CAST(uint64_t *, compare),
+                             CK_CPP_CAST(uint64_t *, set)));
+}
+
+
+#define CK_PR_CAS(N, M, T, W, R)                                       \
+        CK_CC_INLINE static bool                                       \
+        ck_pr_cas_##N##_value(M *target, T compare, T set, M *value)   \
+        {                                                              \
+                *(T *)value = compare;                                 \
+                __asm__ __volatile__(                                  \
+                                     "cas" W " %" R "0, %" R "2, [%1];"\
+                    : "+&r" (*(T *)value)                              \
+                    : "r"   (target),                                  \
+                    "r"   (set)                                        \
+                    : "memory");                                       \
+                return (*(T *)value == compare);                        \
+        }                                                              \
+        CK_CC_INLINE static bool                                       \
+        ck_pr_cas_##N(M *target, T compare, T set)                     \
+        {                                                              \
+                T previous = compare;                                  \
+                __asm__ __volatile__(                                  \
+                                     "cas" W " %" R "0, %" R "2, [%1];"        
\
+                    : "+&r" (previous)                                 \
+                    : "r"   (target),                                  \
+                    "r"   (set)                                                
\
+                    : "memory");                                       \
+                return (previous == compare);                          \
+        }
+
+CK_PR_CAS(ptr, void, void *, "", "")
+
+#define CK_PR_CAS_S(N, M, W, R)        CK_PR_CAS(N, M, M, W, R)
+CK_PR_CAS_S(64, uint64_t, "", "")
+#ifndef CK_PR_DISABLE_DOUBLE
+CK_PR_CAS_S(double, double, "", "")
+#endif
+CK_PR_CAS_S(32, uint32_t, "", "w")
+CK_PR_CAS_S(uint, unsigned int, "", "w")
+CK_PR_CAS_S(int, int, "", "w")
+CK_PR_CAS_S(16, uint16_t, "h", "w")
+CK_PR_CAS_S(8, uint8_t, "b", "w")
+CK_PR_CAS_S(short, short, "h", "w")
+CK_PR_CAS_S(char, char, "b", "w")
+
+
+#undef CK_PR_CAS_S
+#undef CK_PR_CAS
+
+#define CK_PR_FAS(N, M, T, W, R)                                       \
+        CK_CC_INLINE static T                                          \
+        ck_pr_fas_##N(M *target, T v)                                  \
+        {                                                              \
+                T previous;                                            \
+                __asm__ __volatile__(                                  \
+                                     "swp" W " %" R "2, %" R "0, [%1];"        
\
+                                        : "=&r" (previous)             \
+                                        : "r"   (target),              \
+                                          "r"   (v)                    \
+                                        : "memory");                   \
+                return (previous);                                     \
+        }
+
+CK_PR_FAS(64, uint64_t, uint64_t, "", "")
+CK_PR_FAS(32, uint32_t, uint32_t, "", "w")
+CK_PR_FAS(ptr, void, void *, "", "")
+CK_PR_FAS(int, int, int, "", "w")
+CK_PR_FAS(uint, unsigned int, unsigned int, "", "w")
+CK_PR_FAS(16, uint16_t, uint16_t, "h", "w")
+CK_PR_FAS(8, uint8_t, uint8_t, "b", "w")
+CK_PR_FAS(short, short, short, "h", "w")
+CK_PR_FAS(char, char, char, "b", "w")
+
+
+#undef CK_PR_FAS
+
+#define CK_PR_UNARY(O, N, M, T, I, W, R, S)                    \
+        CK_CC_INLINE static void                               \
+        ck_pr_##O##_##N(M *target)                             \
+        {                                                      \
+                __asm__ __volatile__(I ";"                     \
+                                     "st" S W " " R "0, [%0];" \
+                                        :                      \
+                                        : "r"   (target)       \
+                                        : "x0", "memory");     \
+                return;                                                \
+        }
+
+CK_PR_UNARY(inc, ptr, void, void *, "mov x0, 1", "", "x", "add")
+CK_PR_UNARY(dec, ptr, void, void *, "mov x0, -1", "", "x", "add")
+CK_PR_UNARY(not, ptr, void, void *, "mov x0, -1", "", "x", "eor")
+CK_PR_UNARY(inc, 64, uint64_t, uint64_t, "mov x0, 1", "", "x", "add")
+CK_PR_UNARY(dec, 64, uint64_t, uint64_t, "mov x0, -1", "", "x", "add")
+CK_PR_UNARY(not, 64, uint64_t, uint64_t, "mov x0, -1", "", "x", "eor")
+
+#define CK_PR_UNARY_S(S, T, W)                                 \
+        CK_PR_UNARY(inc, S, T, T, "mov w0, 1", W, "w", "add")  \
+        CK_PR_UNARY(dec, S, T, T, "mov w0, -1", W, "w", "add") \
+        CK_PR_UNARY(not, S, T, T, "mov w0, -1", W, "w", "eor") \
+
+CK_PR_UNARY_S(32, uint32_t, "")
+CK_PR_UNARY_S(uint, unsigned int, "")
+CK_PR_UNARY_S(int, int, "")
+CK_PR_UNARY_S(16, uint16_t, "h")
+CK_PR_UNARY_S(8, uint8_t, "b")
+CK_PR_UNARY_S(short, short, "h")
+CK_PR_UNARY_S(char, char, "b")
+
+#undef CK_PR_UNARY_S
+#undef CK_PR_UNARY
+
+#define CK_PR_BINARY(O, N, M, T, S, W, R, I)                   \
+        CK_CC_INLINE static void                               \
+        ck_pr_##O##_##N(M *target, T delta)                    \
+        {                                                      \
+                __asm__ __volatile__(I ";"                     \
+                                     "st" S W " %" R "0, [%1];"        \
+                                        : "+&r" (delta)                \
+                                        : "r"   (target)       \
+                                        : "memory");           \
+                return;                                                \
+        }
+
+CK_PR_BINARY(and, ptr, void, uintptr_t, "clr", "", "", "mvn %0, %0")
+CK_PR_BINARY(add, ptr, void, uintptr_t, "add", "", "", "")
+CK_PR_BINARY(or, ptr, void, uintptr_t, "set", "", "", "")
+CK_PR_BINARY(sub, ptr, void, uintptr_t, "add", "", "", "neg %0, %0")
+CK_PR_BINARY(xor, ptr, void, uintptr_t, "eor", "", "", "")
+CK_PR_BINARY(and, 64, uint64_t, uint64_t, "clr", "", "", "mvn %0, %0")
+CK_PR_BINARY(add, 64, uint64_t, uint64_t, "add", "", "", "")
+CK_PR_BINARY(or, 64, uint64_t, uint64_t, "set", "", "", "")
+CK_PR_BINARY(sub, 64, uint64_t, uint64_t, "add", "", "", "neg %0, %0")
+CK_PR_BINARY(xor, 64, uint64_t, uint64_t, "eor", "", "", "")
+
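
These map onto the no-result LSE store forms (stadd/stclr/stset/
steor); and is expressed as clr of the complemented operand (mvn) and
sub as add of the negated one (neg). A quick sketch:

	uint64_t x = 10;

	ck_pr_sub_64(&x, 3);	/* emits "neg; stadd": x becomes 7 */
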
+#define CK_PR_BINARY_S(S, T, W)                                        \
+        CK_PR_BINARY(and, S, T, T, "clr", W, "w", "mvn %w0, %w0")      \
+        CK_PR_BINARY(add, S, T, T, "add", W, "w", "")                  \
+        CK_PR_BINARY(or, S, T, T, "set", W, "w", "")                   \
+        CK_PR_BINARY(sub, S, T, T, "add", W, "w", "neg %w0, %w0")      \
+        CK_PR_BINARY(xor, S, T, T, "eor", W, "w", "")
+
+CK_PR_BINARY_S(32, uint32_t, "")
+CK_PR_BINARY_S(uint, unsigned int, "")
+CK_PR_BINARY_S(int, int, "")
+CK_PR_BINARY_S(16, uint16_t, "h")
+CK_PR_BINARY_S(8, uint8_t, "b")
+CK_PR_BINARY_S(short, short, "h")
+CK_PR_BINARY_S(char, char, "b")
+
+#undef CK_PR_BINARY_S
+#undef CK_PR_BINARY
+
+CK_CC_INLINE static void *
+ck_pr_faa_ptr(void *target, uintptr_t delta)
+{
+        uintptr_t previous;
+
+        __asm__ __volatile__(
+                             "ldadd %2, %0, [%1];"
+                                : "=r" (previous)
+                                : "r"   (target),
+                                  "r"   (delta)
+                                : "memory");
+
+        return (void *)(previous);
+}
+
+CK_CC_INLINE static uint64_t
+ck_pr_faa_64(uint64_t *target, uint64_t delta)
+{
+        uint64_t previous;
+
+        __asm__ __volatile__(
+                             "ldadd %2, %0, [%1];"
+                                : "=r" (previous)
+                                : "r"   (target),
+                                  "r"   (delta)
+                                : "memory");
+
+        return (previous);
+}
+
+#define CK_PR_FAA(S, T, W)                                             \
+        CK_CC_INLINE static T                                          \
+        ck_pr_faa_##S(T *target, T delta)                              \
+        {                                                              \
+                T previous;                                            \
+                __asm__ __volatile__(                                  \
+                                     "ldadd" W " %w2, %w0, [%1];"      \
+                                        : "=r" (previous)              \
+                                        : "r"   (target),              \
+                                          "r"   (delta)                        
\
+                                        : "memory");                   \
+                return (previous);                                     \
+        }
+
+CK_PR_FAA(32, uint32_t, "")
+CK_PR_FAA(uint, unsigned int, "")
+CK_PR_FAA(int, int, "")
+CK_PR_FAA(16, uint16_t, "h")
+CK_PR_FAA(8, uint8_t, "b")
+CK_PR_FAA(short, short, "h")
+CK_PR_FAA(char, char, "b")
+
+#undef CK_PR_FAA
+
+#endif /* CK_PR_AARCH64_LSE_H */
diff --git a/libbsd.py b/libbsd.py
index 99877a2f..909c0037 100644
--- a/libbsd.py
+++ b/libbsd.py
@@ -104,6 +104,8 @@ _defaults = {
         # (source, [targets..])
         # i386
         ('freebsd/sys/i386/include', ['freebsd/sys/x86/include', 'freebsd/sys/i386/include']),
+        # arm64
+        ('freebsd/sys/aarch64/include', ['freebsd/sys/aarch64/include', 'freebsd/sys/arm64/include']),
     ],
 
     #
@@ -340,6 +342,8 @@ class base(builder.Module):
                 'sys/contrib/ck/include/ck_string.h',
                 'sys/contrib/ck/include/gcc/aarch64/ck_f_pr.h',
                 'sys/contrib/ck/include/gcc/aarch64/ck_pr.h',
+                'sys/contrib/ck/include/gcc/aarch64/ck_pr_lse.h',
+                'sys/contrib/ck/include/gcc/aarch64/ck_pr_llsc.h',
                 'sys/contrib/ck/include/gcc/arm/ck_f_pr.h',
                 'sys/contrib/ck/include/gcc/arm/ck_pr.h',
                 'sys/contrib/ck/include/gcc/ck_cc.h',
@@ -1444,6 +1448,9 @@ class dev_nic(builder.Module):
         )
         self.addCPUDependentFreeBSDHeaderFiles(
             [
+                'sys/arm64/include/armreg.h',
+                'sys/arm64/include/cpufunc.h',
+                'sys/arm64/include/cpu.h',
                 'sys/arm/include/cpufunc.h',
                 'sys/i386/include/md_var.h',
                 'sys/i386/include/intr_machdep.h',
@@ -4913,6 +4920,13 @@ class in_cksum(builder.Module):
                 'sys/sparc64/include/in_cksum.h',
             ]
         )
+        self.addTargetSourceCPUDependentHeaderFiles(
+            [ 'arm64' ],
+            'arm64',
+            [
+                'sys/arm64/include/in_cksum.h',
+            ]
+        )
         self.addTargetSourceCPUDependentHeaderFiles(
             [ 'arm', 'avr', 'bfin', 'h8300', 'lm32', 'm32c', 'm32r', 'm68k',
               'nios2', 'sh', 'sparc', 'v850' ],
@@ -4921,6 +4935,13 @@ class in_cksum(builder.Module):
                 'sys/mips/include/in_cksum.h',
             ]
         )
+        self.addCPUDependentFreeBSDSourceFiles(
+            [ 'aarch64', 'arm64' ],
+            [
+                'sys/arm64/arm64/in_cksum.c',
+            ],
+            mm.generator['source']()
+        )
         self.addCPUDependentFreeBSDSourceFiles(
             [ 'i386' ],
             [
diff --git a/rtemsbsd/include/machine/frame.h b/rtemsbsd/include/machine/frame.h
new file mode 100644
index 00000000..936ffd88
--- /dev/null
+++ b/rtemsbsd/include/machine/frame.h
@@ -0,0 +1 @@
+/* EMPTY */
-- 
2.20.1
