omjavaid created this revision.
omjavaid added a reviewer: labath.
Herald added a subscriber: tschuett.
Herald added a reviewer: rengolin.
omjavaid added parent revisions: D82853: [LLDB] Support custom expedited register set in gdb-remote, D82855: [LLDB] Send SVE vg register in custom expedited registerset, D82857: [LLDB] Add per-thread register infos shared pointer in gdb-remote, D79699: Add ptrace register access for AArch64 SVE registers.
This patch builds on the previously submitted SVE patches for the expedited register set and per-thread register infos (D82853 <https://reviews.llvm.org/D82853>, D82855 <https://reviews.llvm.org/D82855> and D82857 <https://reviews.llvm.org/D82857>). We need to resize the SVE registers based on the vg value received in the expedited register list, and also when the vg register is written with the "register write vg" command. The resize results in updated offsets for all registers in the FPR and SVE register sets. These offsets are configured in the native register context by RegisterInfoInterface and are updated by ProcessGDBRemote and GDBRemoteRegisterContext. A follow-up patch will provide an API test to verify this change.

https://reviews.llvm.org/D82863

Files:
  lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux_arm64.cpp
  lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.cpp
  lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp
  lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.cpp
  lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.h
  lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp
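To make the layout change easier to follow, the vg-based size and offset arithmetic that UpdateARM64SVERegistersInfos applies in the patch below can be summarized by this small standalone sketch. SVELayout and ComputeSVELayout are illustrative names for this example only, not symbols introduced by the patch; vg is the SVE vector length expressed in 8-byte granules.

#include <cstdint>
#include <cstdio>

// Sizes and offsets of the SVE register block as a function of vg.
struct SVELayout {
  uint32_t z_reg_byte_size; // size of each of z0-z31
  uint32_t p_reg_byte_size; // size of each of p0-p15 and ffr
  uint32_t fpsr_offset;     // offset of fpsr, relative to the start of z0
  uint32_t fpcr_offset;     // offset of fpcr, immediately after fpsr
};

static SVELayout ComputeSVELayout(uint64_t vg, uint32_t z0_offset) {
  SVELayout layout;
  layout.z_reg_byte_size = vg * 8; // SVE Z register size is vg x 8 bytes.
  layout.p_reg_byte_size = vg;     // P registers and ffr are vg bytes each.
  // fpsr/fpcr sit after 32 Z registers, 16 P registers and ffr.
  layout.fpsr_offset =
      z0_offset + (layout.z_reg_byte_size * 32) + (vg * 16) + vg;
  layout.fpcr_offset = layout.fpsr_offset + 4;
  return layout;
}

int main() {
  // Example: vg = 4 corresponds to a 256-bit (32-byte) vector length.
  SVELayout layout = ComputeSVELayout(/*vg=*/4, /*z0_offset=*/0);
  std::printf("z: %u bytes, p/ffr: %u bytes, fpsr at %u, fpcr at %u\n",
              layout.z_reg_byte_size, layout.p_reg_byte_size,
              layout.fpsr_offset, layout.fpcr_offset);
  return 0;
}

Changing vg (for example via "register write vg") shifts every FPR/SVE offset, which is why the register data buffer and the register validity flags are rebuilt in AArch64SVEReconfigure below.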
Index: lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp
===================================================================
--- lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp
+++ lldb/source/Plugins/Process/gdb-remote/ProcessGDBRemote.cpp
@@ -1762,6 +1762,26 @@
       gdb_thread->PrivateSetRegisterValue(pair.first, buffer_sp->GetData());
     }
 
+    // Code below is specific to AArch64 target in SVE state
+    // If expedited register set contains vector granule (vg) register
+    // then thread's register context reconfiguration is triggered by
+    // calling UpdateARM64SVERegistersInfos.
+    const ArchSpec &arch = GetTarget().GetArchitecture();
+    if (arch.IsValid() && (arch.GetMachine() == llvm::Triple::aarch64)) {
+      uint8_t arm64_sve_vg_dwarf_regnum = 46;
+      GDBRemoteRegisterContext *reg_ctx_sp =
+          static_cast<GDBRemoteRegisterContext *>(
+              gdb_thread->GetRegisterContext().get());
+
+      if (reg_ctx_sp) {
+        uint32_t vg_regnum = reg_ctx_sp->ConvertRegisterKindToRegisterNumber(
+            eRegisterKindDWARF, arm64_sve_vg_dwarf_regnum);
+        if (expedited_register_map.count(vg_regnum)) {
+          reg_ctx_sp->AArch64SVEReconfigure();
+        }
+      }
+    }
+
     thread_sp->SetName(thread_name.empty() ? nullptr : thread_name.c_str());
     gdb_thread->SetThreadDispatchQAddr(thread_dispatch_qaddr);

Index: lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.h
===================================================================
--- lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.h
+++ lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.h
@@ -40,6 +40,8 @@
 
   void HardcodeARMRegisters(bool from_scratch);
 
+  bool UpdateARM64SVERegistersInfos(uint64_t vg, uint32_t &end_reg_offset);
+
   void CloneFrom(GDBRemoteDynamicRegisterInfoSP process_reginfo);
 };
 
@@ -79,6 +81,8 @@
   uint32_t ConvertRegisterKindToRegisterNumber(lldb::RegisterKind kind,
                                                uint32_t num) override;
 
+  bool AArch64SVEReconfigure();
+
 protected:
   friend class ThreadGDBRemote;

Index: lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.cpp
===================================================================
--- lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.cpp
+++ lldb/source/Plugins/Process/gdb-remote/GDBRemoteRegisterContext.cpp
@@ -347,6 +347,16 @@
   if (dst == nullptr)
     return false;
 
+  // Code below is specific to AArch64 target in SVE state
+  // If vector granule (vg) register is being written then thread's
+  // register context reconfiguration is triggered on success.
+  bool do_reconfigure_arm64_sve = false;
+  const ArchSpec &arch = process->GetTarget().GetArchitecture();
+  if (arch.IsValid() && (arch.GetMachine() == llvm::Triple::aarch64)) {
+    if (reg_info->kinds[eRegisterKindDWARF] == 46)
+      do_reconfigure_arm64_sve = true;
+  }
+
   if (data.CopyByteOrderedData(data_offset,         // src offset
                                reg_info->byte_size, // src length
                                dst,                 // dst
@@ -366,6 +376,11 @@
       {
         SetAllRegisterValid(false);
+
+        if (do_reconfigure_arm64_sve &&
+            GetPrimordialRegister(reg_info, gdb_comm))
+          AArch64SVEReconfigure();
+
         return true;
       }
     } else {
@@ -398,6 +413,10 @@
     } else {
       // This is an actual register, write it
      success = SetPrimordialRegister(reg_info, gdb_comm);
+
+      if (success && do_reconfigure_arm64_sve &&
+          GetPrimordialRegister(reg_info, gdb_comm))
+        AArch64SVEReconfigure();
     }
 
     // Check if writing this register will invalidate any other register
@@ -724,6 +743,110 @@
   return m_reg_info_sp->ConvertRegisterKindToRegisterNumber(kind, num);
 }
 
+bool GDBRemoteRegisterContext::AArch64SVEReconfigure(void) {
+  if (!m_reg_info_sp)
+    return false;
+
+  uint64_t fail_value = LLDB_INVALID_ADDRESS;
+  uint32_t vg_reg_num =
+      ConvertRegisterKindToRegisterNumber(eRegisterKindDWARF, 46);
+  uint64_t vg_reg_value = ReadRegisterAsUnsigned(vg_reg_num, fail_value);
+
+  if (vg_reg_value != fail_value && vg_reg_value <= 32) {
+    uint32_t end_reg_offset = 0;
+    if (m_reg_info_sp->UpdateARM64SVERegistersInfos(vg_reg_value,
+                                                    end_reg_offset)) {
+      uint64_t bytes_to_copy = m_reg_data.GetByteSize();
+      if (end_reg_offset < bytes_to_copy)
+        bytes_to_copy = end_reg_offset;
+
+      // Make a heap based buffer that is big enough to store all registers
+      DataBufferSP reg_data_sp(new DataBufferHeap(end_reg_offset, 0));
+      m_reg_data.CopyData(0, bytes_to_copy, reg_data_sp->GetBytes());
+      m_reg_data.Clear();
+      m_reg_data.SetData(reg_data_sp);
+      m_reg_data.SetByteOrder(GetByteOrder());
+
+      // Invalidate all registers except GPRs and vg. GPRs will retain their
+      // state.
+      uint32_t v0_reg_num =
+          ConvertRegisterKindToRegisterNumber(eRegisterKindDWARF, 64);
+      for (uint16_t i = v0_reg_num; i < vg_reg_num; i++)
+        m_reg_valid[i] = false;
+
+      for (uint16_t i = vg_reg_num + 1; i < m_reg_valid.size(); i++)
+        m_reg_valid[i] = false;
+
+      return true;
+    }
+  }
+
+  return false;
+}
+
+bool GDBRemoteDynamicRegisterInfo::UpdateARM64SVERegistersInfos(
+    uint64_t vg, uint32_t &end_reg_offset) {
+  uint32_t z0_reg_num =
+      ConvertRegisterKindToRegisterNumber(eRegisterKindDWARF, 96);
+  RegisterInfo *reg_info = GetRegisterInfoAtIndex(z0_reg_num);
+  uint64_t vg_reg_value_old = reg_info->byte_size / 8;
+
+  if (vg_reg_value_old != vg) {
+    uint32_t s_reg_offset = reg_info->byte_offset;
+    uint32_t d_reg_offset = reg_info->byte_offset;
+    uint32_t v_reg_offset = reg_info->byte_offset;
+    uint32_t z_reg_offset = reg_info->byte_offset;
+
+    // SVE Z register size is vg x 8 bytes.
+    uint32_t z_reg_byte_size = vg * 8;
+
+    // To calculate fpsr and fpcr offsets we skip over 32 Z regs,
+    // 16 P regs and the ffr register to reach the end of SVE regs data.
+    uint32_t fpsr_offset =
+        z_reg_offset + (z_reg_byte_size * 32) + (vg * 16) + vg;
+    uint32_t fpcr_offset = fpsr_offset + 4;
+
+    // We iterate over register infos assuming register infos will be indexed
+    // according to lldb register numbers defined in RegisterInfos_arm64_sve.h
+    for (auto &reg : m_regs) {
+      if (reg.name[0] == 'v') {
+        reg.byte_offset = v_reg_offset;
+        v_reg_offset += z_reg_byte_size;
+      } else if (reg.name[0] == 's') {
+        reg.byte_offset = s_reg_offset;
+        s_reg_offset += z_reg_byte_size;
+      } else if (reg.name[0] == 'd') {
+        reg.byte_offset = d_reg_offset;
+        d_reg_offset += z_reg_byte_size;
+      } else if (strcmp(reg.name, "fpsr") == 0) {
+        reg.byte_offset = fpsr_offset;
+      } else if (strcmp(reg.name, "fpcr") == 0) {
+        reg.byte_offset = fpcr_offset;
+      } else if (reg.name[0] == 'z') {
+        reg.byte_size = z_reg_byte_size;
+        reg.byte_offset = z_reg_offset;
+        z_reg_offset += z_reg_byte_size;
+      } else if (reg.name[0] == 'p') {
+        reg.byte_size = vg;
+        reg.byte_offset = z_reg_offset;
+        z_reg_offset += vg;
+      } else if (strcmp(reg.name, "ffr") == 0) {
+        reg.byte_size = vg;
+        reg.byte_offset = z_reg_offset;
+        z_reg_offset += vg;
+      }
+
+      uint32_t new_end_reg_offset = reg.byte_offset + reg.byte_size;
+      if (end_reg_offset < new_end_reg_offset)
+        end_reg_offset = new_end_reg_offset;
+    }
+
+    return true;
+  }
+
+  return false;
+}
+
 void GDBRemoteDynamicRegisterInfo::CloneFrom(
     GDBRemoteDynamicRegisterInfoSP proc_reginfo) {
   m_regs = proc_reginfo->m_regs;
@@ -917,17 +1040,18 @@
   if (from_scratch) {
     // Calculate the offsets of the registers
     // Note that the layout of the "composite" registers (d0-d15 and q0-q15)
-    // which comes after the "primordial" registers is important. This enables
-    // us to calculate the offset of the composite register by using the offset
-    // of its first primordial register. For example, to calculate the offset
-    // of q0, use s0's offset.
+    // which comes after the "primordial" registers is important. This
+    // enables us to calculate the offset of the composite register by using
+    // the offset of its first primordial register. For example, to
+    // calculate the offset of q0, use s0's offset.
     if (g_register_infos[2].byte_offset == 0) {
       uint32_t byte_offset = 0;
       for (i = 0; i < num_registers; ++i) {
-        // For primordial registers, increment the byte_offset by the byte_size
-        // to arrive at the byte_offset for the next register. Otherwise, we
-        // have a composite register whose offset can be calculated by
-        // consulting the offset of its first primordial register.
+        // For primordial registers, increment the byte_offset by the
+        // byte_size to arrive at the byte_offset for the next register.
+        // Otherwise, we have a composite register whose offset can be
+        // calculated by consulting the offset of its first primordial
+        // register.
        if (!g_register_infos[i].value_regs) {
           g_register_infos[i].byte_offset = byte_offset;
           byte_offset += g_register_infos[i].byte_size;
@@ -961,9 +1085,9 @@
     const size_t num_common_regs = num_registers - num_composites;
     RegisterInfo *g_comp_register_infos = g_register_infos + num_common_regs;
-    // First we need to validate that all registers that we already have match
-    // the non composite regs. If so, then we can add the registers, else we
-    // need to bail
+    // First we need to validate that all registers that we already have
+    // match the non composite regs. If so, then we can add the registers,
+    // else we need to bail
     bool match = true;
     if (num_dynamic_regs == num_common_regs) {
       for (i = 0; match && i < num_dynamic_regs; ++i) {
@@ -1000,7 +1124,8 @@
           if (reg_info && reg_info->name &&
               ::strcasecmp(reg_info->name, reg_name) == 0) {
             // The name matches the existing primordial entry. Find and
-            // assign the offset, and then add this composite register entry.
+            // assign the offset, and then add this composite register
+            // entry.
             g_comp_register_infos[i].byte_offset = reg_info->byte_offset;
             name.SetCString(g_comp_register_infos[i].name);
             AddRegister(g_comp_register_infos[i], name, alt_name,

Index: lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp
===================================================================
--- lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp
+++ lldb/source/Plugins/Process/gdb-remote/GDBRemoteCommunicationServerLLGS.cpp
@@ -506,7 +506,11 @@
 #ifdef LLDB_JTHREADSINFO_FULL_REGISTER_SET
   // Expedite all registers in the first register set (i.e. should be GPRs)
   // that are not contained in other registers.
-  const RegisterSet *reg_set_p = reg_ctx_sp->GetRegisterSet(0);
+  const RegisterSet *reg_set_p = reg_ctx.GetExpeditedRegisterSet();
+
+  if (reg_set_p == nullptr)
+    reg_set_p = reg_ctx_sp->GetRegisterSet(0);
+
   if (!reg_set_p)
     return llvm::make_error<llvm::StringError>("failed to get registers",
                                                llvm::inconvertibleErrorCode());
@@ -516,6 +520,8 @@
 #else
   // Expedite only a couple of registers until we figure out why sending
   // registers is expensive.
+  std::vector<uint32_t> expedited_reg_nums;
+
   static const uint32_t k_expedited_registers[] = {
       LLDB_REGNUM_GENERIC_PC, LLDB_REGNUM_GENERIC_SP, LLDB_REGNUM_GENERIC_FP,
       LLDB_REGNUM_GENERIC_RA, LLDB_INVALID_REGNUM};
@@ -526,6 +532,22 @@
         eRegisterKindGeneric, *generic_reg_p);
     if (reg_num == LLDB_INVALID_REGNUM)
       continue; // Target does not support the given register.
+    else
+      expedited_reg_nums.insert(expedited_reg_nums.end(), reg_num);
+  }
+
+  // Check if architecture is AArch64 with SVE enabled.
+  // Make sure we send vector granule register VG in case of SVE.
+  const ArchSpec &arch = thread.GetProcess().GetArchitecture();
+  if (arch.GetMachine() == llvm::Triple::aarch64 ||
+      arch.GetMachine() == llvm::Triple::aarch64_be) {
+    uint32_t reg_num =
+        reg_ctx.ConvertRegisterKindToRegisterNumber(eRegisterKindDWARF, 46);
+    if (reg_num != LLDB_INVALID_REGNUM)
+      expedited_reg_nums.insert(expedited_reg_nums.end(), reg_num);
+  }
+
+  for (auto &reg_num : expedited_reg_nums) {
 #endif
 
     const RegisterInfo *const reg_info_p =

Index: lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.cpp
===================================================================
--- lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.cpp
+++ lldb/source/Plugins/Process/Utility/DynamicRegisterInfo.cpp
@@ -533,6 +533,17 @@
     }
   }
 
+  // Create per thread reginfo to support AArch64 SVE dynamic register sizes.
+  if (arch.GetMachine() == llvm::Triple::aarch64 ||
+      arch.GetMachine() == llvm::Triple::aarch64_be) {
+    for (const auto &reg : m_regs) {
+      if (strcmp(reg.name, "vg") == 0) {
+        m_per_thread_reginfo = true;
+        break;
+      }
+    }
+  }
+
   if (!generic_regs_specified) {
     switch (arch.GetMachine()) {
     case llvm::Triple::aarch64:
@@ -684,6 +695,7 @@
   m_invalidate_regs_map.clear();
   m_dynamic_reg_size_map.clear();
   m_reg_data_byte_size = 0;
+  m_per_thread_reginfo = false;
   m_finalized = false;
 }

Index: lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux_arm64.cpp
===================================================================
--- lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux_arm64.cpp
+++ lldb/source/Plugins/Process/Linux/NativeRegisterContextLinux_arm64.cpp
@@ -423,7 +423,23 @@
       }
     }
   } else if (IsSVERegVG(reg)) {
-    return Status("SVE state change operation not supported");
+    uint64_t vg_value = reg_value.GetAsUInt64();
+
+    if (sve_vl_valid(vg_value * 8)) {
+      if (m_sve_header_is_valid && vg_value == GetSVERegVG())
+        return error;
+
+      SetSVERegVG(vg_value);
+
+      error = WriteSVEHeader();
+      if (error.Success())
+        ConfigureRegisterContext();
+
+      if (m_sve_header_is_valid && vg_value == GetSVERegVG())
+        return error;
+    }
+
+    return Status("SVE vector length update failed.");
   } else if (IsSVE(reg)) {
     if (m_sve_state == SVE_STATE::SVE_STATE_DISABLED) {
       return Status("SVE disabled or not supported");
_______________________________________________
lldb-commits mailing list
lldb-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits