Author: David Spickett
Date: 2023-12-20T09:41:33Z
New Revision: d14d52158bc444e2d036067305cf54aeea7c9edb
URL: https://github.com/llvm/llvm-project/commit/d14d52158bc444e2d036067305cf54aeea7c9edb
DIFF: https://github.com/llvm/llvm-project/commit/d14d52158bc444e2d036067305cf54aeea7c9edb.diff

LOG: [lldb][test] Add extra logging for module cache test

Also remove the workaround I was trying, as this logging may prove what
the actual issue is. I think the thread plan map in Process is cleared
before the threads are destroyed. So Thread::ShouldStop could fetch the
current plan, then the plan map is cleared, and then Thread::ShouldStop,
deciding based on that now-stale plan, pops a plan from the empty stack.
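To make that suspected sequence concrete, here is a toy model in plain
Python. This is not LLDB source: PlanStack, should_stop(), and the
recreate-on-lookup behaviour are invented for illustration only, to show
the shape of the ordering problem rather than the real
Process/ThreadPlanStackMap code.

class PlanStack:
    def __init__(self):
        # Every thread always has a base plan that must never be popped.
        self.plans = ["base plan"]

    def current(self):
        return self.plans[-1]

    def pop(self):
        # The invariant the test is tripping: popping when only the base
        # plan remains is a bug.
        assert len(self.plans) > 1, "pop with only the base plan remaining"
        return self.plans.pop()

def should_stop(plan_map, tid):
    # 1. Thread::ShouldStop reads the current plan and decides to pop it.
    current = plan_map[tid].current()
    print(f"deciding based on: {current}")

    # 2. Meanwhile, process teardown clears the process-wide plan map
    #    before the threads themselves are destroyed.
    plan_map.clear()

    # 3. The pop then runs against a freshly recreated stack that holds
    #    only the base plan, firing the assert.
    plan_map.setdefault(tid, PlanStack()).pop()

plan_map = {0x1234: PlanStack()}
plan_map[0x1234].plans.append("step over plan")
should_stop(plan_map, 0x1234)  # AssertionError: only the base plan remains

Running this raises the AssertionError at step 3, which has the same
shape as popping past the base plan on a real thread's plan stack.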
Added: 


Modified: 
    lldb/source/Target/Thread.cpp
    lldb/test/API/python_api/global_module_cache/TestGlobalModuleCache.py

Removed: 


################################################################################
diff --git a/lldb/source/Target/Thread.cpp b/lldb/source/Target/Thread.cpp
index 865cee97e6d878..cbfb323f6d9271 100644
--- a/lldb/source/Target/Thread.cpp
+++ b/lldb/source/Target/Thread.cpp
@@ -883,6 +883,18 @@ bool Thread::ShouldStop(Event *event_ptr) {
   // If a Controlling Plan wants to stop, we let it. Otherwise, see if
   // the plan's parent wants to stop.
 
+  // Temporary logging to figure out a crash on Arm/AArch64 Linux.
+  {
+    LLDB_LOGF(log, "^^^^^^^^ Thread::ShouldStop plan stack before "
+                   "PopPlan ^^^^^^^^");
+    StreamString s;
+    s.IndentMore();
+    GetProcess()->DumpThreadPlansForTID(
+        s, GetID(), eDescriptionLevelVerbose, true /* internal */,
+        false /* condense_trivial */, true /* skip_unreported */);
+    LLDB_LOG(log, s.GetData());
+  }
+
   PopPlan();
   if (should_stop && current_plan->IsControllingPlan() &&
       !current_plan->OkayToDiscard()) {

diff --git a/lldb/test/API/python_api/global_module_cache/TestGlobalModuleCache.py b/lldb/test/API/python_api/global_module_cache/TestGlobalModuleCache.py
index cc9da15b566c25..aacfb92735b331 100644
--- a/lldb/test/API/python_api/global_module_cache/TestGlobalModuleCache.py
+++ b/lldb/test/API/python_api/global_module_cache/TestGlobalModuleCache.py
@@ -34,6 +34,10 @@ def copy_to_main(self, src, dst):
     # The rerun tests indicate rerunning on Windows doesn't really work, so
     # this one won't either.
     @skipIfWindows
+    # On Arm and AArch64 Linux, this test attempts to pop a thread plan when
+    # we only have the base plan remaining. Skip it until we can figure out
+    # the bug this is exposing.
+    @skipIf(oslist=["linux"], archs=["arm", "aarch64"])
     def test_OneTargetOneDebugger(self):
         self.do_test(True, True)
 
@@ -50,6 +54,11 @@ def test_OneTargetTwoDebuggers(self):
         self.do_test(True, False)
 
     def do_test(self, one_target, one_debugger):
+        # Here to debug flakiness on Arm, remove later!
+        log_cmd_result = lldb.SBCommandReturnObject()
+        interp = self.dbg.GetCommandInterpreter()
+        interp.HandleCommand("log enable lldb step", log_cmd_result)
+
         # Make sure that if we have one target, and we run, then
         # change the binary and rerun, the binary (and any .o files
         # if using dwarf in .o file debugging) get removed from the
@@ -100,13 +109,6 @@ def do_test(self, one_target, one_debugger):
             self.old_debugger = self.dbg
             self.dbg = new_debugger
             def cleanupDebugger(self):
-                # On Arm and AArch64 Linux, it is suspected that destroying
-                # the debugger first causes lldb to try to pop from an empty
-                # thread plan stack. Try to prove this by killing the process
-                # first.
-                for i in range(self.dbg.GetNumTargets()):
-                    self.dbg.GetTargetAtIndex(i).GetProcess().Kill()
-
                 lldb.SBDebugger.Destroy(self.dbg)
                 self.dbg = self.old_debugger
                 self.old_debugger = None
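For anyone reproducing this locally: the test enables the "lldb step"
log channel, which is where the new plan stack dump is written. Here is
a minimal standalone sketch for capturing that channel to a file; the
log path is a placeholder and the surrounding setup is an assumption,
not part of this commit.

import lldb

# Standalone scripts must initialize the debugger library themselves;
# inside the lldb API test suite this is already done for you.
lldb.SBDebugger.Initialize()
dbg = lldb.SBDebugger.Create()

# Same channel/category the test enables, redirected to a file
# (/tmp/lldb-step.log is a placeholder path).
res = lldb.SBCommandReturnObject()
dbg.GetCommandInterpreter().HandleCommand(
    "log enable -f /tmp/lldb-step.log lldb step", res
)
assert res.Succeeded(), res.GetError()

# ... create a target, run, and step as usual; the plan stack dumps
# added by this commit will appear in the log file ...

lldb.SBDebugger.Destroy(dbg)
lldb.SBDebugger.Terminate()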