https://github.com/jimingham updated https://github.com/llvm/llvm-project/pull/116438
>From 943bad7a8faa41d21651de765858dbb49584547c Mon Sep 17 00:00:00 2001
From: Jim Ingham <jing...@apple.com>
Date: Fri, 15 Nov 2024 12:20:33 -0800
Subject: [PATCH 1/3] Convert ThreadPlanStack's mutex to a shared mutex.

Since shared mutexes are not recursive, I had to add a couple NoLock
variants to make sure it didn't get used recursively.
---
 lldb/include/lldb/Target/ThreadPlanStack.h |  9 ++-
 lldb/source/Target/ThreadPlanStack.cpp     | 81 ++++++++++++----------
 2 files changed, 51 insertions(+), 39 deletions(-)

diff --git a/lldb/include/lldb/Target/ThreadPlanStack.h b/lldb/include/lldb/Target/ThreadPlanStack.h
index e6a560a509261f..cd606bd0bb1056 100644
--- a/lldb/include/lldb/Target/ThreadPlanStack.h
+++ b/lldb/include/lldb/Target/ThreadPlanStack.h
@@ -14,6 +14,8 @@
 #include <unordered_map>
 #include <vector>
 
+#include "llvm/Support/RWMutex.h"
+
 #include "lldb/Target/Target.h"
 #include "lldb/Target/Thread.h"
 #include "lldb/lldb-private-forward.h"
@@ -96,7 +98,10 @@ class ThreadPlanStack {
   void ClearThreadCache();
 
 private:
-  void PrintOneStack(Stream &s, llvm::StringRef stack_name,
+
+  lldb::ThreadPlanSP DiscardPlanNoLock();
+  lldb::ThreadPlanSP GetCurrentPlanNoLock() const;
+  void PrintOneStackNoLock(Stream &s, llvm::StringRef stack_name,
                      const PlanStack &stack, lldb::DescriptionLevel desc_level,
                      bool include_internal) const;
@@ -110,7 +115,7 @@ class ThreadPlanStack {
   size_t m_completed_plan_checkpoint = 0; // Monotonically increasing token for
                                           // completed plan checkpoints.
   std::unordered_map<size_t, PlanStack> m_completed_plan_store;
-  mutable std::recursive_mutex m_stack_mutex;
+  mutable llvm::sys::RWMutex m_stack_mutex;
 };
 
 class ThreadPlanStackMap {
diff --git a/lldb/source/Target/ThreadPlanStack.cpp b/lldb/source/Target/ThreadPlanStack.cpp
index 1572931429071d..81fcb09b563668 100644
--- a/lldb/source/Target/ThreadPlanStack.cpp
+++ b/lldb/source/Target/ThreadPlanStack.cpp
@@ -39,21 +39,20 @@ ThreadPlanStack::ThreadPlanStack(const Thread &thread, bool make_null) {
 void ThreadPlanStack::DumpThreadPlans(Stream &s,
                                       lldb::DescriptionLevel desc_level,
                                       bool include_internal) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   s.IndentMore();
-  PrintOneStack(s, "Active plan stack", m_plans, desc_level, include_internal);
-  PrintOneStack(s, "Completed plan stack", m_completed_plans, desc_level,
+  PrintOneStackNoLock(s, "Active plan stack", m_plans, desc_level, include_internal);
+  PrintOneStackNoLock(s, "Completed plan stack", m_completed_plans, desc_level,
                 include_internal);
-  PrintOneStack(s, "Discarded plan stack", m_discarded_plans, desc_level,
+  PrintOneStackNoLock(s, "Discarded plan stack", m_discarded_plans, desc_level,
                 include_internal);
   s.IndentLess();
 }
 
-void ThreadPlanStack::PrintOneStack(Stream &s, llvm::StringRef stack_name,
+void ThreadPlanStack::PrintOneStackNoLock(Stream &s, llvm::StringRef stack_name,
                                     const PlanStack &stack,
                                     lldb::DescriptionLevel desc_level,
                                     bool include_internal) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   // If the stack is empty, just exit:
   if (stack.empty())
     return;
@@ -82,7 +81,7 @@ void ThreadPlanStack::PrintOneStack(Stream &s, llvm::StringRef stack_name,
 }
 
 size_t ThreadPlanStack::CheckpointCompletedPlans() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   m_completed_plan_checkpoint++;
   m_completed_plan_store.insert(
       std::make_pair(m_completed_plan_checkpoint, m_completed_plans));
@@ -90,7 +89,7 @@ size_t ThreadPlanStack::CheckpointCompletedPlans() {
 }
 
 void ThreadPlanStack::RestoreCompletedPlanCheckpoint(size_t checkpoint) {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   auto result = m_completed_plan_store.find(checkpoint);
   assert(result != m_completed_plan_store.end() &&
          "Asked for a checkpoint that didn't exist");
@@ -99,13 +98,13 @@ void ThreadPlanStack::RestoreCompletedPlanCheckpoint(size_t checkpoint) {
 }
 
 void ThreadPlanStack::DiscardCompletedPlanCheckpoint(size_t checkpoint) {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   m_completed_plan_store.erase(checkpoint);
 }
 
 void ThreadPlanStack::ThreadDestroyed(Thread *thread) {
   // Tell the plan stacks that this thread is going away:
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   for (ThreadPlanSP plan : m_plans)
     plan->ThreadDestroyed();
@@ -134,7 +133,7 @@ void ThreadPlanStack::PushPlan(lldb::ThreadPlanSP new_plan_sp) {
   // If the thread plan doesn't already have a tracer, give it its parent's
   // tracer:
   // The first plan has to be a base plan:
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   assert((m_plans.size() > 0 || new_plan_sp->IsBasePlan()) &&
          "Zeroth plan must be a base plan");
@@ -147,7 +146,7 @@ void ThreadPlanStack::PushPlan(lldb::ThreadPlanSP new_plan_sp) {
 }
 
 lldb::ThreadPlanSP ThreadPlanStack::PopPlan() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   assert(m_plans.size() > 1 && "Can't pop the base thread plan");
 
   // Note that moving the top element of the vector would leave it in an
@@ -161,7 +160,11 @@ lldb::ThreadPlanSP ThreadPlanStack::PopPlan() {
 }
 
 lldb::ThreadPlanSP ThreadPlanStack::DiscardPlan() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
+  return DiscardPlanNoLock();
+}
+
+lldb::ThreadPlanSP ThreadPlanStack::DiscardPlanNoLock() {
   assert(m_plans.size() > 1 && "Can't discard the base thread plan");
 
   // Note that moving the top element of the vector would leave it in an
@@ -177,12 +180,12 @@ lldb::ThreadPlanSP ThreadPlanStack::DiscardPlan() {
 // If the input plan is nullptr, discard all plans.  Otherwise make sure this
 // plan is in the stack, and if so discard up to and including it.
 void ThreadPlanStack::DiscardPlansUpToPlan(ThreadPlan *up_to_plan_ptr) {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   int stack_size = m_plans.size();
 
   if (up_to_plan_ptr == nullptr) {
     for (int i = stack_size - 1; i > 0; i--)
-      DiscardPlan();
+      DiscardPlanNoLock();
     return;
   }
@@ -197,23 +200,23 @@ void ThreadPlanStack::DiscardPlansUpToPlan(ThreadPlan *up_to_plan_ptr) {
   if (found_it) {
     bool last_one = false;
     for (int i = stack_size - 1; i > 0 && !last_one; i--) {
-      if (GetCurrentPlan().get() == up_to_plan_ptr)
+      if (GetCurrentPlanNoLock().get() == up_to_plan_ptr)
         last_one = true;
-      DiscardPlan();
+      DiscardPlanNoLock();
     }
   }
 }
 
 void ThreadPlanStack::DiscardAllPlans() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   int stack_size = m_plans.size();
   for (int i = stack_size - 1; i > 0; i--) {
-    DiscardPlan();
+    DiscardPlanNoLock();
   }
 }
 
 void ThreadPlanStack::DiscardConsultingControllingPlans() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   while (true) {
     int controlling_plan_idx;
     bool discard = true;
@@ -234,26 +237,30 @@ void ThreadPlanStack::DiscardConsultingControllingPlans() {
 
     // First pop all the dependent plans:
     for (int i = m_plans.size() - 1; i > controlling_plan_idx; i--) {
-      DiscardPlan();
+      DiscardPlanNoLock();
     }
 
     // Now discard the controlling plan itself.
    // The bottom-most plan never gets discarded.  "OkayToDiscard" for it
     // means discard it's dependent plans, but not it...
     if (controlling_plan_idx > 0) {
-      DiscardPlan();
+      DiscardPlanNoLock();
     }
   }
 }
 
 lldb::ThreadPlanSP ThreadPlanStack::GetCurrentPlan() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
+  return GetCurrentPlanNoLock();
+}
+
+lldb::ThreadPlanSP ThreadPlanStack::GetCurrentPlanNoLock() const {
   assert(m_plans.size() != 0 && "There will always be a base plan.");
   return m_plans.back();
 }
 
 lldb::ThreadPlanSP ThreadPlanStack::GetCompletedPlan(bool skip_private) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   if (m_completed_plans.empty())
     return {};
@@ -271,7 +278,7 @@ lldb::ThreadPlanSP ThreadPlanStack::GetCompletedPlan(bool skip_private) const {
 
 lldb::ThreadPlanSP ThreadPlanStack::GetPlanByIndex(uint32_t plan_idx,
                                                    bool skip_private) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   uint32_t idx = 0;
 
   for (lldb::ThreadPlanSP plan_sp : m_plans) {
@@ -285,7 +292,7 @@ lldb::ThreadPlanSP ThreadPlanStack::GetPlanByIndex(uint32_t plan_idx,
 }
 
 lldb::ValueObjectSP ThreadPlanStack::GetReturnValueObject() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   if (m_completed_plans.empty())
     return {};
@@ -299,7 +306,7 @@ lldb::ValueObjectSP ThreadPlanStack::GetReturnValueObject() const {
 }
 
 lldb::ExpressionVariableSP ThreadPlanStack::GetExpressionVariable() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   if (m_completed_plans.empty())
     return {};
@@ -312,23 +319,23 @@ lldb::ExpressionVariableSP ThreadPlanStack::GetExpressionVariable() const {
   return {};
 }
 
 bool ThreadPlanStack::AnyPlans() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   // There is always a base plan...
   return m_plans.size() > 1;
 }
 
 bool ThreadPlanStack::AnyCompletedPlans() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   return !m_completed_plans.empty();
 }
 
 bool ThreadPlanStack::AnyDiscardedPlans() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   return !m_discarded_plans.empty();
 }
 
 bool ThreadPlanStack::IsPlanDone(ThreadPlan *in_plan) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   for (auto plan : m_completed_plans) {
     if (plan.get() == in_plan)
       return true;
@@ -337,7 +344,7 @@ bool ThreadPlanStack::IsPlanDone(ThreadPlan *in_plan) const {
 }
 
 bool ThreadPlanStack::WasPlanDiscarded(ThreadPlan *in_plan) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   for (auto plan : m_discarded_plans) {
     if (plan.get() == in_plan)
       return true;
@@ -346,7 +353,7 @@ bool ThreadPlanStack::WasPlanDiscarded(ThreadPlan *in_plan) const {
 }
 
 ThreadPlan *ThreadPlanStack::GetPreviousPlan(ThreadPlan *current_plan) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   if (current_plan == nullptr)
     return nullptr;
@@ -361,7 +368,7 @@ ThreadPlan *ThreadPlanStack::GetPreviousPlan(ThreadPlan *current_plan) const {
   // If this is the first completed plan, the previous one is the
   // bottom of the regular plan stack.
   if (stack_size > 0 && m_completed_plans[0].get() == current_plan) {
-    return GetCurrentPlan().get();
+    return GetCurrentPlanNoLock().get();
   }
 
   // Otherwise look for it in the regular plans.
@@ -374,7 +381,7 @@ ThreadPlan *ThreadPlanStack::GetPreviousPlan(ThreadPlan *current_plan) const {
 }
 
 ThreadPlan *ThreadPlanStack::GetInnermostExpression() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   int stack_size = m_plans.size();
 
   for (int i = stack_size - 1; i > 0; i--) {
@@ -385,13 +392,13 @@ ThreadPlan *ThreadPlanStack::GetInnermostExpression() const {
 }
 
 void ThreadPlanStack::ClearThreadCache() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   for (lldb::ThreadPlanSP thread_plan_sp : m_plans)
     thread_plan_sp->ClearThreadCache();
 }
 
 void ThreadPlanStack::WillResume() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   m_completed_plans.clear();
   m_discarded_plans.clear();
 }
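For readers skimming the patch, the core pattern above is worth spelling out: llvm::sys::RWMutex, like std::shared_mutex, is not recursive, so a method that holds the lock can no longer call another public method that takes it again. Each such method is therefore split into a locking wrapper and a private NoLock worker, with read-only queries taking a reader lock and mutations taking a writer lock. A minimal self-contained sketch of the same idea (illustrative names only, and std::shared_mutex standing in for llvm::sys::RWMutex so it compiles on its own):

#include <cassert>
#include <mutex>        // std::unique_lock
#include <shared_mutex> // std::shared_mutex, std::shared_lock
#include <vector>

class PlanStackSketch {
public:
  // Read-only query: takes the reader (shared) lock, then delegates.
  int GetCurrentPlan() const {
    std::shared_lock<std::shared_mutex> guard(m_mutex);
    return GetCurrentPlanNoLock();
  }

  // Mutation: takes the writer (exclusive) lock, then delegates.
  int DiscardPlan() {
    std::unique_lock<std::shared_mutex> guard(m_mutex);
    return DiscardPlanNoLock();
  }

  void DiscardAllPlans() {
    std::unique_lock<std::shared_mutex> guard(m_mutex);
    // Calling DiscardPlan() here would retake the non-recursive mutex
    // and deadlock; the NoLock worker assumes the lock is already held.
    while (m_plans.size() > 1)
      DiscardPlanNoLock();
  }

private:
  int GetCurrentPlanNoLock() const { return m_plans.back(); }

  int DiscardPlanNoLock() {
    assert(m_plans.size() > 1 && "can't discard the base plan");
    int top = m_plans.back();
    m_plans.pop_back();
    return top;
  }

  std::vector<int> m_plans{0};       // element 0 stands in for the base plan
  mutable std::shared_mutex m_mutex; // mutable so const queries can lock
};

This is why the loops in DiscardPlansUpToPlan, DiscardAllPlans, and DiscardConsultingControllingPlans call DiscardPlanNoLock() rather than DiscardPlan(): the wrapper would try to retake the already-held write lock.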
>From 051a77f796719ce4fa3843eb0aa062f99649f60c Mon Sep 17 00:00:00 2001
From: Jim Ingham <jing...@apple.com>
Date: Fri, 15 Nov 2024 12:45:39 -0800
Subject: [PATCH 2/3] clang-format

---
 lldb/include/lldb/Target/ThreadPlanStack.h |  6 +++---
 lldb/source/Target/ThreadPlanStack.cpp     | 13 +++++++------
 2 files changed, 10 insertions(+), 9 deletions(-)

diff --git a/lldb/include/lldb/Target/ThreadPlanStack.h b/lldb/include/lldb/Target/ThreadPlanStack.h
index cd606bd0bb1056..e0f8104de9a4d1 100644
--- a/lldb/include/lldb/Target/ThreadPlanStack.h
+++ b/lldb/include/lldb/Target/ThreadPlanStack.h
@@ -98,12 +98,12 @@ class ThreadPlanStack {
   void ClearThreadCache();
 
 private:
-
   lldb::ThreadPlanSP DiscardPlanNoLock();
   lldb::ThreadPlanSP GetCurrentPlanNoLock() const;
   void PrintOneStackNoLock(Stream &s, llvm::StringRef stack_name,
-                     const PlanStack &stack, lldb::DescriptionLevel desc_level,
-                     bool include_internal) const;
+                           const PlanStack &stack,
+                           lldb::DescriptionLevel desc_level,
+                           bool include_internal) const;
 
   PlanStack m_plans;           ///< The stack of plans this thread is executing.
   PlanStack m_completed_plans; ///< Plans that have been completed by this
diff --git a/lldb/source/Target/ThreadPlanStack.cpp b/lldb/source/Target/ThreadPlanStack.cpp
index 81fcb09b563668..93a3d411428f05 100644
--- a/lldb/source/Target/ThreadPlanStack.cpp
+++ b/lldb/source/Target/ThreadPlanStack.cpp
@@ -41,18 +41,19 @@ void ThreadPlanStack::DumpThreadPlans(Stream &s,
                                       bool include_internal) const {
   llvm::sys::ScopedReader guard(m_stack_mutex);
   s.IndentMore();
-  PrintOneStackNoLock(s, "Active plan stack", m_plans, desc_level, include_internal);
+  PrintOneStackNoLock(s, "Active plan stack", m_plans, desc_level,
+                      include_internal);
   PrintOneStackNoLock(s, "Completed plan stack", m_completed_plans, desc_level,
-                include_internal);
+                      include_internal);
   PrintOneStackNoLock(s, "Discarded plan stack", m_discarded_plans, desc_level,
-                include_internal);
+                      include_internal);
   s.IndentLess();
 }
 
 void ThreadPlanStack::PrintOneStackNoLock(Stream &s, llvm::StringRef stack_name,
-                                    const PlanStack &stack,
-                                    lldb::DescriptionLevel desc_level,
-                                    bool include_internal) const {
+                                          const PlanStack &stack,
+                                          lldb::DescriptionLevel desc_level,
+                                          bool include_internal) const {
   // If the stack is empty, just exit:
   if (stack.empty())
     return;

>From ae5df264b72ea82305ab1948ac8ee6f7fde2b38f Mon Sep 17 00:00:00 2001
From: Jim Ingham <jing...@apple.com>
Date: Fri, 15 Nov 2024 18:08:16 -0800
Subject: [PATCH 3/3] Release the lock before calling ThreadPlan::DidPush.

---
 lldb/source/Target/ThreadPlanStack.cpp | 18 ++++++++++--------
 1 file changed, 10 insertions(+), 8 deletions(-)

diff --git a/lldb/source/Target/ThreadPlanStack.cpp b/lldb/source/Target/ThreadPlanStack.cpp
index 93a3d411428f05..d5d600dda47a3f 100644
--- a/lldb/source/Target/ThreadPlanStack.cpp
+++ b/lldb/source/Target/ThreadPlanStack.cpp
@@ -134,15 +134,17 @@ void ThreadPlanStack::PushPlan(lldb::ThreadPlanSP new_plan_sp) {
   // If the thread plan doesn't already have a tracer, give it its parent's
   // tracer:
   // The first plan has to be a base plan:
-  llvm::sys::ScopedWriter guard(m_stack_mutex);
-  assert((m_plans.size() > 0 || new_plan_sp->IsBasePlan()) &&
-         "Zeroth plan must be a base plan");
-
-  if (!new_plan_sp->GetThreadPlanTracer()) {
-    assert(!m_plans.empty());
-    new_plan_sp->SetThreadPlanTracer(m_plans.back()->GetThreadPlanTracer());
+  { // Scope for Lock - DidPush often adds plans to the stack:
+    llvm::sys::ScopedWriter guard(m_stack_mutex);
+    assert((m_plans.size() > 0 || new_plan_sp->IsBasePlan()) &&
+           "Zeroth plan must be a base plan");
+
+    if (!new_plan_sp->GetThreadPlanTracer()) {
+      assert(!m_plans.empty());
+      new_plan_sp->SetThreadPlanTracer(m_plans.back()->GetThreadPlanTracer());
+    }
+    m_plans.push_back(new_plan_sp);
   }
-  m_plans.push_back(new_plan_sp);
   new_plan_sp->DidPush();
 }
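The third patch applies the standard fix for self-deadlock through a callback: PushPlan must not hold the write lock while it calls ThreadPlan::DidPush, because DidPush often pushes more plans, and the new mutex cannot be re-acquired by the same thread. Confining the guard to a block scope so the callback runs unlocked looks like this in miniature (a sketch with made-up names, std::shared_mutex again standing in for llvm::sys::RWMutex):

#include <functional>
#include <mutex>
#include <shared_mutex>
#include <vector>

class StackWithCallback {
public:
  using Callback = std::function<void()>;

  // Push an item, then run its DidPush-style callback with the lock released.
  void Push(int item, const Callback &did_push) {
    { // Scope for the lock - the callback below may call Push() again:
      std::unique_lock<std::shared_mutex> guard(m_mutex);
      m_items.push_back(item);
    } // guard is destroyed here, releasing the write lock
    did_push(); // safe: a re-entrant Push() acquires the lock afresh
  }

private:
  std::vector<int> m_items;
  std::shared_mutex m_mutex;
};

The trade-off is that the push and the callback's side effects are no longer one atomic step as seen by other threads; the patch accepts that in exchange for not deadlocking.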
_______________________________________________
lldb-commits mailing list
lldb-commits@lists.llvm.org
https://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits