https://github.com/jimingham created https://github.com/llvm/llvm-project/pull/116438

I have some reports of A/B inversion deadlocks between the ThreadPlanStack 
and the StackFrameList accesses.  There's a fair bit of reasonable code in 
lldb that does "while accessing the ThreadPlanStack, look at that thread's 
StackFrameList", and also plenty of "while accessing the StackFrameList, 
look at the ThreadPlanStack."

In all the cases I've seen so far, at most one of the four lock 
acquisitions involved was trying to mutate a list; the other three were 
just reading.  So we can solve the deadlock by converting the two mutexes 
over to shared mutexes.
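
To see why that's enough: readers don't block readers, so once both locks 
are shared, opposite acquisition orders can't form a cycle of waits as long 
as at most one of the acquisitions is exclusive.  A minimal sketch using 
std::shared_mutex (the patch itself uses llvm::sys::RWMutex, which provides 
the same reader/writer semantics):

    #include <shared_mutex>

    std::shared_mutex plan_stack_mutex;
    std::shared_mutex frame_list_mutex;

    // Both threads can hold both read locks at the same time, so taking
    // them in opposite orders no longer deadlocks.
    void ReadPlansThenFrames() {
      std::shared_lock<std::shared_mutex> plans(plan_stack_mutex);
      std::shared_lock<std::shared_mutex> frames(frame_list_mutex);
    }

    void ReadFramesThenPlans() {
      std::shared_lock<std::shared_mutex> frames(frame_list_mutex);
      std::shared_lock<std::shared_mutex> plans(plan_stack_mutex);
    }

A lone writer may still have to wait for readers of its own lock to drain, 
but since readers never wait on each other, no cycle can form.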

This patch is the easy part: the ThreadPlanStack mutex.

The tricky part is that these were originally recursive mutexes, and 
recursive acquisition of a shared mutex is undefined behavior according to 
the C++ standard.  So I had to add a couple of NoLock variants to make sure 
the mutex doesn't get taken recursively.  Then, since the only remaining 
calls out are to ThreadPlans, and ThreadPlans don't have access to their 
containing ThreadPlanStack, converting this to a non-recursive lock should 
be safe.
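
Concretely, the hazard the NoLock variants remove looks like this 
(condensed from the patch; DiscardPlan and DiscardAllPlans are the real 
methods):

    // Before the patch (recursive mutex), this was harmless even though
    // DiscardPlan() re-locked m_stack_mutex:
    void ThreadPlanStack::DiscardAllPlans() {
      std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
      int stack_size = m_plans.size();
      for (int i = stack_size - 1; i > 0; i--)
        DiscardPlan(); // second acquisition on the same thread: fine here,
                       // undefined behavior with a shared mutex
    }

    // After: the public method takes the lock exactly once and forwards
    // to a private worker that assumes the lock is already held...
    lldb::ThreadPlanSP ThreadPlanStack::DiscardPlan() {
      llvm::sys::ScopedWriter guard(m_stack_mutex);
      return DiscardPlanNoLock();
    }

    // ...and callers that already hold the lock use the worker directly.
    void ThreadPlanStack::DiscardAllPlans() {
      llvm::sys::ScopedWriter guard(m_stack_mutex);
      int stack_size = m_plans.size();
      for (int i = stack_size - 1; i > 0; i--)
        DiscardPlanNoLock(); // no second acquisition
    }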

From 943bad7a8faa41d21651de765858dbb49584547c Mon Sep 17 00:00:00 2001
From: Jim Ingham <jing...@apple.com>
Date: Fri, 15 Nov 2024 12:20:33 -0800
Subject: [PATCH] Convert ThreadPlanStack's mutex to a shared mutex.

Since shared mutexes are not recursive, I had to add a couple
NoLock variants to make sure it didn't get used recursively.
---
 lldb/include/lldb/Target/ThreadPlanStack.h |  9 ++-
 lldb/source/Target/ThreadPlanStack.cpp     | 81 ++++++++++++----------
 2 files changed, 51 insertions(+), 39 deletions(-)

diff --git a/lldb/include/lldb/Target/ThreadPlanStack.h b/lldb/include/lldb/Target/ThreadPlanStack.h
index e6a560a509261f..cd606bd0bb1056 100644
--- a/lldb/include/lldb/Target/ThreadPlanStack.h
+++ b/lldb/include/lldb/Target/ThreadPlanStack.h
@@ -14,6 +14,8 @@
 #include <unordered_map>
 #include <vector>
 
+#include "llvm/Support/RWMutex.h"
+
 #include "lldb/Target/Target.h"
 #include "lldb/Target/Thread.h"
 #include "lldb/lldb-private-forward.h"
@@ -96,7 +98,10 @@ class ThreadPlanStack {
   void ClearThreadCache();
 
 private:
-  void PrintOneStack(Stream &s, llvm::StringRef stack_name,
+
+  lldb::ThreadPlanSP DiscardPlanNoLock();
+  lldb::ThreadPlanSP GetCurrentPlanNoLock() const;
+  void PrintOneStackNoLock(Stream &s, llvm::StringRef stack_name,
                      const PlanStack &stack, lldb::DescriptionLevel desc_level,
                      bool include_internal) const;
 
@@ -110,7 +115,7 @@ class ThreadPlanStack {
   size_t m_completed_plan_checkpoint = 0; // Monotonically increasing token for
                                           // completed plan checkpoints.
   std::unordered_map<size_t, PlanStack> m_completed_plan_store;
-  mutable std::recursive_mutex m_stack_mutex;
+  mutable llvm::sys::RWMutex m_stack_mutex;
 };
 
 class ThreadPlanStackMap {
diff --git a/lldb/source/Target/ThreadPlanStack.cpp b/lldb/source/Target/ThreadPlanStack.cpp
index 1572931429071d..81fcb09b563668 100644
--- a/lldb/source/Target/ThreadPlanStack.cpp
+++ b/lldb/source/Target/ThreadPlanStack.cpp
@@ -39,21 +39,20 @@ ThreadPlanStack::ThreadPlanStack(const Thread &thread, bool make_null) {
 void ThreadPlanStack::DumpThreadPlans(Stream &s,
                                       lldb::DescriptionLevel desc_level,
                                       bool include_internal) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   s.IndentMore();
-  PrintOneStack(s, "Active plan stack", m_plans, desc_level, include_internal);
-  PrintOneStack(s, "Completed plan stack", m_completed_plans, desc_level,
+  PrintOneStackNoLock(s, "Active plan stack", m_plans, desc_level, 
include_internal);
+  PrintOneStackNoLock(s, "Completed plan stack", m_completed_plans, desc_level,
                 include_internal);
-  PrintOneStack(s, "Discarded plan stack", m_discarded_plans, desc_level,
+  PrintOneStackNoLock(s, "Discarded plan stack", m_discarded_plans, desc_level,
                 include_internal);
   s.IndentLess();
 }
 
-void ThreadPlanStack::PrintOneStack(Stream &s, llvm::StringRef stack_name,
+void ThreadPlanStack::PrintOneStackNoLock(Stream &s, llvm::StringRef stack_name,
                                     const PlanStack &stack,
                                     lldb::DescriptionLevel desc_level,
                                     bool include_internal) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
   // If the stack is empty, just exit:
   if (stack.empty())
     return;
@@ -82,7 +81,7 @@ void ThreadPlanStack::PrintOneStack(Stream &s, llvm::StringRef stack_name,
 }
 
 size_t ThreadPlanStack::CheckpointCompletedPlans() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   m_completed_plan_checkpoint++;
   m_completed_plan_store.insert(
       std::make_pair(m_completed_plan_checkpoint, m_completed_plans));
@@ -90,7 +89,7 @@ size_t ThreadPlanStack::CheckpointCompletedPlans() {
 }
 
 void ThreadPlanStack::RestoreCompletedPlanCheckpoint(size_t checkpoint) {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   auto result = m_completed_plan_store.find(checkpoint);
   assert(result != m_completed_plan_store.end() &&
          "Asked for a checkpoint that didn't exist");
@@ -99,13 +98,13 @@ void ThreadPlanStack::RestoreCompletedPlanCheckpoint(size_t checkpoint) {
 }
 
 void ThreadPlanStack::DiscardCompletedPlanCheckpoint(size_t checkpoint) {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   m_completed_plan_store.erase(checkpoint);
 }
 
 void ThreadPlanStack::ThreadDestroyed(Thread *thread) {
   // Tell the plan stacks that this thread is going away:
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   for (ThreadPlanSP plan : m_plans)
     plan->ThreadDestroyed();
 
@@ -134,7 +133,7 @@ void ThreadPlanStack::PushPlan(lldb::ThreadPlanSP new_plan_sp) {
   // If the thread plan doesn't already have a tracer, give it its parent's
   // tracer:
   // The first plan has to be a base plan:
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   assert((m_plans.size() > 0 || new_plan_sp->IsBasePlan()) &&
          "Zeroth plan must be a base plan");
 
@@ -147,7 +146,7 @@ void ThreadPlanStack::PushPlan(lldb::ThreadPlanSP new_plan_sp) {
 }
 
 lldb::ThreadPlanSP ThreadPlanStack::PopPlan() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   assert(m_plans.size() > 1 && "Can't pop the base thread plan");
 
   // Note that moving the top element of the vector would leave it in an
@@ -161,7 +160,11 @@ lldb::ThreadPlanSP ThreadPlanStack::PopPlan() {
 }
 
 lldb::ThreadPlanSP ThreadPlanStack::DiscardPlan() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
+  return DiscardPlanNoLock();
+}
+
+lldb::ThreadPlanSP ThreadPlanStack::DiscardPlanNoLock() {
   assert(m_plans.size() > 1 && "Can't discard the base thread plan");
 
   // Note that moving the top element of the vector would leave it in an
@@ -177,12 +180,12 @@ lldb::ThreadPlanSP ThreadPlanStack::DiscardPlan() {
 // If the input plan is nullptr, discard all plans.  Otherwise make sure this
 // plan is in the stack, and if so discard up to and including it.
 void ThreadPlanStack::DiscardPlansUpToPlan(ThreadPlan *up_to_plan_ptr) {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   int stack_size = m_plans.size();
 
   if (up_to_plan_ptr == nullptr) {
     for (int i = stack_size - 1; i > 0; i--)
-      DiscardPlan();
+      DiscardPlanNoLock();
     return;
   }
 
@@ -197,23 +200,23 @@ void ThreadPlanStack::DiscardPlansUpToPlan(ThreadPlan *up_to_plan_ptr) {
   if (found_it) {
     bool last_one = false;
     for (int i = stack_size - 1; i > 0 && !last_one; i--) {
-      if (GetCurrentPlan().get() == up_to_plan_ptr)
+      if (GetCurrentPlanNoLock().get() == up_to_plan_ptr)
         last_one = true;
-      DiscardPlan();
+      DiscardPlanNoLock();
     }
   }
 }
 
 void ThreadPlanStack::DiscardAllPlans() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   int stack_size = m_plans.size();
   for (int i = stack_size - 1; i > 0; i--) {
-    DiscardPlan();
+    DiscardPlanNoLock();
   }
 }
 
 void ThreadPlanStack::DiscardConsultingControllingPlans() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   while (true) {
     int controlling_plan_idx;
     bool discard = true;
@@ -234,26 +237,30 @@ void ThreadPlanStack::DiscardConsultingControllingPlans() {
 
     // First pop all the dependent plans:
     for (int i = m_plans.size() - 1; i > controlling_plan_idx; i--) {
-      DiscardPlan();
+      DiscardPlanNoLock();
     }
 
     // Now discard the controlling plan itself.
     // The bottom-most plan never gets discarded.  "OkayToDiscard" for it
     // means discard it's dependent plans, but not it...
     if (controlling_plan_idx > 0) {
-      DiscardPlan();
+      DiscardPlanNoLock();
     }
   }
 }
 
 lldb::ThreadPlanSP ThreadPlanStack::GetCurrentPlan() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
+  return GetCurrentPlanNoLock();
+}
+
+lldb::ThreadPlanSP ThreadPlanStack::GetCurrentPlanNoLock() const {
   assert(m_plans.size() != 0 && "There will always be a base plan.");
   return m_plans.back();
 }
 
 lldb::ThreadPlanSP ThreadPlanStack::GetCompletedPlan(bool skip_private) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   if (m_completed_plans.empty())
     return {};
 
@@ -271,7 +278,7 @@ lldb::ThreadPlanSP ThreadPlanStack::GetCompletedPlan(bool skip_private) const {
 
 lldb::ThreadPlanSP ThreadPlanStack::GetPlanByIndex(uint32_t plan_idx,
                                                    bool skip_private) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   uint32_t idx = 0;
 
   for (lldb::ThreadPlanSP plan_sp : m_plans) {
@@ -285,7 +292,7 @@ lldb::ThreadPlanSP ThreadPlanStack::GetPlanByIndex(uint32_t plan_idx,
 }
 
 lldb::ValueObjectSP ThreadPlanStack::GetReturnValueObject() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   if (m_completed_plans.empty())
     return {};
 
@@ -299,7 +306,7 @@ lldb::ValueObjectSP ThreadPlanStack::GetReturnValueObject() const {
 }
 
 lldb::ExpressionVariableSP ThreadPlanStack::GetExpressionVariable() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   if (m_completed_plans.empty())
     return {};
 
@@ -312,23 +319,23 @@ lldb::ExpressionVariableSP ThreadPlanStack::GetExpressionVariable() const {
   return {};
 }
 bool ThreadPlanStack::AnyPlans() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   // There is always a base plan...
   return m_plans.size() > 1;
 }
 
 bool ThreadPlanStack::AnyCompletedPlans() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   return !m_completed_plans.empty();
 }
 
 bool ThreadPlanStack::AnyDiscardedPlans() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   return !m_discarded_plans.empty();
 }
 
 bool ThreadPlanStack::IsPlanDone(ThreadPlan *in_plan) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   for (auto plan : m_completed_plans) {
     if (plan.get() == in_plan)
       return true;
@@ -337,7 +344,7 @@ bool ThreadPlanStack::IsPlanDone(ThreadPlan *in_plan) const {
 }
 
 bool ThreadPlanStack::WasPlanDiscarded(ThreadPlan *in_plan) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   for (auto plan : m_discarded_plans) {
     if (plan.get() == in_plan)
       return true;
@@ -346,7 +353,7 @@ bool ThreadPlanStack::WasPlanDiscarded(ThreadPlan *in_plan) const {
 }
 
 ThreadPlan *ThreadPlanStack::GetPreviousPlan(ThreadPlan *current_plan) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   if (current_plan == nullptr)
     return nullptr;
 
@@ -361,7 +368,7 @@ ThreadPlan *ThreadPlanStack::GetPreviousPlan(ThreadPlan *current_plan) const {
   // If this is the first completed plan, the previous one is the
   // bottom of the regular plan stack.
   if (stack_size > 0 && m_completed_plans[0].get() == current_plan) {
-    return GetCurrentPlan().get();
+    return GetCurrentPlanNoLock().get();
   }
 
   // Otherwise look for it in the regular plans.
@@ -374,7 +381,7 @@ ThreadPlan *ThreadPlanStack::GetPreviousPlan(ThreadPlan *current_plan) const {
 }
 
 ThreadPlan *ThreadPlanStack::GetInnermostExpression() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   int stack_size = m_plans.size();
 
   for (int i = stack_size - 1; i > 0; i--) {
@@ -385,13 +392,13 @@ ThreadPlan *ThreadPlanStack::GetInnermostExpression() const {
 }
 
 void ThreadPlanStack::ClearThreadCache() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   for (lldb::ThreadPlanSP thread_plan_sp : m_plans)
     thread_plan_sp->ClearThreadCache();
 }
 
 void ThreadPlanStack::WillResume() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   m_completed_plans.clear();
   m_discarded_plans.clear();
 }
