scott.smith updated this revision to Diff 97695.
scott.smith added a comment.
Update to use a private llvm::ThreadPool. I chose this over a second global
"TaskPool" because if the threads are going to be short-lived, there isn't much
point in having a global pool rather than a short-lived, locally instantiated one.
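
For reference, the shape of the change is roughly the following (a minimal
sketch of the private-pool pattern, not the patch itself; `Item` and `DoWork`
are hypothetical placeholders):

  #include "llvm/Support/ThreadPool.h"

  #include <algorithm>
  #include <thread>
  #include <vector>

  struct Item { /* inputs and outputs for one unit of work */ };
  void DoWork(Item &item) { /* hypothetical unit of work */ }

  void ProcessAll(std::vector<Item> &items) {
    // The pool lives only for the duration of this call; its threads are
    // joined when it goes out of scope, so no global state is left behind.
    llvm::ThreadPool pool(
        std::min<size_t>(items.size(), std::thread::hardware_concurrency()));
    for (Item &item : items)
      pool.async([&item] { DoWork(item); });
    pool.wait(); // Block until every queued task has run.
  }

The patch keeps the std::shared_future returned by async() for each task
instead of using a single pool.wait(), so it can consume the results one by
one, in order, as each load finishes.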
Repository:
rL LLVM
https://reviews.llvm.org/D32597
Files:
source/Plugins/DynamicLoader/POSIX-DYLD/DynamicLoaderPOSIXDYLD.cpp
source/Plugins/DynamicLoader/POSIX-DYLD/DynamicLoaderPOSIXDYLD.h
Index: source/Plugins/DynamicLoader/POSIX-DYLD/DynamicLoaderPOSIXDYLD.h
===================================================================
--- source/Plugins/DynamicLoader/POSIX-DYLD/DynamicLoaderPOSIXDYLD.h
+++ source/Plugins/DynamicLoader/POSIX-DYLD/DynamicLoaderPOSIXDYLD.h
@@ -66,6 +66,10 @@
uint32_t GetPluginVersion() override;
protected:
+ /// Mutex to protect various member variables during parallel shared library
+ /// loading.
+ std::recursive_mutex m_mutex;
+
/// Runtime linker rendezvous structure.
DYLDRendezvous m_rendezvous;
Index: source/Plugins/DynamicLoader/POSIX-DYLD/DynamicLoaderPOSIXDYLD.cpp
===================================================================
--- source/Plugins/DynamicLoader/POSIX-DYLD/DynamicLoaderPOSIXDYLD.cpp
+++ source/Plugins/DynamicLoader/POSIX-DYLD/DynamicLoaderPOSIXDYLD.cpp
@@ -27,6 +27,7 @@
#include "lldb/Target/Thread.h"
#include "lldb/Target/ThreadPlanRunToAddress.h"
#include "lldb/Utility/Log.h"
+#include "llvm/Support/ThreadPool.h"
// C++ Includes
// C Includes
@@ -195,6 +196,7 @@
}
void DynamicLoaderPOSIXDYLD::DidLaunch() {
+ std::lock_guard<std::recursive_mutex> guard(m_mutex);
Log *log(GetLogIfAnyCategoriesSet(LIBLLDB_LOG_DYNAMIC_LOADER));
if (log)
log->Printf("DynamicLoaderPOSIXDYLD::%s()", __FUNCTION__);
@@ -228,17 +230,20 @@
addr_t link_map_addr,
addr_t base_addr,
bool base_addr_is_offset) {
+ std::lock_guard<std::recursive_mutex> guard(m_mutex);
m_loaded_modules[module] = link_map_addr;
UpdateLoadedSectionsCommon(module, base_addr, base_addr_is_offset);
}
void DynamicLoaderPOSIXDYLD::UnloadSections(const ModuleSP module) {
+ std::lock_guard<std::recursive_mutex> guard(m_mutex);
m_loaded_modules.erase(module);
UnloadSectionsCommon(module);
}
void DynamicLoaderPOSIXDYLD::ProbeEntry() {
+ std::lock_guard<std::recursive_mutex> guard(m_mutex);
Log *log(GetLogIfAnyCategoriesSet(LIBLLDB_LOG_DYNAMIC_LOADER));
const addr_t entry = GetEntryPoint();
@@ -329,6 +334,7 @@
}
void DynamicLoaderPOSIXDYLD::SetRendezvousBreakpoint() {
+ std::lock_guard<std::recursive_mutex> guard(m_mutex);
Log *log(GetLogIfAnyCategoriesSet(LIBLLDB_LOG_DYNAMIC_LOADER));
addr_t break_addr = m_rendezvous.GetBreakAddress();
@@ -372,6 +378,7 @@
Log *log(GetLogIfAnyCategoriesSet(LIBLLDB_LOG_DYNAMIC_LOADER));
DynamicLoaderPOSIXDYLD *const dyld_instance =
static_cast<DynamicLoaderPOSIXDYLD *>(baton);
+ std::lock_guard<std::recursive_mutex> guard(dyld_instance->m_mutex);
if (log)
log->Printf("DynamicLoaderPOSIXDYLD::%s called for pid %" PRIu64,
__FUNCTION__,
@@ -393,6 +400,7 @@
}
void DynamicLoaderPOSIXDYLD::RefreshModules() {
+ std::lock_guard<std::recursive_mutex> guard(m_mutex);
if (!m_rendezvous.Resolve())
return;
@@ -437,6 +445,7 @@
ThreadPlanSP
DynamicLoaderPOSIXDYLD::GetStepThroughTrampolinePlan(Thread &thread,
bool stop) {
+ std::lock_guard<std::recursive_mutex> guard(m_mutex);
ThreadPlanSP thread_plan_sp;
StackFrame *frame = thread.GetStackFrameAtIndex(0).get();
@@ -514,14 +523,33 @@
std::vector<FileSpec> module_names;
for (I = m_rendezvous.begin(), E = m_rendezvous.end(); I != E; ++I)
module_names.push_back(I->file_spec);
+ size_t num_to_load = module_names.size();
m_process->PrefetchModuleSpecs(
module_names, m_process->GetTarget().GetArchitecture().GetTriple());
- for (I = m_rendezvous.begin(), E = m_rendezvous.end(); I != E; ++I) {
- ModuleSP module_sp =
- LoadModuleAtAddress(I->file_spec, I->link_addr, I->base_addr, true);
- if (module_sp.get()) {
- module_list.Append(module_sp);
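+ // Load the modules in parallel. Each loader entry records the rendezvous
+ // iterator for one module, the ModuleSP produced by the load, and the
+ // future used to wait for that load to finish.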
+ struct loader {
+ DYLDRendezvous::iterator I;
+ ModuleSP m;
+ std::shared_future<void> f;
+ };
+ std::vector<loader> loaders(num_to_load);
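+ // Spawn no more threads than there are modules to load, capped at the
+ // number of hardware threads.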
+ llvm::ThreadPool load_pool(
+ std::min<size_t>(num_to_load, std::thread::hardware_concurrency()));
+ auto loader_fn = [this](loader *l) {
+ l->m = LoadModuleAtAddress(l->I->file_spec, l->I->link_addr,
+ l->I->base_addr, true);
+ };
+
+ loader *l = loaders.data();
+ for (I = m_rendezvous.begin(), E = m_rendezvous.end(); I != E; ++I, ++l) {
+ l->I = I;
+ l->f = load_pool.async(loader_fn, l);
+ }
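+ // Wait for the loads in rendezvous order, so module_list ends up in the
+ // same order the old sequential loop produced.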
+ for (auto &l : loaders) {
+ l.f.wait();
+ if (l.m.get()) {
+ module_list.Append(l.m);
} else {
Log *log(GetLogIfAnyCategoriesSet(LIBLLDB_LOG_DYNAMIC_LOADER));
if (log)
@@ -535,6 +563,7 @@
}
addr_t DynamicLoaderPOSIXDYLD::ComputeLoadOffset() {
+ std::lock_guard<std::recursive_mutex> guard(m_mutex);
addr_t virt_entry;
if (m_load_offset != LLDB_INVALID_ADDRESS)
@@ -561,13 +590,15 @@
}
void DynamicLoaderPOSIXDYLD::EvalVdsoStatus() {
+ std::lock_guard<std::recursive_mutex> guard(m_mutex);
AuxVector::iterator I = m_auxv->FindEntry(AuxVector::AT_SYSINFO_EHDR);
if (I != m_auxv->end())
m_vdso_base = I->value;
}
addr_t DynamicLoaderPOSIXDYLD::GetEntryPoint() {
+ std::lock_guard<std::recursive_mutex> guard(m_mutex);
if (m_entry_point != LLDB_INVALID_ADDRESS)
return m_entry_point;
@@ -594,6 +625,7 @@
DynamicLoaderPOSIXDYLD::GetThreadLocalData(const lldb::ModuleSP module_sp,
const lldb::ThreadSP thread,
lldb::addr_t tls_file_addr) {
+ std::lock_guard<std::recursive_mutex> guard(m_mutex);
auto it = m_loaded_modules.find(module_sp);
if (it == m_loaded_modules.end())
return LLDB_INVALID_ADDRESS;
@@ -644,6 +676,7 @@
void DynamicLoaderPOSIXDYLD::ResolveExecutableModule(
lldb::ModuleSP &module_sp) {
+ std::lock_guard<std::recursive_mutex> guard(m_mutex);
Log *log(GetLogIfAnyCategoriesSet(LIBLLDB_LOG_DYNAMIC_LOADER));
if (m_process == nullptr)
@@ -692,6 +725,7 @@
bool DynamicLoaderPOSIXDYLD::AlwaysRelyOnEHUnwindInfo(
lldb_private::SymbolContext &sym_ctx) {
+ std::lock_guard<std::recursive_mutex> guard(m_mutex);
ModuleSP module_sp;
if (sym_ctx.symbol)
module_sp = sym_ctx.symbol->GetAddressRef().GetModule();
_______________________________________________
lldb-commits mailing list
[email protected]
http://lists.llvm.org/cgi-bin/mailman/listinfo/lldb-commits