================
@@ -777,28 +798,134 @@ llvm::Error DAP::Disconnect(bool terminateDebuggee) {
   return ToError(error);
 }
 
+template <typename T>
+static std::optional<T> getArgumentsIfRequest(const protocol::Message &pm,
+                                              llvm::StringLiteral command) {
+  auto *const req = std::get_if<protocol::Request>(&pm);
+  if (!req || req->command != command)
+    return std::nullopt;
+
+  T args;
+  llvm::json::Path::Root root;
+  if (!fromJSON(req->arguments, args, root)) {
+    return std::nullopt;
+  }
+
+  return std::move(args);
+}
+
 llvm::Error DAP::Loop() {
-  auto cleanup = llvm::make_scope_exit([this]() {
+  std::deque<protocol::Message> queue;
+  std::condition_variable queue_cv;
+  std::mutex queue_mutex;
+  std::future<llvm::Error> queue_reader = std::async([&]() -> llvm::Error {
+    llvm::set_thread_name(transport.GetClientName() + ".transport_handler");
+    auto cleanup = llvm::make_scope_exit([&]() {
+      // Ensure we're marked as disconnecting when the reader exits.
+      disconnecting = true;
+      queue_cv.notify_all();
+    });
+
+    while (!disconnecting) {
+      llvm::Expected<std::optional<protocol::Message>> next =
+          transport.Read(std::chrono::seconds(1));
+      bool timeout = false;
+      if (llvm::Error Err = llvm::handleErrors(
+              next.takeError(),
+              [&](std::unique_ptr<llvm::StringError> Err) -> llvm::Error {
+                if (Err->convertToErrorCode() == std::errc::timed_out) {
+                  timeout = true;
+                  return llvm::Error::success();
+                }
+                return llvm::Error(std::move(Err));
+              }))
+        return Err;
+
+      // If the read timed out, continue to check if we should disconnect.
+      if (timeout)
+        continue;
+
+      // nullopt is returned on EOF.
+      if (!*next)
+        break;
+
+      {
+        std::lock_guard<std::mutex> lock(queue_mutex);
+
+        // If a cancel is requested for the active request, make a best
+        // effort attempt to interrupt.
+        if (const auto cancel_args =
+                getArgumentsIfRequest<protocol::CancelArguments>(**next,
+                                                                 "cancel");
+            cancel_args && active_seq == cancel_args->requestId) {
+          DAP_LOG(log, "({0}) interrupting inflight request {1}",
+                  transport.GetClientName(), active_seq);
+          debugger.RequestInterrupt();
+          debugger.GetCommandInterpreter().InterruptCommand();
+        }
+
+        queue.push_back(std::move(**next));
+      }
+      queue_cv.notify_one();
+    }
+
+    return llvm::Error::success();
+  });
+
+  auto cleanup = llvm::make_scope_exit([&]() {
     out.Stop();
     err.Stop();
     StopEventHandlers();
   });
+
   while (!disconnecting) {
-    llvm::Expected<std::optional<protocol::Message>> next = transport.Read();
-    if (!next)
-      return next.takeError();
+    protocol::Message next;
+    {
+      std::unique_lock<std::mutex> lock(queue_mutex);
+      queue_cv.wait(lock, [&] { return disconnecting || !queue.empty(); });
 
-    // nullopt on EOF
-    if (!*next)
-      break;
+      if (queue.empty())
+        break;
+
+      next = queue.front();
+      queue.pop_front();
+
+      if (protocol::Request *req = std::get_if<protocol::Request>(&next)) {
+        active_seq = req->seq;
+
+        // Check if we should preempt this request from a queued cancel.
+        bool cancelled = false;
+        for (const auto &message : queue) {
----------------
JDevlieghere wrote:
Rather than scanning the whole queue, would it be worth having a queue of cancellation request ids, so that this scan isn't linear in the number of pending requests? Maybe even make it a set so that duplicated cancellation requests can't create quadratic behavior here.

https://github.com/llvm/llvm-project/pull/130169
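A minimal standalone sketch of what that suggestion could look like (the `Request` and `RequestQueue` names and members below are invented for illustration; in the actual patch the equivalent state would live on `DAP` next to `queue`, `queue_mutex`, and `active_seq`): the reader thread records the ids named by incoming `cancel` requests in a set guarded by the same mutex as the message queue, and the consumer does a single set lookup instead of walking the deque.

```cpp
// Hypothetical sketch, not the lldb-dap implementation.
#include <cstdint>
#include <deque>
#include <mutex>
#include <optional>
#include <set>
#include <string>
#include <utility>

struct Request {
  int64_t seq;
  std::string command;
};

class RequestQueue {
public:
  // Reader thread: enqueue every incoming request.
  void Push(Request req) {
    std::lock_guard<std::mutex> lock(mutex_);
    queue_.push_back(std::move(req));
  }

  // Reader thread: a "cancel" request named `request_id`. Using a set means
  // duplicate cancels for the same id collapse into a single entry.
  void Cancel(int64_t request_id) {
    std::lock_guard<std::mutex> lock(mutex_);
    cancelled_ids_.insert(request_id);
  }

  // Consumer loop: pop the next request and report whether it was cancelled
  // while still queued. No scan over the pending queue is needed.
  std::optional<std::pair<Request, bool>> Pop() {
    std::lock_guard<std::mutex> lock(mutex_);
    if (queue_.empty())
      return std::nullopt;
    Request next = std::move(queue_.front());
    queue_.pop_front();
    // erase() returns the number of elements removed, so it serves as both
    // the membership test and the cleanup of the cancellation marker.
    bool cancelled = cancelled_ids_.erase(next.seq) > 0;
    return std::make_pair(std::move(next), cancelled);
  }

private:
  std::mutex mutex_;
  std::deque<Request> queue_;
  std::set<int64_t> cancelled_ids_;
};
```

With this shape each pop costs one set lookup regardless of how many requests are queued, and repeated cancel requests for the same id cannot multiply the work.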