Fixing a number of scaling issues:
- A missed file-descriptor close made ForkingTaskExecutor silently die after running out of FDs (illustrative sketch at the end of this page)
- Tightened the scope of locks to prevent HTTP timeouts (see the sketch just below)
- Simplified the thread pool (usage sketch after the thread-pool header diff)
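For the lock-scope change, the pattern the pool moves to is: hold the mutex only long enough to hand a task off the queue, then run it with the lock released so slow work (an HTTP call, for example) cannot stall producers or other workers. Below is a minimal standalone sketch of that pattern, not the daggy code itself; all names in it are illustrative.

#include <functional>
#include <iostream>
#include <mutex>
#include <queue>

std::mutex mtx;
std::queue<std::function<void()>> tasks;

// Hold the lock only while taking a task off the queue; release it
// before the task runs so the critical section stays tiny.
void run_one()
{
    std::function<void()> task;
    {
        std::lock_guard<std::mutex> guard(mtx);
        if (tasks.empty())
            return;
        task = std::move(tasks.front());
        tasks.pop();
    } // lock released here, before any slow work starts
    task();
}

int main()
{
    {
        std::lock_guard<std::mutex> guard(mtx);
        tasks.push([] { std::cout << "slow work runs outside the lock\n"; });
    }
    run_one();
}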
@@ -4,6 +4,7 @@
#include <condition_variable>
#include <functional>
#include <future>
#include <iostream>
#include <list>
#include <memory>
#include <queue>
@@ -13,65 +14,11 @@
using namespace std::chrono_literals;

namespace daggy {

    /*
      A Task Queue is a collection of async tasks to be executed by the
      thread pool. Using individual task queues allows for a rough QoS
      when a single thread may be submitting batches of requests --
      one producer won't starve out another, but all tasks will be run
      as quickly as possible.
    */
    class TaskQueue
    {
    public:
        template <class F, class... Args>
        decltype(auto) addTask(F &&f, Args &&...args)
        {
            // using return_type = std::invoke_result<F, Args...>::type;
            using return_type = std::invoke_result_t<F, Args...>;

            std::packaged_task<return_type()> task(
                std::bind(std::forward<F>(f), std::forward<Args>(args)...));

            std::future<return_type> res = task.get_future();
            {
                std::lock_guard<std::mutex> guard(mtx_);
                tasks_.emplace(std::move(task));
            }
            return res;
        }

        std::packaged_task<void()> pop()
        {
            std::lock_guard<std::mutex> guard(mtx_);
            auto task = std::move(tasks_.front());
            tasks_.pop();
            return task;
        }

        size_t size()
        {
            std::lock_guard<std::mutex> guard(mtx_);
            return tasks_.size();
        }

        bool empty()
        {
            std::lock_guard<std::mutex> guard(mtx_);
            return tasks_.empty();
        }

    private:
        std::queue<std::packaged_task<void()>> tasks_;
        std::mutex mtx_;
    };

    class ThreadPool
    {
    public:
        explicit ThreadPool(size_t nWorkers)
            : tqit_(taskQueues_.begin())
            , stop_(false)
            : stop_(false)
            , drain_(false)
        {
            resize(nWorkers);
@@ -98,7 +45,7 @@ namespace daggy {
            while (true) {
                {
                    std::lock_guard<std::mutex> guard(mtx_);
                    if (taskQueues_.empty())
                    if (tasks_.empty())
                        break;
                }
                std::this_thread::sleep_for(250ms);
@@ -118,25 +65,18 @@ namespace daggy {

            for (size_t i = 0; i < nWorkers; ++i)
                workers_.emplace_back([&] {
                    std::packaged_task<void()> task;
                    while (true) {
                        std::packaged_task<void()> task;
                        {
                            std::unique_lock<std::mutex> lock(mtx_);
                            cv_.wait(lock, [&] { return stop_ || !taskQueues_.empty(); });
                            if (taskQueues_.empty()) {
                            cv_.wait(lock, [&] { return stop_ || !tasks_.empty(); });
                            if (tasks_.empty()) {
                                if (stop_)
                                    return;
                                continue;
                            }
                            if (tqit_ == taskQueues_.end())
                                tqit_ = taskQueues_.begin();
                            task = (*tqit_)->pop();
                            if ((*tqit_)->empty()) {
                                tqit_ = taskQueues_.erase(tqit_);
                            }
                            else {
                                tqit_++;
                            }
                            task.swap(tasks_.front());
                            tasks_.pop();
                        }
                        task();
                    }
@@ -148,25 +88,18 @@ namespace daggy {
        {
            if (drain_)
                throw std::runtime_error("Unable to add task to draining pool");
            auto tq = std::make_shared<TaskQueue>();
            using return_type = std::invoke_result_t<F, Args...>;

            auto fut = tq->addTask(f, args...);
            std::packaged_task<return_type()> task(
                std::bind(std::forward<F>(f), std::forward<Args>(args)...));

            std::future<return_type> res = task.get_future();
            {
                std::lock_guard<std::mutex> guard(mtx_);
                taskQueues_.push_back(tq);
                tasks_.emplace(std::move(task));
            }
            cv_.notify_one();
            return fut;
        }

        void addTasks(std::shared_ptr<TaskQueue> &tq)
        {
            if (drain_)
                throw std::runtime_error("Unable to add task to draining pool");
            std::lock_guard<std::mutex> guard(mtx_);
            taskQueues_.push_back(tq);
            cv_.notify_one();
            return res;
        }

        size_t size() const
@@ -174,12 +107,17 @@ namespace daggy {
            return workers_.size();
        }

        size_t queueSize()
        {
            std::lock_guard<std::mutex> lock(mtx_);
            return tasks_.size();
        }

    private:
        // need to keep track of threads, so we can join them
        std::vector<std::thread> workers_;
        // the task queue
        std::list<std::shared_ptr<TaskQueue>> taskQueues_;
        std::list<std::shared_ptr<TaskQueue>>::iterator tqit_;
        std::queue<std::packaged_task<void()>> tasks_;

        // synchronization
        std::mutex mtx_;
@@ -187,5 +125,4 @@ namespace daggy {
        std::atomic<bool> stop_;
        std::atomic<bool> drain_;
    };

} // namespace daggy
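For reference, submitting work to the simplified pool looks roughly like the sketch below. This is a hedged usage example, assuming the header above is reachable as daggy/ThreadPool.h; the include path, worker count, and lambda body are illustrative, not part of this change.

#include <iostream>

#include "daggy/ThreadPool.h" // hypothetical include path for the header above

int main()
{
    daggy::ThreadPool pool(4); // resize(4) starts four worker threads

    // addTask wraps the callable in a packaged_task, queues it under the
    // single mutex, notifies one worker, and returns the matching future.
    auto fut = pool.addTask([](int x) { return x * 2; }, 21);

    std::cout << "result: " << fut.get() << "\n";   // 42
    std::cout << "queued: " << pool.queueSize() << "\n";
}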
@@ -51,7 +51,7 @@ namespace daggy::executors::task {

    bool stop(DAGRunID runID, const std::string &taskName) override;

    std::string description() const;
    std::string description() const override;

    void addRunner(const std::string &url);
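On the ForkingTaskExecutor fix: that code is not part of this diff, so the sketch below only illustrates the class of bug named in the first bullet, using hypothetical names rather than the daggy implementation. A forking executor that pipes a child's output but forgets to close the parent-side descriptors leaks two fds per task until pipe()/open() start failing with EMFILE and the executor quietly stops launching work.

#include <sys/wait.h>
#include <unistd.h>

#include <cstdio>

// Hypothetical illustration only: run one child and capture its stdout.
int run_child_once()
{
    int fds[2];
    if (pipe(fds) == -1)
        return -1;                  // already out of descriptors

    pid_t pid = fork();
    if (pid == 0) {                 // child: stdout -> pipe, then exec
        dup2(fds[1], STDOUT_FILENO);
        close(fds[0]);
        close(fds[1]);
        execlp("echo", "echo", "done", (char *)nullptr);
        _exit(127);
    }

    close(fds[1]);                  // parent: close the write end...
    char buf[256];
    ssize_t n;
    while ((n = read(fds[0], buf, sizeof buf)) > 0)
        fwrite(buf, 1, static_cast<size_t>(n), stdout);
    close(fds[0]);                  // ...and the read end, on every run
    int status = 0;
    waitpid(pid, &status, 0);
    return status;
}

int main()
{
    for (int i = 0; i < 3; ++i)     // drop the two parent-side close() calls
        run_child_once();           // and this loop leaks two fds per task
}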