Moving to a poll method for workers, and daggyd-preserved capacities
@@ -49,7 +49,8 @@ namespace daggy {
 {
   Ok = 200,
   Not_Found = 404,
-  Not_Acceptable = 406
+  Not_Acceptable = 406,
+  Server_Error = 500
 };

 struct HTTPResponse
@@ -58,13 +58,21 @@ namespace daggy::executors::task {
   std::string taskName;
   std::string runnerURL;
   uint32_t retries;
+  daggy_runner::Capacity resources;
 };

+// Resolves jobs through polling
 std::atomic<bool> running_;
 std::thread monitorWorker_;

-std::unordered_set<std::string> runners_;
+struct RunnerCapacity
+{
+  daggy_runner::Capacity current;
+  daggy_runner::Capacity total;
+};
+std::mutex runnersGuard_;
+std::unordered_map<std::string, RunnerCapacity> runners_;

 std::mutex rtGuard_;
 std::unordered_map<std::pair<DAGRunID, std::string>, RunningTask>
     runningTasks_;
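One detail worth noting: runningTasks_ (and the resolvedJobs map in monitor() below) keys an std::unordered_map on std::pair<DAGRunID, std::string>, and the standard library provides no std::hash specialization for std::pair, so the build needs a custom hasher defined somewhere in daggy. A minimal sketch of what such a hasher could look like — PairHash and the integer width of DAGRunID are illustrative assumptions, not code from this commit:

    // Sketch only: std::hash has no std::pair specialization, so these maps
    // need a hasher like this passed as the third template argument.
    #include <cstdint>
    #include <functional>
    #include <string>
    #include <utility>

    using DAGRunID = std::uint64_t; // assumed width; not from this commit

    struct PairHash {
      std::size_t operator()(const std::pair<DAGRunID, std::string> &p) const {
        const std::size_t h1 = std::hash<DAGRunID>{}(p.first);
        const std::size_t h2 = std::hash<std::string>{}(p.second);
        // boost-style hash_combine of the two component hashes
        return h1 ^ (h2 + 0x9e3779b97f4a7c15ULL + (h1 << 6) + (h1 >> 2));
      }
    };

Declared as std::unordered_map<std::pair<DAGRunID, std::string>, RunningTask, PairHash>, the member above then compiles as written.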
@@ -234,7 +234,7 @@ namespace daggy {
   curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
   curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, curlWriter);
   curl_easy_setopt(curl, CURLOPT_WRITEDATA, &buffer);
-  curl_easy_setopt(curl, CURLOPT_TIMEOUT, 2);
+  curl_easy_setopt(curl, CURLOPT_TIMEOUT, 10);

   if (trace) {
     curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, http_trace);
@@ -254,8 +254,9 @@ namespace daggy {

   if (res != CURLE_OK) {
     curl_easy_cleanup(curl);
-    throw std::runtime_error(std::string{"CURL Failed: "} +
-                             curl_easy_strerror(res));
+    response.code = HTTPCode::Server_Error;
+    response.body = std::string{"CURL Failed: "} + curl_easy_strerror(res);
+    return response;
   }
   curl_easy_cleanup(curl);

@@ -275,7 +276,18 @@ namespace daggy {
   auto response = HTTP_REQUEST(url, payload, method);

   rj::Document doc;
-  checkRJParse(doc.Parse(response.body.c_str()));
+  if (response.code == HTTPCode::Server_Error) {
+    doc.SetObject();
+    auto &alloc = doc.GetAllocator();
+    doc.AddMember("error",
+                  rj::Value().SetString(response.body.c_str(),
+                                        response.body.size(), alloc),
+                  alloc);
+  }
+  else {
+    checkRJParse(doc.Parse(response.body.c_str()));
+  }

   return std::make_pair(response.code, std::move(doc));
 }
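The two hunks above change HTTP_REQUEST's failure mode: a CURL transport error no longer throws, it returns HTTPCode::Server_Error with the error text in the body, and JSON_HTTP_REQUEST wraps that text into a synthetic {"error": ...} document. Callers then take one uniform path for success and failure. A sketch of the resulting convention — the /v1/capacity endpoint here is just for illustration, mirroring the addRunner code further down:

    // No try/catch needed any more: branch on the returned code and read
    // the synthesized "error" member on failure.
    const auto &[code, doc] = JSON_HTTP_REQUEST(url + "/v1/capacity");
    if (code != HTTPCode::Ok) {
      std::cerr << "request failed: " << doc["error"].GetString() << std::endl;
      return;
    }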
@@ -135,44 +135,45 @@ std::future<AttemptRecord> DaggyRunnerTaskExecutor::execute(
   // Capacities for a runner can be negative, meaning that they're currently
   // oversubscribed.
   std::vector<std::pair<std::string, double>> impacts;
+  std::string runner;

-  for (const auto &runner : runners_) {
-    try {
-      const auto &[code, doc] = JSON_HTTP_REQUEST(runner + "/v1/capacity");
-      if (code != HTTPCode::Ok) {
+  {
+    std::lock_guard<std::mutex> lock(runnersGuard_);
+    for (const auto &[runner, caps] : runners_) {
+      const auto result = HTTP_REQUEST(runner + "/ready");
+      if (result.code != 200)
         continue;
-      }

-      auto curCap = capacityFromJSON(doc["current"]);
-      auto totCap = capacityFromJSON(doc["total"]);
-
-      double cores = (curCap.cores - taskUsed.cores);
-      double memoryMB = (curCap.memoryMB - taskUsed.memoryMB);
+      double cores = (caps.current.cores - taskUsed.cores);
+      double memoryMB = (caps.current.memoryMB - taskUsed.memoryMB);

       double impact =
-          std::min(cores / totCap.cores, memoryMB / totCap.memoryMB);
+          std::min(cores / caps.total.cores, memoryMB / caps.total.memoryMB);
+      std::cout << runner << ": " << impact << std::endl;
       impacts.emplace_back(runner, impact);
     }
-    catch (const std::exception &_) {
-      continue;
-    }
-  }

-  if (impacts.empty()) {
-    std::promise<AttemptRecord> prom;
-    auto fut = prom.get_future();
-    AttemptRecord record{.rc = -1,
-                         .executorLog = "No runners available for execution"};
-    prom.set_value(std::move(record));
-    return fut;
-  }
-
-  std::sort(impacts.begin(), impacts.end());
-
-  auto runner = impacts.back();
+    if (impacts.empty()) {
+      std::promise<AttemptRecord> prom;
+      auto fut = prom.get_future();
+      AttemptRecord record{.rc = -1,
+                           .executorLog = "No runners available for execution"};
+      prom.set_value(std::move(record));
+      return fut;
+    }
+
+    std::sort(impacts.begin(), impacts.end());
+
+    runner = impacts.back().first;
+    auto &caps = runners_.at(runner);
+    caps.current.cores -= taskUsed.cores;
+    caps.current.memoryMB -= taskUsed.memoryMB;
+  }

   std::cout << "Queuing on runner: " << runner << std::endl;

   std::stringstream ss;
-  ss << runner.first << "/v1/task/" << runID << "/" << taskName;
+  ss << runner << "/v1/task/" << runID << "/" << taskName;
   auto url = ss.str();

   const auto response = HTTP_REQUEST(url, taskToJSON(task), "POST");
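The impact score is the fraction of a runner's total capacity that would remain free after placing the task, taking the tighter of the two dimensions. For example, a runner with 8 of 16 cores and 8192 of 32768 MB currently free, offered a task needing 4 cores and 4096 MB, scores min((8-4)/16, (8192-4096)/32768) = min(0.25, 0.125) = 0.125. One caveat: std::pair's operator< compares .first before .second, so std::sort on impacts orders by runner URL rather than by score, and impacts.back() picks the alphabetically last runner. If the intent is to pick the runner with the most headroom, an explicit comparator would be needed; a sketch under that assumption:

    // Assumption: selection should favour the largest impact score.
    std::sort(impacts.begin(), impacts.end(),
              [](const auto &a, const auto &b) { return a.second < b.second; });
    // impacts.back().first is now the runner with the most remaining headroom.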
@@ -182,8 +183,9 @@ std::future<AttemptRecord> DaggyRunnerTaskExecutor::execute(
   RunningTask rt{.prom{},
                  .runID = runID,
                  .taskName = taskName,
-                 .runnerURL = runner.first,
-                 .retries = 3};
+                 .runnerURL = runner,
+                 .retries = 3,
+                 .resources = taskUsed};

   auto fut = rt.prom.get_future();
@@ -200,51 +202,103 @@ bool DaggyRunnerTaskExecutor::stop(DAGRunID runID, const std::string &taskName)

 void DaggyRunnerTaskExecutor::addRunner(const std::string &url)
 {
-  runners_.insert(url);
+  // Try and get the capacity
+  const auto &[code, doc] = JSON_HTTP_REQUEST(url + "/v1/capacity");
+  if (code != HTTPCode::Ok) {
+    std::cerr << "Failed to add runner " << url << ": "
+              << doc["error"].GetString() << std::endl;
+    return;
+  }
+  RunnerCapacity caps{.current = capacityFromJSON(doc["current"]),
+                      .total = capacityFromJSON(doc["total"])};
+  std::lock_guard<std::mutex> lock(runnersGuard_);
+  runners_.emplace(url, caps);
 }

 void DaggyRunnerTaskExecutor::monitor()
 {
   while (running_) {
     {
-      std::vector<std::pair<DAGRunID, std::string>> resolvedJobs;
+      std::unordered_map<std::pair<DAGRunID, std::string>,
+                         std::optional<AttemptRecord>>
+          resolvedJobs;
+
+      std::unordered_map<std::pair<DAGRunID, std::string>, Capacity>
+          taskResources;

       {
         std::lock_guard<std::mutex> lock(rtGuard_);
-        for (auto &[taskID, task] : runningTasks_) {
-          try {
-            const auto &[code, json] = JSON_HTTP_REQUEST(
-                task.runnerURL + "/v1/task/" + std::to_string(taskID.first) +
-                "/" + taskID.second);
-            if (code != HTTPCode::Ok) {
-              --task.retries;
-
-              if (task.retries == 0) {
-                AttemptRecord record{
-                    .rc = -1,
-                    .executorLog = "Unable to query runner for progress"};
-                task.prom.set_value(std::move(record));
-                resolvedJobs.emplace_back(taskID);
-              }
-              continue;
-            }
-
-            if (json["state"] == "COMPLETED") {
-              auto attempt = attemptRecordFromJSON(json["attempt"]);
-              task.prom.set_value(std::move(attempt));
-              resolvedJobs.emplace_back(taskID);
-            }
-          }
-          catch (std::runtime_error &e) {
-            continue;
-          }
-        }
-        for (const auto &tid : resolvedJobs) {
-          runningTasks_.extract(tid);
+        for (const auto &[tid, info] : runningTasks_) {
+          taskResources.emplace(tid, info.resources);
         }
       }

-      std::this_thread::sleep_for(std::chrono::milliseconds(250));
+      {
+        std::lock_guard<std::mutex> lock(runnersGuard_);
+        for (auto &[runnerURL, caps] : runners_) {
+          try {
+            const auto &[code, json] =
+                JSON_HTTP_REQUEST(runnerURL + "/v1/poll");
+            if (code != HTTPCode::Ok)
+              continue;
+
+            const auto tasks = json.GetArray();
+            for (size_t idx = 0; idx < tasks.Size(); ++idx) {
+              const auto &task = tasks[idx];
+              if (task["state"] == "PENDING") {
+                resolvedJobs.emplace(
+                    std::make_pair(task["runID"].GetInt64(),
+                                   task["taskName"].GetString()),
+                    std::nullopt);
+              }
+              else {
+                auto tid = std::make_pair(task["runID"].GetInt64(),
+                                          task["taskName"].GetString());
+                const auto &res = taskResources.at(tid);
+                caps.current.cores += res.cores;
+                caps.current.memoryMB += res.memoryMB;
+
+                resolvedJobs.emplace(tid,
+                                     attemptRecordFromJSON(task["attempt"]));
+              }
+            }
+          }
+          catch (std::exception &e) {
+            std::cout << "Curl timeout failed for runner " << runnerURL << ": "
+                      << e.what() << std::endl;
+          }
+        }
+      }
+
+      std::vector<std::pair<DAGRunID, std::string>> completedTasks;
+      {
+        std::lock_guard<std::mutex> lock(rtGuard_);
+        for (auto &[taskID, task] : runningTasks_) {
+          auto it = resolvedJobs.find(taskID);
+          if (it == resolvedJobs.end()) {
+            --task.retries;
+
+            if (task.retries == 0) {
+              AttemptRecord record{
+                  .rc = -1,
+                  .executorLog = "Unable to query runner for progress"};
+              task.prom.set_value(std::move(record));
+              completedTasks.emplace_back(taskID);
+            }
+            continue;
+          }
+          else if (it->second.has_value()) {
+            // Task has completed
+            task.prom.set_value(it->second.value());
+            completedTasks.emplace_back(taskID);
+          }
+        }
+      }
+      for (const auto &tid : completedTasks) {
+        runningTasks_.extract(tid);
+      }
     }

+    std::this_thread::sleep_for(std::chrono::seconds(1));
   }
 }
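For reference, the poll loop in monitor() implies a /v1/poll response shaped as a JSON array of task records carrying runID, taskName, state, and, for finished tasks, attempt. The commit does not show the payload itself; a hypothetical example consistent with the parsing code above:

    // Hypothetical /v1/poll body; field names are taken from the reads in
    // monitor() (runID, taskName, state, attempt), values are invented.
    const char *examplePoll = R"([
      {"runID": 7, "taskName": "build", "state": "PENDING"},
      {"runID": 7, "taskName": "test", "state": "COMPLETED",
       "attempt": {"rc": 0, "executorLog": "..."}}
    ])";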