Adding support for remote execution daemons.
Squashed commit of the following (all commits by Ian Roddis <tech@kinesin.ca>):

commit 69d5ef7a256b86a86d46e5ae374c00fded1497ea (Thu Dec 16 12:15:55 2021 -0400)
    Updating readme
commit 94a9f676d0f9cc0b55cdc18c4927eaea40d82c77 (Thu Dec 16 12:05:36 2021 -0400)
    Fixing serialization of attempt records when querying entire dag
commit 945e5f90b24abf07c9af1bc4c6bbcb33e93b8069 (Thu Dec 16 11:37:59 2021 -0400)
    Compiles cleanly...
commit 8b23e46081d47fb80dc1a2d998fc6dc4bbf301a8 (Thu Dec 16 10:43:03 2021 -0400)
    Adding in missing source file to cmake build list
commit 6d10d9791206e2bc15788beadeea580b8e43a853 (Thu Dec 16 10:41:43 2021 -0400)
    Adding new executors
commit 42a2c67f4d6ae99df95d917c8621d78cd99837a1 (Thu Dec 16 10:27:14 2021 -0400)
    Fixing missing curl cmake dependency
commit 394bc4c5d51ecee7bf14712f719c8bf7e97fb0fa (Thu Dec 16 10:21:58 2021 -0400)
    Fixing missing curl cmake dependency
commit dd9efc8e7e7770ea1bcbccb70a1af9cfcff0414c (Wed Dec 15 17:15:38 2021 -0400)
    Checkpointing progress
commit 3b3b55d6037bb96e46de6763f486f4ecb92fe6a0 (Wed Dec 15 14:21:18 2021 -0400)
    updating readme
commit 303027c11452941b2a0c0d1b04ac5942e79efd74 (Wed Dec 15 14:17:16 2021 -0400)
    Namespacing daggyd
    Adding more error checking around deserialization of parameters
    Adding tests for runner agent
commit c592eaeba12e2a449bae401e8c1d9ed236416d52 (Wed Dec 15 11:20:21 2021 -0400)
    Checkpointing work
commit fb1862d1cefe2b53a98659cce3c8c73d88bf5d84 (Wed Dec 15 09:52:29 2021 -0400)
    Copying daggyd for daggyr template, adding in basic routes
@@ -11,7 +11,7 @@ IF (DAGGY_ENABLE_REDIS)
endif ()

target_include_directories(${PROJECT_NAME} PUBLIC include)
-target_link_libraries(${PROJECT_NAME} pistache pthread rapidjson better-enums)
+target_link_libraries(${PROJECT_NAME} pistache curl pthread rapidjson better-enums)

add_subdirectory(src)
add_subdirectory(tests)
@@ -1,5 +1,6 @@
#pragma once

#include <curl/curl.h>
#include <rapidjson/document.h>

#include <string>
@@ -12,6 +13,8 @@
#include "daggy/executors/task/TaskExecutor.hpp"
#include "daggy/loggers/dag_run/DAGRunLogger.hpp"

namespace rj = rapidjson;

namespace daggy {
    using TaskDAG = DAG<std::string, Task>;

@@ -40,4 +43,48 @@ namespace daggy {
    void updateDAGFromTasks(TaskDAG &dag, const TaskSet &tasks);

    std::ostream &operator<<(std::ostream &os, const TimePoint &tp);

    // HTTP helpers
    enum HTTPCode : long
    {
        Ok = 200,
        Not_Found = 404,
        Not_Acceptable = 406
    };

    struct HTTPResponse
    {
        HTTPCode code;
        std::string body;
    };

    HTTPResponse HTTP_REQUEST(const std::string &url,
                              const std::string &payload = "",
                              const std::string &method = "GET",
                              bool trace = false);

    std::pair<HTTPCode, rj::Document> JSON_HTTP_REQUEST(
        const std::string &url, const std::string &payload = "",
        const std::string &method = "GET", bool trace = false);
} // namespace daggy

template <typename T>
void hash_combine(std::size_t &seed, T const &key)
{
    std::hash<T> hasher;
    seed ^= hasher(key) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}

namespace std {
    template <typename T1, typename T2>
    struct hash<std::pair<T1, T2>>
    {
        std::size_t operator()(std::pair<T1, T2> const &p) const
        {
            std::size_t seed(0);
            ::hash_combine(seed, p.first);
            ::hash_combine(seed, p.second);
            return seed;
        }
    };
} // namespace std
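
This specialization is what lets DaggyRunnerTaskExecutor (below) key its runningTasks_ map by a (run ID, task name) pair, since std::unordered_map needs a hasher for its key type. A minimal self-contained sketch of the same pattern; the std::uint64_t run ID here is a stand-in for whatever DAGRunID aliases:

#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>
#include <utility>

// Boost-style combiner, as above.
template <typename T>
void hash_combine(std::size_t &seed, T const &key)
{
    std::hash<T> hasher;
    seed ^= hasher(key) + 0x9e3779b9 + (seed << 6) + (seed >> 2);
}

namespace std {
    template <typename T1, typename T2>
    struct hash<std::pair<T1, T2>>
    {
        std::size_t operator()(std::pair<T1, T2> const &p) const
        {
            std::size_t seed(0);
            ::hash_combine(seed, p.first);
            ::hash_combine(seed, p.second);
            return seed;
        }
    };
} // namespace std

int main()
{
    // Mirrors runningTasks_: one entry per (runID, taskName).
    std::unordered_map<std::pair<std::uint64_t, std::string>, int> running;
    running.emplace(std::make_pair(std::uint64_t{7}, std::string{"extract"}), 42);
    std::cout << running.count({7, "extract"}) << "\n"; // prints 1
}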
@@ -0,0 +1,69 @@
#pragma once

// Standard headers for the members used below.
#include <atomic>
#include <future>
#include <mutex>
#include <string>
#include <thread>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <vector>

#include <rapidjson/document.h>

#include "TaskExecutor.hpp"

namespace rj = rapidjson;

namespace daggy::executors::task {

    namespace daggy_runner {
        struct Capacity
        {
            ssize_t cores;
            ssize_t memoryMB;
        };

        std::string capacityToJSON(const Capacity &cap);
        Capacity capacityFromJSON(const rj::Value &spec);
        Capacity capacityFromTask(const Task &task);

        void validateTaskParameters(const ConfigValues &job);
    } // namespace daggy_runner

    class DaggyRunnerTaskExecutor : public TaskExecutor
    {
    public:
        using Command = std::vector<std::string>;

        DaggyRunnerTaskExecutor();
        ~DaggyRunnerTaskExecutor() override;

        // Validates the job to ensure that all required values are set and
        // are of the right type.
        bool validateTaskParameters(const ConfigValues &job) override;

        std::vector<ConfigValues> expandTaskParameters(
            const ConfigValues &job, const ConfigValues &expansionValues) override;

        // Runs the task
        std::future<AttemptRecord> execute(DAGRunID runID,
                                           const std::string &taskName,
                                           const Task &task) override;

        bool stop(DAGRunID runID, const std::string &taskName) override;

        void addRunner(const std::string &url);

    private:
        void monitor();

        struct RunningTask
        {
            std::promise<AttemptRecord> prom;
            DAGRunID runID;
            std::string taskName;
            std::string runnerURL;
        };

        // Resolves jobs through polling
        std::atomic<bool> running_;
        std::thread monitorWorker_;

        std::unordered_set<std::string> runners_;
        std::mutex rtGuard_;
        std::unordered_map<std::pair<DAGRunID, std::string>, RunningTask>
            runningTasks_;
    };
} // namespace daggy::executors::task
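
Intended use, sketched against the declarations above. The runner URL, run ID, and task construction are illustrative; Task's shape is defined elsewhere in libdaggy:

#include <daggy/executors/task/DaggyRunnerTaskExecutor.hpp>

// Hypothetical wiring; assume `task` was built by the surrounding scheduler.
daggy::AttemptRecord runRemotely(const daggy::Task &task)
{
    daggy::executors::task::DaggyRunnerTaskExecutor executor;
    executor.addRunner("http://runner-1:8080"); // illustrative daggyr agent URL

    // execute() picks the least-loaded runner and returns a future that the
    // executor's monitor thread resolves by polling that runner.
    auto fut = executor.execute(/*runID=*/1, "extract", task);
    return fut.get(); // blocks until the monitor resolves the attempt
}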
@@ -5,6 +5,10 @@
#include "TaskExecutor.hpp"

namespace daggy::executors::task {
    namespace forking_executor {
        void validateTaskParameters(const ConfigValues &job);
    }

    class ForkingTaskExecutor : public TaskExecutor
    {
    public:
@@ -153,6 +153,7 @@ namespace daggy {
    for (auto it = params.MemberBegin(); it != params.MemberEnd(); ++it) {
        if (!it->name.IsString())
            throw std::runtime_error("job key must be a string.");

        if (it->value.IsArray()) {
            std::vector<std::string> values;
            for (size_t i = 0; i < it->value.Size(); ++i) {
@@ -160,10 +161,13 @@ namespace daggy {
            }
            task.job.insert_or_assign(it->name.GetString(), values);
        }
-        else {
+        else if (it->value.IsString()) {
            task.job.insert_or_assign(it->name.GetString(),
                                      it->value.GetString());
        }
+        else {
+            throw std::runtime_error("Value in parameters is not a string");
+        }
    }
}
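
The effect of the tightened check, illustrated with a standalone rapidjson snippet (the surrounding parse function and its task context live elsewhere in this file):

#include <rapidjson/document.h>
#include <cassert>

int main()
{
    rapidjson::Document params;
    params.Parse(R"({"cores": "4", "command": ["echo", "hi"], "bad": 4})");
    assert(!params.HasParseError());

    // "cores" (string) and "command" (array) are both accepted.
    // "bad" is neither, so the loop above now throws instead of calling
    // GetString() on a non-string value.
    assert(params["cores"].IsString());
    assert(params["command"].IsArray());
    assert(!params["bad"].IsString() && !params["bad"].IsArray());
}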
@@ -5,6 +5,53 @@

using namespace std::chrono_literals;

static int http_trace(CURL *handle, curl_infotype type, char *data, size_t size,
                      void *userp)
{
    const char *text;
    (void)handle; /* prevent compiler warning */
    (void)userp;
    (void)size;

    switch (type) {
    case CURLINFO_TEXT:
        fprintf(stderr, "== Info: %s", data);
        /* FALLTHROUGH */
    default: /* in case a new one is introduced to shock us */
        return 0;

    case CURLINFO_HEADER_OUT:
        text = "=> Send header";
        break;
    case CURLINFO_DATA_OUT:
        text = "=> Send data";
        break;
    case CURLINFO_SSL_DATA_OUT:
        text = "=> Send SSL data";
        break;
    case CURLINFO_HEADER_IN:
        text = "<= Recv header";
        break;
    case CURLINFO_DATA_IN:
        text = "<= Recv data";
        break;
    case CURLINFO_SSL_DATA_IN:
        text = "<= Recv SSL data";
        break;
    }

    std::cerr << "\n================== " << text
              << " ==================" << std::endl
              << data << std::endl;
    return 0;
}

// libcurl write callback: append the received chunk to a stringstream.
size_t curlWriter(char *in, size_t size, size_t nmemb, std::stringstream *out)
{
    size_t r = size * nmemb;
    out->write(in, r);
    return r;
}

namespace daggy {
    std::string globalSub(std::string string, const std::string &pattern,
                          const std::string &replacement)
@@ -168,4 +215,67 @@ namespace daggy {
        os << tp.time_since_epoch().count() << std::endl;
        return os;
    }
    HTTPResponse HTTP_REQUEST(const std::string &url, const std::string &payload,
                              const std::string &method, bool trace)
    {
        HTTPResponse response{.code = HTTPCode::Ok, .body = ""};

        CURL *curl;
        CURLcode res;
        struct curl_slist *headers = NULL;

        curl_global_init(CURL_GLOBAL_ALL);

        curl = curl_easy_init();
        if (curl) {
            std::stringstream buffer;

            curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
            curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, curlWriter);
            curl_easy_setopt(curl, CURLOPT_WRITEDATA, &buffer);

            if (trace) {
                curl_easy_setopt(curl, CURLOPT_DEBUGFUNCTION, http_trace);
                curl_easy_setopt(curl, CURLOPT_VERBOSE, 1L);
            }

            if (!payload.empty()) {
                // curl expects a long for the field size.
                curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE,
                                 static_cast<long>(payload.size()));
                curl_easy_setopt(curl, CURLOPT_POSTFIELDS, payload.c_str());
                headers = curl_slist_append(headers, "Content-Type: application/json");
            }
            curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, method.c_str());
            headers = curl_slist_append(headers, "Expect:");
            curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);

            res = curl_easy_perform(curl);

            if (res != CURLE_OK) {
                curl_slist_free_all(headers);
                curl_easy_cleanup(curl);
                throw std::runtime_error(std::string{"CURL Failed: "} +
                                         curl_easy_strerror(res));
            }

            // Read the response code before tearing the handle down;
            // querying a cleaned-up handle is undefined.
            long code = 0;
            curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &code);
            response.code = static_cast<HTTPCode>(code);
            response.body = buffer.str();

            curl_slist_free_all(headers);
            curl_easy_cleanup(curl);
        }

        curl_global_cleanup();

        return response;
    }

    std::pair<HTTPCode, rj::Document> JSON_HTTP_REQUEST(
        const std::string &url, const std::string &payload,
        const std::string &method, bool trace)
    {
        // Forward `trace` so verbose logging also works through this wrapper.
        auto response = HTTP_REQUEST(url, payload, method, trace);

        rj::Document doc;
        checkRJParse(doc.Parse(response.body.c_str()));
        return std::make_pair(response.code, std::move(doc));
    }
} // namespace daggy
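
A sketch of a call site for these helpers; the runner URL is illustrative, and the snippet assumes this translation unit's headers:

#include <iostream>
#include <string>

// Hypothetical caller: fetch a runner's capacity document.
void showFreeCores(const std::string &runner)
{
    // Empty payload and default "GET"; pass trace = true to dump
    // curl's conversation via http_trace.
    auto [code, doc] = daggy::JSON_HTTP_REQUEST(runner + "/v1/capacity");
    if (code == daggy::HTTPCode::Ok && doc.HasMember("current"))
        std::cout << doc["current"]["cores"].GetInt64() << " cores free\n";
}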
@@ -2,4 +2,5 @@ target_sources(${PROJECT_NAME} PRIVATE
    SlurmTaskExecutor.cpp
    NoopTaskExecutor.cpp
    ForkingTaskExecutor.cpp
+   DaggyRunnerTaskExecutor.cpp
)
libdaggy/src/executors/task/DaggyRunnerTaskExecutor.cpp (new file, 227 lines)
@@ -0,0 +1,227 @@
#include <array> // std::array used by validateTaskParameters below
#include <daggy/Serialization.hpp>
#include <daggy/Utilities.hpp>
#include <daggy/executors/task/DaggyRunnerTaskExecutor.hpp>
#include <daggy/executors/task/ForkingTaskExecutor.hpp>
#include <iomanip>

using namespace daggy::executors::task;
using namespace daggy::executors::task::daggy_runner;
using namespace daggy;

namespace daggy::executors::task::daggy_runner {
    std::string capacityToJSON(const Capacity &cap)
    {
        return R"({ "cores": )" + std::to_string(cap.cores) + R"(, "memoryMB": )" +
               std::to_string(cap.memoryMB) + "}";
    }

    Capacity capacityFromJSON(const rj::Value &spec)
    {
        Capacity cap{.cores = 0, .memoryMB = 0};

        if (!spec.IsObject()) {
            throw std::runtime_error("Capacity is not an object");
        }

        if (spec.HasMember("cores")) {
            if (!spec["cores"].IsNumber()) {
                throw std::runtime_error("cores member of Capacity is not an integer");
            }
            cap.cores = spec["cores"].GetInt64();
        }

        if (spec.HasMember("memoryMB")) {
            if (!spec["memoryMB"].IsNumber()) {
                throw std::runtime_error(
                    "memoryMB member of Capacity is not an integer");
            }
            cap.memoryMB = spec["memoryMB"].GetInt64();
        }

        return cap;
    }
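
A quick round-trip check of the two converters above; capacityRoundTrip is a hypothetical test function compiled against this file:

#include <cassert>

void capacityRoundTrip()
{
    using namespace daggy::executors::task::daggy_runner;

    Capacity cap{.cores = 8, .memoryMB = 16384};

    // capacityToJSON produces: { "cores": 8, "memoryMB": 16384}
    rj::Document doc;
    doc.Parse(capacityToJSON(cap).c_str());

    Capacity back = capacityFromJSON(doc);
    assert(back.cores == 8 && back.memoryMB == 16384);
}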

    Capacity capacityFromTask(const Task &task)
    {
        Capacity cap{.cores = 0, .memoryMB = 0};

        cap.cores = std::stoll(std::get<std::string>(task.job.at("cores")));
        cap.memoryMB = std::stoll(std::get<std::string>(task.job.at("memoryMB")));

        return cap;
    }

    void validateTaskParameters(const daggy::ConfigValues &job)
    {
        forking_executor::validateTaskParameters(job);

        const std::array<std::string, 2> fields{"cores", "memoryMB"};

        for (const auto &field : fields) {
            if (job.count(field) == 0)
                throw std::runtime_error("Missing required job parameter " + field);

            const auto &val = job.at(field);

            if (!std::holds_alternative<std::string>(val))
                throw std::runtime_error(field + " in capacity is not a string");

            try {
                std::stoll(std::get<std::string>(val));
            }
            catch (std::exception &e) {
                throw std::runtime_error(field + " in capacity is not an integer");
            }
        }
    }
} // namespace daggy::executors::task::daggy_runner

DaggyRunnerTaskExecutor::DaggyRunnerTaskExecutor()
    : running_(true)
    , monitorWorker_(&DaggyRunnerTaskExecutor::monitor, this)
{
}

DaggyRunnerTaskExecutor::~DaggyRunnerTaskExecutor()
{
    running_ = false;
    monitorWorker_.join();
}

// Validates the job to ensure that all required values are set and are of
// the right type.
bool DaggyRunnerTaskExecutor::validateTaskParameters(const ConfigValues &job)
{
    daggy_runner::validateTaskParameters(job);

    return true;
}

std::vector<ConfigValues> DaggyRunnerTaskExecutor::expandTaskParameters(
    const ConfigValues &job, const ConfigValues &expansionValues)
{
    std::vector<ConfigValues> newValues;

    auto command =
        (job.count("command") == 0 ? Command{}
                                   : std::get<Command>(job.at("command")));

    auto environment = (job.count("environment") == 0
                            ? Command{}
                            : std::get<Command>(job.at("environment")));

    // Interpolate command and environment together, then split each expanded
    // result back out at the original command length.
    Command both(command);
    std::copy(environment.begin(), environment.end(), std::back_inserter(both));

    for (const auto &parts : interpolateValues(both, expansionValues)) {
        ConfigValues newCommand{job};
        newCommand["command"] =
            Command(parts.begin(), parts.begin() + command.size());
        newCommand["environment"] =
            Command(parts.begin() + command.size(), parts.end());
        newValues.emplace_back(newCommand);
    }

    return newValues;
}
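
Assuming interpolateValues expands placeholder tokens across its inputs, a job whose command is ["echo", "{sample}"] with expansion values sample = ["a", "b"] would come back as two configs, one running echo a and one echo b, with any templated environment entries expanded in lockstep; concatenating command and environment before interpolating is what keeps the two lists consistent within each expansion.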

// Runs the task
std::future<AttemptRecord> DaggyRunnerTaskExecutor::execute(
    DAGRunID runID, const std::string &taskName, const Task &task)
{
    auto taskUsed = capacityFromTask(task);

    // Get the capacities for all the runners.
    // Capacities for a runner can be negative, meaning that they're currently
    // oversubscribed; fall back to the runner's total capacity in that case.
    std::vector<std::pair<std::string, double>> impacts;
    for (const auto &runner : runners_) {
        try {
            const auto &[code, doc] = JSON_HTTP_REQUEST(runner + "/v1/capacity");
            if (code != HTTPCode::Ok) {
                continue;
            }

            auto curCap = capacityFromJSON(doc["current"]);
            auto totCap = capacityFromJSON(doc["total"]);

            ssize_t cores = curCap.cores < 0 ? totCap.cores : curCap.cores;
            ssize_t memoryMB =
                curCap.memoryMB < 0 ? totCap.memoryMB : curCap.memoryMB;

            // Cast before dividing: integer division would truncate every
            // impact below 1.0 to zero and make the ranking meaningless.
            double impact =
                std::max(static_cast<double>(taskUsed.cores) / cores,
                         static_cast<double>(taskUsed.memoryMB) / memoryMB);
            impacts.emplace_back(runner, impact);
        }
        catch (const std::exception &_) {
            continue;
        }
    }

    if (impacts.empty())
        throw std::runtime_error("No runners available for execution");

    // Pick the runner on which this task has the smallest relative impact.
    auto cit = impacts.begin();
    for (auto it = impacts.begin(); it != impacts.end(); ++it) {
        if (it->second < cit->second)
            cit = it;
    }

    // Register the task with the monitor; the chosen runner's URL is
    // recorded in the RunningTask so the poller knows whom to ask.
    RunningTask rt{
        .prom{}, .runID = runID, .taskName = taskName, .runnerURL = cit->first};

    auto fut = rt.prom.get_future();

    std::lock_guard<std::mutex> lock(rtGuard_);
    runningTasks_.emplace(std::make_pair(runID, taskName), std::move(rt));

    return fut;
}
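
To make the ranking concrete: a task declaring cores = 4 and memoryMB = 8192 scores max(4/8, 8192/16384) = 0.5 against a runner with 8 cores and 16384 MB free, and max(4/4, 8192/8192) = 1.0 against one with 4 cores and 8192 MB free, so the first, less-loaded runner wins.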

bool DaggyRunnerTaskExecutor::stop(DAGRunID runID, const std::string &taskName)
{
    // Remote cancellation is not implemented yet.
    (void)runID;
    (void)taskName;
    return true;
}

void DaggyRunnerTaskExecutor::addRunner(const std::string &url)
{
    runners_.insert(url);
}

void DaggyRunnerTaskExecutor::monitor()
{
    while (running_) {
        {
            std::vector<std::pair<DAGRunID, std::string>> resolvedJobs;

            std::lock_guard<std::mutex> lock(rtGuard_);
            for (auto &[taskID, task] : runningTasks_) {
                try {
                    const auto &[code, json] = JSON_HTTP_REQUEST(
                        task.runnerURL + "/v1/task/" + std::to_string(taskID.first) +
                        "/" + taskID.second);
                    if (code != HTTPCode::Ok) {
                        AttemptRecord record{
                            .rc = -1, .executorLog = "Unable to query runner for progress"};
                        task.prom.set_value(std::move(record));
                        resolvedJobs.emplace_back(taskID);
                        continue;
                    }

                    if (json["state"] == "COMPLETED") {
                        task.prom.set_value(attemptRecordFromJSON(json["attempt"]));
                        resolvedJobs.emplace_back(taskID);
                    }
                }
                catch (std::runtime_error &e) {
                    continue;
                }
            }

            // Erase resolved tasks only after the loop above finishes;
            // extracting from the map mid-iteration would invalidate the
            // range-for's iterator.
            for (const auto &tid : resolvedJobs) {
                runningTasks_.extract(tid);
            }
        }
        std::this_thread::sleep_for(std::chrono::seconds(1));
    }
}
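
execute() and monitor() together assume the runner agent speaks a small REST protocol; the response shapes below are inferred from the fields this file reads and are not a documented daggyr contract:

GET {runner}/v1/capacity
    -> { "current": { "cores": ..., "memoryMB": ... },
         "total":   { "cores": ..., "memoryMB": ... } }

GET {runner}/v1/task/{runID}/{taskName}
    -> { "state": "COMPLETED", "attempt": { ...attempt record... } }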
@@ -9,6 +9,30 @@

using namespace daggy::executors::task;

namespace daggy::executors::task::forking_executor {
    void validateTaskParameters(const daggy::ConfigValues &job)
    {
        // command or commandString is required
        if (job.count("command")) {
            if (!std::holds_alternative<daggy::Command>(job.at("command")))
                throw std::runtime_error(R"(command must be an array of strings)");
        }
        else {
            if (job.count("commandString") == 0) {
                throw std::runtime_error(
                    R"(command or commandString must be defined.)");
            }
            if (!std::holds_alternative<std::string>(job.at("commandString")))
                throw std::runtime_error(R"(commandString must be a string)");
        }

        if (job.count("environment")) {
            if (!std::holds_alternative<daggy::Command>(job.at("environment")))
                throw std::runtime_error(R"(environment must be an array of strings)");
        }
    }
} // namespace daggy::executors::task::forking_executor
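
For instance, a job whose command is the string array ["echo", "hello"], optionally with an environment array such as ["GREETING=hi"], passes this validator; a job that supplies neither command nor commandString, or supplies either with the wrong variant type, throws.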

std::string slurp(int fd)
{
    std::string result;
@@ -190,23 +214,7 @@ daggy::AttemptRecord ForkingTaskExecutor::runTask(const Task &task,

bool ForkingTaskExecutor::validateTaskParameters(const ConfigValues &job)
{
-    // command or commandString is required
-    if (job.count("command")) {
-        if (!std::holds_alternative<Command>(job.at("command")))
-            throw std::runtime_error(R"(command must be an array of strings)");
-    }
-    else {
-        if (job.count("commandString") == 0) {
-            throw std::runtime_error(R"(command or commandString must be defined.)");
-        }
-        if (!std::holds_alternative<std::string>(job.at("commandString")))
-            throw std::runtime_error(R"(commandString must be a string)");
-    }
-
-    if (job.count("environment")) {
-        if (!std::holds_alternative<Command>(job.at("environment")))
-            throw std::runtime_error(R"(environment must be an array of strings)");
-    }
+    forking_executor::validateTaskParameters(job);

    return true;
}