Changing how execution parallelism is handled, so that different executors can implement their own idea of parallelism.
Ian Roddis
2021-09-15 13:05:04 -03:00
parent 4562ac755e
commit a6a7501d12
13 changed files with 167 additions and 104 deletions
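
The common thread in the diffs below is the call-site change: execute() now hands back a future, and the caller blocks only at .get(). A minimal sketch of that shape, with a hypothetical Record and InlineExecutor standing in for daggy's types (only the field names rc, outputLog, errorLog and the recFuture.get() idiom are taken from the tests):

    // Minimal sketch, not daggy's actual API: Record and InlineExecutor are
    // hypothetical; only the record field names and the .get() pattern come
    // from the updated tests.
    #include <future>
    #include <iostream>
    #include <string>

    struct Record {
        int rc{};
        std::string outputLog;
        std::string errorLog;
    };

    struct InlineExecutor {
        // New shape: execute() returns a future; the executor decides how
        // (and how concurrently) the work runs before fulfilling it.
        std::future<Record> execute(const std::string& /*name*/) {
            std::promise<Record> p;
            p.set_value(Record{0, "hello\n", ""});  // run inline: future is already ready
            return p.get_future();
        }
    };

    int main() {
        InlineExecutor ex;
        auto recFuture = ex.execute("command");  // returns immediately
        auto rec = recFuture.get();              // caller blocks only here
        std::cout << rec.rc << ' ' << rec.outputLog;
    }

Returning an already-satisfied future keeps a purely synchronous executor valid under the new interface.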

View File

@@ -16,7 +16,8 @@ TEST_CASE("forking_executor", "[forking_executor]") {
REQUIRE(ex.validateTaskParameters(task.job));
-auto rec = ex.execute("command", task);
+auto recFuture = ex.execute("command", task);
+auto rec = recFuture.get();
REQUIRE(rec.rc == 0);
REQUIRE(rec.outputLog.size() >= 6);
@@ -27,7 +28,8 @@ TEST_CASE("forking_executor", "[forking_executor]") {
daggy::Task task{.job{
{"command", daggy::executors::task::ForkingTaskExecutor::Command{"/usr/bin/expr", "1", "+", "+"}}}};
-auto rec = ex.execute("command", task);
+auto recFuture = ex.execute("command", task);
+auto rec = recFuture.get();
REQUIRE(rec.rc == 2);
REQUIRE(rec.errorLog.size() >= 20);
@@ -45,7 +47,8 @@ TEST_CASE("forking_executor", "[forking_executor]") {
daggy::Task task{.job{
{"command", daggy::executors::task::ForkingTaskExecutor::Command{"/usr/bin/cat", bigFile}}}};
-auto rec = ex.execute("command", task);
+auto recFuture = ex.execute("command", task);
+auto rec = recFuture.get();
REQUIRE(rec.rc == 0);
REQUIRE(rec.outputLog.size() == std::filesystem::file_size(bigFile));
@@ -80,4 +83,4 @@ TEST_CASE("forking_executor", "[forking_executor]") {
REQUIRE(tasks.size() == 4);
}
}
}
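
Nothing in these tests pins down when the forked command actually runs; they only require that the returned future eventually yields a record. As a sketch of the flexibility the new shape buys (not the real ForkingTaskExecutor, and AsyncExecutor/Record are hypothetical names), the same execute() signature can be satisfied by deferring each task to its own thread via std::async:

    // Hedged sketch assuming only the future-returning execute() shape
    // exercised above; not the project's ForkingTaskExecutor.
    #include <chrono>
    #include <future>
    #include <string>
    #include <thread>

    struct Record {
        int rc{};
        std::string outputLog;
        std::string errorLog;
    };

    struct AsyncExecutor {
        std::future<Record> execute(const std::string& /*name*/) {
            // The executor, not the caller, picks the parallelism policy:
            // here every task gets its own thread via std::async.
            return std::async(std::launch::async, [] {
                std::this_thread::sleep_for(std::chrono::milliseconds(10));
                return Record{0, "done\n", ""};
            });
        }
    };

    int main() {
        AsyncExecutor ex;
        auto f1 = ex.execute("command");
        auto f2 = ex.execute("command");  // both tasks can run concurrently
        return f1.get().rc + f2.get().rc;
    }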

View File

@@ -15,7 +15,7 @@ namespace fs = std::filesystem;
#ifdef DAGGY_ENABLE_SLURM
TEST_CASE("slurm_execution", "[slurm_executor]") {
-daggy::executors::task::SlurmTaskExecutor ex(10);
+daggy::executors::task::SlurmTaskExecutor ex;
daggy::ConfigValues defaultJobValues{
{"minCPUs", "1"},
@@ -37,7 +37,8 @@ TEST_CASE("slurm_execution", "[slurm_executor]") {
REQUIRE(ex.validateTaskParameters(task.job));
-auto rec = ex.execute("command", task);
+auto recFuture = ex.execute("command", task);
+auto rec = recFuture.get();
REQUIRE(rec.rc == 0);
REQUIRE(rec.outputLog.size() >= 6);
@@ -49,7 +50,8 @@ TEST_CASE("slurm_execution", "[slurm_executor]") {
{"command", daggy::executors::task::SlurmTaskExecutor::Command{"/usr/bin/expr", "1", "+", "+"}}}};
task.job.merge(defaultJobValues);
-auto rec = ex.execute("command", task);
+auto recFuture = ex.execute("command", task);
+auto rec = recFuture.get();
REQUIRE(rec.rc != 0);
REQUIRE(rec.errorLog.size() >= 20);
@@ -68,7 +70,8 @@ TEST_CASE("slurm_execution", "[slurm_executor]") {
{"command", daggy::executors::task::SlurmTaskExecutor::Command{"/usr/bin/cat", bigFile}}}};
task.job.merge(defaultJobValues);
-auto rec = ex.execute("command", task);
+auto recFuture = ex.execute("command", task);
+auto rec = recFuture.get();
REQUIRE(rec.rc == 0);
REQUIRE(rec.outputLog.size() == std::filesystem::file_size(bigFile));
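
Besides the future-based call sites, the Slurm test also drops the worker count from the constructor: ex(10) becomes ex. That matches the commit message, since the caller no longer dictates the degree of parallelism. A hypothetical sketch of that constructor pattern (SketchExecutor is an invented name; the real SlurmTaskExecutor's defaults are not visible in this diff):

    // Illustration of the constructor change only; the real executor's
    // internals are not part of this commit.
    #include <cstddef>

    class SketchExecutor {
    public:
        // Before: the caller chose the concurrency, e.g. SketchExecutor ex(10);
        // explicit SketchExecutor(std::size_t maxParallel);

        // After: default-constructible; the executor (or the backend
        // scheduler, e.g. Slurm itself) owns the parallelism decision.
        SketchExecutor() = default;

    private:
        std::size_t maxParallel_{0};  // 0 = no caller-imposed cap
    };

    int main() { SketchExecutor ex; (void)ex; }  // usage mirrors the updated test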

View File

@@ -38,4 +38,25 @@ TEST_CASE("threadpool", "[threadpool]") {
for (auto &r: res) r.get();
REQUIRE(cnt == 100);
}
SECTION("parallel") {
std::vector<std::future<void>> res;
using namespace std::chrono_literals;
std::atomic<uint32_t> maxCnt{0};
for (size_t i = 0; i < 100; ++i)
res.push_back(tp.addTask([&cnt,&maxCnt, i]() {
auto delay = 20ms;
uint32_t current = cnt.fetch_add(1);
delay += i * 1ms;
std::this_thread::sleep_for(delay);
if (current > maxCnt) {
maxCnt = current;
}
cnt--;
return;
}));
for (auto &r: res) r.get();
REQUIRE(maxCnt > 1);
}
}
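
The new "parallel" section tracks how many tasks are in flight at once and requires the peak to exceed one, which can only happen if the pool genuinely overlaps tasks. The same intent can be reproduced standalone; the sketch below uses std::async rather than the project's ThreadPool, and a compare-and-swap loop for the peak instead of the test's plain assignment, so it is an illustration of the check rather than a copy of it:

    // Standalone reproduction of the overlap check; ThreadPool is replaced
    // by std::async, so this only mirrors the test's intent.
    #include <atomic>
    #include <cassert>
    #include <chrono>
    #include <cstddef>
    #include <cstdint>
    #include <future>
    #include <thread>
    #include <vector>

    int main() {
        std::atomic<uint32_t> inFlight{0}, maxInFlight{0};
        std::vector<std::future<void>> res;
        for (std::size_t i = 0; i < 100; ++i)
            res.push_back(std::async(std::launch::async, [&inFlight, &maxInFlight, i] {
                uint32_t current = inFlight.fetch_add(1) + 1;
                // Record the peak number of concurrently running tasks.
                uint32_t prev = maxInFlight.load();
                while (prev < current && !maxInFlight.compare_exchange_weak(prev, current)) {}
                std::this_thread::sleep_for(std::chrono::milliseconds(20 + i));
                inFlight.fetch_sub(1);
            }));
        for (auto &r : res) r.get();
        assert(maxInFlight > 1);  // tasks must have overlapped
    }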