Adjusting capacity impact calculation to yield a more even distribution of jobs

@@ -159,8 +159,6 @@ namespace daggy::daggyr {
                             .resourcesUsed = resourcesUsed});
     }
 
-    std::cout << "Enqueued " << runID << " / " << taskName << std::endl;
-
     response.send(Pistache::Http::Code::Ok, "");
   }
 
@@ -196,9 +194,9 @@
         curCapacity_.cores += it->second.resourcesUsed.cores;
         curCapacity_.memoryMB += it->second.resourcesUsed.memoryMB;
       }
-      pending_.extract(it);
       std::cout << "Resolved " << it->first.first << " / "
                 << it->first.second << std::endl;
+      pending_.extract(it);
     }
     else {
       payload = R"({ "state": "RUNNING" })";
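
For context on the hunk above: std::map::extract detaches the node an iterator points to, so any reads through the iterator (here it->first.first and it->first.second) have to happen before the call. A minimal sketch, assuming a pending map keyed by a (runID, taskName) pair rather than the project's actual types:

    #include <iostream>
    #include <map>
    #include <string>
    #include <utility>

    int main() {
      // Hypothetical stand-in for the runner's pending map, keyed by (runID, taskName).
      std::map<std::pair<std::string, std::string>, int> pending{
          {{"run-1", "task-a"}, 0}};

      auto it = pending.find({"run-1", "task-a"});
      if (it != pending.end()) {
        // Read everything needed through `it` first; extract() removes the node,
        // so dereferencing the iterator afterwards would be undefined behaviour.
        std::cout << "Resolved " << it->first.first << " / " << it->first.second
                  << std::endl;
        pending.extract(it);
      }
    }
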
@@ -2,6 +2,8 @@
 
 #include <rapidjson/document.h>
 
+#include <random>
+
 #include "TaskExecutor.hpp"
 
 namespace rj = rapidjson;
@@ -146,11 +146,11 @@ std::future<AttemptRecord> DaggyRunnerTaskExecutor::execute(
       auto curCap = capacityFromJSON(doc["current"]);
       auto totCap = capacityFromJSON(doc["total"]);
 
-      double cores = curCap.cores < 0 ? totCap.cores : curCap.cores;
-      double memoryMB = curCap.memoryMB < 0 ? totCap.memoryMB : curCap.memoryMB;
+      double cores = (curCap.cores - taskUsed.cores);
+      double memoryMB = (curCap.memoryMB - taskUsed.memoryMB);
 
       double impact =
-          std::max(taskUsed.cores / cores, taskUsed.memoryMB / memoryMB);
+          std::min(cores / totCap.cores, memoryMB / totCap.memoryMB);
       impacts.emplace_back(runner, impact);
     }
     catch (const std::exception &_) {
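
The new metric above is the fraction of a runner's total capacity that would still be free after placing the task, limited by the scarcer resource. A minimal standalone sketch, assuming a plain Capacity struct with the same field names (the project's real types are not shown in this diff):

    #include <algorithm>

    // Hypothetical Capacity struct mirroring the fields used above.
    struct Capacity {
      double cores;
      double memoryMB;
    };

    // Fraction of the runner's total capacity that would remain after placing
    // the task; the scarcer resource decides, hence std::min.
    double impactAfterPlacement(const Capacity &cur, const Capacity &tot,
                                const Capacity &taskUsed) {
      double cores = cur.cores - taskUsed.cores;
      double memoryMB = cur.memoryMB - taskUsed.memoryMB;
      return std::min(cores / tot.cores, memoryMB / tot.memoryMB);
    }

A mostly idle runner scores close to 1 and a saturated one close to 0 (or negative if the task would not fit), whereas the old formula measured the share of free capacity the task itself would consume. Preferring the highest score steers work toward the runners with the most relative headroom, which is presumably the more even distribution the commit message aims for.
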
@@ -167,15 +167,12 @@ std::future<AttemptRecord> DaggyRunnerTaskExecutor::execute(
     return fut;
   }
 
-  auto cit = impacts.begin();
-  for (auto it = impacts.begin(); it != impacts.end(); ++it) {
-    std::cout << it->first << " impact is " << it->second << std::endl;
-    if (it->second < cit->second)
-      cit = it;
-  }
+  std::sort(impacts.begin(), impacts.end());
+
+  auto runner = impacts.back();
 
   std::stringstream ss;
-  ss << cit->first << "/v1/task/" << runID << "/" << taskName;
+  ss << runner.first << "/v1/task/" << runID << "/" << taskName;
   auto url = ss.str();
 
   const auto response = HTTP_REQUEST(url, taskToJSON(task), "POST");
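
A hedged sketch of the selection step, assuming impacts is a std::vector of (runner URL, impact) pairs as the emplace_back(runner, impact) call and the later runner.first suggest; comparing on the impact value explicitly keeps the choice independent of how the URL strings happen to order:

    #include <algorithm>
    #include <string>
    #include <utility>
    #include <vector>

    // Sketch only: picks the runner whose placement leaves the largest
    // fraction of total capacity free.
    std::string pickRunner(
        const std::vector<std::pair<std::string, double>> &impacts) {
      if (impacts.empty()) return {};  // the early return above covers this case
      auto best = std::max_element(
          impacts.begin(), impacts.end(),
          [](const auto &a, const auto &b) { return a.second < b.second; });
      return best->first;
    }
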
@@ -185,7 +182,7 @@ std::future<AttemptRecord> DaggyRunnerTaskExecutor::execute(
   RunningTask rt{.prom{},
                  .runID = runID,
                  .taskName = taskName,
-                 .runnerURL = cit->first,
+                 .runnerURL = runner.first,
                  .retries = 3};
 
   auto fut = rt.prom.get_future();
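
The promise/future handoff around RunningTask, sketched with hypothetical stand-ins for AttemptRecord and RunningTask (only the field names appear in the diff): the executor hands the future back to the caller, and the side that later resolves the task presumably fulfils the promise.

    #include <future>
    #include <string>

    struct AttemptRecord {};  // hypothetical placeholder

    // Hypothetical mirror of the bookkeeping struct initialized above.
    struct RunningTask {
      std::promise<AttemptRecord> prom;
      std::string runID;
      std::string taskName;
      std::string runnerURL;
      int retries;
    };

    int main() {
      RunningTask rt{.prom{},
                     .runID = "run-1",
                     .taskName = "task-a",
                     .runnerURL = "http://runner.example:8080",
                     .retries = 3};

      auto fut = rt.prom.get_future();     // caller waits on this future
      rt.prom.set_value(AttemptRecord{});  // fulfilled when the task resolves
      fut.get();
    }
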