threads: elastically scale-up
csegarragonz committed Apr 12, 2024
1 parent d7d6b99 commit 7df8133
Showing 2 changed files with 46 additions and 0 deletions.
43 changes: 43 additions & 0 deletions src/planner/Planner.cpp
@@ -23,6 +23,14 @@ namespace faabric::planner {
// Utility Functions
// ----------------------

static int availableSlots(std::shared_ptr<Host> host)
{
    int availableSlots = host->slots() - host->usedslots();
    assert(availableSlots >= 0);

    return availableSlots;
}

static void claimHostSlots(std::shared_ptr<Host> host, int slotsToClaim = 1)
{
    host->set_usedslots(host->usedslots() + slotsToClaim);
@@ -606,9 +614,42 @@ Planner::callBatch(std::shared_ptr<BatchExecuteRequest> req)
    // Make a copy of the host-map state to make sure the scheduling process
    // does not modify it
    auto hostMapCopy = convertToBatchSchedHostMap(state.hostMap);
    bool isScaleChange =
      decisionType == faabric::batch_scheduler::DecisionType::SCALE_CHANGE;
    bool isDistChange =
      decisionType == faabric::batch_scheduler::DecisionType::DIST_CHANGE;

    // For a SCALE_CHANGE decision (i.e. fork) with the elastic flag set, we
    // want to scale up to as many available cores as possible on the app's
    // main host
    if (isScaleChange && req->elasticscalehint()) {
        SPDLOG_INFO("App {} requested to elastically scale up", appId);
        auto oldDec = state.inFlightReqs.at(appId).second;
        auto mainHost = oldDec->hosts.at(0);

        int numAvail = availableSlots(state.hostMap.at(mainHost));
        int numRequested = req->messages_size();
        int lastMsgIdx = req->messages(numRequested - 1).groupidx();
        for (int itr = 0; itr < (numAvail - numRequested); itr++) {
            // Differentiate between the position in the message array (itr)
            // and the new group index. Usually, in a fork, they would be
            // offset by one
            int msgIdx = lastMsgIdx + itr + 1;
            SPDLOG_DEBUG(
              "Adding elastically scaled-up msg idx {} (app: {})", msgIdx, appId);

            // To add a new message, copy from the last one, and update the
            // indexes
            *req->add_messages() = req->messages(numRequested - 1);
            req->mutable_messages(numRequested + itr)->set_appidx(msgIdx);
            req->mutable_messages(numRequested + itr)->set_groupidx(msgIdx);
        }

        if (numAvail > numRequested) {
            SPDLOG_INFO("Elastically scaled up app {} ({} -> {})",
                        appId,
                        numRequested,
                        numAvail);
        } else {
            SPDLOG_INFO("Decided NOT to elastically scale up app {}", appId);
        }
    }

    // For a DIST_CHANGE decision (i.e. migration) we want to try to improve
    // on the old decision (we don't care about the one we send), so we make
    // sure we are scheduling the same messages from the old request
@@ -840,6 +881,8 @@ void Planner::dispatchSchedulingDecision(
            hostRequests[thisHost]->set_singlehost(isSingleHost);
            // Propagate the single host hint
            hostRequests[thisHost]->set_singlehosthint(req->singlehosthint());
            // Propagate the elastic scaling hint
            hostRequests[thisHost]->set_elasticscalehint(req->elasticscalehint());
        }

        *hostRequests[thisHost]->add_messages() = msg;
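To make the scale-up arithmetic above concrete, here is a standalone sketch that replays the loop with plain values (a plain struct stands in for the protobuf message type; this is an illustration, not faabric code). Starting from a fork of two messages whose last group index is 1, with four free slots on the main host, the loop appends two copies with app/group indexes 2 and 3.

#include <cassert>
#include <vector>

// Plain stand-in for the protobuf message type (illustration only)
struct Msg
{
    int appIdx;
    int groupIdx;
};

int main()
{
    // A fork of two messages, as in a SCALE_CHANGE request
    std::vector<Msg> messages = { { 0, 0 }, { 1, 1 } };

    int numAvail = 4; // free slots on the app's main host
    int numRequested = static_cast<int>(messages.size());
    int lastMsgIdx = messages.back().groupIdx;

    for (int itr = 0; itr < (numAvail - numRequested); itr++) {
        // New group index, offset by one from the last message in the fork
        int msgIdx = lastMsgIdx + itr + 1;

        // Copy the last original message and update its indexes
        Msg newMsg = messages.at(numRequested - 1);
        newMsg.appIdx = msgIdx;
        newMsg.groupIdx = msgIdx;
        messages.push_back(newMsg);
    }

    // The fork grew from 2 to 4 messages (group indexes 0..3)
    assert(messages.size() == 4);
    assert(messages.back().groupIdx == 3);

    return 0;
}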
3 changes: 3 additions & 0 deletions src/proto/faabric.proto
@@ -54,6 +54,9 @@ message BatchExecuteRequest {
    // Hint set by the user that this execution should all be in a single
    // host
    bool singleHostHint = 11;

    // Hint set by the user to make scale-up requests elastic
    bool elasticScaleHint = 12;
}

message BatchExecuteRequestStatus {
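As a usage sketch for the new field, a caller could opt in to elastic scale-up when building a threads request. The batchExecFactory helper and the THREADS enum value are assumptions based on faabric's conventions; the generated set_elasticscalehint setter matches the accessor used in Planner.cpp above.

#include <faabric/proto/faabric.pb.h>
#include <faabric/util/func.h>

void submitElasticFork()
{
    // Build a two-message threads fork (batchExecFactory is assumed to be
    // faabric's request factory helper)
    auto req = faabric::util::batchExecFactory("demo", "thread_fn", 2);
    req->set_type(faabric::BatchExecuteRequest::THREADS);

    // Opt in to elastic scale-up: the planner may then grow the fork to
    // fill the free slots on the app's main host
    req->set_elasticscalehint(true);
}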
