initial batching implementation
qimiko committed Sep 7, 2024
1 parent 7c92d1c commit 766fa7c
Showing 2 changed files with 87 additions and 20 deletions.
96 changes: 77 additions & 19 deletions loader/src/server/Server.cpp
@@ -789,30 +789,14 @@ ServerRequest<std::optional<ServerModUpdate>> server::checkUpdates(Mod const* mo
);
}

ServerRequest<std::vector<ServerModUpdate>> server::checkAllUpdates(bool useCache) {
if (useCache) {
return getCache<checkAllUpdates>().get();
}

auto modIDs = ranges::map<std::vector<std::string>>(
Loader::get()->getAllMods(),
[](auto mod) { return mod->getID(); }
);

// if there are no mods, the request would just be empty anyway
if (modIDs.empty()) {
// you would think it could infer like literally anything
return ServerRequest<std::vector<ServerModUpdate>>::immediate(
Ok<std::vector<ServerModUpdate>>({})
);
}

ServerRequest<std::vector<ServerModUpdate>> server::batchedCheckUpdates(std::vector<std::string> const& batch) {
auto req = web::WebRequest();
req.userAgent(getServerUserAgent());
req.param("platform", GEODE_PLATFORM_SHORT_IDENTIFIER);
req.param("gd", GEODE_GD_VERSION_STR);
req.param("geode", Loader::get()->getVersion().toNonVString());
req.param("ids", ranges::join(modIDs, ";"));

req.param("ids", ranges::join(batch, ";"));
return req.get(formatServerURL("/mods/updates")).map(
[](web::WebResponse* response) -> Result<std::vector<ServerModUpdate>, ServerError> {
if (response->ok()) {
@@ -836,6 +820,80 @@ ServerRequest<std::vector<ServerModUpdate>> server::checkAllUpdates(bool useCach
);
}

void server::queueBatches(
ServerRequest<std::vector<ServerModUpdate>>::PostResult const resolve,
std::shared_ptr<std::vector<std::vector<std::string>>> const batches,
std::shared_ptr<std::vector<ServerModUpdate>> accum
) {
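// process the batches back to front: send one request, append its results to accum,
// then recurse on the remaining batches; the last remaining batch resolves the outer
// request with everything accumulated (an error from any batch resolves it early)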
// we have to do the copy here, or else our values die
batchedCheckUpdates(batches->back()).listen([resolve, batches, accum](auto result) {
if (result->ok()) {
auto serverValues = result->unwrap();

// we should probably do deduplication here as well
accum->reserve(accum->size() + serverValues.size());
std::copy(serverValues.begin(), serverValues.end(), std::back_inserter(*accum));

if (batches->size() > 1) {
batches->pop_back();
queueBatches(resolve, batches, accum);
}
else {
resolve(Ok(*accum));
}
}
else {
resolve(*result);
}
});
}

ServerRequest<std::vector<ServerModUpdate>> server::checkAllUpdates(bool useCache) {
if (useCache) {
return getCache<checkAllUpdates>().get();
}

auto modIDs = ranges::map<std::vector<std::string>>(
Loader::get()->getAllMods(),
[](auto mod) { return mod->getID(); }
);

// if there are no mods, the request would just be empty anyway
if (modIDs.empty()) {
// you would think it could infer like literally anything
return ServerRequest<std::vector<ServerModUpdate>>::immediate(
Ok<std::vector<ServerModUpdate>>({})
);
}

auto modBatches = std::make_shared<std::vector<std::vector<std::string>>>();
auto modCount = modIDs.size();
std::size_t maxMods = 200u; // this quite literally affects 0.03% of users

if (modCount <= maxMods) {
// no tricks needed
return batchedCheckUpdates(modIDs);
}

// roughly even out the batch sizes, so a request with 230 mods is split into two requests of about 115 mods each
auto batchCount = modCount / maxMods + 1;
auto maxBatchSize = modCount / batchCount + 1;
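// e.g. 230 mods: batchCount = 230 / 200 + 1 = 2, maxBatchSize = 230 / 2 + 1 = 116,
// so the loop below produces batches of 116 and 114 ids (integer division throughout)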

for (std::size_t i = 0u; i < modCount; i += maxBatchSize) {
auto end = std::min(modCount, i + maxBatchSize);
modBatches->emplace_back(modIDs.begin() + i, modIDs.begin() + end);
}

// chain requests to avoid doing too many large requests at once
return ServerRequest<std::vector<ServerModUpdate>>::runWithCallback(
[modBatches](auto finish, auto progress, auto hasBeenCancelled) {
auto accum = std::make_shared<std::vector<ServerModUpdate>>();
queueBatches(finish, modBatches, std::move(accum));
},
"Mod Update Check"
);
}

void server::clearServerCaches(bool clearGlobalCaches) {
getCache<&getMods>().clear();
getCache<&getMod>().clear();
11 changes: 10 additions & 1 deletion loader/src/server/Server.hpp
@@ -7,6 +7,7 @@
#include <chrono>
#include <matjson.hpp>
#include <vector>
#include <span>

using namespace geode::prelude;

@@ -151,7 +152,15 @@ namespace server {
ServerRequest<std::unordered_set<std::string>> getTags(bool useCache = true);

ServerRequest<std::optional<ServerModUpdate>> checkUpdates(Mod const* mod);

ServerRequest<std::vector<ServerModUpdate>> batchedCheckUpdates(std::vector<std::string> const& batch);
void queueBatches(
ServerRequest<std::vector<ServerModUpdate>>::PostResult const finish,
std::shared_ptr<std::vector<std::vector<std::string>>> const batches,
std::shared_ptr<std::vector<ServerModUpdate>> const accum
);

ServerRequest<std::vector<ServerModUpdate>> checkAllUpdates(bool useCache = true);

void clearServerCaches(bool clearGlobalCaches = false);
}
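
Below is a minimal sketch of how a caller might consume the batched update check, mirroring the listen/Result pattern that queueBatches itself uses above; the callback body is illustrative only and is not part of this commit:

server::checkAllUpdates().listen([](auto* result) {
    if (result->ok()) {
        for (auto const& update : result->unwrap()) {
            // handle each ServerModUpdate, e.g. queue the mod for updating
        }
    }
    else {
        // an error from any batch resolves the whole chained request with that ServerError
    }
});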
