diff --git a/.gitignore b/.gitignore
index b5354ab..1d143aa 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,4 +2,5 @@ __pycache__/
 build/
 .ipynb_checkpoints/
 .vscode/
+.idea/
 kubernetes/setup/secret.txt
\ No newline at end of file
diff --git a/hpc/lib/umbridge.h b/hpc/lib/umbridge.h
index c04e294..c614436 100644
--- a/hpc/lib/umbridge.h
+++ b/hpc/lib/umbridge.h
@@ -1,126 +1,104 @@
 #ifndef UMBRIDGE
 #define UMBRIDGE
 
+// #define LOGGING
+
 // Increase timeout to allow for long-running models.
 // This should be (to be on the safe side) significantly greater than the maximum time your model may take
+
 #define CPPHTTPLIB_READ_TIMEOUT_SECOND 7 * 24 * 60 * 60
 
 #include <string>
 #include <vector>
+
 #include "json.hpp"
 #include "httplib.h"
 
 using json = nlohmann::json;
 
-namespace umbridge
-{
+namespace umbridge {
 
-    class Model
-    {
+    class Model {
     public:
         Model(std::string name) : name(name) {}
 
-        virtual std::vector<std::size_t> GetInputSizes(const json &config_json = json::parse("{}")) const = 0;
-        virtual std::vector<std::size_t> GetOutputSizes(const json &config_json = json::parse("{}")) const = 0;
+        virtual std::vector<std::size_t> GetInputSizes(const json& config_json = json::parse("{}")) const = 0;
+        virtual std::vector<std::size_t> GetOutputSizes(const json& config_json = json::parse("{}")) const = 0;
 
-        virtual std::vector<std::vector<double>> Evaluate(const std::vector<std::vector<double>> &inputs,
-                                                          json config_json = json::parse("{}"))
-        {
-            (void)inputs;
-            (void)config_json; // Avoid unused argument warnings
+        virtual std::vector<std::vector<double>> Evaluate(const std::vector<std::vector<double>>& inputs,
+                                              json config_json = json::parse("{}")) {
+            (void)inputs; (void)config_json; // Avoid unused argument warnings
             throw std::runtime_error("Evaluate was called, but not implemented by model!");
         }
 
         virtual std::vector<double> Gradient(unsigned int outWrt,
-                                             unsigned int inWrt,
-                                             const std::vector<std::vector<double>> &inputs,
-                                             const std::vector<double> &sens,
-                                             json config_json = json::parse("{}"))
-        {
-            (void)outWrt;
-            (void)inWrt;
-            (void)inputs;
-            (void)sens;
-            (void)config_json; // Avoid unused argument warnings
+                                     unsigned int inWrt,
+                                     const std::vector<std::vector<double>>& inputs,
+                                     const std::vector<double>& sens,
+                                     json config_json = json::parse("{}")) {
+            (void)outWrt; (void)inWrt; (void)inputs; (void)sens; (void)config_json; // Avoid unused argument warnings
             throw std::runtime_error("Gradient was called, but not implemented by model!");
         }
 
         virtual std::vector<double> ApplyJacobian(unsigned int outWrt,
-                                                  unsigned int inWrt,
-                                                  const std::vector<std::vector<double>> &inputs,
-                                                  const std::vector<double> &vec,
-                                                  json config_json = json::parse("{}"))
-        {
-            (void)outWrt;
-            (void)inWrt;
-            (void)inputs;
-            (void)vec;
-            (void)config_json; // Avoid unused argument warnings
+                                     unsigned int inWrt,
+                                     const std::vector<std::vector<double>>& inputs,
+                                     const std::vector<double>& vec,
+                                     json config_json = json::parse("{}")) {
+            (void)outWrt; (void)inWrt; (void)inputs; (void)vec; (void)config_json; // Avoid unused argument warnings
             throw std::runtime_error("ApplyJacobian was called, but not implemented by model!");
         }
 
         virtual std::vector<double> ApplyHessian(unsigned int outWrt,
-                                                 unsigned int inWrt1,
-                                                 unsigned int inWrt2,
-                                                 const std::vector<std::vector<double>> &inputs,
-                                                 const std::vector<double> &sens,
-                                                 const std::vector<double> &vec,
-                                                 json config_json = json::parse("{}"))
-        {
-            (void)outWrt;
-            (void)inWrt1;
-            (void)inWrt2;
-            (void)inputs;
-            (void)sens;
-            (void)vec;
-            (void)config_json; // Avoid unused argument warnings
+                                     unsigned int inWrt1,
+                                     unsigned int inWrt2,
+                                     const std::vector<std::vector<double>>& inputs,
+                                     const std::vector<double>& sens,
+                                     const std::vector<double>& vec,
+                                     json config_json = json::parse("{}")) {
+            (void)outWrt; (void)inWrt1; (void)inWrt2; (void)inputs; (void)sens; (void)vec; (void)config_json; // Avoid unused argument warnings
             throw std::runtime_error("ApplyHessian was called, but not implemented by model!");
         }
 
-        virtual bool SupportsEvaluate() { return false; }
-        virtual bool SupportsGradient() { return false; }
-        virtual bool SupportsApplyJacobian() { return false; }
-        virtual bool SupportsApplyHessian() { return false; }
+        virtual bool SupportsEvaluate() {return false;}
+        virtual bool SupportsGradient() {return false;}
+        virtual bool SupportsApplyJacobian() {return false;}
+        virtual bool SupportsApplyHessian() {return false;}
 
-        std::string GetName() const { return name; }
+        std::string GetName() const {return name;}
 
     protected:
         std::string name;
     };
 
-    std::vector<std::string> SupportedModels(std::string host, httplib::Headers headers = httplib::Headers())
-    {
+    std::vector<std::string> SupportedModels(std::string host, httplib::Headers headers = httplib::Headers()) {
        httplib::Client cli(host.c_str());
 
-        if (auto res = cli.Get("/Info", headers))
-        {
+        if (auto res = cli.Get("/Info", headers)) {
            json response = json::parse(res->body);
-            if (response.value("protocolVersion", 0) != 1.0)
+            if (response.value("protocolVersion",0) != 1.0)
                throw std::runtime_error("Model protocol version not supported!");
            return response["models"];
-        }
-        else
-        {
+
+        } else {
            throw std::runtime_error("GET Info failed with error type '" + to_string(res.error()) + "'");
        }
     }
 
     // Client-side Model connecting to a server for the actual evaluations etc.
-    class HTTPModel : public Model
-    {
+    class HTTPModel : public Model {
     public:
+
        HTTPModel(std::string host, std::string name, httplib::Headers headers = httplib::Headers())
-            : Model(name), cli(host.c_str()), headers(headers)
+        : Model(name), cli(host.c_str()), headers(headers)
        {
            // Check if requested model is available on server
            std::vector<std::string> models = SupportedModels(host, headers);
-            if (std::find(models.begin(), models.end(), name) == models.end())
-            {
+            if (std::find(models.begin(), models.end(), name) == models.end()) {
                std::string model_names = "";
-                for (auto &m : models)
-                {
+                for (auto& m : models) {
                    model_names += "'" + m + "' ";
                }
                throw std::runtime_error("Model " + name + " not found on server! Available models: " + model_names + ".");
Available models: " + model_names + "."); @@ -129,8 +107,7 @@ namespace umbridge json request_body; request_body["name"] = name; - if (auto res = cli.Post("/ModelInfo", headers, request_body.dump(), "application/json")) - { + if (auto res = cli.Post("/ModelInfo", headers, request_body.dump(), "application/json")) { json response = json::parse(res->body); json supported_features = response.at("support"); @@ -138,196 +115,164 @@ namespace umbridge supportsGradient = supported_features.value("Gradient", false); supportsApplyJacobian = supported_features.value("ApplyJacobian", false); supportsApplyHessian = supported_features.value("ApplyHessian", false); - } - else - { + } else { throw std::runtime_error("POST ModelInfo failed with error type '" + to_string(res.error()) + "'"); } } - std::vector GetInputSizes(const json &config_json = json::parse("{}")) const override - { + std::vector GetInputSizes(const json& config_json = json::parse("{}")) const override { json request_body; request_body["name"] = name; if (!config_json.empty()) request_body["config"] = config_json; - if (auto res = cli.Post("/InputSizes", headers, request_body.dump(), "application/json")) - { + if (auto res = cli.Post("/InputSizes", headers, request_body.dump(), "application/json")) { json response_body = parse_result_with_error_handling(res); std::vector outputvec = response_body["inputSizes"].get>(); return outputvec; - } - else - { + } else { throw std::runtime_error("POST InputSizes failed with error type '" + to_string(res.error()) + "'"); return std::vector(0); } } - std::vector GetOutputSizes(const json &config_json = json::parse("{}")) const override - { + std::vector GetOutputSizes(const json& config_json = json::parse("{}")) const override { json request_body; request_body["name"] = name; if (!config_json.empty()) request_body["config"] = config_json; - if (auto res = cli.Post("/OutputSizes", headers, request_body.dump(), "application/json")) - { + if (auto res = cli.Post("/OutputSizes", headers, request_body.dump(), "application/json")) { json response_body = parse_result_with_error_handling(res); std::vector outputvec = response_body["outputSizes"].get>(); return outputvec; - } - else - { + } else { throw std::runtime_error("POST OutputSizes failed with error type '" + to_string(res.error()) + "'"); return std::vector(0); } } - std::vector> Evaluate(const std::vector> &inputs, json config_json = json::parse("{}")) override - { + std::vector> Evaluate(const std::vector>& inputs, json config_json = json::parse("{}")) override { json request_body; request_body["name"] = name; - for (std::size_t i = 0; i < inputs.size(); i++) - { + request_body["input"] = json::parse("[]"); + for (std::size_t i = 0; i < inputs.size(); i++) { request_body["input"][i] = inputs[i]; } request_body["config"] = config_json; - if (auto res = cli.Post("/Evaluate", headers, request_body.dump(), "application/json")) - { + if (auto res = cli.Post("/Evaluate", headers, request_body.dump(), "application/json")) { json response_body = parse_result_with_error_handling(res); std::vector> outputs(response_body["output"].size()); - for (std::size_t i = 0; i < response_body["output"].size(); i++) - { + for (std::size_t i = 0; i < response_body["output"].size(); i++) { outputs[i] = response_body["output"][i].get>(); } return outputs; - } - else - { + } else { throw std::runtime_error("POST Evaluate failed with error type '" + to_string(res.error()) + "'"); } } std::vector Gradient(unsigned int outWrt, - unsigned int inWrt, - const std::vector> 
&inputs, - const std::vector &sens, - json config_json = json::parse("{}")) override + unsigned int inWrt, + const std::vector>& inputs, + const std::vector& sens, + json config_json = json::parse("{}")) override { json request_body; request_body["name"] = name; request_body["outWrt"] = outWrt; request_body["inWrt"] = inWrt; - for (std::size_t i = 0; i < inputs.size(); i++) - { + for (std::size_t i = 0; i < inputs.size(); i++) { request_body["input"][i] = inputs[i]; } request_body["sens"] = sens; request_body["config"] = config_json; - if (auto res = cli.Post("/Gradient", headers, request_body.dump(), "application/json")) - { + if (auto res = cli.Post("/Gradient", headers, request_body.dump(), "application/json")) { json response_body = parse_result_with_error_handling(res); return response_body["output"].get>(); - } - else - { + } else { throw std::runtime_error("POST Gradient failed with error type '" + to_string(res.error()) + "'"); } } std::vector ApplyJacobian(unsigned int outWrt, - unsigned int inWrt, - const std::vector> &inputs, - const std::vector &vec, - json config_json = json::parse("{}")) override - { + unsigned int inWrt, + const std::vector>& inputs, + const std::vector& vec, + json config_json = json::parse("{}")) override { json request_body; request_body["name"] = name; request_body["outWrt"] = outWrt; request_body["inWrt"] = inWrt; - for (std::size_t i = 0; i < inputs.size(); i++) - { + for (std::size_t i = 0; i < inputs.size(); i++) { request_body["input"][i] = inputs[i]; } request_body["vec"] = vec; request_body["config"] = config_json; - if (auto res = cli.Post("/ApplyJacobian", headers, request_body.dump(), "application/json")) - { + if (auto res = cli.Post("/ApplyJacobian", headers, request_body.dump(), "application/json")) { json response_body = parse_result_with_error_handling(res); return response_body["output"].get>(); - } - else - { + } else { throw std::runtime_error("POST ApplyJacobian failed with error type '" + to_string(res.error()) + "'"); } } std::vector ApplyHessian(unsigned int outWrt, - unsigned int inWrt1, - unsigned int inWrt2, - const std::vector> &inputs, - const std::vector &sens, - const std::vector &vec, - json config_json = json::parse("{}")) override - { + unsigned int inWrt1, + unsigned int inWrt2, + const std::vector>& inputs, + const std::vector& sens, + const std::vector& vec, + json config_json = json::parse("{}")) override { json request_body; request_body["name"] = name; request_body["outWrt"] = outWrt; request_body["inWrt1"] = inWrt1; request_body["inWrt2"] = inWrt2; - for (std::size_t i = 0; i < inputs.size(); i++) - { + for (std::size_t i = 0; i < inputs.size(); i++) { request_body["input"][i] = inputs[i]; } request_body["sens"] = sens; request_body["vec"] = vec; request_body["config"] = config_json; - if (auto res = cli.Post("/ApplyHessian", headers, request_body.dump(), "application/json")) - { + if (auto res = cli.Post("/ApplyHessian", headers, request_body.dump(), "application/json")) { json response_body = parse_result_with_error_handling(res); return response_body["output"].get>(); - } - else - { + } else { throw std::runtime_error("POST ApplyHessian failed with error type '" + to_string(res.error()) + "'"); } } - bool SupportsEvaluate() override - { + bool SupportsEvaluate() override { return supportsEvaluate; } - bool SupportsGradient() override - { + bool SupportsGradient() override { return supportsGradient; } - bool SupportsApplyJacobian() override - { + bool SupportsApplyJacobian() override { return 
-        bool SupportsApplyHessian() override
-        {
+        bool SupportsApplyHessian() override {
            return supportsApplyHessian;
        }
 
    private:
+
        mutable httplib::Client cli;
        httplib::Headers headers;
 
@@ -336,30 +281,24 @@ namespace umbridge
        bool supportsApplyJacobian = false;
        bool supportsApplyHessian = false;
 
-        json parse_result_with_error_handling(const httplib::Result &res) const
-        {
+        json parse_result_with_error_handling(const httplib::Result& res) const {
            json response_body;
-            try
-            {
+            try {
                response_body = json::parse(res->body);
-            }
-            catch (json::parse_error &e)
-            {
+            } catch (json::parse_error& e) {
                throw std::runtime_error("Response JSON could not be parsed. Response body: '" + res->body + "'");
            }
-            if (response_body.find("error") != response_body.end())
-            {
+            if (response_body.find("error") != response_body.end()) {
                throw std::runtime_error("Model server returned error of type " + response_body["error"]["type"].get<std::string>() + ", message: " + response_body["error"]["message"].get<std::string>());
            }
            return response_body;
        }
+
    };
 
    // Check if inputs dimensions match model's expected input size and return error in httplib response
-    bool check_input_sizes(const std::vector<std::vector<double>> &inputs, const json &config_json, const Model &model, httplib::Response &res)
-    {
-        if (inputs.size() != model.GetOutputSizes(config_json).size())
-        {
+    bool check_input_sizes(const std::vector<std::vector<double>>& inputs, const json& config_json, const Model& model, httplib::Response& res) {
+        if (inputs.size() != model.GetInputSizes(config_json).size()) {
            json response_body;
            response_body["error"]["type"] = "InvalidInput";
            response_body["error"]["message"] = "Number of inputs does not match number of model inputs. Expected " + std::to_string(model.GetInputSizes(config_json).size()) + " but got " + std::to_string(inputs.size());
@@ -367,10 +306,8 @@ namespace umbridge
            res.status = 400;
            return false;
        }
-        for (std::size_t i = 0; i < inputs.size(); i++)
-        {
-            if (inputs[i].size() != model.GetInputSizes(config_json)[i])
-            {
+        for (std::size_t i = 0; i < inputs.size(); i++) {
+            if (inputs[i].size() != model.GetInputSizes(config_json)[i]) {
                json response_body;
                response_body["error"]["type"] = "InvalidInput";
                response_body["error"]["message"] = "Input size mismatch! In input " + std::to_string(i) + " model expected size " + std::to_string(model.GetInputSizes(config_json)[i]) + " but got " + std::to_string(inputs[i].size());
@@ -383,10 +320,8 @@ namespace umbridge
        }
    }
 
    // Check if sensitivity vector's dimension matches correct model output size and return error in httplib response
-    bool check_sensitivity_size(const std::vector<double> &sens, int outWrt, const json &config_json, const Model &model, httplib::Response &res)
-    {
-        if (sens.size() != model.GetOutputSizes(config_json)[outWrt])
-        {
+    bool check_sensitivity_size(const std::vector<double>& sens, int outWrt, const json& config_json, const Model& model, httplib::Response& res) {
+        if (sens.size() != model.GetOutputSizes(config_json)[outWrt]) {
            json response_body;
            response_body["error"]["type"] = "InvalidInput";
            response_body["error"]["message"] = "Sensitivity vector size mismatch! Expected " + std::to_string(model.GetOutputSizes(config_json)[outWrt]) + " but got " + std::to_string(sens.size());
@@ -398,10 +333,8 @@ namespace umbridge
    }
 
    // Check if vector's dimension matches correct model output size and return error in httplib response
-    bool check_vector_size(const std::vector<double> &vec, int inWrt, const json &config_json, const Model &model, httplib::Response &res)
-    {
-        if (vec.size() != model.GetInputSizes(config_json)[inWrt])
-        {
+    bool check_vector_size(const std::vector<double>& vec, int inWrt, const json& config_json, const Model& model, httplib::Response& res) {
+        if (vec.size() != model.GetInputSizes(config_json)[inWrt]) {
            json response_body;
            response_body["error"]["type"] = "InvalidInput";
            response_body["error"]["message"] = "Vector size mismatch! Expected " + std::to_string(model.GetInputSizes(config_json)[inWrt]) + " but got " + std::to_string(vec.size());
@@ -413,10 +346,8 @@ namespace umbridge
    }
 
    // Check if outputs dimensions match model's expected output size and return error in httplib response
-    bool check_output_sizes(const std::vector<std::vector<double>> &outputs, const json &config_json, const Model &model, httplib::Response &res)
-    {
-        if (outputs.size() != model.GetOutputSizes(config_json).size())
-        {
+    bool check_output_sizes(const std::vector<std::vector<double>>& outputs, const json& config_json, const Model& model, httplib::Response& res) {
+        if (outputs.size() != model.GetOutputSizes(config_json).size()) {
            json response_body;
            response_body["error"]["type"] = "InvalidOutput";
            response_body["error"]["message"] = "Number of outputs declared by model does not match number of outputs returned by model. Model declared " + std::to_string(model.GetOutputSizes(config_json).size()) + " but returned " + std::to_string(outputs.size());
@@ -424,10 +355,8 @@ namespace umbridge
            res.status = 500;
            return false;
        }
-        for (std::size_t i = 0; i < outputs.size(); i++)
-        {
-            if (outputs[i].size() != model.GetOutputSizes(config_json)[i])
-            {
+        for (std::size_t i = 0; i < outputs.size(); i++) {
+            if (outputs[i].size() != model.GetOutputSizes(config_json)[i]) {
                json response_body;
                response_body["error"]["type"] = "InvalidOutput";
                response_body["error"]["message"] = "Output size mismatch! In output " + std::to_string(i) + " model declared size " + std::to_string(model.GetOutputSizes(config_json)[i]) + " but returned " + std::to_string(outputs[i].size());
@@ -440,10 +369,8 @@ namespace umbridge
        }
    }
 
    // Check if inWrt is between zero and model's input size inWrt and return error in httplib response
-    bool check_input_wrt(int inWrt, const json &config_json, const Model &model, httplib::Response &res)
-    {
-        if (inWrt < 0 || inWrt >= (int)model.GetInputSizes(config_json).size())
-        {
+    bool check_input_wrt(int inWrt, const json& config_json, const Model& model, httplib::Response& res) {
+        if (inWrt < 0 || inWrt >= (int)model.GetInputSizes(config_json).size()) {
            json response_body;
            response_body["error"]["type"] = "InvalidInput";
            response_body["error"]["message"] = "Input inWrt out of range! Expected between 0 and " + std::to_string(model.GetInputSizes(config_json).size() - 1) + " but got " + std::to_string(inWrt);
Expected between 0 and " + std::to_string(model.GetInputSizes(config_json).size() - 1) + " but got " + std::to_string(inWrt); @@ -455,10 +382,8 @@ namespace umbridge } // Check if outWrt is between zero and model's output size outWrt and return error in httplib response - bool check_output_wrt(int outWrt, const json &config_json, const Model &model, httplib::Response &res) - { - if (outWrt < 0 || outWrt >= (int)model.GetOutputSizes(config_json).size()) - { + bool check_output_wrt(int outWrt, const json& config_json, const Model& model, httplib::Response& res) { + if (outWrt < 0 || outWrt >= (int)model.GetOutputSizes(config_json).size()) { json response_body; response_body["error"]["type"] = "InvalidInput"; response_body["error"]["message"] = "Input outWrt out of range! Expected between 0 and " + std::to_string(model.GetOutputSizes(config_json).size() - 1) + " but got " + std::to_string(outWrt); @@ -470,8 +395,7 @@ namespace umbridge } // Construct response for unsupported feature - void write_unsupported_feature_response(httplib::Response &res, std::string feature) - { + void write_unsupported_feature_response(httplib::Response& res, std::string feature) { json response_body; response_body["error"]["type"] = "UnsupportedFeature"; response_body["error"]["message"] = "Feature '" + feature + "' is not supported by this model"; @@ -479,13 +403,16 @@ namespace umbridge res.status = 400; } + // log request + + void log_request(const httplib::Request& req, const httplib::Response& res) { + std::cout << "Incoming request from: " << req.remote_addr << " | Type: " << req.method << " " << req.path << " -> " << res.status << std::endl; + } + // Get model from name - Model &get_model_from_name(std::vector &models, std::string name) - { - for (auto &model : models) - { - if (model->GetName() == name) - { + Model& get_model_from_name(std::vector& models, std::string name) { + for (auto& model : models) { + if (model->GetName() == name) { return *model; } } @@ -493,14 +420,10 @@ namespace umbridge } // Check if model exists and return error in httplib response - bool check_model_exists(std::vector &models, std::string name, httplib::Response &res) - { - try - { + bool check_model_exists(std::vector& models, std::string name, httplib::Response& res) { + try { get_model_from_name(models, name); - } - catch (std::runtime_error &e) - { + } catch (std::runtime_error& e) { json response_body; response_body["error"]["type"] = "ModelNotFound"; response_body["error"]["message"] = "Model '" + name + "' not supported by this server!"; @@ -512,23 +435,21 @@ namespace umbridge } // Provides access to a model via network - void serveModels(std::vector models, std::string host, int port, bool disable_parallel = true) - { + void serveModels(std::vector models, std::string host, int port, bool enable_parallel = true, bool error_checks = true) { httplib::Server svr; std::mutex model_mutex; // Ensure the underlying model is only called sequentially - svr.Post("/Evaluate", [&](const httplib::Request &req, httplib::Response &res) - { + svr.Post("/Evaluate", [&](const httplib::Request &req, httplib::Response &res) { json request_body = json::parse(req.body); - //if (!check_model_exists(models, request_body["name"], res)) - // return; + if (error_checks && !check_model_exists(models, request_body["name"], res)) + return; Model& model = get_model_from_name(models, request_body["name"]); - /*if (!model.SupportsEvaluate()) { + if (error_checks && !model.SupportsEvaluate()) { write_unsupported_feature_response(res, "Evaluate"); 
return; - }*/ + } std::vector> inputs(request_body["input"].size()); for (std::size_t i = 0; i < inputs.size(); i++) { @@ -538,31 +459,38 @@ namespace umbridge json empty_default_config; json config_json = request_body.value("config", empty_default_config); - /*if (!check_input_sizes(inputs, config_json, model, res)) - return;*/ + if (error_checks && !check_input_sizes(inputs, config_json, model, res)) + return; - if(disable_parallel) - const std::lock_guard model_lock(model_mutex); + std::unique_lock model_lock(model_mutex, std::defer_lock); + if (!enable_parallel) { + model_lock.lock(); + } std::vector> outputs = model.Evaluate(inputs, config_json); - //if (!check_output_sizes(outputs, config_json, model, res)) - // return; + if (model_lock.owns_lock()) { + model_lock.unlock(); // for safety, although should unlock after request finished + } + + if (error_checks && !check_output_sizes(outputs, config_json, model, res)) + return; json response_body; + response_body["output"] = json::parse("[]"); for (std::size_t i = 0; i < outputs.size(); i++) { response_body["output"][i] = outputs[i]; } - res.set_content(response_body.dump(), "application/json"); }); + res.set_content(response_body.dump(), "application/json"); + }); - svr.Post("/Gradient", [&](const httplib::Request &req, httplib::Response &res) - { + svr.Post("/Gradient", [&](const httplib::Request &req, httplib::Response &res) { json request_body = json::parse(req.body); - if (!check_model_exists(models, request_body["name"], res)) + if (error_checks && !check_model_exists(models, request_body["name"], res)) return; Model& model = get_model_from_name(models, request_body["name"]); - if (!model.SupportsGradient()) { + if (error_checks && !model.SupportsGradient()) { write_unsupported_feature_response(res, "Gradient"); return; } @@ -580,32 +508,38 @@ namespace umbridge json empty_default_config; json config_json = request_body.value("config", empty_default_config); - if (!check_input_wrt(inWrt, config_json, model, res)) + if (error_checks && !check_input_wrt(inWrt, config_json, model, res)) return; - if (!check_output_wrt(outWrt, config_json, model, res)) + if (error_checks && !check_output_wrt(outWrt, config_json, model, res)) return; - if (!check_input_sizes(inputs, config_json, model, res)) + if (error_checks && !check_input_sizes(inputs, config_json, model, res)) return; - if (!check_sensitivity_size(sens, outWrt, config_json, model, res)) + if (error_checks && !check_sensitivity_size(sens, outWrt, config_json, model, res)) return; - if(disable_parallel) - const std::lock_guard model_lock(model_mutex); + std::unique_lock model_lock(model_mutex, std::defer_lock); + if (error_checks_parallel) { + model_lock.lock(); + } std::vector gradient = model.Gradient(outWrt, inWrt, inputs, sens, config_json); + if (model_lock.owns_lock()) { + model_lock.unlock(); // for safety, although should unlock after request finished + } + json response_body; response_body["output"] = gradient; - res.set_content(response_body.dump(), "application/json"); }); + res.set_content(response_body.dump(), "application/json"); + }); - svr.Post("/ApplyJacobian", [&](const httplib::Request &req, httplib::Response &res) - { + svr.Post("/ApplyJacobian", [&](const httplib::Request &req, httplib::Response &res) { json request_body = json::parse(req.body); - if (!check_model_exists(models, request_body["name"], res)) + if (error_checks && !check_model_exists(models, request_body["name"], res)) return; Model& model = get_model_from_name(models, request_body["name"]); - if 
+            if (error_checks && !model.SupportsApplyJacobian()) {
                write_unsupported_feature_response(res, "ApplyJacobian");
                return;
            }
@@ -623,32 +557,38 @@ namespace umbridge
            json empty_default_config;
            json config_json = request_body.value("config", empty_default_config);
 
-            if (!check_input_wrt(inWrt, config_json, model, res))
+            if (error_checks && !check_input_wrt(inWrt, config_json, model, res))
                return;
-            if (!check_output_wrt(outWrt, config_json, model, res))
+            if (error_checks && !check_output_wrt(outWrt, config_json, model, res))
                return;
-            if (!check_input_sizes(inputs, config_json, model, res))
+            if (error_checks && !check_input_sizes(inputs, config_json, model, res))
                return;
-            if (!check_vector_size(vec, inWrt, config_json, model, res))
+            if (error_checks && !check_vector_size(vec, inWrt, config_json, model, res))
                return;
 
-            if(disable_parallel)
-                const std::lock_guard<std::mutex> model_lock(model_mutex);
+            std::unique_lock<std::mutex> model_lock(model_mutex, std::defer_lock);
+            if (!enable_parallel) {
+                model_lock.lock();
+            }
 
            std::vector<double> jacobian_action = model.ApplyJacobian(outWrt, inWrt, inputs, vec, config_json);
 
+            if (model_lock.owns_lock()) {
+                model_lock.unlock(); // for safety, although should unlock after request finished
+            }
+
            json response_body;
            response_body["output"] = jacobian_action;
 
-            res.set_content(response_body.dump(), "application/json"); });
+            res.set_content(response_body.dump(), "application/json");
+        });
 
-        svr.Post("/ApplyHessian", [&](const httplib::Request &req, httplib::Response &res)
-                 {
+        svr.Post("/ApplyHessian", [&](const httplib::Request &req, httplib::Response &res) {
            json request_body = json::parse(req.body);
 
-            if (!check_model_exists(models, request_body["name"], res))
+            if (error_checks && !check_model_exists(models, request_body["name"], res))
                return;
            Model& model = get_model_from_name(models, request_body["name"]);
 
-            if (!model.SupportsApplyHessian()) {
+            if (error_checks && !model.SupportsApplyHessian()) {
                write_unsupported_feature_response(res, "ApplyHessian");
                return;
            }
@@ -668,28 +608,34 @@ namespace umbridge
            json empty_default_config;
            json config_json = request_body.value("config", empty_default_config);
 
-            if (!check_input_wrt(inWrt1, config_json, model, res))
+            if (error_checks && !check_input_wrt(inWrt1, config_json, model, res))
                return;
-            if (!check_input_wrt(inWrt2, config_json, model, res))
+            if (error_checks && !check_input_wrt(inWrt2, config_json, model, res))
                return;
-            if (!check_output_wrt(outWrt, config_json, model, res))
+            if (error_checks && !check_output_wrt(outWrt, config_json, model, res))
                return;
-            if (!check_input_sizes(inputs, config_json, model, res))
+            if (error_checks && !check_input_sizes(inputs, config_json, model, res))
                return;
-            if (!check_sensitivity_size(sens, outWrt, config_json, model, res))
+            if (error_checks && !check_sensitivity_size(sens, outWrt, config_json, model, res))
                return;
 
-            if(disable_parallel)
-                const std::lock_guard<std::mutex> model_lock(model_mutex);
+            std::unique_lock<std::mutex> model_lock(model_mutex, std::defer_lock);
+            if (!enable_parallel) {
+                model_lock.lock();
+            }
 
            std::vector<double> hessian_action = model.ApplyHessian(outWrt, inWrt1, inWrt2, inputs, sens, vec, config_json);
 
+            if (model_lock.owns_lock()) {
+                model_lock.unlock(); // for safety, although should unlock after request finished
+            }
+
            json response_body;
            response_body["output"] = hessian_action;
 
-            res.set_content(response_body.dump(), "application/json"); });
+            res.set_content(response_body.dump(), "application/json");
+        });
 
-        svr.Get("/Info", [&](const httplib::Request &, httplib::Response &res)
-                 {
+        svr.Get("/Info", [&](const httplib::Request &, httplib::Response &res) {
            json response_body;
            response_body["protocolVersion"] = 1.0;
            std::vector<std::string> model_names;
@@ -698,10 +644,10 @@ namespace umbridge
            }
            response_body["models"] = model_names;
 
-            res.set_content(response_body.dump(), "application/json"); });
+            res.set_content(response_body.dump(), "application/json");
+        });
 
-        svr.Post("/ModelInfo", [&](const httplib::Request &req, httplib::Response &res)
-                 {
+        svr.Post("/ModelInfo", [&](const httplib::Request &req, httplib::Response &res) {
            json request_body = json::parse(req.body);
            if (!check_model_exists(models, request_body["name"], res))
                return;
@@ -714,10 +660,10 @@ namespace umbridge
            response_body["support"]["ApplyJacobian"] = model.SupportsApplyJacobian();
            response_body["support"]["ApplyHessian"] = model.SupportsApplyHessian();
 
-            res.set_content(response_body.dump(), "application/json"); });
+            res.set_content(response_body.dump(), "application/json");
+        });
 
-        svr.Post("/InputSizes", [&](const httplib::Request &req, httplib::Response &res)
-                 {
+        svr.Post("/InputSizes", [&](const httplib::Request &req, httplib::Response &res) {
            json request_body = json::parse(req.body);
            if (!check_model_exists(models, request_body["name"], res))
                return;
@@ -729,10 +675,10 @@ namespace umbridge
            json response_body;
            response_body["inputSizes"] = model.GetInputSizes(config_json);
 
-            res.set_content(response_body.dump(), "application/json"); });
+            res.set_content(response_body.dump(), "application/json");
+        });
 
-        svr.Post("/OutputSizes", [&](const httplib::Request &req, httplib::Response &res)
-                 {
+        svr.Post("/OutputSizes", [&](const httplib::Request &req, httplib::Response &res) {
            json request_body = json::parse(req.body);
            if (!check_model_exists(models, request_body["name"], res))
                return;
@@ -744,9 +690,23 @@ namespace umbridge
            json response_body;
            response_body["outputSizes"] = model.GetOutputSizes(config_json);
 
-            res.set_content(response_body.dump(), "application/json"); });
+            res.set_content(response_body.dump(), "application/json");
+        });
 
        std::cout << "Listening on port " << port << "..." << std::endl;
+
+#ifdef LOGGING
+        svr.set_logger([](const httplib::Request& req, const httplib::Response& res) {
+            if (res.status >= 500) {
+                std::cerr << "[ERROR] ";
+            } else if (res.status >= 400) {
+                std::cerr << "[WARNING] ";
+            } else {
+                std::cout << "[INFO] ";
+            }
+            log_request(req, res);
+        });
+#endif
        svr.listen(host.c_str(), port);
        std::cout << "Quit" << std::endl;
    }
diff --git a/lib/umbridge.h b/lib/umbridge.h
index e57745f..b557a8f 100644
--- a/lib/umbridge.h
+++ b/lib/umbridge.h
@@ -434,18 +434,18 @@ namespace umbridge {
    }
 
    // Provides access to a model via network
-    void serveModels(std::vector<Model *> models, std::string host, int port) {
+    void serveModels(std::vector<Model *> models, std::string host, int port, bool enable_parallel = true, bool error_checks = true) {
        httplib::Server svr;
        std::mutex model_mutex; // Ensure the underlying model is only called sequentially
 
        svr.Post("/Evaluate", [&](const httplib::Request &req, httplib::Response &res) {
            json request_body = json::parse(req.body);
 
-            if (!check_model_exists(models, request_body["name"], res))
+            if (error_checks && !check_model_exists(models, request_body["name"], res))
                return;
            Model& model = get_model_from_name(models, request_body["name"]);
 
-            if (!model.SupportsEvaluate()) {
+            if (error_checks && !model.SupportsEvaluate()) {
                write_unsupported_feature_response(res, "Evaluate");
                return;
            }
@@ -458,13 +458,20 @@ namespace umbridge {
            json empty_default_config;
            json config_json = request_body.value("config", empty_default_config);
 
-            if (!check_input_sizes(inputs, config_json, model, res))
+            if (error_checks && !check_input_sizes(inputs, config_json, model, res))
                return;
 
-            const std::lock_guard<std::mutex> model_lock(model_mutex);
+            std::unique_lock<std::mutex> model_lock(model_mutex, std::defer_lock);
+            if (!enable_parallel) {
+                model_lock.lock();
+            }
 
            std::vector<std::vector<double>> outputs = model.Evaluate(inputs, config_json);
 
-            if (!check_output_sizes(outputs, config_json, model, res))
+            if (model_lock.owns_lock()) {
+                model_lock.unlock(); // for safety, although should unlock after request finished
+            }
+
+            if (error_checks && !check_output_sizes(outputs, config_json, model, res))
                return;
 
            json response_body;
@@ -478,11 +485,11 @@ namespace umbridge {
 
        svr.Post("/Gradient", [&](const httplib::Request &req, httplib::Response &res) {
            json request_body = json::parse(req.body);
 
-            if (!check_model_exists(models, request_body["name"], res))
+            if (error_checks && !check_model_exists(models, request_body["name"], res))
                return;
            Model& model = get_model_from_name(models, request_body["name"]);
 
-            if (!model.SupportsGradient()) {
+            if (error_checks && !model.SupportsGradient()) {
                write_unsupported_feature_response(res, "Gradient");
                return;
            }
@@ -500,18 +507,25 @@ namespace umbridge {
            json empty_default_config;
            json config_json = request_body.value("config", empty_default_config);
 
-            if (!check_input_wrt(inWrt, config_json, model, res))
+            if (error_checks && !check_input_wrt(inWrt, config_json, model, res))
                return;
-            if (!check_output_wrt(outWrt, config_json, model, res))
+            if (error_checks && !check_output_wrt(outWrt, config_json, model, res))
                return;
-            if (!check_input_sizes(inputs, config_json, model, res))
+            if (error_checks && !check_input_sizes(inputs, config_json, model, res))
                return;
-            if (!check_sensitivity_size(sens, outWrt, config_json, model, res))
+            if (error_checks && !check_sensitivity_size(sens, outWrt, config_json, model, res))
                return;
 
-            const std::lock_guard<std::mutex> model_lock(model_mutex);
+            std::unique_lock<std::mutex> model_lock(model_mutex, std::defer_lock);
+            if (!enable_parallel) {
+                model_lock.lock();
+            }
 
            std::vector<double> gradient = model.Gradient(outWrt, inWrt, inputs, sens, config_json);
 
+            if (model_lock.owns_lock()) {
+                model_lock.unlock(); // for safety, although should unlock after request finished
+            }
+
            json response_body;
            response_body["output"] = gradient;
 
@@ -520,11 +534,11 @@ namespace umbridge {
 
        svr.Post("/ApplyJacobian", [&](const httplib::Request &req, httplib::Response &res) {
            json request_body = json::parse(req.body);
 
-            if (!check_model_exists(models, request_body["name"], res))
+            if (error_checks && !check_model_exists(models, request_body["name"], res))
                return;
            Model& model = get_model_from_name(models, request_body["name"]);
 
-            if (!model.SupportsApplyJacobian()) {
+            if (error_checks && !model.SupportsApplyJacobian()) {
                write_unsupported_feature_response(res, "ApplyJacobian");
                return;
            }
@@ -542,18 +556,25 @@ namespace umbridge {
            json empty_default_config;
            json config_json = request_body.value("config", empty_default_config);
 
-            if (!check_input_wrt(inWrt, config_json, model, res))
+            if (error_checks && !check_input_wrt(inWrt, config_json, model, res))
                return;
-            if (!check_output_wrt(outWrt, config_json, model, res))
+            if (error_checks && !check_output_wrt(outWrt, config_json, model, res))
                return;
-            if (!check_input_sizes(inputs, config_json, model, res))
+            if (error_checks && !check_input_sizes(inputs, config_json, model, res))
                return;
-            if (!check_vector_size(vec, inWrt, config_json, model, res))
+            if (error_checks && !check_vector_size(vec, inWrt, config_json, model, res))
                return;
 
-            const std::lock_guard<std::mutex> model_lock(model_mutex);
+            std::unique_lock<std::mutex> model_lock(model_mutex, std::defer_lock);
+            if (!enable_parallel) {
+                model_lock.lock();
+            }
 
            std::vector<double> jacobian_action = model.ApplyJacobian(outWrt, inWrt, inputs, vec, config_json);
 
+            if (model_lock.owns_lock()) {
+                model_lock.unlock(); // for safety, although should unlock after request finished
+            }
+
            json response_body;
            response_body["output"] = jacobian_action;
 
@@ -562,11 +583,11 @@ namespace umbridge {
 
        svr.Post("/ApplyHessian", [&](const httplib::Request &req, httplib::Response &res) {
            json request_body = json::parse(req.body);
 
-            if (!check_model_exists(models, request_body["name"], res))
+            if (error_checks && !check_model_exists(models, request_body["name"], res))
                return;
            Model& model = get_model_from_name(models, request_body["name"]);
 
-            if (!model.SupportsApplyHessian()) {
+            if (error_checks && !model.SupportsApplyHessian()) {
                write_unsupported_feature_response(res, "ApplyHessian");
                return;
            }
@@ -586,20 +607,27 @@ namespace umbridge {
            json empty_default_config;
            json config_json = request_body.value("config", empty_default_config);
 
-            if (!check_input_wrt(inWrt1, config_json, model, res))
+            if (error_checks && !check_input_wrt(inWrt1, config_json, model, res))
                return;
-            if (!check_input_wrt(inWrt2, config_json, model, res))
+            if (error_checks && !check_input_wrt(inWrt2, config_json, model, res))
                return;
-            if (!check_output_wrt(outWrt, config_json, model, res))
+            if (error_checks && !check_output_wrt(outWrt, config_json, model, res))
                return;
-            if (!check_input_sizes(inputs, config_json, model, res))
+            if (error_checks && !check_input_sizes(inputs, config_json, model, res))
                return;
-            if (!check_sensitivity_size(sens, outWrt, config_json, model, res))
+            if (error_checks && !check_sensitivity_size(sens, outWrt, config_json, model, res))
                return;
 
-            const std::lock_guard<std::mutex> model_lock(model_mutex);
+            std::unique_lock<std::mutex> model_lock(model_mutex, std::defer_lock);
+            if (!enable_parallel) {
+                model_lock.lock();
+            }
            std::vector<double> hessian_action = model.ApplyHessian(outWrt, inWrt1, inWrt2, inputs, sens, vec, config_json);
 
+            if (model_lock.owns_lock()) {
+                model_lock.unlock(); // for safety, although should unlock after request finished
+            }
+
            json response_body;
            response_body["output"] = hessian_action;
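
For context, a minimal server-side sketch of how a model might be served with the updated serveModels signature, which now takes enable_parallel and error_checks flags, plus the optional LOGGING switch from hpc/lib/umbridge.h. The ExampleModel class, its sizes, and the port are hypothetical; only umbridge::Model, umbridge::serveModels, and the two new parameters come from the header shown in this diff.

// Hypothetical server main(), assuming this file sits next to umbridge.h.
#define LOGGING // assumption: turns on the svr.set_logger block added in hpc/lib/umbridge.h

#include "umbridge.h"

// Toy model (hypothetical): one input of size 1, one output of size 1, doubles its input.
class ExampleModel : public umbridge::Model {
public:
    ExampleModel() : umbridge::Model("example") {}

    std::vector<std::size_t> GetInputSizes(const json& config_json = json::parse("{}")) const override { return {1}; }
    std::vector<std::size_t> GetOutputSizes(const json& config_json = json::parse("{}")) const override { return {1}; }

    std::vector<std::vector<double>> Evaluate(const std::vector<std::vector<double>>& inputs,
                                              json config_json = json::parse("{}")) override {
        return {{2.0 * inputs[0][0]}};
    }

    bool SupportsEvaluate() override { return true; }
};

int main() {
    ExampleModel model;
    // enable_parallel = false serializes model calls through the request mutex;
    // error_checks = true keeps the check_* validation shown in the handlers above.
    umbridge::serveModels({&model}, "0.0.0.0", 4242, /*enable_parallel=*/false, /*error_checks=*/true);
    return 0;
}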
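A corresponding client-side sketch, assuming a server like the one above is reachable at a hypothetical address; SupportedModels, HTTPModel, and Evaluate are the client-side pieces of the header in this diff, while the host, model name, and input value are made up for illustration.

#include <iostream>
#include "umbridge.h"

int main() {
    const std::string host = "http://localhost:4242"; // hypothetical address

    // List the models the server advertises via GET /Info.
    for (const std::string& name : umbridge::SupportedModels(host))
        std::cout << "Server provides model: " << name << std::endl;

    // Connect to one model and evaluate it via POST /Evaluate.
    umbridge::HTTPModel model(host, "example");
    std::vector<std::vector<double>> outputs = model.Evaluate({{21.0}});
    std::cout << "Output: " << outputs[0][0] << std::endl;
    return 0;
}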