diff --git a/CMakeLists_files.cmake b/CMakeLists_files.cmake
index 6f5ebfbd5f7..877962a2a76 100644
--- a/CMakeLists_files.cmake
+++ b/CMakeLists_files.cmake
@@ -62,6 +62,7 @@ list (APPEND MAIN_SOURCE_FILES
opm/material/fluidsystems/blackoilpvt/SolventPvt.cpp
opm/material/fluidsystems/blackoilpvt/WetGasPvt.cpp
opm/material/fluidsystems/blackoilpvt/WetHumidGasPvt.cpp
+ opm/ml/keras_model.cpp
)
if(ENABLE_ECL_INPUT)
list(APPEND MAIN_SOURCE_FILES
@@ -474,6 +475,7 @@ list (APPEND TEST_SOURCE_FILES
tests/material/test_spline.cpp
tests/material/test_tabulation.cpp
tests/test_Visitor.cpp
+ tests/ml/keras_model_test.cpp
)
# tests that need to be linked to dune-common
@@ -646,6 +648,15 @@ list (APPEND TEST_DATA_FILES
tests/material/co2_unittest_below_sat.json
tests/material/h2o_unittest.json
tests/material/h2_unittest.json
+ tests/ml/ml_tools/models/test_dense_1x1.model
+ tests/ml/ml_tools/models/test_dense_2x2.model
+ tests/ml/ml_tools/models/test_dense_10x1.model
+ tests/ml/ml_tools/models/test_dense_10x10.model
+ tests/ml/ml_tools/models/test_dense_10x10x10.model
+ tests/ml/ml_tools/models/test_dense_relu_10.model
+ tests/ml/ml_tools/models/test_dense_tanh_10.model
+ tests/ml/ml_tools/models/test_relu_10.model
+ tests/ml/ml_tools/models/test_scalingdense_10x1.model
)
if(ENABLE_ECL_OUTPUT)
list (APPEND TEST_DATA_FILES
@@ -1052,6 +1063,7 @@ list( APPEND PUBLIC_HEADER_FILES
opm/material/thermal/SomertonThermalConductionLaw.hpp
opm/material/thermal/EclSpecrockLaw.hpp
opm/material/thermal/NullSolidEnergyLaw.hpp
+ opm/ml/keras_model.hpp
)
if(ENABLE_ECL_INPUT)
diff --git a/opm/ml/LICENSE.MIT b/opm/ml/LICENSE.MIT
new file mode 100644
index 00000000000..6908c245ebf
--- /dev/null
+++ b/opm/ml/LICENSE.MIT
@@ -0,0 +1,19 @@
+Copyright (c) 2016 Robert W. Rose, 2018 Paul Maevskikh, 2024 NORCE
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/opm/ml/keras_model.cpp b/opm/ml/keras_model.cpp
new file mode 100644
index 00000000000..0aaf3a832ee
--- /dev/null
+++ b/opm/ml/keras_model.cpp
@@ -0,0 +1,419 @@
+/*
+
+ * Copyright (c) 2016 Robert W. Rose
+ * Copyright (c) 2018 Paul Maevskikh
+ *
+ * MIT License, see LICENSE.MIT file.
+ */
+
+/*
+ Copyright (c) 2024 NORCE
+ This file is part of the Open Porous Media project (OPM).
+
+ OPM is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ OPM is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with OPM. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "keras_model.hpp"
+
+#include <algorithm>
+#include <cmath>
+#include <cstdio>
+#include <fstream>
+#include <iostream>
+
+#include <opm/material/densead/Evaluation.hpp>
+#include <opm/material/densead/Math.hpp>
+
+namespace Opm {
+
+
+bool ReadUnsignedInt(std::ifstream* file, unsigned int* i) {
+ KASSERT(file, "Invalid file stream");
+ KASSERT(i, "Invalid pointer");
+
+ file->read((char*)i, sizeof(unsigned int));
+ KASSERT(file->gcount() == sizeof(unsigned int), "Expected unsigned int");
+
+ return true;
+}
+
+bool ReadFloat(std::ifstream* file, float* f) {
+ KASSERT(file, "Invalid file stream");
+ KASSERT(f, "Invalid pointer");
+
+ file->read((char*)f, sizeof(float));
+ KASSERT(file->gcount() == sizeof(float), "Expected Evaluation");
+
+ return true;
+}
+
+bool ReadFloats(std::ifstream* file, float* f, size_t n) {
+ KASSERT(file, "Invalid file stream");
+ KASSERT(f, "Invalid pointer");
+
+ file->read((char*)f, sizeof(float) * n);
+ KASSERT(((unsigned int)file->gcount()) == sizeof(float) * n,
+ "Expected Evaluations");
+
+ return true;
+}
+
+template<class Evaluation>
+bool KerasLayerActivation<Evaluation>::LoadLayer(std::ifstream* file) {
+ KASSERT(file, "Invalid file stream");
+
+ unsigned int activation = 0;
+ KASSERT(ReadUnsignedInt(file, &activation),
+ "Failed to read activation type");
+
+ switch (activation) {
+ case kLinear:
+ activation_type_ = kLinear;
+ break;
+ case kRelu:
+ activation_type_ = kRelu;
+ break;
+ case kSoftPlus:
+ activation_type_ = kSoftPlus;
+ break;
+ case kHardSigmoid:
+ activation_type_ = kHardSigmoid;
+ break;
+ case kSigmoid:
+ activation_type_ = kSigmoid;
+ break;
+ case kTanh:
+ activation_type_ = kTanh;
+ break;
+ default:
+ KASSERT(false, "Unsupported activation type %d", activation);
+ }
+
+ return true;
+}
+
+template<class Evaluation>
+bool KerasLayerActivation<Evaluation>::Apply(Tensor<Evaluation>* in, Tensor<Evaluation>* out) {
+ KASSERT(in, "Invalid input");
+ KASSERT(out, "Invalid output");
+
+ *out = *in;
+
+ switch (activation_type_) {
+ case kLinear:
+ break;
+ case kRelu:
+ for (size_t i = 0; i < out->data_.size(); i++) {
+ if (out->data_[i] < 0.0) {
+ out->data_[i] = 0.0;
+ }
+ }
+ break;
+ case kSoftPlus:
+ for (size_t i = 0; i < out->data_.size(); i++) {
+ out->data_[i] = log(1.0 + exp(out->data_[i]));
+ }
+ break;
+ case kHardSigmoid:
+ for (size_t i = 0; i < out->data_.size(); i++) {
+ Evaluation x = (out->data_[i] * 0.2) + 0.5;
+
+ if (x <= 0) {
+ out->data_[i] = 0.0;
+ } else if (x >= 1) {
+ out->data_[i] = 1.0;
+ } else {
+ out->data_[i] = x;
+ }
+ }
+ break;
+ case kSigmoid:
+ for (size_t i = 0; i < out->data_.size(); i++) {
+ Evaluation& x = out->data_[i];
+
+ if (x >= 0) {
+ out->data_[i] = 1.0 / (1.0 + exp(-x));
+ } else {
+ Evaluation z = exp(x);
+ out->data_[i] = z / (1.0 + z);
+ }
+ }
+ break;
+ case kTanh:
+ for (size_t i = 0; i < out->data_.size(); i++) {
+ out->data_[i] = sinh(out->data_[i])/cosh(out->data_[i]);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+
+template<class Evaluation>
+bool KerasLayerScaling<Evaluation>::LoadLayer(std::ifstream* file) {
+ KASSERT(file, "Invalid file stream");
+
+ KASSERT(ReadFloat(file, &data_min), "Failed to read min");
+ KASSERT(ReadFloat(file, &data_max), "Failed to read max");
+ KASSERT(ReadFloat(file, &feat_inf), "Failed to read inf");
+ KASSERT(ReadFloat(file, &feat_sup), "Failed to read sup");
+ return true;
+}
+
+template<class Evaluation>
+bool KerasLayerScaling<Evaluation>::Apply(Tensor<Evaluation>* in, Tensor<Evaluation>* out) {
+ KASSERT(in, "Invalid input");
+ KASSERT(out, "Invalid output");
+
+ Tensor<Evaluation> temp_in, temp_out;
+
+ *out = *in;
+
+ for (size_t i = 0; i < out->data_.size(); i++) {
+ auto tempscale = (out->data_[i] - data_min)/(data_max - data_min);
+ out->data_[i] = tempscale * (feat_sup - feat_inf) + feat_inf;
+ }
+
+ return true;
+}
+
+template<class Evaluation>
+bool KerasLayerUnScaling<Evaluation>::LoadLayer(std::ifstream* file) {
+ KASSERT(file, "Invalid file stream");
+
+ KASSERT(ReadFloat(file, &data_min), "Failed to read min");
+ KASSERT(ReadFloat(file, &data_max), "Failed to read max");
+ KASSERT(ReadFloat(file, &feat_inf), "Failed to read inf");
+ KASSERT(ReadFloat(file, &feat_sup), "Failed to read sup");
+
+ return true;
+}
+
+template<class Evaluation>
+bool KerasLayerUnScaling<Evaluation>::Apply(Tensor<Evaluation>* in, Tensor<Evaluation>* out) {
+ KASSERT(in, "Invalid input");
+ KASSERT(out, "Invalid output");
+
+ *out = *in;
+
+ for (size_t i = 0; i < out->data_.size(); i++) {
+ auto tempscale = (out->data_[i] - feat_inf)/(feat_sup - feat_inf);
+
+ out->data_[i] = tempscale * (data_max - data_min) + data_min;
+ }
+
+
+ return true;
+}
+
+
+template<class Evaluation>
+bool KerasLayerDense<Evaluation>::LoadLayer(std::ifstream* file) {
+ KASSERT(file, "Invalid file stream");
+
+ unsigned int weights_rows = 0;
+ KASSERT(ReadUnsignedInt(file, &weights_rows), "Expected weight rows");
+ KASSERT(weights_rows > 0, "Invalid weights # rows");
+
+ unsigned int weights_cols = 0;
+ KASSERT(ReadUnsignedInt(file, &weights_cols), "Expected weight cols");
+ KASSERT(weights_cols > 0, "Invalid weights shape");
+
+ unsigned int biases_shape = 0;
+ KASSERT(ReadUnsignedInt(file, &biases_shape), "Expected biases shape");
+ KASSERT(biases_shape > 0, "Invalid biases shape");
+
+ weights_.Resize(weights_rows, weights_cols);
+ KASSERT(
+ ReadFloats(file, weights_.data_.data(), weights_rows * weights_cols),
+ "Expected weights");
+
+ biases_.Resize(biases_shape);
+ KASSERT(ReadFloats(file, biases_.data_.data(), biases_shape),
+ "Expected biases");
+
+ KASSERT(activation_.LoadLayer(file), "Failed to load activation");
+
+ return true;
+}
+
+template<class Evaluation>
+bool KerasLayerDense<Evaluation>::Apply(Tensor<Evaluation>* in, Tensor<Evaluation>* out) {
+ KASSERT(in, "Invalid input");
+ KASSERT(out, "Invalid output");
+ KASSERT(in->dims_.size() <= 2, "Invalid input dimensions");
+
+ if (in->dims_.size() == 2) {
+ KASSERT(in->dims_[1] == weights_.dims_[0], "Dimension mismatch %d %d",
+ in->dims_[1], weights_.dims_[0]);
+ }
+
+ Tensor<Evaluation> tmp(weights_.dims_[1]);
+
+ for (int i = 0; i < weights_.dims_[0]; i++) {
+ for (int j = 0; j < weights_.dims_[1]; j++) {
+ tmp(j) += (*in)(i)*weights_(i, j);
+ }
+ }
+
+ for (int i = 0; i < biases_.dims_[0]; i++) {
+ tmp(i) += biases_(i);
+ }
+
+ KASSERT(activation_.Apply(&tmp, out), "Failed to apply activation");
+
+ return true;
+}
+
+template<class Evaluation>
+bool KerasLayerFlatten<Evaluation>::LoadLayer(std::ifstream* file) {
+ KASSERT(file, "Invalid file stream");
+ return true;
+}
+
+template<class Evaluation>
+bool KerasLayerFlatten<Evaluation>::Apply(Tensor<Evaluation>* in, Tensor<Evaluation>* out) {
+ KASSERT(in, "Invalid input");
+ KASSERT(out, "Invalid output");
+
+ *out = *in;
+ out->Flatten();
+
+ return true;
+}
+
+
+
+template<class Evaluation>
+bool KerasLayerEmbedding<Evaluation>::LoadLayer(std::ifstream* file) {
+ KASSERT(file, "Invalid file stream");
+
+ unsigned int weights_rows = 0;
+ KASSERT(ReadUnsignedInt(file, &weights_rows), "Expected weight rows");
+ KASSERT(weights_rows > 0, "Invalid weights # rows");
+
+ unsigned int weights_cols = 0;
+ KASSERT(ReadUnsignedInt(file, &weights_cols), "Expected weight cols");
+ KASSERT(weights_cols > 0, "Invalid weights shape");
+
+ weights_.Resize(weights_rows, weights_cols);
+ KASSERT(
+ ReadFloats(file, weights_.data_.data(), weights_rows * weights_cols),
+ "Expected weights");
+
+ return true;
+}
+
+template<class Evaluation>
+bool KerasLayerEmbedding<Evaluation>::Apply(Tensor<Evaluation>* in, Tensor<Evaluation>* out) {
+ int output_rows = in->dims_[1];
+ int output_cols = weights_.dims_[1];
+ out->dims_ = {output_rows, output_cols};
+ out->data_.reserve(output_rows * output_cols);
+
+ std::for_each(in->data_.begin(), in->data_.end(), [=](Evaluation i) {
+ typename std::vector<float>::const_iterator first =
+ this->weights_.data_.begin() + (getValue(i) * output_cols);
+ typename std::vector<float>::const_iterator last =
+ this->weights_.data_.begin() + (getValue(i) + 1) * output_cols;
+
+ out->data_.insert(out->data_.end(), first, last);
+ });
+
+ return true;
+}
+
+
+template<class Evaluation>
+bool KerasModel<Evaluation>::LoadModel(const std::string& filename) {
+ std::ifstream file(filename.c_str(), std::ios::binary);
+ KASSERT(file.is_open(), "Unable to open file %s", filename.c_str());
+
+ unsigned int num_layers = 0;
+ KASSERT(ReadUnsignedInt(&file, &num_layers), "Expected number of layers");
+
+ for (unsigned int i = 0; i < num_layers; i++) {
+ unsigned int layer_type = 0;
+ KASSERT(ReadUnsignedInt(&file, &layer_type), "Expected layer type");
+
+ KerasLayer<Evaluation>* layer = NULL;
+
+ switch (layer_type) {
+ case kFlatten:
+ layer = new KerasLayerFlatten<Evaluation>();
+ break;
+ case kScaling:
+ layer = new KerasLayerScaling<Evaluation>();
+ break;
+ case kUnScaling:
+ layer = new KerasLayerUnScaling<Evaluation>();
+ break;
+ case kDense:
+ layer = new KerasLayerDense<Evaluation>();
+ break;
+ case kActivation:
+ layer = new KerasLayerActivation<Evaluation>();
+ break;
+ default:
+ break;
+ }
+
+ KASSERT(layer, "Unknown layer type %d", layer_type);
+
+ bool result = layer->LoadLayer(&file);
+ if (!result) {
+ printf("Failed to load layer %d", i);
+ delete layer;
+ return false;
+ }
+
+ layers_.push_back(layer);
+ }
+
+ return true;
+}
+
+template<class Evaluation>
+bool KerasModel<Evaluation>::Apply(Tensor<Evaluation>* in, Tensor<Evaluation>* out) {
+ Tensor<Evaluation> temp_in, temp_out;
+
+ for (unsigned int i = 0; i < layers_.size(); i++) {
+ if (i == 0) {
+ temp_in = *in;
+ }
+
+ KASSERT(layers_[i]->Apply(&temp_in, &temp_out),
+ "Failed to apply layer %d", i);
+
+ temp_in = temp_out;
+ }
+
+ *out = temp_out;
+
+ return true;
+}
+
+template class KerasModel<float>;
+
+template class KerasModel<double>;
+template class KerasModel<DenseAd::Evaluation<double, 1>>;
+template class KerasModel<DenseAd::Evaluation<double, 2>>;
+template class KerasModel<DenseAd::Evaluation<double, 3>>;
+template class KerasModel<DenseAd::Evaluation<double, 4>>;
+template class KerasModel<DenseAd::Evaluation<double, 5>>;
+template class KerasModel<DenseAd::Evaluation<double, 6>>;
+
+} // namespace Opm
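
A minimal usage sketch for the API added above. The model path and the 10-input/1-output shape are assumptions based on the test data registered in CMakeLists_files.cmake, and plain double is used as the Evaluation type:

```cpp
// Sketch: load a serialized Keras model and evaluate it on one input vector.
// Assumes test_dense_10x1.model maps a 10-component input to a single output.
#include <opm/ml/keras_model.hpp>

#include <iostream>

int main()
{
    Opm::KerasModel<double> model;
    if (!model.LoadModel("tests/ml/ml_tools/models/test_dense_10x1.model")) {
        return 1;
    }

    Opm::Tensor<double> in(10);
    for (int i = 0; i < 10; ++i) {
        in(i) = 0.1 * i;  // arbitrary sample input
    }

    Opm::Tensor<double> out;
    if (!model.Apply(&in, &out)) {  // keras_model.hpp uses a pointer-based API
        return 1;
    }

    std::cout << out(0) << '\n';
    return 0;
}
```
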
diff --git a/opm/ml/keras_model.hpp b/opm/ml/keras_model.hpp
new file mode 100644
index 00000000000..40a77ce81f3
--- /dev/null
+++ b/opm/ml/keras_model.hpp
@@ -0,0 +1,408 @@
+/*
+
+ * Copyright (c) 2016 Robert W. Rose
+ * Copyright (c) 2018 Paul Maevskikh
+ *
+ * MIT License, see LICENSE.MIT file.
+ */
+
+/*
+ Copyright (c) 2024 NORCE
+ This file is part of the Open Porous Media project (OPM).
+
+ OPM is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ OPM is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with OPM. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef KERAS_MODEL_H_
+#define KERAS_MODEL_H_
+
+#include <algorithm>
+#include <chrono>
+#include <cmath>
+#include <cstdio>
+#include <fstream>
+#include <numeric>
+#include <string>
+#include <vector>
+
+#include <opm/material/densead/Evaluation.hpp>
+#include <opm/material/densead/Math.hpp>
+
+namespace Opm {
+
+#define KASSERT(x, ...) \
+ if (!(x)) { \
+ printf("KASSERT: %s(%d): ", __FILE__, __LINE__); \
+ printf(__VA_ARGS__); \
+ printf("\n"); \
+ return false; \
+ }
+
+#define KASSERT_EQ(x, y, eps) \
+ if (fabs(x.value() - y.value()) > eps) { \
+ printf("KASSERT: Expected %f, got %f\n", y.value(), x.value()); \
+ return false; \
+ }
+
+#ifdef DEBUG
+#define KDEBUG(x, ...) \
+ if (!(x)) { \
+ printf("%s(%d): ", __FILE__, __LINE__); \
+ printf(__VA_ARGS__); \
+ printf("\n"); \
+ exit(-1); \
+ }
+#else
+#define KDEBUG(x, ...) ;
+#endif
+
+template<class Foo>
+class Tensor {
+ public:
+ Tensor() {}
+
+ Tensor(int i) { Resize(i); }
+
+ Tensor(int i, int j) { Resize(i, j); }
+
+ Tensor(int i, int j, int k) { Resize(i, j, k); }
+
+ Tensor(int i, int j, int k, int l) { Resize(i, j, k, l); }
+
+ void Resize(int i) {
+ dims_ = {i};
+ data_.resize(i);
+ }
+
+ void Resize(int i, int j) {
+ dims_ = {i, j};
+ data_.resize(i * j);
+ }
+
+ void Resize(int i, int j, int k) {
+ dims_ = {i, j, k};
+ data_.resize(i * j * k);
+ }
+
+ void Resize(int i, int j, int k, int l) {
+ dims_ = {i, j, k, l};
+ data_.resize(i * j * k * l);
+ }
+
+ inline void Flatten() {
+ KDEBUG(dims_.size() > 0, "Invalid tensor");
+
+ int elements = dims_[0];
+ for (unsigned int i = 1; i < dims_.size(); i++) {
+ elements *= dims_[i];
+ }
+ dims_ = {elements};
+ }
+
+ inline Foo& operator()(int i) {
+ KDEBUG(dims_.size() == 1, "Invalid indexing for tensor");
+ KDEBUG(i < dims_[0] && i >= 0, "Invalid i: %d (max %d)", i, dims_[0]);
+
+ return data_[i];
+ }
+
+ inline Foo& operator()(int i, int j) {
+ KDEBUG(dims_.size() == 2, "Invalid indexing for tensor");
+ KDEBUG(i < dims_[0] && i >= 0, "Invalid i: %d (max %d)", i, dims_[0]);
+ KDEBUG(j < dims_[1] && j >= 0, "Invalid j: %d (max %d)", j, dims_[1]);
+
+ return data_[dims_[1] * i + j];
+ }
+
+ inline Foo operator()(int i, int j) const {
+ KDEBUG(dims_.size() == 2, "Invalid indexing for tensor");
+ KDEBUG(i < dims_[0] && i >= 0, "Invalid i: %d (max %d)", i, dims_[0]);
+ KDEBUG(j < dims_[1] && j >= 0, "Invalid j: %d (max %d)", j, dims_[1]);
+
+ return data_[dims_[1] * i + j];
+ }
+
+ inline Foo& operator()(int i, int j, int k) {
+ KDEBUG(dims_.size() == 3, "Invalid indexing for tensor");
+ KDEBUG(i < dims_[0] && i >= 0, "Invalid i: %d (max %d)", i, dims_[0]);
+ KDEBUG(j < dims_[1] && j >= 0, "Invalid j: %d (max %d)", j, dims_[1]);
+ KDEBUG(k < dims_[2] && k >= 0, "Invalid k: %d (max %d)", k, dims_[2]);
+
+ return data_[dims_[2] * (dims_[1] * i + j) + k];
+ }
+
+ inline Foo& operator()(int i, int j, int k, int l) {
+ KDEBUG(dims_.size() == 4, "Invalid indexing for tensor");
+ KDEBUG(i < dims_[0] && i >= 0, "Invalid i: %d (max %d)", i, dims_[0]);
+ KDEBUG(j < dims_[1] && j >= 0, "Invalid j: %d (max %d)", j, dims_[1]);
+ KDEBUG(k < dims_[2] && k >= 0, "Invalid k: %d (max %d)", k, dims_[2]);
+ KDEBUG(l < dims_[3] && l >= 0, "Invalid l: %d (max %d)", l, dims_[3]);
+
+ return data_[dims_[3] * (dims_[2] * (dims_[1] * i + j) + k) + l];
+ }
+
+ inline void Fill(Foo value) {
+ std::fill(data_.begin(), data_.end(), value);
+ }
+
+ Tensor Unpack(int row) const {
+ KASSERT(dims_.size() >= 2, "Invalid tensor");
+ std::vector<int> pack_dims =
+ std::vector<int>(dims_.begin() + 1, dims_.end());
+ int pack_size = std::accumulate(pack_dims.begin(), pack_dims.end(), 0);
+
+ typename std::vector<Foo>::const_iterator first =
+ data_.begin() + (row * pack_size);
+ typename std::vector<Foo>::const_iterator last =
+ data_.begin() + (row + 1) * pack_size;
+
+ Tensor x = Tensor();
+ x.dims_ = pack_dims;
+ x.data_ = std::vector(first, last);
+
+ return x;
+ }
+
+ Tensor Select(int row) const {
+ Tensor x = Unpack(row);
+ x.dims_.insert(x.dims_.begin(), 1);
+
+ return x;
+ }
+
+ Tensor operator+(const Tensor& other) {
+ KASSERT(dims_ == other.dims_,
+ "Cannot add tensors with different dimensions");
+
+ Tensor result;
+ result.dims_ = dims_;
+ result.data_.reserve(data_.size());
+
+ std::transform(data_.begin(), data_.end(), other.data_.begin(),
+ std::back_inserter(result.data_),
+ [](Foo x, Foo y) { return x + y; });
+
+ return result;
+ }
+
+ Tensor Multiply(const Tensor& other) {
+ KASSERT(dims_ == other.dims_,
+ "Cannot multiply elements with different dimensions");
+
+ Tensor result;
+ result.dims_ = dims_;
+ result.data_.reserve(data_.size());
+
+ std::transform(data_.begin(), data_.end(), other.data_.begin(),
+ std::back_inserter(result.data_),
+ [](Foo x, Foo y) { return x * y; });
+
+ return result;
+ }
+
+ Tensor Dot(const Tensor& other) {
+ KDEBUG(dims_.size() == 2, "Invalid tensor dimensions");
+ KDEBUG(other.dims_.size() == 2, "Invalid tensor dimensions");
+ KASSERT(dims_[1] == other.dims_[0],
+ "Cannot multiply with different inner dimensions");
+
+ Tensor tmp(dims_[0], other.dims_[1]);
+
+ for (int i = 0; i < dims_[0]; i++) {
+ for (int j = 0; j < other.dims_[1]; j++) {
+ for (int k = 0; k < dims_[1]; k++) {
+ tmp(i, j) += (*this)(i, k) * other(k, j);
+ }
+ }
+ }
+
+ return tmp;
+ }
+
+ std::vector<int> dims_;
+ std::vector<Foo> data_;
+};
+
+template<class Evaluation>
+class KerasLayer {
+ public:
+ KerasLayer() {}
+
+ virtual ~KerasLayer() {}
+
+ virtual bool LoadLayer(std::ifstream* file) = 0;
+
+ virtual bool Apply(Tensor<Evaluation>* in, Tensor<Evaluation>* out) = 0;
+};
+
+template<class Evaluation>
+class KerasLayerActivation : public KerasLayer<Evaluation> {
+ public:
+ enum ActivationType {
+ kLinear = 1,
+ kRelu = 2,
+ kSoftPlus = 3,
+ kSigmoid = 4,
+ kTanh = 5,
+ kHardSigmoid = 6
+ };
+
+ KerasLayerActivation() : activation_type_(ActivationType::kLinear) {}
+
+ virtual ~KerasLayerActivation() {}
+
+ virtual bool LoadLayer(std::ifstream* file);
+
+ virtual bool Apply(Tensor<Evaluation>* in, Tensor<Evaluation>* out);
+
+ private:
+ ActivationType activation_type_;
+};
+
+template<class Evaluation>
+class KerasLayerScaling : public KerasLayer<Evaluation> {
+ public:
+ KerasLayerScaling(): data_min(1.0f), data_max(1.0f), feat_inf(1.0f), feat_sup(1.0f) {}
+
+ virtual ~KerasLayerScaling() {}
+
+ virtual bool LoadLayer(std::ifstream* file);
+
+ virtual bool Apply(Tensor<Evaluation>* in, Tensor<Evaluation>* out);
+
+ private:
+ Tensor<float> weights_;
+ Tensor<float> biases_;
+ float data_min;
+ float data_max;
+ float feat_inf;
+ float feat_sup;
+};
+
+template<class Evaluation>
+class KerasLayerUnScaling : public KerasLayer<Evaluation> {
+ public:
+ KerasLayerUnScaling(): data_min(1.0f), data_max(1.0f), feat_inf(1.0f), feat_sup(1.0f) {}
+
+ virtual ~KerasLayerUnScaling() {}
+
+ virtual bool LoadLayer(std::ifstream* file);
+
+ virtual bool Apply(Tensor<Evaluation>* in, Tensor<Evaluation>* out);
+
+ private:
+ Tensor<float> weights_;
+ Tensor<float> biases_;
+ float data_min;
+ float data_max;
+ float feat_inf;
+ float feat_sup;
+};
+
+
+template<class Evaluation>
+class KerasLayerDense : public KerasLayer<Evaluation> {
+ public:
+ KerasLayerDense() {}
+
+ virtual ~KerasLayerDense() {}
+
+ virtual bool LoadLayer(std::ifstream* file);
+
+ virtual bool Apply(Tensor<Evaluation>* in, Tensor<Evaluation>* out);
+
+ private:
+ Tensor<float> weights_;
+ Tensor<float> biases_;
+
+ KerasLayerActivation<Evaluation> activation_;
+};
+
+template<class Evaluation>
+class KerasLayerFlatten : public KerasLayer<Evaluation> {
+ public:
+ KerasLayerFlatten() {}
+
+ virtual ~KerasLayerFlatten() {}
+
+ virtual bool LoadLayer(std::ifstream* file);
+
+ virtual bool Apply(Tensor<Evaluation>* in, Tensor<Evaluation>* out);
+
+ private:
+};
+
+
+template<class Evaluation>
+class KerasLayerEmbedding : public KerasLayer<Evaluation> {
+ public:
+ KerasLayerEmbedding() {}
+
+ virtual ~KerasLayerEmbedding() {}
+
+ virtual bool LoadLayer(std::ifstream* file);
+
+ virtual bool Apply(Tensor<Evaluation>* in, Tensor<Evaluation>* out);
+
+ private:
+ Tensor<float> weights_;
+};
+
+template<class Evaluation>
+class KerasModel {
+ public:
+ enum LayerType {
+ kFlatten = 1,
+ kScaling = 2,
+ kUnScaling = 3,
+ kDense = 4,
+ kActivation = 5
+ };
+
+ KerasModel() {}
+
+ virtual ~KerasModel() {
+ for (unsigned int i = 0; i < layers_.size(); i++) {
+ delete layers_[i];
+ }
+ }
+
+ virtual bool LoadModel(const std::string& filename);
+
+ virtual bool Apply(Tensor<Evaluation>* in, Tensor<Evaluation>* out);
+
+ private:
+ std::vector<KerasLayer<Evaluation>*> layers_;
+};
+
+class KerasTimer {
+ public:
+ KerasTimer() {}
+
+ void Start() { start_ = std::chrono::high_resolution_clock::now(); }
+
+ float Stop() {
+ std::chrono::time_point<std::chrono::high_resolution_clock> now =
+ std::chrono::high_resolution_clock::now();
+
+ std::chrono::duration<double> diff = now - start_;
+
+ return diff.count();
+ }
+
+ private:
+ std::chrono::time_point<std::chrono::high_resolution_clock> start_;
+};
+
+} // namespace Opm
+
+#endif // KERAS_MODEL_H_
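
The Tensor class in keras_model.hpp stores its elements in a flat, row-major std::vector. A short sketch of how the index operators and Flatten() map onto that layout (values chosen arbitrarily):

```cpp
#include <opm/ml/keras_model.hpp>

#include <cassert>

int main()
{
    Opm::Tensor<float> t(2, 3);   // dims_ = {2, 3}, data_.size() == 6
    t(1, 2) = 42.0f;              // stored at data_[dims_[1] * 1 + 2] == data_[5]
    assert(t.data_[5] == 42.0f);  // dims_ and data_ are public members

    t.Flatten();                  // dims_ becomes {6}; the data is untouched
    assert(t(5) == 42.0f);
    return 0;
}
```
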
diff --git a/opm/ml/ml_model.cpp b/opm/ml/ml_model.cpp
new file mode 100644
index 00000000000..0552568b91e
--- /dev/null
+++ b/opm/ml/ml_model.cpp
@@ -0,0 +1,444 @@
+/*
+
+ * Copyright (c) 2016 Robert W. Rose
+ * Copyright (c) 2018 Paul Maevskikh
+ *
+ * MIT License, see LICENSE.MIT file.
+ */
+
+/*
+ Copyright (c) 2024 NORCE
+ This file is part of the Open Porous Media project (OPM).
+
+ OPM is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ OPM is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with OPM. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include "ml_model.hpp"
+
+#include <algorithm>
+#include <cmath>
+#include <cstdio>
+#include <fstream>
+#include <memory>
+
+#include <opm/material/densead/Evaluation.hpp>
+#include <opm/material/densead/Math.hpp>
+
+namespace Opm {
+
+
+template<class T>
+bool readFile(std::ifstream& file, T& data, size_t n=1)
+{
+ file.read(reinterpret_cast<char*>(&data), sizeof(T) * n);
+ return !file.fail();
+}
+
+
+// bool ReadUnsignedInt(std::ifstream* file, unsigned int* i) {
+// // KASSERT(file, "Invalid file stream");
+// // // KASSERT(i, "Invalid pointer");
+
+// // KASSERT(file, "Invalid file stream");
+
+// OPM_ERROR_IF(!file,
+// "Invalid file stream");
+
+// OPM_ERROR_IF(!i,
+// "Invalid pointer");
+
+// file->read((char*)i, sizeof(unsigned int));
+// KASSERT(file->gcount() == sizeof(unsigned int), "Expected unsigned int");
+
+// return true;
+// }
+
+// bool ReadFloat(std::ifstream* file, float* f) {
+// KASSERT(file, "Invalid file stream");
+// KASSERT(f, "Invalid pointer");
+
+// file->read((char*)f, sizeof(float));
+// KASSERT(file->gcount() == sizeof(float), "Expected Evaluation");
+
+// return true;
+// }
+
+bool ReadFloats(std::ifstream* file, float* f, size_t n) {
+ KASSERT(file, "Invalid file stream");
+ KASSERT(f, "Invalid pointer");
+
+ file->read((char*)f, sizeof(float) * n);
+ KASSERT(((unsigned int)file->gcount()) == sizeof(float) * n,
+ "Expected Evaluations");
+
+ return true;
+}
+
+template<class Evaluation>
+bool KerasLayerActivation<Evaluation>::loadLayer(std::ifstream& file) {
+ // KASSERT(file, "Invalid file stream");
+
+ unsigned int activation = 0;
+ KASSERT(readFile(file, activation),
+ "Failed to read activation type");
+
+ switch (activation) {
+ case kLinear:
+ activation_type_ = kLinear;
+ break;
+ case kRelu:
+ activation_type_ = kRelu;
+ break;
+ case kSoftPlus:
+ activation_type_ = kSoftPlus;
+ break;
+ case kHardSigmoid:
+ activation_type_ = kHardSigmoid;
+ break;
+ case kSigmoid:
+ activation_type_ = kSigmoid;
+ break;
+ case kTanh:
+ activation_type_ = kTanh;
+ break;
+ default:
+ KASSERT(false, "Unsupported activation type %d", activation);
+ }
+
+ return true;
+}
+
+template<class Evaluation>
+bool KerasLayerActivation<Evaluation>::apply(Tensor<Evaluation>& in, Tensor<Evaluation>& out) {
+ // KASSERT(in, "Invalid input");
+ // KASSERT(out, "Invalid output");
+
+ out = in;
+
+ switch (activation_type_) {
+ case kLinear:
+ break;
+ case kRelu:
+ for (size_t i = 0; i < out.data_.size(); i++) {
+ if (out.data_[i] < 0.0) {
+ out.data_[i] = 0.0;
+ }
+ }
+ break;
+ case kSoftPlus:
+ for (size_t i = 0; i < out.data_.size(); i++) {
+ out.data_[i] = log(1.0 + exp(out.data_[i]));
+ }
+ break;
+ case kHardSigmoid:
+ for (size_t i = 0; i < out.data_.size(); i++) {
+ Evaluation x = (out.data_[i] * 0.2) + 0.5;
+
+ if (x <= 0) {
+ out.data_[i] = 0.0;
+ } else if (x >= 1) {
+ out.data_[i] = 1.0;
+ } else {
+ out.data_[i] = x;
+ }
+ }
+ break;
+ case kSigmoid:
+ for (size_t i = 0; i < out.data_.size(); i++) {
+ Evaluation& x = out.data_[i];
+
+ if (x >= 0) {
+ out.data_[i] = 1.0 / (1.0 + exp(-x));
+ } else {
+ Evaluation z = exp(x);
+ out.data_[i] = z / (1.0 + z);
+ }
+ }
+ break;
+ case kTanh:
+ for (size_t i = 0; i < out.data_.size(); i++) {
+ out.data_[i] = sinh(out.data_[i])/cosh(out.data_[i]);
+ }
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+
+template<class Evaluation>
+bool KerasLayerScaling<Evaluation>::loadLayer(std::ifstream& file) {
+ KASSERT(file, "Invalid file stream");
+
+ KASSERT(readFile(file, data_min), "Failed to read min");
+ KASSERT(readFile(file, data_max), "Failed to read max");
+ KASSERT(readFile(file, feat_inf), "Failed to read inf");
+ KASSERT(readFile(file, feat_sup), "Failed to read sup");
+ return true;
+}
+
+template<class Evaluation>
+bool KerasLayerScaling<Evaluation>::apply(Tensor<Evaluation>& in, Tensor<Evaluation>& out) {
+ // KASSERT(in, "Invalid input");
+ // KASSERT(out, "Invalid output");
+
+ Tensor<Evaluation> temp_in, temp_out;
+
+ out = in;
+
+ for (size_t i = 0; i < out.data_.size(); i++) {
+ auto tempscale = (out.data_[i] - data_min)/(data_max - data_min);
+ out.data_[i] = tempscale * (feat_sup - feat_inf) + feat_inf;
+ }
+
+ return true;
+}
+
+template<class Evaluation>
+bool KerasLayerUnScaling<Evaluation>::loadLayer(std::ifstream& file) {
+ KASSERT(file, "Invalid file stream");
+
+ KASSERT(readFile(file, data_min), "Failed to read min");
+ KASSERT(readFile(file, data_max), "Failed to read max");
+ KASSERT(readFile(file, feat_inf), "Failed to read inf");
+ KASSERT(readFile(file, feat_sup), "Failed to read sup");
+
+ return true;
+}
+
+template<class Evaluation>
+bool KerasLayerUnScaling<Evaluation>::apply(Tensor<Evaluation>& in, Tensor<Evaluation>& out) {
+ // KASSERT(in, "Invalid input");
+ // KASSERT(out, "Invalid output");
+
+ out = in;
+
+ for (size_t i = 0; i < out.data_.size(); i++) {
+ auto tempscale = (out.data_[i] - feat_inf)/(feat_sup - feat_inf);
+
+ out.data_[i] = tempscale * (data_max - data_min) + data_min;
+ }
+
+
+ return true;
+}
+
+
+template<class Evaluation>
+bool KerasLayerDense<Evaluation>::loadLayer(std::ifstream& file) {
+ KASSERT(file, "Invalid file stream");
+
+ unsigned int weights_rows = 0;
+ KASSERT(readFile(file, weights_rows), "Expected weight rows");
+ KASSERT(weights_rows > 0, "Invalid weights # rows");
+
+ unsigned int weights_cols = 0;
+ KASSERT(readFile(file, weights_cols), "Expected weight cols");
+ KASSERT(weights_cols > 0, "Invalid weights shape");
+
+ unsigned int biases_shape = 0;
+ KASSERT(readFile(file, biases_shape), "Expected biases shape");
+ KASSERT(biases_shape > 0, "Invalid biases shape");
+
+ weights_.resize(weights_rows, weights_cols);
+ KASSERT(
+ ReadFloats(&file, weights_.data_.data(), weights_rows * weights_cols),
+ "Expected weights");
+
+ biases_.resize(biases_shape);
+ KASSERT(ReadFloats(&file, biases_.data_.data(), biases_shape),
+ "Expected biases");
+
+ KASSERT(activation_.loadLayer(file), "Failed to load activation");
+
+ return true;
+}
+
+template<class Evaluation>
+bool KerasLayerDense<Evaluation>::apply(Tensor<Evaluation>& in, Tensor<Evaluation>& out) {
+ // KASSERT(in, "Invalid input");
+ // KASSERT(out, "Invalid output");
+ KASSERT(in.dims_.size() <= 2, "Invalid input dimensions");
+
+ if (in.dims_.size() == 2) {
+ KASSERT(in.dims_[1] == weights_.dims_[0], "Dimension mismatch %d %d",
+ in.dims_[1], weights_.dims_[0]);
+ }
+
+ Tensor<Evaluation> tmp(weights_.dims_[1]);
+
+ for (int i = 0; i < weights_.dims_[0]; i++) {
+ for (int j = 0; j < weights_.dims_[1]; j++) {
+ tmp(j) += (in)(i)*weights_(i, j);
+ }
+ }
+
+ for (int i = 0; i < biases_.dims_[0]; i++) {
+ tmp(i) += biases_(i);
+ }
+
+ KASSERT(activation_.apply(tmp, out), "Failed to apply activation");
+
+ return true;
+}
+
+template<class Evaluation>
+bool KerasLayerFlatten<Evaluation>::loadLayer(std::ifstream& file) {
+ KASSERT(file, "Invalid file stream");
+ return true;
+}
+
+template<class Evaluation>
+bool KerasLayerFlatten<Evaluation>::apply(Tensor<Evaluation>& in, Tensor<Evaluation>& out) {
+ // KASSERT(in, "Invalid input");
+ // KASSERT(out, "Invalid output");
+
+ out = in;
+ out.flatten();
+
+ return true;
+}
+
+
+
+template<class Evaluation>
+bool KerasLayerEmbedding<Evaluation>::loadLayer(std::ifstream& file) {
+ KASSERT(file, "Invalid file stream");
+
+ unsigned int weights_rows = 0;
+ KASSERT(readFile(file, weights_rows), "Expected weight rows");
+ KASSERT(weights_rows > 0, "Invalid weights # rows");
+
+ unsigned int weights_cols = 0;
+ KASSERT(readFile(file, weights_cols), "Expected weight cols");
+ KASSERT(weights_cols > 0, "Invalid weights shape");
+
+ weights_.resize(weights_rows, weights_cols);
+ KASSERT(
+ ReadFloats(&file, weights_.data_.data(), weights_rows * weights_cols),
+ "Expected weights");
+
+ return true;
+}
+
+template<class Evaluation>
+bool KerasLayerEmbedding<Evaluation>::apply(Tensor<Evaluation>& in, Tensor<Evaluation>& out) {
+ int output_rows = in.dims_[1];
+ int output_cols = weights_.dims_[1];
+ out.dims_ = {output_rows, output_cols};
+ out.data_.reserve(output_rows * output_cols);
+
+ std::for_each(in.data_.begin(), in.data_.end(), [=](Evaluation i) {
+ typename std::vector<float>::const_iterator first =
+ this->weights_.data_.begin() + (getValue(i) * output_cols);
+ typename std::vector<float>::const_iterator last =
+ this->weights_.data_.begin() + (getValue(i) + 1) * output_cols;
+
+ out.data_.insert(out.data_.end(), first, last);
+ });
+
+ return true;
+}
+
+
+template<class Evaluation>
+bool KerasModel<Evaluation>::loadModel(const std::string& filename) {
+ std::ifstream file(filename.c_str(), std::ios::binary);
+ KASSERT(file.is_open(), "Unable to open file %s", filename.c_str());
+
+ unsigned int num_layers = 0;
+ KASSERT(readFile(file, num_layers), "Expected number of layers");
+
+ for (unsigned int i = 0; i < num_layers; i++) {
+ unsigned int layer_type = 0;
+ KASSERT(readFile(file, layer_type), "Expected layer type");
+
+ // KerasLayer* layer = NULL;
+ std::unique_ptr<KerasLayer<Evaluation>> layer = nullptr;
+
+ switch (layer_type) {
+ case kFlatten:
+ // layer = new KerasLayerFlatten();
+ layer = std::make_unique<KerasLayerFlatten<Evaluation>>();
+ break;
+ case kScaling:
+ // layer = new KerasLayerScaling();
+ layer = std::make_unique<KerasLayerScaling<Evaluation>>();
+ break;
+ case kUnScaling:
+ // layer = new KerasLayerUnScaling();
+ layer = std::make_unique<KerasLayerUnScaling<Evaluation>>();
+ break;
+ case kDense:
+ // layer = new KerasLayerDense();
+ layer = std::make_unique<KerasLayerDense<Evaluation>>();
+ break;
+ case kActivation:
+ // layer = new KerasLayerActivation();
+ layer = std::make_unique<KerasLayerActivation<Evaluation>>();
+ break;
+ default:
+ break;
+ }
+
+ KASSERT(layer, "Unknown layer type %d", layer_type);
+
+ bool result = layer->loadLayer(file);
+ if (!result) {
+ std::printf("Failed to load layer %d", i);
+ // delete layer;
+ return false;
+ }
+
+ // layers_.push_back(layer);
+ layers_.emplace_back(std::move(layer));
+
+ }
+
+ return true;
+}
+
+template<class Evaluation>
+bool KerasModel<Evaluation>::apply(Tensor<Evaluation>& in, Tensor<Evaluation>& out) {
+ Tensor<Evaluation> temp_in, temp_out;
+
+ for (unsigned int i = 0; i < layers_.size(); i++) {
+ if (i == 0) {
+ temp_in = in;
+ }
+
+ KASSERT(layers_[i]->apply(temp_in, temp_out),
+ "Failed to apply layer %d", i);
+
+ temp_in = temp_out;
+ }
+
+ out = temp_out;
+
+ return true;
+}
+
+template class KerasModel<float>;
+
+template class KerasModel<double>;
+template class KerasModel<DenseAd::Evaluation<double, 1>>;
+template class KerasModel<DenseAd::Evaluation<double, 2>>;
+template class KerasModel<DenseAd::Evaluation<double, 3>>;
+template class KerasModel<DenseAd::Evaluation<double, 4>>;
+template class KerasModel<DenseAd::Evaluation<double, 5>>;
+template class KerasModel<DenseAd::Evaluation<double, 6>>;
+
+} // namespace Opm
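
For comparison, the same round trip against the refactored interface in ml_model.cpp/hpp, where loadModel/apply take references and the layers are owned through std::unique_ptr. Again a sketch only, reusing the assumed model file from the example above:

```cpp
#include <opm/ml/ml_model.hpp>

#include <iostream>

int main()
{
    Opm::KerasModel<double> model;
    if (!model.loadModel("tests/ml/ml_tools/models/test_dense_10x1.model")) {
        return 1;
    }

    Opm::Tensor<double> in(10);
    in.fill(0.5);  // arbitrary constant input

    Opm::Tensor<double> out;
    if (!model.apply(in, out)) {  // reference-based API, no raw pointers
        return 1;
    }

    std::cout << out(0) << '\n';
    return 0;
}
```
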
diff --git a/opm/ml/ml_model.hpp b/opm/ml/ml_model.hpp
new file mode 100644
index 00000000000..b1473fbfc1d
--- /dev/null
+++ b/opm/ml/ml_model.hpp
@@ -0,0 +1,437 @@
+/*
+
+ * Copyright (c) 2016 Robert W. Rose
+ * Copyright (c) 2018 Paul Maevskikh
+ *
+ * MIT License, see LICENSE.MIT file.
+ */
+
+/*
+ Copyright (c) 2024 NORCE
+ This file is part of the Open Porous Media project (OPM).
+
+ OPM is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ OPM is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with OPM. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#ifndef ML_MODEL_H_
+#define ML_MODEL_H_
+
+#include <algorithm>
+#include <chrono>
+#include <cmath>
+#include <cstdio>
+#include <fstream>
+#include <iterator>
+#include <memory>
+#include <vector>
+
+#include <opm/common/ErrorMacros.hpp>
+#include <opm/material/densead/Evaluation.hpp>
+#include <opm/material/densead/Math.hpp>
+
+namespace Opm {
+
+#define KASSERT(x, ...) \
+ if (!(x)) { \
+ std::printf("KASSERT: %s(%d): ", __FILE__, __LINE__); \
+ std::printf(__VA_ARGS__); \
+ std::printf("\n"); \
+ return false; \
+ }
+
+#define KASSERT_EQ(x, y, eps) \
+ if (fabs(x.value() - y.value()) > eps) { \
+ std::printf("KASSERT: Expected %f, got %f\n", y.value(), x.value()); \
+ return false; \
+ }
+
+#ifdef DEBUG
+#define KDEBUG(x, ...) \
+ if (!(x)) { \
+ std::printf("%s(%d): ", __FILE__, __LINE__); \
+ std::printf(__VA_ARGS__); \
+ std::printf("\n"); \
+ exit(-1); \
+ }
+#else
+#define KDEBUG(x, ...) ;
+#endif
+
+template<class T>
+class Tensor {
+ public:
+ Tensor() {}
+
+ explicit Tensor(int i) { resize(i); }
+
+ Tensor(int i, int j) { resize(i, j); }
+
+ Tensor(int i, int j, int k) { resize(i, j, k); }
+
+ Tensor(int i, int j, int k, int l) { resize(i, j, k, l); }
+
+ void resize(int i) {
+ dims_ = {i};
+ data_.resize(i);
+ }
+
+ void resize(int i, int j) {
+ dims_ = {i, j};
+ data_.resize(i * j);
+ }
+
+ void resize(int i, int j, int k) {
+ dims_ = {i, j, k};
+ data_.resize(i * j * k);
+ }
+
+ void resize(int i, int j, int k, int l) {
+ dims_ = {i, j, k, l};
+ data_.resize(i * j * k * l);
+ }
+
+ inline void flatten() {
+ KDEBUG(dims_.size() > 0, "Invalid tensor");
+
+ int elements = dims_[0];
+ for (unsigned int i = 1; i < dims_.size(); i++) {
+ elements *= dims_[i];
+ }
+ dims_ = {elements};
+ }
+
+ inline T& operator()(int i) {
+ KDEBUG(dims_.size() == 1, "Invalid indexing for tensor");
+ KDEBUG(i < dims_[0] && i >= 0, "Invalid i: %d (max %d)", i, dims_[0]);
+
+ return data_[i];
+ }
+
+ inline T& operator()(int i, int j) {
+ KDEBUG(dims_.size() == 2, "Invalid indexing for tensor");
+ KDEBUG(i < dims_[0] && i >= 0, "Invalid i: %d (max %d)", i, dims_[0]);
+ KDEBUG(j < dims_[1] && j >= 0, "Invalid j: %d (max %d)", j, dims_[1]);
+
+ return data_[dims_[1] * i + j];
+ }
+
+ const T& operator()(int i, int j) const {
+ KDEBUG(dims_.size() == 2, "Invalid indexing for tensor");
+ KDEBUG(i < dims_[0] && i >= 0, "Invalid i: %d (max %d)", i, dims_[0]);
+ KDEBUG(j < dims_[1] && j >= 0, "Invalid j: %d (max %d)", j, dims_[1]);
+
+ return data_[dims_[1] * i + j];
+ }
+
+ inline T& operator()(int i, int j, int k) {
+ KDEBUG(dims_.size() == 3, "Invalid indexing for tensor");
+ KDEBUG(i < dims_[0] && i >= 0, "Invalid i: %d (max %d)", i, dims_[0]);
+ KDEBUG(j < dims_[1] && j >= 0, "Invalid j: %d (max %d)", j, dims_[1]);
+ KDEBUG(k < dims_[2] && k >= 0, "Invalid k: %d (max %d)", k, dims_[2]);
+
+ return data_[dims_[2] * (dims_[1] * i + j) + k];
+ }
+ const T& operator()(int i, int j, int k) const {
+ KDEBUG(dims_.size() == 3, "Invalid indexing for tensor");
+ KDEBUG(i < dims_[0] && i >= 0, "Invalid i: %d (max %d)", i, dims_[0]);
+ KDEBUG(j < dims_[1] && j >= 0, "Invalid j: %d (max %d)", j, dims_[1]);
+ KDEBUG(k < dims_[2] && k >= 0, "Invalid k: %d (max %d)", k, dims_[2]);
+
+ return data_[dims_[2] * (dims_[1] * i + j) + k];
+ }
+
+ inline T& operator()(int i, int j, int k, int l) {
+ KDEBUG(dims_.size() == 4, "Invalid indexing for tensor");
+ KDEBUG(i < dims_[0] && i >= 0, "Invalid i: %d (max %d)", i, dims_[0]);
+ KDEBUG(j < dims_[1] && j >= 0, "Invalid j: %d (max %d)", j, dims_[1]);
+ KDEBUG(k < dims_[2] && k >= 0, "Invalid k: %d (max %d)", k, dims_[2]);
+ KDEBUG(l < dims_[3] && l >= 0, "Invalid l: %d (max %d)", l, dims_[3]);
+
+ return data_[dims_[3] * (dims_[2] * (dims_[1] * i + j) + k) + l];
+ }
+
+ const T& operator()(int i, int j, int k, int l) const{
+ KDEBUG(dims_.size() == 4, "Invalid indexing for tensor");
+ KDEBUG(i < dims_[0] && i >= 0, "Invalid i: %d (max %d)", i, dims_[0]);
+ KDEBUG(j < dims_[1] && j >= 0, "Invalid j: %d (max %d)", j, dims_[1]);
+ KDEBUG(k < dims_[2] && k >= 0, "Invalid k: %d (max %d)", k, dims_[2]);
+ KDEBUG(l < dims_[3] && l >= 0, "Invalid l: %d (max %d)", l, dims_[3]);
+
+ return data_[dims_[3] * (dims_[2] * (dims_[1] * i + j) + k) + l];
+ }
+ void fill(const T & value) {
+ std::fill(data_.begin(), data_.end(), value);
+ }
+
+ // Tensor Unpack(int row) const {
+ // KASSERT(dims_.size() >= 2, "Invalid tensor");
+ // std::vector pack_dims =
+ // std::vector(dims_.begin() + 1, dims_.end());
+ // int pack_size = std::accumulate(pack_dims.begin(), pack_dims.end(), 0);
+
+ // typename std::vector::const_iterator first =
+ // data_.begin() + (row * pack_size);
+ // typename std::vector::const_iterator last =
+ // data_.begin() + (row + 1) * pack_size;
+
+ // Tensor x = Tensor();
+ // x.dims_ = pack_dims;
+ // x.data_ = std::vector(first, last);
+
+ // return x;
+ // }
+
+ // Tensor Select(int row) const {
+ // Tensor x = Unpack(row);
+ // x.dims_.insert(x.dims_.begin(), 1);
+
+ // return x;
+ // }
+
+ Tensor operator+(const Tensor& other) {
+ // KASSERT(dims_ == other.dims_,
+ // "Cannot add tensors with different dimensions");
+ OPM_ERROR_IF(dims_.size() != other.dims_.size(),
+ "Cannot add tensors with different dimensions");
+
+ Tensor result;
+ result.dims_ = dims_;
+ result.data_.reserve(data_.size());
+
+ std::transform(data_.begin(), data_.end(), other.data_.begin(),
+ std::back_inserter(result.data_),
+ [](const T& x, const T& y) { return x + y; });
+
+ return result;
+ }
+
+ std::vector<int> dims_;
+ std::vector<T> data_;
+};
+
+template<class Evaluation>
+class KerasLayer {
+ public:
+ KerasLayer() {}
+
+ virtual ~KerasLayer() {}
+
+ virtual bool loadLayer(std::ifstream& file) = 0;
+
+ virtual bool apply(Tensor<Evaluation>& in, Tensor<Evaluation>& out) = 0;
+};
+
+template<class Evaluation>
+class KerasLayerActivation : public KerasLayer<Evaluation> {
+ public:
+ enum ActivationType {
+ kLinear = 1,
+ kRelu = 2,
+ kSoftPlus = 3,
+ kSigmoid = 4,
+ kTanh = 5,
+ kHardSigmoid = 6
+ };
+
+ KerasLayerActivation() : activation_type_(ActivationType::kLinear) {}
+
+ virtual ~KerasLayerActivation() {}
+
+ virtual bool loadLayer(std::ifstream& file);
+
+ virtual bool apply(Tensor<Evaluation>& in, Tensor<Evaluation>& out);
+
+ private:
+ ActivationType activation_type_;
+};
+
+template<class Evaluation>
+class KerasLayerScaling : public KerasLayer<Evaluation> {
+ public:
+ KerasLayerScaling(): data_min(1.0f), data_max(1.0f), feat_inf(1.0f), feat_sup(1.0f) {}
+
+ virtual ~KerasLayerScaling() {}
+
+ virtual bool loadLayer(std::ifstream& file);
+
+ virtual bool apply(Tensor<Evaluation>& in, Tensor<Evaluation>& out);
+
+ private:
+ Tensor<float> weights_;
+ Tensor<float> biases_;
+ float data_min;
+ float data_max;
+ float feat_inf;
+ float feat_sup;
+};
+
+template<class Evaluation>
+class KerasLayerUnScaling : public KerasLayer<Evaluation> {
+ public:
+ KerasLayerUnScaling(): data_min(1.0f), data_max(1.0f), feat_inf(1.0f), feat_sup(1.0f) {}
+
+ virtual ~KerasLayerUnScaling() {}
+
+ virtual bool loadLayer(std::ifstream& file);
+
+ virtual bool apply(Tensor<Evaluation>& in, Tensor<Evaluation>& out);
+
+ private:
+ Tensor<float> weights_;
+ Tensor