GaussianProcess
Overview
The GaussianProcess utility incorporates the data structures and functions commonly needed by every object that uses or modifies Gaussian Processes, such as GaussianProcessTrainer, GaussianProcessSurrogate, or Gaussian Process-based active learning objects. It provides access to the covariance function and stores the covariance matrix together with its decomposition and inverse action.
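For orientation, these stored objects serve the standard Gaussian Process regression identities (a sketch in conventional notation, where $K$ is the training covariance matrix, $K_*$ the training-test covariance, $K_{**}$ the test covariance, and $y$ the training data):

\begin{equation}
\mu_* = K_*^T K^{-1} y, \qquad \Sigma_* = K_{**} - K_*^T K^{-1} K_*
\end{equation}

The inverse action $K^{-1}y$ and the Cholesky factorization $K = LL^T$ used to apply $K^{-1}$ are exactly what GaussianProcess stores after training (compare GaussianProcessSurrogate::evaluate below).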
Initializing
Any object that requires access to Gaussian Process-related functionality should hold one as a member variable, which is also important for enabling restart capabilities:
StochasticTools::GaussianProcess & _gp;
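Within a trainer this reference is typically bound to restartable model data; GaussianProcessTrainer, for instance, binds it in its constructor initializer list (see the full listing below):
_gp(declareModelData<StochasticTools::GaussianProcess>("_gp")),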
An important step is the initialization of the covariance function, which can be done either through the initialize function, as in GaussianProcessTrainer:
_gp.initialize(getCovarianceFunctionByName(parameters.get<UserObjectName>("covariance_function")),
tune_parameters,
lower_bounds,
upper_bounds);
_n_outputs = _gp.getCovarFunction().numOutputs();
Or by linking it directly as in GaussianProcessSurrogate:
_gp.linkCovarianceFunction(getCovarianceFunctionByName(covar_name));
Creating a covariance matrix
Once the covariance function has been added to the handler, one can use it to create a covariance matrix, either by using setupCovarianceMatrix as in GaussianProcessTrainer:
_gp.setupCovarianceMatrix(_training_params, _training_data, _optimization_opts);
Or by calling the covariance matrix builder directly, as in GaussianProcessSurrogate (the final boolean argument flags whether the two point sets are identical, i.e. a self-covariance):
_gp.getCovarFunction().computeCovarianceMatrix(K_test, test_points, test_points, true);
Optimizing hyperparameters
As described in GaussianProcessTrainer, the covariance function may have hyperparameters that need to be optimized before the corresponding Gaussian Process can make accurate predictions. These parameters can be optimized during the generation of the covariance matrix:
_gp.setupCovarianceMatrix(_training_params, _training_data, _optimization_opts);
Or by calling the Adam-based (stochastic) optimizer directly through tuneHyperParamsAdam, defined in GaussianProcess.C below.
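A minimal sketch of such a direct call with hypothetical option values, assuming the handler has been initialized with a tunable covariance function and that the method is accessible in the calling context (setupCovarianceMatrix performs this call internally, after sizing the batch, whenever tunable parameters were registered through initialize):
// Hypothetical values; the four options mirror the GaussianProcessTrainer input
// parameters show_every_nth_iteration, num_iters, batch_size, and learning_rate.
StochasticTools::GaussianProcess::GPOptimizerOptions opts(
    10,     // print the loss every 10th iteration
    2000,   // number of Adam iterations
    0,      // 0 falls back to the full training set as a single batch
    1e-3);  // learning rate
_gp.tuneHyperParamsAdam(_training_params, _training_data, opts);
The relevant source files are reproduced in full below for reference: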
(contrib/moose/modules/stochastic_tools/include/trainers/GaussianProcessTrainer.h)
// This file is part of the MOOSE framework
// https://www.mooseframework.org
//
// All rights reserved, see COPYRIGHT for full restrictions
// https://github.com/idaholab/moose/blob/master/COPYRIGHT
//
// Licensed under LGPL 2.1, please see LICENSE for details
// https://www.gnu.org/licenses/lgpl-2.1.html
#pragma once
#include "SurrogateTrainer.h"
#include "Standardizer.h"
#include <Eigen/Dense>
#include "Distribution.h"
#include "CovarianceFunctionBase.h"
#include "CovarianceInterface.h"
#include "GaussianProcess.h"
class GaussianProcessTrainer : public SurrogateTrainer, public CovarianceInterface
{
public:
static InputParameters validParams();
GaussianProcessTrainer(const InputParameters & parameters);
virtual void preTrain() override;
virtual void train() override;
virtual void postTrain() override;
StochasticTools::GaussianProcess & gp() { return _gp; }
const StochasticTools::GaussianProcess & gp() const { return _gp; }
private:
/// Data from the current predictor row
const std::vector<Real> & _predictor_row;
/// Gaussian process handler responsible for managing training related tasks
StochasticTools::GaussianProcess & _gp;
/// Parameters (x) used for training -- we'll allgather these in postTrain().
std::vector<std::vector<Real>> _params_buffer;
/// Data (y) used for training.
std::vector<std::vector<Real>> _data_buffer;
/// Parameters (x) used for training, along with statistics
RealEigenMatrix & _training_params;
/// Data (y) used for training
RealEigenMatrix _training_data;
/// Switch for training param (x) standardization
bool _standardize_params;
/// Switch for training data(y) standardization
bool _standardize_data;
/// Flag to toggle hyperparameter tuning/optimization
bool _do_tuning;
/// Struct holding parameters necessary for parameter tuning
const StochasticTools::GaussianProcess::GPOptimizerOptions _optimization_opts;
/// Data from the current sampler row
const std::vector<Real> & _sampler_row;
};
(contrib/moose/modules/stochastic_tools/src/trainers/GaussianProcessTrainer.C)
// This file is part of the MOOSE framework
// https://www.mooseframework.org
//
// All rights reserved, see COPYRIGHT for full restrictions
// https://github.com/idaholab/moose/blob/master/COPYRIGHT
//
// Licensed under LGPL 2.1, please see LICENSE for details
// https://www.gnu.org/licenses/lgpl-2.1.html
#include "GaussianProcessTrainer.h"
#include "Sampler.h"
#include "CartesianProduct.h"
#include <petsctao.h>
#include <petscdmda.h>
#include "libmesh/petsc_vector.h"
#include "libmesh/petsc_matrix.h"
#include <cmath>
registerMooseObject("StochasticToolsApp", GaussianProcessTrainer);
InputParameters
GaussianProcessTrainer::validParams()
{
InputParameters params = SurrogateTrainer::validParams();
params.addClassDescription("Provides data preperation and training for a single- or multi-output "
"Gaussian Process surrogate model.");
params.addRequiredParam<UserObjectName>("covariance_function", "Name of covariance function.");
params.addParam<bool>(
"standardize_params", true, "Standardize (center and scale) training parameters (x values)");
params.addParam<bool>(
"standardize_data", true, "Standardize (center and scale) training data (y values)");
// Already preparing to use Adam here
params.addParam<unsigned int>("num_iters", 1000, "Tolerance value for Adam optimization");
params.addParam<unsigned int>("batch_size", 0, "The batch size for Adam optimization");
params.addParam<Real>("learning_rate", 0.001, "The learning rate for Adam optimization");
params.addParam<unsigned int>(
"show_every_nth_iteration",
0,
"Switch to show Adam optimization loss values at every nth step. If 0, nothing is showed.");
params.addParam<std::vector<std::string>>("tune_parameters",
"Select hyperparameters to be tuned");
params.addParam<std::vector<Real>>("tuning_min", "Minimum allowable tuning value");
params.addParam<std::vector<Real>>("tuning_max", "Maximum allowable tuning value");
return params;
}
GaussianProcessTrainer::GaussianProcessTrainer(const InputParameters & parameters)
: SurrogateTrainer(parameters),
CovarianceInterface(parameters),
_predictor_row(getPredictorData()),
_gp(declareModelData<StochasticTools::GaussianProcess>("_gp")),
_training_params(declareModelData<RealEigenMatrix>("_training_params")),
_standardize_params(getParam<bool>("standardize_params")),
_standardize_data(getParam<bool>("standardize_data")),
_do_tuning(isParamValid("tune_parameters")),
_optimization_opts(StochasticTools::GaussianProcess::GPOptimizerOptions(
getParam<unsigned int>("show_every_nth_iteration"),
getParam<unsigned int>("num_iters"),
getParam<unsigned int>("batch_size"),
getParam<Real>("learning_rate"))),
_sampler_row(getSamplerData())
{
// Error Checking
if (parameters.isParamSetByUser("batch_size"))
if (_sampler.getNumberOfRows() < _optimization_opts.batch_size)
paramError("batch_size", "Batch size cannot be greater than the training data set size.");
std::vector<std::string> tune_parameters(
_do_tuning ? getParam<std::vector<std::string>>("tune_parameters")
: std::vector<std::string>{});
if (isParamValid("tuning_min") &&
(getParam<std::vector<Real>>("tuning_min").size() != tune_parameters.size()))
mooseError("tuning_min size does not match tune_parameters");
if (isParamValid("tuning_max") &&
(getParam<std::vector<Real>>("tuning_max").size() != tune_parameters.size()))
mooseError("tuning_max size does not match tune_parameters");
std::vector<Real> lower_bounds, upper_bounds;
if (isParamValid("tuning_min"))
lower_bounds = getParam<std::vector<Real>>("tuning_min");
if (isParamValid("tuning_max"))
upper_bounds = getParam<std::vector<Real>>("tuning_max");
_gp.initialize(getCovarianceFunctionByName(parameters.get<UserObjectName>("covariance_function")),
tune_parameters,
lower_bounds,
upper_bounds);
_n_outputs = _gp.getCovarFunction().numOutputs();
}
void
GaussianProcessTrainer::preTrain()
{
_params_buffer.clear();
_data_buffer.clear();
_params_buffer.reserve(getLocalSampleSize());
_data_buffer.reserve(getLocalSampleSize());
}
void
GaussianProcessTrainer::train()
{
_params_buffer.push_back(_predictor_row);
if (_rvecval && _rvecval->size() != _n_outputs)
mooseError("The size of the provided response (",
_rvecval->size(),
") does not match the number of expected outputs from the covariance (",
_n_outputs,
")!");
_data_buffer.push_back(_rvecval ? (*_rvecval) : std::vector<Real>(1, *_rval));
}
void
GaussianProcessTrainer::postTrain()
{
// Instead of gatherSum, we have to allgather.
_communicator.allgather(_params_buffer);
_communicator.allgather(_data_buffer);
_training_params.resize(_params_buffer.size(), _n_dims);
_training_data.resize(_data_buffer.size(), _n_outputs);
for (auto ii : make_range(_training_params.rows()))
{
for (auto jj : make_range(_n_dims))
_training_params(ii, jj) = _params_buffer[ii][jj];
for (auto jj : make_range(_n_outputs))
_training_data(ii, jj) = _data_buffer[ii][jj];
}
// Standardize (center and scale) training params
if (_standardize_params)
_gp.standardizeParameters(_training_params);
// if not standardizing params, set mean=0, std=1 for use in surrogate
else
_gp.paramStandardizer().set(0, 1, _n_dims);
// Standardize (center and scale) training data
if (_standardize_data)
_gp.standardizeData(_training_data);
// if not standardizing data set mean=0, std=1 for use in surrogate
else
_gp.dataStandardizer().set(0, 1, _n_outputs);
// Setup the covariance
_gp.setupCovarianceMatrix(_training_params, _training_data, _optimization_opts);
}
(contrib/moose/modules/stochastic_tools/src/surrogates/GaussianProcessSurrogate.C)
// This file is part of the MOOSE framework
// https://www.mooseframework.org
//
// All rights reserved, see COPYRIGHT for full restrictions
// https://github.com/idaholab/moose/blob/master/COPYRIGHT
//
// Licensed under LGPL 2.1, please see LICENSE for details
// https://www.gnu.org/licenses/lgpl-2.1.html
#include "GaussianProcessSurrogate.h"
#include "Sampler.h"
#include "CovarianceFunctionBase.h"
registerMooseObject("StochasticToolsApp", GaussianProcessSurrogate);
InputParameters
GaussianProcessSurrogate::validParams()
{
InputParameters params = SurrogateModel::validParams();
params.addClassDescription("Computes and evaluates Gaussian Process surrogate model.");
return params;
}
GaussianProcessSurrogate::GaussianProcessSurrogate(const InputParameters & parameters)
: SurrogateModel(parameters),
CovarianceInterface(parameters),
_gp(declareModelData<StochasticTools::GaussianProcess>("_gp")),
_training_params(getModelData<RealEigenMatrix>("_training_params"))
{
}
void
GaussianProcessSurrogate::setupCovariance(UserObjectName covar_name)
{
if (_gp.getCovarFunctionPtr() != nullptr)
::mooseError("Attempting to redefine covariance function using setupCovariance.");
_gp.linkCovarianceFunction(getCovarianceFunctionByName(covar_name));
}
Real
GaussianProcessSurrogate::evaluate(const std::vector<Real> & x) const
{
// Overload of evaluate to maintain general compatibility. Only returns the mean.
Real dummy = 0;
return this->evaluate(x, dummy);
}
Real
GaussianProcessSurrogate::evaluate(const std::vector<Real> & x, Real & std_dev) const
{
std::vector<Real> y;
std::vector<Real> std;
this->evaluate(x, y, std);
std_dev = std[0];
return y[0];
}
void
GaussianProcessSurrogate::evaluate(const std::vector<Real> & x, std::vector<Real> & y) const
{
// Overload of evaluate to maintain general compatibility. Only returns the mean.
std::vector<Real> std_dummy;
this->evaluate(x, y, std_dummy);
}
void
GaussianProcessSurrogate::evaluate(const std::vector<Real> & x,
std::vector<Real> & y,
std::vector<Real> & std) const
{
const unsigned int n_dims = _training_params.cols();
mooseAssert(x.size() == n_dims,
"Number of parameters provided for evaluation does not match number of parameters "
"used for training.");
const unsigned int n_outputs = _gp.getCovarFunction().numOutputs();
y = std::vector<Real>(n_outputs, 0.0);
std = std::vector<Real>(n_outputs, 0.0);
RealEigenMatrix test_points(1, n_dims);
for (unsigned int ii = 0; ii < n_dims; ++ii)
test_points(0, ii) = x[ii];
_gp.getParamStandardizer().getStandardized(test_points);
RealEigenMatrix K_train_test(_training_params.rows() * n_outputs, n_outputs);
_gp.getCovarFunction().computeCovarianceMatrix(
K_train_test, _training_params, test_points, false);
RealEigenMatrix K_test(n_outputs, n_outputs);
_gp.getCovarFunction().computeCovarianceMatrix(K_test, test_points, test_points, true);
// Compute the predicted mean value (centered)
RealEigenMatrix pred_value = (K_train_test.transpose() * _gp.getKResultsSolve()).transpose();
// De-center/scale the value and store for return
_gp.getDataStandardizer().getDestandardized(pred_value);
RealEigenMatrix pred_var =
K_test - (K_train_test.transpose() * _gp.getKCholeskyDecomp().solve(K_train_test));
// Variance computed; take sqrt for standard deviation, scale up by training data std and store
RealEigenMatrix std_dev_mat = pred_var.array().sqrt();
_gp.getDataStandardizer().getDescaled(std_dev_mat);
for (const auto output_i : make_range(n_outputs))
{
y[output_i] = pred_value(0, output_i);
std[output_i] = std_dev_mat(output_i, output_i);
}
}
(contrib/moose/modules/stochastic_tools/src/utils/GaussianProcess.C)
// This file is part of the MOOSE framework
// https://www.mooseframework.org
//
// All rights reserved, see COPYRIGHT for full restrictions
// https://github.com/idaholab/moose/blob/master/COPYRIGHT
//
// Licensed under LGPL 2.1, please see LICENSE for details
// https://www.gnu.org/licenses/lgpl-2.1.html
#include "GaussianProcess.h"
#include "FEProblemBase.h"
#include <petsctao.h>
#include <petscdmda.h>
#include "libmesh/petsc_vector.h"
#include "libmesh/petsc_matrix.h"
#include <cmath>
#include "MooseRandom.h"
#include "Shuffle.h"
namespace StochasticTools
{
GaussianProcess::GPOptimizerOptions::GPOptimizerOptions(const bool show_every_nth_iteration,
const unsigned int num_iter,
const unsigned int batch_size,
const Real learning_rate,
const Real b1,
const Real b2,
const Real eps,
const Real lambda)
: show_every_nth_iteration(show_every_nth_iteration),
num_iter(num_iter),
batch_size(batch_size),
learning_rate(learning_rate),
b1(b1),
b2(b2),
eps(eps),
lambda(lambda)
{
}
GaussianProcess::GaussianProcess() {}
void
GaussianProcess::initialize(CovarianceFunctionBase * covariance_function,
const std::vector<std::string> & params_to_tune,
const std::vector<Real> & min,
const std::vector<Real> & max)
{
linkCovarianceFunction(covariance_function);
generateTuningMap(params_to_tune, min, max);
}
void
GaussianProcess::linkCovarianceFunction(CovarianceFunctionBase * covariance_function)
{
_covariance_function = covariance_function;
_covar_type = _covariance_function->type();
_covar_name = _covariance_function->name();
_covariance_function->dependentCovarianceTypes(_dependent_covar_types);
_dependent_covar_names = _covariance_function->dependentCovarianceNames();
_num_outputs = _covariance_function->numOutputs();
}
void
GaussianProcess::setupCovarianceMatrix(const RealEigenMatrix & training_params,
const RealEigenMatrix & training_data,
const GPOptimizerOptions & opts)
{
const bool batch_decision = opts.batch_size > 0 && (opts.batch_size <= training_params.rows());
_batch_size = batch_decision ? opts.batch_size : training_params.rows();
_K.resize(_num_outputs * _batch_size, _num_outputs * _batch_size);
if (_tuning_data.size())
tuneHyperParamsAdam(training_params, training_data, opts);
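// After tuning, rebuild the full-size covariance matrix with the optimized hyperparameters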
_K.resize(training_params.rows() * training_data.cols(),
training_params.rows() * training_data.cols());
_covariance_function->computeCovarianceMatrix(_K, training_params, training_params, true);
RealEigenMatrix flattened_data =
training_data.reshaped(training_params.rows() * training_data.cols(), 1);
// Compute the Cholesky decomposition and inverse action of the covariance matrix
setupStoredMatrices(flattened_data);
_covariance_function->buildHyperParamMap(_hyperparam_map, _hyperparam_vec_map);
}
void
GaussianProcess::setupStoredMatrices(const RealEigenMatrix & input)
{
_K_cho_decomp = _K.llt();
_K_results_solve = _K_cho_decomp.solve(input);
}
void
GaussianProcess::generateTuningMap(const std::vector<std::string> & params_to_tune,
const std::vector<Real> & min_vector,
const std::vector<Real> & max_vector)
{
_num_tunable = 0;
const bool lower_bounds_specified = min_vector.size();
const bool upper_bounds_specified = max_vector.size();
for (const auto param_i : index_range(params_to_tune))
{
const auto & hp = params_to_tune[param_i];
if (_covariance_function->isTunable(hp))
{
unsigned int size;
Real min;
Real max;
// Get size and default min/max
const bool found = _covariance_function->getTuningData(hp, size, min, max);
if (!found)
::mooseError("The covariance parameter ", hp, " could not be found!");
// Check for overridden min/max
min = lower_bounds_specified ? min_vector[param_i] : min;
max = upper_bounds_specified ? max_vector[param_i] : max;
// Save data in tuple
_tuning_data[hp] = std::make_tuple(_num_tunable, size, min, max);
_num_tunable += size;
}
}
}
void
GaussianProcess::standardizeParameters(RealEigenMatrix & data, bool keep_moments)
{
if (!keep_moments)
_param_standardizer.computeSet(data);
_param_standardizer.getStandardized(data);
}
void
GaussianProcess::standardizeData(RealEigenMatrix & data, bool keep_moments)
{
if (!keep_moments)
_data_standardizer.computeSet(data);
_data_standardizer.getStandardized(data);
}
void
GaussianProcess::tuneHyperParamsAdam(const RealEigenMatrix & training_params,
const RealEigenMatrix & training_data,
const GPOptimizerOptions & opts)
{
std::vector<Real> theta(_num_tunable, 0.0);
_covariance_function->buildHyperParamMap(_hyperparam_map, _hyperparam_vec_map);
mapToVec(_tuning_data, _hyperparam_map, _hyperparam_vec_map, theta);
// Internal params for Adam; set to the recommended values in the paper
Real b1 = opts.b1;
Real b2 = opts.b2;
Real eps = opts.eps;
std::vector<Real> m0(_num_tunable, 0.0);
std::vector<Real> v0(_num_tunable, 0.0);
Real new_val;
Real m_hat;
Real v_hat;
Real store_loss = 0.0;
std::vector<Real> grad1;
// Initialize randomizer
std::vector<unsigned int> v_sequence(training_params.rows());
std::iota(std::begin(v_sequence), std::end(v_sequence), 0);
RealEigenMatrix inputs(_batch_size, training_params.cols());
RealEigenMatrix outputs(_batch_size, training_data.cols());
if (opts.show_every_nth_iteration)
Moose::out << "OPTIMIZING GP HYPER-PARAMETERS USING Adam" << std::endl;
for (unsigned int ss = 0; ss < opts.num_iter; ++ss)
{
// Shuffle data
MooseRandom generator;
generator.seed(0, 1980);
generator.saveState();
MooseUtils::shuffle<unsigned int>(v_sequence, generator, 0);
for (unsigned int ii = 0; ii < _batch_size; ++ii)
{
for (unsigned int jj = 0; jj < training_params.cols(); ++jj)
inputs(ii, jj) = training_params(v_sequence[ii], jj);
for (unsigned int jj = 0; jj < training_data.cols(); ++jj)
outputs(ii, jj) = training_data(v_sequence[ii], jj);
}
store_loss = getLoss(inputs, outputs);
if (opts.show_every_nth_iteration && ((ss + 1) % opts.show_every_nth_iteration == 0))
Moose::out << "Iteration: " << ss + 1 << " LOSS: " << store_loss << std::endl;
grad1 = getGradient(inputs);
for (auto iter = _tuning_data.begin(); iter != _tuning_data.end(); ++iter)
{
const auto first_index = std::get<0>(iter->second);
const auto num_entries = std::get<1>(iter->second);
for (unsigned int ii = 0; ii < num_entries; ++ii)
{
const auto global_index = first_index + ii;
m0[global_index] = b1 * m0[global_index] + (1 - b1) * grad1[global_index];
v0[global_index] =
b2 * v0[global_index] + (1 - b2) * grad1[global_index] * grad1[global_index];
m_hat = m0[global_index] / (1 - std::pow(b1, (ss + 1)));
v_hat = v0[global_index] / (1 - std::pow(b2, (ss + 1)));
new_val = theta[global_index] - opts.learning_rate * m_hat / (std::sqrt(v_hat) + eps);
const auto min_value = std::get<2>(iter->second);
const auto max_value = std::get<3>(iter->second);
theta[global_index] = std::min(std::max(new_val, min_value), max_value);
}
}
vecToMap(_tuning_data, _hyperparam_map, _hyperparam_vec_map, theta);
_covariance_function->loadHyperParamMap(_hyperparam_map, _hyperparam_vec_map);
}
if (opts.show_every_nth_iteration)
{
Moose::out << "OPTIMIZED GP HYPER-PARAMETERS:" << std::endl;
Moose::out << Moose::stringify(theta) << std::endl;
Moose::out << "FINAL LOSS: " << store_loss << std::endl;
}
}
Real
GaussianProcess::getLoss(RealEigenMatrix & inputs, RealEigenMatrix & outputs)
{
_covariance_function->computeCovarianceMatrix(_K, inputs, inputs, true);
RealEigenMatrix flattened_data = outputs.reshaped(outputs.rows() * outputs.cols(), 1);
setupStoredMatrices(flattened_data);
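// Accumulate twice the log marginal likelihood (data-fit, log-determinant,
// and normalization terms); halving and negating below yields the negative
// log-likelihood loss minimized by Adam.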
Real log_likelihood = 0;
log_likelihood += -(flattened_data.transpose() * _K_results_solve)(0, 0);
log_likelihood += -std::log(_K.determinant());
log_likelihood -= _batch_size * std::log(2 * M_PI);
log_likelihood = -log_likelihood / 2;
return log_likelihood;
}
std::vector<Real>
GaussianProcess::getGradient(RealEigenMatrix & inputs) const
{
RealEigenMatrix dKdhp(_batch_size, _batch_size);
RealEigenMatrix alpha = _K_results_solve * _K_results_solve.transpose();
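// alpha = (K^{-1}y)(K^{-1}y)^T; each gradient entry below evaluates the
// standard identity d(loss)/dtheta = -0.5 * tr((alpha - K^{-1}) * dK/dtheta)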
std::vector<Real> grad_vec;
grad_vec.resize(_num_tunable);
for (auto iter = _tuning_data.begin(); iter != _tuning_data.end(); ++iter)
{
std::string hyper_param_name = iter->first;
const auto first_index = std::get<0>(iter->second);
const auto num_entries = std::get<1>(iter->second);
for (unsigned int ii = 0; ii < num_entries; ++ii)
{
const auto global_index = first_index + ii;
_covariance_function->computedKdhyper(dKdhp, inputs, hyper_param_name, ii);
RealEigenMatrix tmp = alpha * dKdhp - _K_cho_decomp.solve(dKdhp);
grad_vec[global_index] = -tmp.trace() / 2.0;
}
}
return grad_vec;
}
void
GaussianProcess::mapToVec(
const std::unordered_map<std::string, std::tuple<unsigned int, unsigned int, Real, Real>> &
tuning_data,
const std::unordered_map<std::string, Real> & scalar_map,
const std::unordered_map<std::string, std::vector<Real>> & vector_map,
std::vector<Real> & vec) const
{
for (auto iter : tuning_data)
{
const std::string & param_name = iter.first;
const auto scalar_it = scalar_map.find(param_name);
if (scalar_it != scalar_map.end())
vec[std::get<0>(iter.second)] = scalar_it->second;
else
{
const auto vector_it = vector_map.find(param_name);
if (vector_it != vector_map.end())
for (unsigned int ii = 0; ii < std::get<1>(iter.second); ++ii)
vec[std::get<0>(iter.second) + ii] = (vector_it->second)[ii];
}
}
}
void
GaussianProcess::vecToMap(
const std::unordered_map<std::string, std::tuple<unsigned int, unsigned int, Real, Real>> &
tuning_data,
std::unordered_map<std::string, Real> & scalar_map,
std::unordered_map<std::string, std::vector<Real>> & vector_map,
const std::vector<Real> & vec) const
{
for (auto iter : tuning_data)
{
const std::string & param_name = iter.first;
if (scalar_map.find(param_name) != scalar_map.end())
scalar_map[param_name] = vec[std::get<0>(iter.second)];
else if (vector_map.find(param_name) != vector_map.end())
for (unsigned int ii = 0; ii < std::get<1>(iter.second); ++ii)
vector_map[param_name][ii] = vec[std::get<0>(iter.second) + ii];
}
}
} // StochasticTools namespace
template <>
void
dataStore(std::ostream & stream, Eigen::LLT<RealEigenMatrix> & decomp, void * context)
{
// Store the L matrix as opposed to the full matrix to avoid compounding
// roundoff error and decomposition error
RealEigenMatrix L(decomp.matrixL());
dataStore(stream, L, context);
}
template <>
void
dataLoad(std::istream & stream, Eigen::LLT<RealEigenMatrix> & decomp, void * context)
{
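// Rebuild the full matrix from the stored Cholesky factor, then re-decompose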
RealEigenMatrix L;
dataLoad(stream, L, context);
decomp.compute(L * L.transpose());
}
template <>
void
dataStore(std::ostream & stream, StochasticTools::GaussianProcess & gp_utils, void * context)
{
dataStore(stream, gp_utils.hyperparamMap(), context);
dataStore(stream, gp_utils.hyperparamVectorMap(), context);
dataStore(stream, gp_utils.covarType(), context);
dataStore(stream, gp_utils.covarName(), context);
dataStore(stream, gp_utils.covarNumOutputs(), context);
dataStore(stream, gp_utils.dependentCovarNames(), context);
dataStore(stream, gp_utils.dependentCovarTypes(), context);
dataStore(stream, gp_utils.K(), context);
dataStore(stream, gp_utils.KResultsSolve(), context);
dataStore(stream, gp_utils.KCholeskyDecomp(), context);
dataStore(stream, gp_utils.paramStandardizer(), context);
dataStore(stream, gp_utils.dataStandardizer(), context);
}
template <>
void
dataLoad(std::istream & stream, StochasticTools::GaussianProcess & gp_utils, void * context)
{
dataLoad(stream, gp_utils.hyperparamMap(), context);
dataLoad(stream, gp_utils.hyperparamVectorMap(), context);
dataLoad(stream, gp_utils.covarType(), context);
dataLoad(stream, gp_utils.covarName(), context);
dataLoad(stream, gp_utils.covarNumOutputs(), context);
dataLoad(stream, gp_utils.dependentCovarNames(), context);
dataLoad(stream, gp_utils.dependentCovarTypes(), context);
dataLoad(stream, gp_utils.K(), context);
dataLoad(stream, gp_utils.KResultsSolve(), context);
dataLoad(stream, gp_utils.KCholeskyDecomp(), context);
dataLoad(stream, gp_utils.paramStandardizer(), context);
dataLoad(stream, gp_utils.dataStandardizer(), context);
}