diff --git a/gpytorch/models/approximate_gp.py b/gpytorch/models/approximate_gp.py
index 85e2674f4..36165576e 100644
--- a/gpytorch/models/approximate_gp.py
+++ b/gpytorch/models/approximate_gp.py
@@ -105,4 +105,5 @@ def get_fantasy_model(self, inputs, targets, **kwargs):
     def __call__(self, inputs, prior=False, **kwargs):
         if inputs.dim() == 1:
             inputs = inputs.unsqueeze(-1)
+        inputs = self.apply_input_transforms(X=inputs, is_training_input=self.training)
         return self.variational_strategy(inputs, prior=prior, **kwargs)
diff --git a/gpytorch/models/exact_gp.py b/gpytorch/models/exact_gp.py
index a82ef7d19..72a259555 100644
--- a/gpytorch/models/exact_gp.py
+++ b/gpytorch/models/exact_gp.py
@@ -210,7 +210,11 @@ def get_fantasy_model(self, inputs, targets, **kwargs):
         except KeyError:
             fantasy_kwargs = {}
 
-        full_output = super(ExactGP, self).__call__(*full_inputs, **kwargs)
+        # Prediction strategy should have transformed train inputs.
+        prediction_strategy_inputs = [
+            self.apply_input_transforms(X=t_input, is_training_input=True) for t_input in full_inputs
+        ]
+        full_output = super(ExactGP, self).__call__(*prediction_strategy_inputs, **kwargs)
 
         # Copy model without copying training data or prediction strategy (since we'll overwrite those)
         old_pred_strat = self.prediction_strategy
@@ -229,7 +233,7 @@ def get_fantasy_model(self, inputs, targets, **kwargs):
         new_model.likelihood = old_likelihood.get_fantasy_likelihood(**fantasy_kwargs)
 
         new_model.prediction_strategy = old_pred_strat.get_fantasy_strategy(
-            inputs, targets, full_inputs, full_targets, full_output, **fantasy_kwargs
+            inputs, targets, prediction_strategy_inputs, full_targets, full_output, **fantasy_kwargs
         )
 
         # if the fantasies are at the same points, we need to expand the inputs for the new model
@@ -242,8 +246,17 @@ def get_fantasy_model(self, inputs, targets, **kwargs):
         return new_model
 
     def __call__(self, *args, **kwargs):
-        train_inputs = list(self.train_inputs) if self.train_inputs is not None else []
-        inputs = [i.unsqueeze(-1) if i.ndimension() == 1 else i for i in args]
+        train_inputs = (
+            [self.apply_input_transforms(X=t_input, is_training_input=True) for t_input in self.train_inputs]
+            if self.train_inputs is not None
+            else []
+        )
+        inputs = [
+            self.apply_input_transforms(
+                X=i.unsqueeze(-1) if i.ndimension() == 1 else i, is_training_input=self.training
+            )
+            for i in args
+        ]
 
         # Training mode: optimizing
         if self.training:
diff --git a/gpytorch/models/gp.py b/gpytorch/models/gp.py
index 922e981b0..e74d38637 100644
--- a/gpytorch/models/gp.py
+++ b/gpytorch/models/gp.py
@@ -1,7 +1,14 @@
 #!/usr/bin/env python3
 
+from torch import Tensor
+
 from ..module import Module
 
 
 class GP(Module):
-    pass
+    def apply_input_transforms(self, X: Tensor, is_training_input: bool) -> Tensor:
+        input_transform = getattr(self, "input_transform", None)
+        if input_transform is not None:
+            return input_transform(X=X, is_training_input=is_training_input)
+        else:
+            return X