#!/usr/bin/env python3
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
r"""
Input Transformations.
These classes implement a variety of transformations for
input parameters including: learned input warping functions,
rounding functions, and log transformations. The input transformation
is typically part of a Model and applied within the model.forward()
method.
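
Example (a minimal sketch; `SingleTaskGP` accepts an `input_transform` and
applies it automatically within its forward pass):
    >>> import torch
    >>> from botorch.models import SingleTaskGP
    >>> from botorch.models.transforms.input import Normalize
    >>> model = SingleTaskGP(
    ...     train_X=torch.rand(10, 2) * 5.0,
    ...     train_Y=torch.randn(10, 1),
    ...     input_transform=Normalize(d=2),
    ... )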
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from collections import OrderedDict
from typing import Any, Callable, Dict, List, Optional, Union
from warnings import warn
import torch
from botorch.exceptions.errors import BotorchTensorDimensionError
from botorch.models.transforms.utils import subset_transform
from botorch.models.utils import fantasize
from botorch.utils.rounding import approximate_round, OneHotArgmaxSTE, RoundSTE
from gpytorch import Module as GPyTorchModule
from gpytorch.constraints import GreaterThan
from gpytorch.priors import Prior
from torch import nn, Tensor
from torch.distributions import Kumaraswamy
from torch.nn import Module, ModuleDict
class InputTransform(ABC):
    r"""Abstract base class for input transforms.
    Note: Input transforms must inherit from `torch.nn.Module`. This
        is deferred to the subclasses to avoid any potential conflict
        between `gpytorch.module.Module` and `torch.nn.Module` in `Warp`.
    Properties:
        is_one_to_many: A boolean denoting whether the transform produces
            multiple values for each input.
        transform_on_train: A boolean indicating whether to apply the
            transform in train() mode.
        transform_on_eval: A boolean indicating whether to apply the
            transform in eval() mode.
        transform_on_fantasize: A boolean indicating whether to apply
            the transform when called from within a `fantasize` call.
    :meta private:
    """
    is_one_to_many: bool = False
    transform_on_eval: bool
    transform_on_train: bool
    transform_on_fantasize: bool
    def forward(self, X: Tensor) -> Tensor:
        r"""Transform the inputs to a model.
        Args:
            X: A `batch_shape x n x d`-dim tensor of inputs.
        Returns:
            A `batch_shape x n' x d`-dim tensor of transformed inputs.
        """
        if self.training:
            if self.transform_on_train:
                return self.transform(X)
        elif self.transform_on_eval:
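            # Within a `fantasize` context, only apply the transform if
            # explicitly enabled via `transform_on_fantasize`.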
            if fantasize.off() or self.transform_on_fantasize:
                return self.transform(X)
        return X
    @abstractmethod
    def transform(self, X: Tensor) -> Tensor:
        r"""Transform the inputs to a model.
        Args:
            X: A `batch_shape x n x d`-dim tensor of inputs.
        Returns:
            A `batch_shape x n x d`-dim tensor of transformed inputs.
        """
        pass  # pragma: no cover
    def untransform(self, X: Tensor) -> Tensor:
        r"""Un-transform the inputs to a model.
        Args:
            X: A `batch_shape x n x d`-dim tensor of transformed inputs.
        Returns:
            A `batch_shape x n x d`-dim tensor of un-transformed inputs.
        """
        raise NotImplementedError(
            f"{self.__class__.__name__} does not implement the `untransform` method."
        )
    def equals(self, other: InputTransform) -> bool:
        r"""Check if another input transform is equivalent.
        Note: A custom `equals` method is defined rather than an `__eq__`
        method because defining `__eq__` sets `__hash__` to None, and hashing
        of modules is currently used in PyTorch.
        See https://github.com/pytorch/pytorch/issues/7733.
        Args:
            other: Another input transform.
        Returns:
            A boolean indicating if the other transform is equivalent.
        """
        other_state_dict = other.state_dict()
        return (
            type(self) == type(other)
            and (self.transform_on_train == other.transform_on_train)
            and (self.transform_on_eval == other.transform_on_eval)
            and (self.transform_on_fantasize == other.transform_on_fantasize)
            and all(
                torch.allclose(v, other_state_dict[k].to(v))
                for k, v in self.state_dict().items()
            )
        )
    def preprocess_transform(self, X: Tensor) -> Tensor:
        r"""Apply transforms for preprocessing inputs.
        The main use cases for this method are 1) to preprocess training data
        before calling `set_train_data` and 2) to preprocess `X_baseline` for
        noisy acquisition functions so that `X_baseline` is "preprocessed" with
        the same transformations as the cached training inputs.
        Args:
            X: A `batch_shape x n x d`-dim tensor of inputs.
        Returns:
            A `batch_shape x n x d`-dim tensor of (transformed) inputs.
        """
        if self.transform_on_train:
            # We need to disable learning of bounds / affine coefficients here.
            # See why: https://github.com/pytorch/botorch/issues/1078.
            if hasattr(self, "learn_coefficients"):
                learn_coefficients = self.learn_coefficients
                self.learn_coefficients = False
                result = self.transform(X)
                self.learn_coefficients = learn_coefficients
                return result
            else:
                return self.transform(X)
        return X
class ReversibleInputTransform(InputTransform, ABC):
    r"""An abstract class for a reversible input transform.
    Properties:
        reverse: A boolean indicating if the functionality of transform
            and untransform methods should be swapped.
    :meta private:
    """
    reverse: bool
    def transform(self, X: Tensor) -> Tensor:
        r"""Transform the inputs.
        Args:
            X: A `batch_shape x n x d`-dim tensor of inputs.
        Returns:
            A `batch_shape x n x d`-dim tensor of transformed inputs.
        """
        return self._untransform(X) if self.reverse else self._transform(X)
    def untransform(self, X: Tensor) -> Tensor:
        r"""Un-transform the inputs.
        Args:
            X: A `batch_shape x n x d`-dim tensor of inputs.
        Returns:
            A `batch_shape x n x d`-dim tensor of un-transformed inputs.
        """
        return self._transform(X) if self.reverse else self._untransform(X)
    @abstractmethod
    def _transform(self, X: Tensor) -> Tensor:
        r"""Forward transform the inputs.
        Args:
            X: A `batch_shape x n x d`-dim tensor of inputs.
        Returns:
            A `batch_shape x n x d`-dim tensor of transformed inputs.
        """
        pass  # pragma: no cover
    @abstractmethod
    def _untransform(self, X: Tensor) -> Tensor:
        r"""Reverse transform the inputs.
        Args:
            X: A `batch_shape x n x d`-dim tensor of inputs.
        Returns:
            A `batch_shape x n x d`-dim tensor of un-transformed inputs.
        """
        pass  # pragma: no cover
    def equals(self, other: InputTransform) -> bool:
        r"""Check if another input transform is equivalent.
        Args:
            other: Another input transform.
        Returns:
            A boolean indicating if the other transform is equivalent.
        """
        return super().equals(other=other) and (self.reverse == other.reverse)
class AffineInputTransform(ReversibleInputTransform, Module):
    def __init__(
        self,
        d: int,
        coefficient: Tensor,
        offset: Tensor,
        indices: Optional[List[int]] = None,
        batch_shape: torch.Size = torch.Size(),  # noqa: B008
        transform_on_train: bool = True,
        transform_on_eval: bool = True,
        transform_on_fantasize: bool = True,
        reverse: bool = False,
    ) -> None:
        r"""Apply affine transformation to input:
            `output = (input - offset) / coefficient`
        Args:
            d: The dimension of the input space.
            coefficient: Tensor of linear coefficients whose shape must be
                broadcastable with `(batch_shape x n x d)`-dim input tensors.
            offset: Tensor of offset coefficients whose shape must be
                broadcastable with `(batch_shape x n x d)`-dim input tensors.
            indices: The indices of the inputs to transform. If omitted,
                take all dimensions of the inputs into account.
            batch_shape: The batch shape of the inputs (assuming input tensors
                of shape `batch_shape x n x d`). If provided, perform individual
                transformation per batch, otherwise uses a single transformation.
            transform_on_train: A boolean indicating whether to apply the
                transform in train() mode. Default: True.
            transform_on_eval: A boolean indicating whether to apply the
                transform in eval() mode. Default: True.
            transform_on_fantasize: A boolean indicating whether to apply the
                transform when called from within a `fantasize` call. Default: True.
            reverse: A boolean indicating whether the forward pass should untransform
                the inputs.
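
        Example (a minimal sketch; applies `(X - offset) / coefficient` to a
        single input dimension):
            >>> tf = AffineInputTransform(
            ...     d=1,
            ...     coefficient=torch.tensor([2.0]),
            ...     offset=torch.tensor([1.0]),
            ... )
            >>> X = torch.tensor([[1.0], [3.0]])
            >>> X_tf = tf.transform(X)  # ((X - 1.0) / 2.0) -> [[0.0], [1.0]]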
        """
        super().__init__()
        if (indices is not None) and (len(indices) == 0):
            raise ValueError("`indices` list is empty!")
        if (indices is not None) and (len(indices) > 0):
            indices = torch.tensor(indices, dtype=torch.long)
            if len(indices) > d:
                raise ValueError("Can provide at most `d` indices!")
            if (indices > d - 1).any():
                raise ValueError("Elements of `indices` have to be smaller than `d`!")
            if len(indices.unique()) != len(indices):
                raise ValueError("Elements of `indices` tensor must be unique!")
            self.indices = indices
        torch.broadcast_shapes(coefficient.shape, offset.shape)
        self._d = d
        self.register_buffer("coefficient", coefficient)
        self.register_buffer("offset", offset)
        self.batch_shape = batch_shape
        self.transform_on_train = transform_on_train
        self.transform_on_eval = transform_on_eval
        self.transform_on_fantasize = transform_on_fantasize
        self.reverse = reverse
    @property
    def learn_coefficients(self) -> bool:
        return getattr(self, "_learn_coefficients", False)
    @learn_coefficients.setter
    def learn_coefficients(self, value: bool) -> None:
        r"""A boolean denoting whether to learn the coefficients
        from inputs during model training.
        """
        self._learn_coefficients = value
    @subset_transform
    def _transform(self, X: Tensor) -> Tensor:
        r"""Apply affine transformation to input.
        Args:
            X: A `batch_shape x n x d`-dim tensor of inputs.
        Returns:
            A `batch_shape x n x d`-dim tensor of transformed inputs.
        """
        if self.learn_coefficients and self.training:
            self._check_shape(X)
            self._update_coefficients(X)
        self._to(X)
        return (X - self.offset) / self.coefficient
    @subset_transform
    def _untransform(self, X: Tensor) -> Tensor:
        r"""Apply inverse of affine transformation.
        Args:
            X: A `batch_shape x n x d`-dim tensor of transformed inputs.
        Returns:
            A `batch_shape x n x d`-dim tensor of un-transformed inputs.
        """
        self._to(X)
        return self.coefficient * X + self.offset
    def equals(self, other: InputTransform) -> bool:
        r"""Check if another input transform is equivalent.
        Args:
            other: Another input transform.
        Returns:
            A boolean indicating if the other transform is equivalent.
        """
        if hasattr(self, "indices") != hasattr(other, "indices"):
            return False
        isequal = (
            super().equals(other=other)
            and (self._d == other._d)
            and torch.allclose(self.coefficient, other.coefficient)
            and torch.allclose(self.offset, other.offset)
            and self.learn_coefficients == other.learn_coefficients
        )
        if hasattr(self, "indices"):
            isequal = isequal and (self.indices == other.indices).all()
        return isequal
    def _check_shape(self, X: Tensor) -> None:
        """Checking input dimensions, included to increase code sharing
        among the derived classes Normalize and InputStandardize.
        """
        if X.size(-1) != self.offset.size(-1):
            raise BotorchTensorDimensionError(
                f"Wrong input dimension. Received {X.size(-1)}, "
                f"expected {self.offset.size(-1)}."
            )
        n = len(self.batch_shape) + 2
        if X.ndim < n:
            raise ValueError(
                f"`X` must have at least {n} dimensions ({n - 2} batch and 2"
                f" innate), but has {X.ndim}."
            )
        torch.broadcast_shapes(self.coefficient.shape, self.offset.shape, X.shape)
    def _to(self, X: Tensor) -> None:
        r"""Makes coefficient and offset have same device and dtype as X."""
        self.coefficient = self.coefficient.to(X)
        self.offset = self.offset.to(X)
    def _update_coefficients(self, X: Tensor) -> None:
        r"""Updates affine coefficients. Implemented by subclasses,
        e.g. Normalize and InputStandardize.
        """
        raise NotImplementedError(
            "Only subclasses of AffineInputTransform implement "
            "_update_coefficients, e.g. Normalize and InputStandardize."
        )
class Normalize(AffineInputTransform):
    r"""Normalize the inputs to the unit cube.
    If no explicit bounds are provided this module is stateful: If in train mode,
    calling `forward` updates the module state (i.e. the normalizing bounds). If
    in eval mode, calling `forward` simply applies the normalization using the
    current module state.
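
    Example (a minimal sketch; the normalizing bounds are learned from the
    training data while the module is in train mode):
        >>> normalize_tf = Normalize(d=2)
        >>> X_train = torch.rand(10, 2) * 5.0
        >>> X_normalized = normalize_tf(X_train)  # updates the learned bounds
        >>> normalize_tf.eval()
        >>> X_test_normalized = normalize_tf(torch.rand(3, 2) * 5.0)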
    """
    def __init__(
        self,
        d: int,
        indices: Optional[List[int]] = None,
        bounds: Optional[Tensor] = None,
        batch_shape: torch.Size = torch.Size(),  # noqa: B008
        transform_on_train: bool = True,
        transform_on_eval: bool = True,
        transform_on_fantasize: bool = True,
        reverse: bool = False,
        min_range: float = 1e-8,
    ) -> None:
        r"""Normalize the inputs to the unit cube.
        Args:
            d: The dimension of the input space.
            indices: The indices of the inputs to normalize. If omitted,
                take all dimensions of the inputs into account.
            bounds: If provided, use these bounds to normalize the inputs. If
                omitted, learn the bounds in train mode.
            batch_shape: The batch shape of the inputs (assuming input tensors
                of shape `batch_shape x n x d`). If provided, perform individual
                normalization per batch, otherwise uses a single normalization.
            transform_on_train: A boolean indicating whether to apply the
                transforms in train() mode. Default: True.
            transform_on_eval: A boolean indicating whether to apply the
                transform in eval() mode. Default: True.
            transform_on_fantasize: A boolean indicating whether to apply the
                transform when called from within a `fantasize` call. Default: True.
            reverse: A boolean indicating whether the forward pass should untransform
                the inputs.
            min_range: A lower bound on the learned range: the range of each
                dimension is clamped to be at least `min_range` to avoid
                division-by-zero errors.
        """
        transform_dimension = d if indices is None else len(indices)
        if bounds is not None:
            if indices is not None and bounds.size(-1) == d:
                bounds = bounds[..., indices]
            if bounds.size(-1) != transform_dimension:
                raise BotorchTensorDimensionError(
                    "Dimensions of provided `bounds` are incompatible with "
                    f"transform_dimension = {transform_dimension}!"
                )
            offset = bounds[..., 0:1, :]
            coefficient = bounds[..., 1:2, :] - offset
            if coefficient.ndim > 2:
                batch_shape = coefficient.shape[:-2]
        else:
            coefficient = torch.ones(*batch_shape, 1, transform_dimension)
            offset = torch.zeros(*batch_shape, 1, transform_dimension)
            self.learn_coefficients = True
        super().__init__(
            d=d,
            coefficient=coefficient,
            offset=offset,
            indices=indices,
            batch_shape=batch_shape,
            transform_on_train=transform_on_train,
            transform_on_eval=transform_on_eval,
            transform_on_fantasize=transform_on_fantasize,
            reverse=reverse,
        )
        self.min_range = min_range
    @property
    def ranges(self):
        return self.coefficient
    @property
    def mins(self):
        return self.offset
    @property
    def bounds(self) -> Tensor:
        r"""The bounds used for normalizing the inputs."""
        return torch.cat([self.offset, self.offset + self.coefficient], dim=-2)
    @property
    def learn_bounds(self) -> bool:
        return self.learn_coefficients
    def _update_coefficients(self, X: Tensor) -> None:
        """Computes the normalization bounds and updates the affine
        coefficients, which determine the base class's behavior.
        """
        # Aggregate mins and ranges over extra batch and marginal dims
        batch_ndim = min(len(self.batch_shape), X.ndim - 2)  # batch rank of `X`
        reduce_dims = (*range(X.ndim - batch_ndim - 2), X.ndim - 2)
        self.offset = torch.amin(X, dim=reduce_dims).unsqueeze(-2)
        self.coefficient = torch.amax(X, dim=reduce_dims).unsqueeze(-2) - self.offset
        self.coefficient.clamp_(min=self.min_range)
class Round(InputTransform, Module):
    r"""A discretization transformation for discrete inputs.
    For integers, this will typically be used in conjunction
    with normalization as follows:
    In eval() mode (i.e. after training), the inputs passed in would typically
    be normalized to the unit cube (e.g. during candidate optimization), and
    the transform proceeds as follows: 1. the inputs are unnormalized back to
    the raw input space; 2. the integers are rounded; 3. all values are
    normalized back to the unit cube.
    In train() mode, the inputs can either (a) be normalized to the unit
    cube or (b) be provided in their raw values. In case (a),
    `transform_on_train` should be set to True, so that the normalized inputs
    are unnormalized before rounding. In case (b), `transform_on_train`
    should be set to False, so that the raw inputs are rounded and then
    normalized to the unit cube.
    By default, the straight through estimators are used for the gradients as
    proposed in [Daulton2022bopr]_. This transformation supports differentiable
    approximate rounding (currently only for integers). The rounding function
    is approximated with a piece-wise function where each piece is a hyperbolic
    tangent function.
    For categorical parameters, the input must be one-hot encoded.
    Example:
        >>> d = 3
        >>> bounds = torch.tensor([[0, 5], [0, 1], [0, 1]]).t()
        >>> integer_indices = [0]
        >>> categorical_features = {1: 2}
        >>> unnormalize_tf = Normalize(
        ...     d=d,
        ...     bounds=bounds,
        ...     transform_on_eval=True,
        ...     transform_on_train=True,
        ...     reverse=True,
        ... )
        >>> round_tf = Round(integer_indices, categorical_features)
        >>> normalize_tf = Normalize(d=d, bounds=bounds)
        >>> tf = ChainedInputTransform(
        ...     tf1=unnormalize_tf, tf2=round_tf, tf3=normalize_tf
        ... )
    """
    def __init__(
        self,
        integer_indices: Optional[List[int]] = None,
        categorical_features: Optional[Dict[int, int]] = None,
        transform_on_train: bool = True,
        transform_on_eval: bool = True,
        transform_on_fantasize: bool = True,
        approximate: bool = False,
        tau: float = 1e-3,
        **kwargs,
    ) -> None:
        r"""Initialize transform.
        Args:
            integer_indices: The indices of the integer inputs.
            categorical_features: A dictionary mapping the starting index of each
                categorical feature to its cardinality. This assumes that categoricals
                are one-hot encoded.
            transform_on_train: A boolean indicating whether to apply the
                transforms in train() mode. Default: True.
            transform_on_eval: A boolean indicating whether to apply the
                transform in eval() mode. Default: True.
            transform_on_fantasize: A boolean indicating whether to apply the
                transform when called from within a `fantasize` call. Default: True.
            approximate: A boolean indicating whether approximate or exact
                rounding should be used. Default: False.
            tau: The temperature parameter for approximate rounding.
        """
        indices = kwargs.get("indices")
        if indices is not None:
            warn(
                "`indices` is marked for deprecation in favor of `integer_indices`.",
                DeprecationWarning,
            )
            integer_indices = indices
        if approximate and categorical_features is not None:
            raise NotImplementedError
        super().__init__()
        self.transform_on_train = transform_on_train
        self.transform_on_eval = transform_on_eval
        self.transform_on_fantasize = transform_on_fantasize
        integer_indices = integer_indices or []
        self.register_buffer(
            "integer_indices", torch.tensor(integer_indices, dtype=torch.long)
        )
        self.categorical_features = categorical_features or {}
        self.approximate = approximate
        self.tau = tau
    def equals(self, other: InputTransform) -> bool:
        r"""Check if another input transform is equivalent.
        Args:
            other: Another input transform.
        Returns:
            A boolean indicating if the other transform is equivalent.
        """
        return (
            super().equals(other=other)
            and (self.integer_indices == other.integer_indices).all()
            and self.categorical_features == other.categorical_features
            and self.approximate == other.approximate
            and self.tau == other.tau
        )
class Log10(ReversibleInputTransform, Module):
    r"""A base-10 log transformation."""
    def __init__(
        self,
        indices: List[int],
        transform_on_train: bool = True,
        transform_on_eval: bool = True,
        transform_on_fantasize: bool = True,
        reverse: bool = False,
    ) -> None:
        r"""Initialize transform.
        Args:
            indices: The indices of the inputs to log transform.
            transform_on_train: A boolean indicating whether to apply the
                transforms in train() mode. Default: True.
            transform_on_eval: A boolean indicating whether to apply the
                transform in eval() mode. Default: True.
            transform_on_fantasize: A boolean indicating whether to apply the
                transform when called from within a `fantasize` call. Default: True.
            reverse: A boolean indicating whether the forward pass should untransform
                the inputs.
        """
        super().__init__()
        self.register_buffer("indices", torch.tensor(indices, dtype=torch.long))
        self.transform_on_train = transform_on_train
        self.transform_on_eval = transform_on_eval
        self.transform_on_fantasize = transform_on_fantasize
        self.reverse = reverse
    @subset_transform
    def _transform(self, X: Tensor) -> Tensor:
        r"""Log transform the inputs.
        Args:
            X: A `batch_shape x n x d`-dim tensor of inputs.
        Returns:
            A `batch_shape x n x d`-dim tensor of transformed inputs.
        """
        return X.log10()
    @subset_transform
    def _untransform(self, X: Tensor) -> Tensor:
        r"""Reverse the log transformation.
        Args:
            X: A `batch_shape x n x d`-dim tensor of log-transformed inputs.
        Returns:
            A `batch_shape x n x d`-dim tensor of un-transformed inputs.
        """
        return 10.0**X
class Warp(ReversibleInputTransform, GPyTorchModule):
    r"""A transform that uses learned input warping functions.
    Each specified input dimension is warped using the CDF of a
    Kumaraswamy distribution. Typically, MAP estimates of the
    parameters of the Kumaraswamy distribution, for each input
    dimension, are learned jointly with the GP hyperparameters.
    TODO: implement support using independent warping functions
    for each output in batched multi-output and multi-task models.
    For now, ModelListGPs should be used to learn independent warping
    functions for each output.
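
    Example (an illustrative sketch; the inputs to be warped are assumed to
    lie in the unit cube):
        >>> warp_tf = Warp(indices=[0, 1])
        >>> X = torch.rand(10, 2)
        >>> X_warped = warp_tf.transform(X)  # warped through the Kumaraswamy CDF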
    """
    # TODO: make minimum value dtype-dependent
    _min_concentration_level = 1e-4
    def __init__(
        self,
        indices: List[int],
        transform_on_train: bool = True,
        transform_on_eval: bool = True,
        transform_on_fantasize: bool = True,
        reverse: bool = False,
        eps: float = 1e-7,
        concentration1_prior: Optional[Prior] = None,
        concentration0_prior: Optional[Prior] = None,
        batch_shape: Optional[torch.Size] = None,
    ) -> None:
        r"""Initialize transform.
        Args:
            indices: The indices of the inputs to warp.
            transform_on_train: A boolean indicating whether to apply the
                transforms in train() mode. Default: True.
            transform_on_eval: A boolean indicating whether to apply the
                transform in eval() mode. Default: True.
            transform_on_fantasize: A boolean indicating whether to apply the
                transform when called from within a `fantasize` call. Default: True.
            reverse: A boolean indicating whether the forward pass should untransform
                the inputs.
            eps: A small value used to clip values to be in the interval (0, 1).
            concentration1_prior: A prior distribution on the concentration1 parameter
                of the Kumaraswamy distribution.
            concentration0_prior: A prior distribution on the concentration0 parameter
                of the Kumaraswamy distribution.
            batch_shape: The batch shape.
        """
        super().__init__()
        self.register_buffer("indices", torch.tensor(indices, dtype=torch.long))
        self.transform_on_train = transform_on_train
        self.transform_on_eval = transform_on_eval
        self.transform_on_fantasize = transform_on_fantasize
        self.reverse = reverse
        self.batch_shape = batch_shape or torch.Size([])
        self._X_min = eps
        self._X_range = 1 - 2 * eps
        if len(self.batch_shape) > 0:
            # Note: this follows the gpytorch shape convention for lengthscales
            # There is ongoing discussion about the extra `1`.
            # TODO: update to follow new gpytorch convention resulting from
            # https://github.com/cornellius-gp/gpytorch/issues/1317
            batch_shape = self.batch_shape + torch.Size([1])
        else:
            batch_shape = self.batch_shape
        for i in (0, 1):
            p_name = f"concentration{i}"
            self.register_parameter(
                p_name,
                nn.Parameter(torch.full(batch_shape + self.indices.shape, 1.0)),
            )
        if concentration0_prior is not None:
            self.register_prior(
                "concentration0_prior",
                concentration0_prior,
                lambda m: m.concentration0,
                lambda m, v: m._set_concentration(i=0, value=v),
            )
        if concentration1_prior is not None:
            self.register_prior(
                "concentration1_prior",
                concentration1_prior,
                lambda m: m.concentration1,
                lambda m, v: m._set_concentration(i=1, value=v),
            )
        for i in (0, 1):
            p_name = f"concentration{i}"
            constraint = GreaterThan(
                self._min_concentration_level,
                transform=None,
                # set the initial value to be the identity transformation
                initial_value=1.0,
            )
            self.register_constraint(param_name=p_name, constraint=constraint)
    def _set_concentration(self, i: int, value: Union[float, Tensor]) -> None:
        if not torch.is_tensor(value):
            value = torch.as_tensor(value).to(self.concentration0)
        self.initialize(**{f"concentration{i}": value})
    @subset_transform
    def _transform(self, X: Tensor) -> Tensor:
        r"""Warp the inputs through the Kumaraswamy CDF.
        Args:
            X: An `input_batch_shape x (batch_shape) x n x d`-dim tensor of
                inputs. `batch_shape` here can either be `self.batch_shape` or
                1's such that it is broadcastable with `self.batch_shape`, if
                `self.batch_shape` is set.
        Returns:
            An `input_batch_shape x (batch_shape) x n x d`-dim tensor of
                transformed inputs.
        """
        # normalize to [eps, 1-eps], IDEA: could use Normalize and ChainedTransform.
        return self._k.cdf(
            torch.clamp(
                X * self._X_range + self._X_min,
                self._X_min,
                1.0 - self._X_min,
            )
        )
    @subset_transform
    def _untransform(self, X: Tensor) -> Tensor:
        r"""Warp the inputs through the Kumaraswamy inverse CDF.
        Args:
            X: An `input_batch_shape x batch_shape x n x d`-dim tensor of
                transformed inputs.
        Returns:
            An `input_batch_shape x batch_shape x n x d`-dim tensor of
                un-transformed inputs.
        """
        if len(self.batch_shape) > 0:
            if self.batch_shape != X.shape[-2 - len(self.batch_shape) : -2]:
                raise BotorchTensorDimensionError(
                    "The right most batch dims of X must match self.batch_shape: "
                    f"({self.batch_shape})."
                )
        # unnormalize from [eps, 1-eps] to [0,1]
        return ((self._k.icdf(X) - self._X_min) / self._X_range).clamp(0.0, 1.0)
    @property
    def _k(self) -> Kumaraswamy:
        """Returns a Kumaraswamy distribution with the concentration parameters."""
        return Kumaraswamy(
            concentration1=self.concentration1,
            concentration0=self.concentration0,
        )
class AppendFeatures(InputTransform, Module):
    r"""A transform that appends the input with a given set of features either
    provided beforehand or generated on the fly via a callable.
    As an example, the predefined set of features can be used with
    `RiskMeasureMCObjective` to optimize risk measures as described in
    [Cakmak2020risk]_. A tutorial notebook implementing the rhoKG acquisition
    function introduced in [Cakmak2020risk]_ can be found at
    https://botorch.org/tutorials/risk_averse_bo_with_environmental_variables.
    The steps for using this to obtain samples of a risk measure are as follows:
    -   Train a model on `(x, w)` inputs and the corresponding observations;
    -   Pass in an instance of `AppendFeatures` with the `feature_set` denoting the
        samples of `W` as the `input_transform` to the trained model;
    -   Call `posterior(...).rsample(...)` on the model with `x` inputs only to
        get the joint posterior samples over `(x, w)`s, where the `w`s come
        from the `feature_set`;
    -   Pass these posterior samples through the `RiskMeasureMCObjective` of choice to
        get the samples of the risk measure.
    Note: The samples of the risk measure obtained this way are in general biased
    since the `feature_set` does not fully represent the distribution of the
    environmental variable.
    Possible examples for using a callable include statistical models that are
    built in PyTorch, built-in mathematical operations such as `torch.sum`, or
    custom scripted functions. In this way, this input transform allows for
    advanced feature engineering and transfer learning models within the
    optimization loop.
    Example:
        >>> # We consider 1D `x` and 1D `w`, with `W` having a
        >>> # uniform distribution over [0, 1]
        >>> model = SingleTaskGP(
        ...     train_X=torch.rand(10, 2),
        ...     train_Y=torch.randn(10, 1),
        ...     input_transform=AppendFeatures(feature_set=torch.rand(10, 1))
        ... )
        >>> mll = ExactMarginalLogLikelihood(model.likelihood, model)
        >>> fit_gpytorch_mll(mll)
        >>> test_x = torch.rand(3, 1)
        >>> # `posterior_samples` is a `10 x 30 x 1`-dim tensor
        >>> posterior_samples = model.posterior(test_x).rsample(torch.Size([10]))
        >>> risk_measure = VaR(alpha=0.8, n_w=10)
        >>> # `risk_measure_samples` is a `10 x 3`-dim tensor of samples of the
        >>> # risk measure VaR
        >>> risk_measure_samples = risk_measure(posterior_samples)
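
    Example (a hypothetical sketch of the callable interface; `append_sum` is
    a made-up helper that appends the sum of the selected features):
        >>> def append_sum(X: Tensor) -> Tensor:
        ...     # map a `batch_shape x q x d` tensor to `batch_shape x q x 1 x 1`
        ...     return X.sum(dim=-1, keepdim=True).unsqueeze(-2)
        >>> transform = AppendFeatures(f=append_sum)
        >>> X = torch.rand(3, 2)
        >>> X_appended = transform.transform(X)  # `3 x 3`-dim tensor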
    """
    is_one_to_many: bool = True
    def __init__(
        self,
        feature_set: Optional[Tensor] = None,
        f: Optional[Callable[[Tensor], Tensor]] = None,
        indices: Optional[List[int]] = None,
        fkwargs: Optional[Dict[str, Any]] = None,
        skip_expand: bool = False,
        transform_on_train: bool = False,
        transform_on_eval: bool = True,
        transform_on_fantasize: bool = False,
    ) -> None:
        r"""Append `feature_set` to each input or generate a set of features to
        append on the fly via a callable.
        Args:
            feature_set: An `n_f x d_f`-dim tensor denoting the features to be
                appended to the inputs. Default: None.
            f: A callable mapping a `batch_shape x q x d`-dim input tensor `X`
                to a `batch_shape x q x n_f x d_f`-dimensional output tensor.
                Default: None.
            indices: List of indices denoting the indices of the features to be
                passed into f. Per default all features are passed to `f`.
                Default: None.
            fkwargs: Dictionary of keyword arguments passed to the callable `f`.
                Default: None.
            skip_expand: A boolean indicating whether to expand the input tensor
                before appending features. This is intended for use with an
                `InputPerturbation`. If `True`, the input tensor will be expected
                to be of shape `batch_shape x (q * n_f) x d`. Not implemented
                in combination with a callable.
            transform_on_train: A boolean indicating whether to apply the
                transforms in train() mode. Default: False.
            transform_on_eval: A boolean indicating whether to apply the
                transform in eval() mode. Default: True.
            transform_on_fantasize: A boolean indicating whether to apply the
                transform when called from within a `fantasize` call. Default: False.
        """
        super().__init__()
        if (feature_set is None) and (f is None):
            raise ValueError(
                "Either a `feature_set` or a callable `f` has to be provided."
            )
        if (feature_set is not None) and (f is not None):
            raise ValueError(
                "Only one can be used: either `feature_set` or callable `f`."
            )
        if feature_set is not None:
            if feature_set.dim() != 2:
                raise ValueError("`feature_set` must be an `n_f x d_f`-dim tensor!")
            self.register_buffer("feature_set", feature_set)
            self._f = None
        if f is not None:
            if skip_expand:
                raise ValueError(
                    "`skip_expand` option is not supported in case of using a callable"
                )
            if (indices is not None) and (len(indices) == 0):
                raise ValueError("`indices` list is empty!")
            if indices is not None:
                indices = torch.tensor(indices, dtype=torch.long)
                if len(indices.unique()) != len(indices):
                    raise ValueError("Elements of `indices` tensor must be unique!")
                self.indices = indices
            else:
                self.indices = slice(None)
            self._f = f
            self.fkwargs = fkwargs or {}
        self.skip_expand = skip_expand
        self.transform_on_train = transform_on_train
        self.transform_on_eval = transform_on_eval
        self.transform_on_fantasize = transform_on_fantasize
class FilterFeatures(InputTransform, Module):
    r"""A transform that filters the input with a given set of features indices.
    As an example, this can be used in a multiobjective optimization with `ModelListGP`
    in which the specific models only share subsets of features (feature selection).
    A reason could be that it is known that specific features do not have any impact on
    a specific objective but they need to be included in the model for another one.
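
    Example (an illustrative sketch; keeps only the first and third of four
    features):
        >>> filter_tf = FilterFeatures(feature_indices=torch.tensor([0, 2]))
        >>> X = torch.rand(5, 4)
        >>> X_filtered = filter_tf.transform(X)  # `5 x 2`-dim tensor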
    """
    def __init__(
        self,
        feature_indices: Tensor,
        transform_on_train: bool = True,
        transform_on_eval: bool = True,
        transform_on_fantasize: bool = True,
    ) -> None:
        r"""Filter features from a model.
        Args:
            feature_indices: A one-dimensional tensor denoting the indices of
                the features to be kept and fed to the model.
            transform_on_train: A boolean indicating whether to apply the
                transforms in train() mode. Default: True.
            transform_on_eval: A boolean indicating whether to apply the
                transform in eval() mode. Default: True.
            transform_on_fantasize: A boolean indicating whether to apply the
                transform when called from within a `fantasize` call. Default: True.
        """
        super().__init__()
        if feature_indices.dim() != 1:
            raise ValueError("`feature_indices` must be a one-dimensional tensor!")
        if feature_indices.dtype != torch.int64:
            raise ValueError("`feature_indices` tensor must be int64/long!")
        if (feature_indices < 0).any():
            raise ValueError(
                "Elements of `feature_indices` have to be larger/equal to zero!"
            )
        if len(feature_indices.unique()) != len(feature_indices):
            raise ValueError("Elements of `feature_indices` tensor must be unique!")
        self.transform_on_train = transform_on_train
        self.transform_on_eval = transform_on_eval
        self.transform_on_fantasize = transform_on_fantasize
        self.register_buffer("feature_indices", feature_indices)
    def equals(self, other: InputTransform) -> bool:
        r"""Check if another input transform is equivalent.
        Args:
            other: Another input transform.
        Returns:
            A boolean indicating if the other transform is equivalent.
        """
        if len(self.feature_indices) != len(other.feature_indices):
            return False
        return super().equals(other=other)