Require python 3.7 (#378)
Summary:
Pull Request resolved: #378

This allows us to use
- postponed evaluations of annotations (PEP 563)
- dataclasses (PEP 557)
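
A minimal sketch of what these two PEPs enable on Python 3.7 (illustrative, not part of the commit):

from __future__ import annotations  # PEP 563: annotations are lazily evaluated

from dataclasses import dataclass, field  # PEP 557: dataclasses
from typing import List


@dataclass
class Node:
    value: int
    # PEP 563 lets this forward reference to Node appear unquoted, even
    # though the class is not yet defined while its body executes.
    children: List[Node] = field(default_factory=list)


root = Node(1, [Node(2)])
print(root)  # Node(value=1, children=[Node(value=2, children=[])])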

Reviewed By: ldworkin

Differential Revision: D20113101

fbshipit-source-id: 34e5ae22e9f590894a4c0ab200139380a297a248
Balandat authored and facebook-github-bot committed Mar 2, 2020
1 parent 1913d1c commit 4302a0c
Showing 60 changed files with 137 additions and 28 deletions.
8 changes: 4 additions & 4 deletions .circleci/config.yml
@@ -94,9 +94,9 @@ commands:
 
 jobs:
 
-  lint_test_py36_pip:
+  lint_test_py38_pip:
     docker:
-      - image: circleci/python:3.6.9
+      - image: circleci/python:3.8.1
     steps:
       - checkout
       - pip_install:
@@ -124,7 +124,7 @@ jobs:
 
   auto_deploy_site:
     docker:
-      - image: circleci/python:3.6.9-node
+      - image: circleci/python:3.8.1-node
     steps:
       - checkout
       - pip_install:
@@ -150,7 +150,7 @@ workflows:
 
   lint_and_test:
     jobs:
-      - lint_test_py36_pip:
+      - lint_test_py38_pip:
          filters: *exclude_ghpages_fbconfig
       - lint_test_py37_conda:
          filters: *exclude_ghpages_fbconfig
2 changes: 1 addition & 1 deletion .conda/meta.yaml
@@ -13,7 +13,7 @@ build:
 
 requirements:
   host:
-    - python>=3.6
+    - python>=3.7
   run:
     - pytorch>=1.4
     - gpytorch>=1.0.0
2 changes: 1 addition & 1 deletion CONTRIBUTING.md
@@ -34,7 +34,7 @@ Travis will fail on your PR if it does not adhere to the black formatting style.
 
 #### Type Hints
 
-BoTorch is fully typed using python 3.6+
+BoTorch is fully typed using python 3.7+
 [type hints](https://www.python.org/dev/peps/pep-0484/).
 We expect any contributions to also use proper type annotations. While we
 currently do not enforce full consistency of these in our continuous integration
2 changes: 1 addition & 1 deletion README.md
@@ -49,7 +49,7 @@ Optimization simply use Ax.
 
 ## Installation
 
 **Installation Requirements**
-- Python >= 3.6
+- Python >= 3.7
 - PyTorch >= 1.4
 - gpytorch >= 1.0
 - scipy
2 changes: 2 additions & 0 deletions botorch/acquisition/acquisition.py
@@ -8,6 +8,8 @@
 Abstract base module for all botorch acquisition functions.
 """
 
+from __future__ import annotations
+
 import warnings
 from abc import ABC, abstractmethod
 from typing import Optional
2 changes: 2 additions & 0 deletions botorch/acquisition/active_learning.py
@@ -21,6 +21,8 @@
 ArXiv 2017.
 """
 
+from __future__ import annotations
+
 from typing import Optional
 
 from botorch import settings
2 changes: 2 additions & 0 deletions botorch/acquisition/analytic.py
@@ -9,6 +9,8 @@
 Monte-Carlo sampling.
 """
 
+from __future__ import annotations
+
 from abc import ABC
 from copy import deepcopy
 from typing import Dict, Optional, Tuple, Union
2 changes: 2 additions & 0 deletions botorch/acquisition/cost_aware.py
@@ -9,6 +9,8 @@
 To be used in a context where there is an objective/cost tradeoff.
 """
 
+from __future__ import annotations
+
 import warnings
 from abc import ABC, abstractmethod
 from typing import Any, Callable, Optional
2 changes: 2 additions & 0 deletions botorch/acquisition/fixed_feature.py
@@ -9,6 +9,8 @@
 This is useful e.g. for performing contextual optimization.
 """
 
+from __future__ import annotations
+
 from typing import List, Union
 
 import torch
2 changes: 2 additions & 0 deletions botorch/acquisition/knowledge_gradient.py
@@ -24,6 +24,8 @@
 bayesian optimization. NIPS 2016.
 """
 
+from __future__ import annotations
+
 from copy import deepcopy
 from typing import Callable, Optional, Tuple, Union
 
2 changes: 2 additions & 0 deletions botorch/acquisition/max_value_entropy_search.py
@@ -21,6 +21,8 @@
 arXiv:1901.08275v1, 2019
 """
 
+from __future__ import annotations
+
 from copy import deepcopy
 from math import log
 from typing import Callable, Optional
2 changes: 2 additions & 0 deletions botorch/acquisition/monte_carlo.py
@@ -18,6 +18,8 @@
 The reparameterization trick for acquisition functions. ArXiv 2017.
 """
 
+from __future__ import annotations
+
 import math
 from abc import ABC, abstractmethod
 from typing import Optional, Union
2 changes: 2 additions & 0 deletions botorch/acquisition/objective.py
@@ -8,6 +8,8 @@
 Objective Modules to be used with acquisition functions.
 """
 
+from __future__ import annotations
+
 from abc import ABC, abstractmethod
 from typing import Callable, List
 
4 changes: 3 additions & 1 deletion botorch/acquisition/utils.py
@@ -8,6 +8,8 @@
 Utilities for acquisition functions.
 """
 
+from __future__ import annotations
+
 import math
 from typing import Callable, Dict, List, Optional
 
@@ -34,7 +36,7 @@ def get_acquisition_function(
     qmc: bool = True,
     seed: Optional[int] = None,
     **kwargs,
-) -> "monte_carlo.MCAcquisitionFunction":
+) -> monte_carlo.MCAcquisitionFunction:
     r"""Convenience function for initializing botorch acquisition functions.
 
     Args:
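
The quoted-annotation removals here and in the files below are a direct consequence of PEP 563: once `from __future__ import annotations` is in effect, annotations are no longer evaluated at definition time, so forward references such as a class referring to itself need not be written as string literals. A minimal sketch with a hypothetical class:

from __future__ import annotations


class GridSpec:  # hypothetical class, for illustration only
    # Without the __future__ import this annotation would have to be the
    # string "GridSpec", since the name is unbound until the class body
    # finishes executing.
    def scaled(self, factor: float) -> GridSpec:
        return GridSpec()


print(GridSpec().scaled(2.0))  # <__main__.GridSpec object at 0x...>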
2 changes: 2 additions & 0 deletions botorch/cross_validation.py
@@ -8,6 +8,8 @@
 Cross-validation utilities using batch evaluation mode.
 """
 
+from __future__ import annotations
+
 from typing import Any, Dict, NamedTuple, Optional, Type
 
 import torch
5 changes: 4 additions & 1 deletion botorch/fit.py
@@ -8,6 +8,8 @@
 Utilities for model fitting.
 """
 
+from __future__ import annotations
+
 import logging
 import warnings
 from copy import deepcopy
@@ -84,7 +86,8 @@ def fit_gpytorch_model(
             model_ = model_list_to_batched(mll_.model)
             mll.model.load_state_dict(model_.state_dict())
             return mll.eval()
-        except (NotImplementedError, UnsupportedError, RuntimeError, AttributeError):
+        # NotImplementedError is omitted since it derives from RuntimeError
+        except (UnsupportedError, RuntimeError, AttributeError):
             warnings.warn(FAILED_CONVERSION_MSG, BotorchWarning)
             return fit_gpytorch_model(
                 mll=mll, optimizer=optimizer, sequential=False, max_retries=max_retries
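
The except clause above can drop NotImplementedError because, in Python's built-in exception hierarchy, NotImplementedError subclasses RuntimeError, so the RuntimeError entry already catches it. A quick check (the error message is made up for illustration):

# NotImplementedError derives from RuntimeError, so a handler for
# RuntimeError also catches NotImplementedError.
print(issubclass(NotImplementedError, RuntimeError))  # True

try:
    raise NotImplementedError("batched conversion unsupported")
except RuntimeError as err:
    print(f"caught: {err!r}")  # caught: NotImplementedError('batched conversion unsupported')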
2 changes: 2 additions & 0 deletions botorch/gen.py
@@ -8,6 +8,8 @@
 Candidate generation utilities.
 """
 
+from __future__ import annotations
+
 from typing import Any, Callable, Dict, List, Optional, Tuple, Type, Union
 
 import torch
2 changes: 2 additions & 0 deletions botorch/models/converter.py
@@ -8,6 +8,8 @@
 Utilities for converting between different models.
 """
 
+from __future__ import annotations
+
 from copy import deepcopy
 
 import torch
2 changes: 2 additions & 0 deletions botorch/models/cost.py
@@ -8,6 +8,8 @@
 Cost models to be used with multi-fidelity optimization.
 """
 
+from __future__ import annotations
+
 from typing import Dict, Optional
 
 import torch
6 changes: 4 additions & 2 deletions botorch/models/deterministic.py
@@ -10,6 +10,8 @@
 known cost functions for cost-aware acquisition utilities.
 """
 
+from __future__ import annotations
+
 from abc import ABC, abstractmethod
 from typing import Any, Callable, List, Optional, Union
 
@@ -73,7 +75,7 @@ def __init__(self, f: Callable[[Tensor], Tensor], num_outputs: int = 1) -> None:
         self._f = f
         self._num_outputs = num_outputs
 
-    def subset_output(self, idcs: List[int]) -> "GenericDeterministicModel":
+    def subset_output(self, idcs: List[int]) -> GenericDeterministicModel:
         r"""Subset the model along the output dimension.
 
         Args:
@@ -128,7 +130,7 @@ def __init__(self, a: Tensor, b: Union[Tensor, float] = 0.01) -> None:
         self.register_buffer("b", b.expand(a.size(-1)))
         self._num_outputs = a.size(-1)
 
-    def subset_output(self, idcs: List[int]) -> "AffineDeterministicModel":
+    def subset_output(self, idcs: List[int]) -> AffineDeterministicModel:
         r"""Subset the model along the output dimension.
 
         Args:
10 changes: 6 additions & 4 deletions botorch/models/gp_regression.py
@@ -8,6 +8,8 @@
 Gaussian Process Regression models based on GPyTorch models.
 """
 
+from __future__ import annotations
+
 from typing import Any, List, Optional, Union
 
 import torch
@@ -212,7 +214,7 @@ def fantasize(
         sampler: MCSampler,
         observation_noise: Union[bool, Tensor] = True,
         **kwargs: Any,
-    ) -> "FixedNoiseGP":
+    ) -> FixedNoiseGP:
         r"""Construct a fantasy model.
 
         Constructs a fantasy model in the following fashion:
@@ -254,7 +256,7 @@ def forward(self, x: Tensor) -> MultivariateNormal:
         covar_x = self.covar_module(x)
         return MultivariateNormal(mean_x, covar_x)
 
-    def subset_output(self, idcs: List[int]) -> "BatchedMultiOutputGPyTorchModel":
+    def subset_output(self, idcs: List[int]) -> BatchedMultiOutputGPyTorchModel:
         r"""Subset the model along the output dimension.
 
         Args:
@@ -336,8 +338,8 @@ def __init__(
 
     def condition_on_observations(
         self, X: Tensor, Y: Tensor, **kwargs: Any
-    ) -> "HeteroskedasticSingleTaskGP":
+    ) -> HeteroskedasticSingleTaskGP:
         raise NotImplementedError
 
-    def subset_output(self, idcs: List[int]) -> "HeteroskedasticSingleTaskGP":
+    def subset_output(self, idcs: List[int]) -> HeteroskedasticSingleTaskGP:
         raise NotImplementedError
2 changes: 2 additions & 0 deletions botorch/models/gp_regression_fidelity.py
@@ -12,6 +12,8 @@
 multi-fidelity bayesian optimization for hyperparameter tuning. ArXiv 2019.
 """
 
+from __future__ import annotations
+
 from typing import Optional
 
 from gpytorch.kernels.kernel import ProductKernel
10 changes: 6 additions & 4 deletions botorch/models/gpytorch.py
@@ -11,6 +11,8 @@
 GPyTorch Model class such as an ExactGP.
 """
 
+from __future__ import annotations
+
 import itertools
 import warnings
 from abc import ABC
@@ -136,7 +138,7 @@ def posterior(
             posterior = self.outcome_transform.untransform_posterior(posterior)
         return posterior
 
-    def condition_on_observations(self, X: Tensor, Y: Tensor, **kwargs: Any) -> "Model":
+    def condition_on_observations(self, X: Tensor, Y: Tensor, **kwargs: Any) -> Model:
         r"""Condition the model on new observations.
 
         Args:
@@ -330,7 +332,7 @@ def posterior(
 
     def condition_on_observations(
         self, X: Tensor, Y: Tensor, **kwargs: Any
-    ) -> "BatchedMultiOutputGPyTorchModel":
+    ) -> BatchedMultiOutputGPyTorchModel:
         r"""Condition the model on new observations.
 
         Args:
@@ -389,7 +391,7 @@ def condition_on_observations(
         fantasy_model._aug_batch_shape = fantasy_model.train_targets.shape[:-1]
         return fantasy_model
 
-    def subset_output(self, idcs: List[int]) -> "BatchedMultiOutputGPyTorchModel":
+    def subset_output(self, idcs: List[int]) -> BatchedMultiOutputGPyTorchModel:
         r"""Subset the model along the output dimension.
 
         Args:
@@ -524,7 +526,7 @@ def posterior(
 
     def condition_on_observations(
         self, X: Tensor, Y: Tensor, **kwargs: Any
-    ) -> "ModelListGPyTorchModel":
+    ) -> ModelListGPyTorchModel:
         class_name = self.__class__.__name__
         raise NotImplementedError(
             f"`condition_on_observations` not implemented in {class_name}"
2 changes: 2 additions & 0 deletions botorch/models/kernels/downsampling.py
@@ -4,6 +4,8 @@
 # This source code is licensed under the MIT license found in the
 # LICENSE file in the root directory of this source tree.
 
+from __future__ import annotations
+
 from typing import Optional
 
 import torch
2 changes: 2 additions & 0 deletions botorch/models/kernels/exponential_decay.py
@@ -4,6 +4,8 @@
 # This source code is licensed under the MIT license found in the
 # LICENSE file in the root directory of this source tree.
 
+from __future__ import annotations
+
 from typing import Optional
 
 import torch
4 changes: 3 additions & 1 deletion botorch/models/kernels/linear_truncated_fidelity.py
@@ -4,6 +4,8 @@
 # This source code is licensed under the MIT license found in the
 # LICENSE file in the root directory of this source tree.
 
+from __future__ import annotations
+
 from copy import deepcopy
 from typing import Any, List, Optional
 
@@ -220,7 +222,7 @@ def forward(self, x1: Tensor, x2: Tensor, diag: bool = False, **params) -> Tensor:
 
         return covar_unbiased + bias_factor * covar_biased
 
-    def __getitem__(self, index) -> "LinearTruncatedFidelityKernel":
+    def __getitem__(self, index) -> LinearTruncatedFidelityKernel:
         new_kernel = deepcopy(self)
         new_kernel.covar_module_unbiased = self.covar_module_unbiased[index]
         new_kernel.covar_module_biased = self.covar_module_biased[index]
(Diff truncated; the remaining changed files are not shown.)
