diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
new file mode 100644
index 000000000..dbff5f352
--- /dev/null
+++ b/.github/workflows/publish.yml
@@ -0,0 +1,42 @@
+name: publish
+
+on:
+  release:
+    types: [created]
+
+jobs:
+  deploy:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: [3.8]
+
+    steps:
+      - name: Checkout GitHub repo
+        uses: actions/checkout@v2
+
+      - name: Create environment variables
+        run: echo "BLISS_HOME=$GITHUB_WORKSPACE" >> $GITHUB_ENV
+
+      - name: Install poetry
+        run: pipx install poetry
+
+      - name: Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v3
+        with:
+          python-version: ${{ matrix.python-version }}
+          cache: "poetry"
+
+      - name: Install fftw3
+        run: |
+          sudo apt-get install libfftw3-dev
+
+      - name: Install poetry dependencies
+        run: |
+          poetry install
+
+      - name: Build and publish
+        run: |
+          poetry version $(git describe --tags --abbrev=0)
+          poetry build
+          poetry publish --username __token__ --password ${{ secrets.PYPI_TOKEN }}
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
index 5cc7bfd53..784f02451 100644
--- a/.github/workflows/tests.yml
+++ b/.github/workflows/tests.yml
@@ -27,31 +27,22 @@ jobs:
       - name: Checkout LFS objects
         run: git lfs checkout
 
+      - name: Install poetry
+        run: pipx install poetry
+
       - name: Set up Python ${{ matrix.python-version }}
-        uses: actions/setup-python@v2
+        uses: actions/setup-python@v3
         with:
           python-version: ${{ matrix.python-version }}
+          cache: "poetry"
 
       - name: Create environment variables
         run: echo "BLISS_HOME=$GITHUB_WORKSPACE" >> $GITHUB_ENV
 
-      - name: Install Poetry
-        run: |
-          pip install --upgrade pip
-          pip install "poetry-core>=1.0.0"
-          pip install "poetry>=1.2.2"
-
       - name: Install fftw3
         run: |
           sudo apt-get install libfftw3-dev
 
-      - name: Poetry cache
-        uses: actions/cache@v2
-        with:
-          path: |
-            ~/.cache/pypoetry
-          key: ${{ runner.os }}-${{ hashFiles('./poetry.lock') }}
-
       - name: Install poetry dependencies
         run: |
           poetry install
diff --git a/.gitignore b/.gitignore
index b4b82d67c..74ec448cc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -10,6 +10,7 @@ fits/*
 .tox
 *lightning_logs
 *.pkl
+*.whl
 .idea
 htmlcov/
 build/
diff --git a/README.md b/README.md
index 3eec3aaf4..33f59c68c 100644
--- a/README.md
+++ b/README.md
@@ -22,47 +22,63 @@ BLISS uses state-of-the-art variational inference techniques including
 
 # Installation
 
+BLISS is pip installable with the following command:
+
+```bash
+pip install bliss-deblender
+```
+
+The required dependencies are listed in the ``[tool.poetry.dependencies]`` block of the ``pyproject.toml`` file.
+
+# Installation (Developers)
+
 1. To use and install `bliss` you first need to install [poetry](https://python-poetry.org/docs/).
 
 2. Then, install the [fftw](http://www.fftw.org) library (which is used by `galsim`). With Ubuntu you can install it by running
-```
+
+```bash
 sudo apt-get install libfftw3-dev
 ```
 
 3. Install git-lfs if you haven't already installed it for another project:
-```
+
+```bash
 git-lfs install
 ```
 
 4. Now download the bliss repo and fetch some pre-trained models and test data from git-lfs:
-```
+
+```bash
 git clone https://github.com/prob-ml/bliss.git
 ```
 
 5. To create a poetry environment with the `bliss` dependencies satisfied, run
-```
+
+```bash
 cd bliss
 poetry install
 poetry shell
 ```
 
 6. Verify that bliss is installed correctly by running the tests both on your CPU (default) and on your GPU:
-```
+
+```bash
 pytest
 pytest --gpu
 ```
 
 7. Finally, if you are planning to contribute code to this repository, consider installing our pre-commit hooks so that your code commits will be checked locally for compliance with our coding conventions:
-```
+
+```bash
 pre-commit install
 ```
 
 # Latest updates
 
-### Galaxies
+## Galaxies
 - BLISS now includes a galaxy model based on a VAE that was trained on Galsim galaxies.
 - BLISS now includes an algorithm for detecting, measuring, and deblending galaxies.
 
-### Stars
+## Stars
 - BLISS already includes the StarNet functionality from its predecessor repo: [DeblendingStarFields](https://github.com/Runjing-Liu120/DeblendingStarfields).
diff --git a/bliss/catalog.py b/bliss/catalog.py
index 8436b44db..8c569e964 100644
--- a/bliss/catalog.py
+++ b/bliss/catalog.py
@@ -113,15 +113,10 @@ def to_full_params(self):
         parameters.
 
         Returns:
-            A dictionary of tensors with the same members as those in `tile_params`.
-            The first two dimensions of each tensor is `batch_size x max(n_sources)`,
-            where `max(n_sources)` is the maximum number of sources detected across samples.
-            In samples where the number of sources detected is less than max(n_sources),
-            these values will be zeroed out. Thus, it is imperative to use the "n_sources"
-            element to verify which locations/fluxes/parameters are zeroed out.
+            The FullCatalog instance corresponding to the TileCatalog instance.
 
             NOTE: The locations (`"locs"`) are between 0 and 1. The output also contains
-            pixel locations ("plocs") that are between 0 and slen.
+            pixel locations ("plocs") that are between 0 and `slen`.
         """
         plocs = self._get_full_locs_from_tiles()
         param_names_to_mask = {"plocs"}.union(set(self.keys()))
@@ -203,7 +198,7 @@ def to_dict(self) -> Dict[str, Tensor]:
             out[k] = v
         return out
 
-    def equals(self, other, exclude=None, **kwargs):
+    def equals(self, other, exclude=None, **kwargs) -> bool:
         self_dict = self.to_dict()
         other_dict: Dict[str, Tensor] = other.to_dict()
         exclude = set() if exclude is None else set(exclude)
@@ -216,7 +211,7 @@ def equals(self, other, exclude=None, **kwargs):
     def __eq__(self, other):
         return self.equals(other)
 
-    def get_tile_params_at_coord(self, plocs: torch.Tensor):
+    def get_tile_params_at_coord(self, plocs: torch.Tensor) -> Dict[str, Tensor]:
         """Return the parameters of the tiles that contain each of the locations in plocs."""
         assert len(plocs.shape) == 2 and plocs.shape[1] == 2
         assert plocs.device == self.locs.device
@@ -232,8 +227,8 @@ def get_tile_params_at_coord(self, plocs: torch.Tensor):
 
         return {k: v[:, x_indx, y_indx, :, :].reshape(n_total, -1) for k, v in self.items()}
 
-    def set_all_fluxes_and_mags(self, decoder):
-        """Set all fluxes (galaxy and star) of tile catalog given an ImageDecoder instance."""
+    def set_all_fluxes_and_mags(self, decoder) -> None:
+        """Set all fluxes (galaxy and star) of tile catalog given an `ImageDecoder` instance."""
         # first get galaxy fluxes
         assert "galaxy_bools" in self and "galaxy_params" in self and "star_fluxes" in self
         assert (
@@ -255,8 +250,8 @@ def set_all_fluxes_and_mags(self, decoder):
         self["mags"] = torch.zeros_like(self["fluxes"])
         self["mags"][is_on_array] = convert_flux_to_mag(self["fluxes"][is_on_array])
 
-    def set_galaxy_ellips(self, decoder, scale: float = 0.393):
-        """Sets galaxy ellipticities of tile catalog given an ImageDecoder instance."""
+    def set_galaxy_ellips(self, decoder, scale: float = 0.393) -> None:
+        """Sets galaxy ellipticities of tile catalog given an `ImageDecoder` instance."""
         galaxy_bools, galaxy_params = self["galaxy_bools"], self["galaxy_params"]
         ellips = decoder.get_galaxy_ellips(galaxy_bools, galaxy_params, scale=scale)
         self["ellips"] = ellips
diff --git a/bliss/datasets/galsim_galaxies.py b/bliss/datasets/galsim_galaxies.py
index 0e9c96edb..c8275ce53 100644
--- a/bliss/datasets/galsim_galaxies.py
+++ b/bliss/datasets/galsim_galaxies.py
@@ -1,7 +1,5 @@
 from typing import Dict, List, Optional
 
-import galsim
-import numpy as np
 import pytorch_lightning as pl
 import torch
 from einops import rearrange
@@ -222,91 +220,6 @@ def test_dataloader(self):
         return DataLoader(self, batch_size=self.batch_size, num_workers=self.num_workers)
 
 
-class ToyGaussian(pl.LightningDataModule, Dataset):
-    def __init__(
-        self,
-        num_workers,
-        batch_size,
-        n_batches,
-        slen,
-        n_bands,
-        pixel_scale,
-        background,
-        psf_fwhm,
-        min_flux,
-        max_flux,
-        min_hlr,
-        max_hlr,
-        max_e,
-    ):
-        super().__init__()
-        assert n_bands == 1, "Only 1 band is supported"
-        self.num_workers = num_workers
-        self.batch_size = batch_size
-        self.n_batches = n_batches
-
-        self.slen = slen
-        self.n_bands = n_bands
-        self.pixel_scale = pixel_scale
-
-        # create background
-        self.background = torch.zeros((self.n_bands, self.slen, self.slen), dtype=torch.float32)
-        self.background[...] = background
-
-        # small dummy psf
-        self.psf = galsim.Gaussian(fwhm=psf_fwhm).withFlux(1.0)
-        self.min_flux = min_flux
-        self.max_flux = max_flux
-        self.min_hlr = min_hlr
-        self.max_hlr = max_hlr
-        self.max_e = max_e
-
-    def _uniform(self, a, b):
-        # uses pytorch to return a single float ~ U(a, b)
-        unif = (a - b) * torch.rand(1) + b
-        return unif.item()
-
-    def __getitem__(self, idx):
-        flux_avg = self._uniform(self.min_flux, self.max_flux)
-        hlr = self._uniform(self.min_hlr, self.max_hlr)  # arcseconds
-        flux = (hlr / self.pixel_scale) ** 2 * np.pi * flux_avg  # approx
-
-        # sample ellipticity
-        ell = self._uniform(0, self.max_e)
-        theta = self._uniform(0, 2 * np.pi)
-        g1 = ell * np.cos(theta)
-        g2 = ell * np.sin(theta)
-
-        # pylint: disable=no-value-for-parameter
-        galaxy = galsim.Gaussian(half_light_radius=hlr).shear(g1=g1, g2=g2).withFlux(flux)
-        gal_conv = galsim.Convolution(galaxy, self.psf)
-        image = gal_conv.drawImage(
-            nx=self.slen, ny=self.slen, method="auto", scale=self.pixel_scale
-        )
-
-        # convert image to pytorch and reshape
-        image = torch.from_numpy(image.array).reshape(1, self.slen, self.slen)
-
-        # add noise and background.
-        image += self.background
-        noise = torch.sqrt(image) * torch.randn(*image.shape)
-        image += noise
-
-        return {"images": image, "background": self.background}
-
-    def __len__(self):
-        return self.batch_size * self.n_batches
-
-    def train_dataloader(self):
-        return DataLoader(self, batch_size=self.batch_size, num_workers=self.num_workers)
-
-    def val_dataloader(self):
-        return DataLoader(self, batch_size=self.batch_size, num_workers=self.num_workers)
-
-    def test_dataloader(self):
-        return DataLoader(self, batch_size=self.batch_size, num_workers=self.num_workers)
-
-
 def _add_noise_and_background(image: Tensor, background: Tensor) -> Tensor:
     image_with_background = image + background
     noise = image_with_background.sqrt() * torch.randn_like(image_with_background)
diff --git a/bliss/models/prior.py b/bliss/models/prior.py
index 2284465fb..747a6252a 100644
--- a/bliss/models/prior.py
+++ b/bliss/models/prior.py
@@ -1,5 +1,5 @@
 from pathlib import Path
-from typing import Optional, Union
+from typing import Optional
 from warnings import warn
 
 import numpy as np
@@ -10,7 +10,7 @@
 from torch.distributions import Poisson
 
 from bliss.catalog import TileCatalog, get_is_on_from_n_sources
-from bliss.datasets.galsim_galaxies import SingleGalsimGalaxies, ToyGaussian
+from bliss.datasets.galsim_galaxies import SingleGalsimGalaxies
 from bliss.models.galaxy_net import OneCenteredGalaxyAE
 
 
@@ -21,7 +21,7 @@ def __init__(
         n_latent_batches: Optional[int] = None,
         autoencoder: Optional[OneCenteredGalaxyAE] = None,
         autoencoder_ckpt: Optional[str] = None,
-        galaxy_dataset: Optional[Union[SingleGalsimGalaxies, ToyGaussian]] = None,
+        galaxy_dataset: Optional[SingleGalsimGalaxies] = None,
     ):
         """Class to sample galaxy latent variables.
 
diff --git a/case_studies/sdss_galaxies_vae/config/config.yaml b/case_studies/sdss_galaxies_vae/config/config.yaml
index d7b6d65c8..380c62e9b 100644
--- a/case_studies/sdss_galaxies_vae/config/config.yaml
+++ b/case_studies/sdss_galaxies_vae/config/config.yaml
@@ -88,21 +88,6 @@ datasets:
     generate_device: "cpu"
     num_workers: 5
     fix_validation_set: true
-  toy_gaussian:
-    _target_: bliss.datasets.galsim_galaxies.ToyGaussian
-    num_workers: 0
-    n_batches: 10
-    batch_size: 64
-    slen: 53
-    n_bands: 1
-    background: 865.
-    psf_fwhm: 0.8
-    pixel_scale: 0.4
-    min_flux: 300 # to be observable it cannot be too below background.
-    max_flux: 10000
-    min_hlr: 0.8
-    max_hlr: 3.0
-    max_e: 0.6
 
 models:
   binary:
diff --git a/case_studies/strong_lensing/config/config.yaml b/case_studies/strong_lensing/config/config.yaml
index e7633c96a..6b8fae312 100644
--- a/case_studies/strong_lensing/config/config.yaml
+++ b/case_studies/strong_lensing/config/config.yaml
@@ -37,21 +37,6 @@ datasets:
     n_batches: 10
     batch_size: 32
     generate_device: "cuda:0"
-  toy_gaussian:
-    _target_: bliss.datasets.galsim_galaxies.ToyGaussian
-    num_workers: 0
-    n_batches: 10
-    batch_size: 64
-    slen: 53
-    n_bands: 1
-    background: 865.
-    psf_fwhm: 0.8
-    pixel_scale: 0.4
-    min_flux: 300
-    max_flux: 10000
-    min_hlr: 0.8
-    max_hlr: 3.0
-    max_e: 0.6
 
 models:
   lensing_binary:
diff --git a/pyproject.toml b/pyproject.toml
index d1226fe47..9e58cfc5b 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -14,11 +14,11 @@ description = "Bayesian Light Source Separator"
 documentation = "https://prob-ml.github.io/bliss/"
 keywords = ["cosmology", "blending", "weak lensing", "bayesian", "ml", "pytorch"]
 license = "MIT"
-name = "bliss"
+name = "bliss-deblender"
 packages = [{include = "bliss"}]
 readme = "README.md"
 repository = "https://github.com/prob-ml/bliss"
-version = "0.1.0"
+version = "0.1.1"
 
 [tool.poetry.dependencies]
 astropy = ">=4.2.1"
diff --git a/tests/case_studies/sdss_galaxies_vae/test_galaxy_vae.py b/tests/case_studies/sdss_galaxies_vae/test_galaxy_vae.py
index 16f064679..ec2716a70 100644
--- a/tests/case_studies/sdss_galaxies_vae/test_galaxy_vae.py
+++ b/tests/case_studies/sdss_galaxies_vae/test_galaxy_vae.py
@@ -1,39 +1,3 @@
-def test_galaxy_autoencoder_toy_gaussian(vae_setup, devices):
-    use_cuda = devices.use_cuda
-    overrides = {
-        "mode": "train",
-        "training": "sdss_vae",
-        "training.dataset": "${datasets.toy_gaussian}",
-    }
-
-    if use_cuda:
-        overrides.update(
-            {
-                "training.n_epochs": 101,
-                "training.trainer.check_val_every_n_epoch": 50,
-            }
-        )
-    else:
-        overrides.update(
-            {
-                "datasets.toy_gaussian.batch_size": 10,
-                "datasets.toy_gaussian.n_batches": 1,
-                "training.n_epochs": 2,
-                "training.trainer.check_val_every_n_epoch": 1,
-            }
-        )
-
-    # train galaxy_vae
-    galaxy_ae = vae_setup.get_trained_model(overrides)
-    results = vae_setup.test_model(overrides, galaxy_ae)
-
-    # only expect tests to pass in cuda:
-    if not devices.use_cuda:
-        return
-
-    assert results["max_residual"] < 60
-
-
 def test_galaxy_autoencoder_bulge_disk(vae_setup, devices):
     use_cuda = devices.use_cuda
     overrides = {