diff --git a/docs/img/domino_result_rtwt.jpg b/docs/img/domino_result_rtwt.jpg
new file mode 100644
index 0000000000..34c0cefa9f
Binary files /dev/null and b/docs/img/domino_result_rtwt.jpg differ
diff --git a/examples/cfd/external_aerodynamics/domino/README.md b/examples/cfd/external_aerodynamics/domino/README.md
new file mode 100644
index 0000000000..1f1cbdea51
--- /dev/null
+++ b/examples/cfd/external_aerodynamics/domino/README.md
@@ -0,0 +1,15 @@
+# DoMINO: Decomposable Multi-scale Iterative Neural Operator for External Aerodynamics
+
+DoMINO is a local, multi-scale, point-cloud-based model architecture for large-scale
+physics problems such as external aerodynamics. The DoMINO model takes STL
+geometries as input and evaluates flow quantities such as pressure and
+wall shear stress on the surface of the car, as well as velocity fields and pressure
+in the volume around it. The DoMINO architecture is designed to be a fast, accurate,
+and scalable surrogate model for large-scale simulations.
+
+The DoMINO model architecture is used to support the Real Time Wind Tunnel OV Blueprint
+demo presented at Supercomputing '24.
+
+![Results from DoMINO for RTWT SC demo](../../../../docs/img/domino_result_rtwt.jpg)
+
+More details on the model architecture and reference pipeline will be released soon.
diff --git a/examples/generative/diffusion/README.md b/examples/generative/diffusion/README.md
index b658a4f246..d7bc9bd71d 100644
--- a/examples/generative/diffusion/README.md
+++ b/examples/generative/diffusion/README.md
@@ -30,7 +30,7 @@ Datasets used for model training and sampling can be downloaded via the followin
 ## Running the Experiments
 
 Download the high res and low res data and save the data files to the subdirectory ``modulus/examples/generative/diffusion/Kolmogorov_2D_data/``.
 
-- Note: The directory from which the downloaded dataset files are loaded is specified in the configuration yaml files at ``modulus/examples/generative/diffusion/Kolmogorov_2D_data/conf/``. In the case when the default relative file location in a yaml file cannot be correctly recognized, please replace the relative location with the absolute location. For example, in the configuration file "modulus/examples/generative/diffusion/Kolmogorov_2D_data/conf/config_dfsr_train.yaml", Line 24, the value of the key 'data' can be changed to an absolute file directory of the dataset file, e.g., ``//examples/generative/diffusion/Kolmogorov_2D_data/kf_2d_re1000_256_40seed.npy``
+- Note: The directory from which the downloaded dataset files are loaded is specified in the configuration yaml files at ``modulus/examples/generative/diffusion/conf/``. If the default relative file location in a yaml file cannot be resolved correctly, replace the relative location with the absolute location. For example, in the configuration file `modulus/examples/generative/diffusion/conf/config_dfsr_train.yaml`, Line 24, the value of the key 'data' can be changed to the absolute path of the dataset file, e.g., ``//examples/generative/diffusion/Kolmogorov_2D_data/kf_2d_re1000_256_40seed.npy``
 
 Step 1 - Model Training
diff --git a/modulus/datapipes/healpix/data_modules.py b/modulus/datapipes/healpix/data_modules.py
index 50f7284ba5..fdabb9f8de 100644
--- a/modulus/datapipes/healpix/data_modules.py
+++ b/modulus/datapipes/healpix/data_modules.py
@@ -285,7 +285,7 @@ def create_time_series_dataset_classic(
     for variable in all_variables:
         file_name = _get_file_name(src_directory, prefix, variable, suffix)
         logger.debug("open nc dataset %s", file_name)
-        if "sample" in list(xr.open_dataset(file_name).dims.keys()):
+        if "sample" in list(xr.open_dataset(file_name).sizes.keys()):
             ds = xr.open_dataset(file_name, chunks={"sample": batch_size}).rename(
                 {"sample": "time"}
             )
@@ -989,7 +989,7 @@ def setup(self) -> None:
             )
 
             dataset = dataset.sel(
-                channel_in=self.input_variables,
+                channel_in=self.input_variables + coupled_variables,
                 channel_out=self.output_variables,
             )
         else:
diff --git a/modulus/datapipes/healpix/timeseries_dataset.py b/modulus/datapipes/healpix/timeseries_dataset.py
index fed380256f..a36eb33ff5 100644
--- a/modulus/datapipes/healpix/timeseries_dataset.py
+++ b/modulus/datapipes/healpix/timeseries_dataset.py
@@ -303,8 +303,8 @@ def _get_time_index(self, item):
             if self.forecast_mode
             else (item + 1) * self.batch_size + self._window_length
         )
-        if not self.drop_last and max_index > self.ds.dims["time"]:
-            batch_size = self.batch_size - (max_index - self.ds.dims["time"])
+        if not self.drop_last and max_index > self.ds.sizes["time"]:
+            batch_size = self.batch_size - (max_index - self.ds.sizes["time"])
         else:
             batch_size = self.batch_size
         return (start_index, max_index), batch_size
diff --git a/test/models/dlwp_healpix/test_healpix_layers.py b/test/models/dlwp_healpix/test_healpix_layers.py
index d7f822470f..fba2b3468b 100644
--- a/test/models/dlwp_healpix/test_healpix_layers.py
+++ b/test/models/dlwp_healpix/test_healpix_layers.py
@@ -154,38 +154,34 @@ def test_HEALPixLayer_initialization(device, multiplier):
 def test_HEALPixLayer_forward(device, multiplier):
     layer = HEALPixLayer(layer=MulX, multiplier=multiplier)
 
+    kernel_size = 3
+    dilation = 2
+    in_channels = 4
+    out_channels = 8
+
     tensor_size = torch.randint(low=2, high=4, size=(1,)).tolist()
-    tensor_size = [24, 4, *tensor_size, *tensor_size]
+    tensor_size = [24, in_channels, *tensor_size, *tensor_size]
     invar = torch.rand(tensor_size, device=device)
     outvar = layer(invar)
 
     assert common.compare_output(outvar, invar * multiplier)
 
-    # test nhwc mode and dilation
     layer = HEALPixLayer(
         layer=torch.nn.Conv2d,
-        in_channels=4,
-        out_channels=8,
-        kernel_size=3,
+        in_channels=in_channels,
+        out_channels=out_channels,
+        kernel_size=kernel_size,
         device=device,
-        # dilation=4,
-    )
-
-    outvar = layer(invar)
-
-    layer = HEALPixLayer(
-        layer=torch.nn.Conv2d,
-        in_channels=4,
-        out_channels=8,
-        kernel_size=3,
-        device=device,
-        dilation=1,
+        dilation=dilation,
         enable_healpixpad=True,
         enable_nhwc=True,
     )
 
-    assert outvar.shape == layer(invar).shape
-    assert outvar.stride() != layer(invar).stride()
+    # expected shape: the padding added by HEALPixLayer preserves the spatial dims
+    expected_shape = [24, out_channels, tensor_size[-1], tensor_size[-1]]
+    expected_shape = torch.Size(expected_shape)
+
+    assert expected_shape == layer(invar).shape
 
     del layer, outvar, invar
     torch.cuda.empty_cache()
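Background on the xarray idiom used in the datapipe hunks: recent xarray releases deprecate treating `Dataset.dims` as a mapping of dimension name to length and recommend `Dataset.sizes` for that purpose, which is why the code above queries `sizes`. The sketch below is a minimal illustration on a synthetic dataset; the variable and dimension names (`tas`, `sample`, `height`, `width`) are made up for the example and are not taken from the HEALPix data files.

```python
# Minimal sketch of the Dataset.sizes idiom; the dataset here is synthetic.
import numpy as np
import xarray as xr

ds = xr.Dataset({"tas": (("sample", "height", "width"), np.zeros((16, 32, 32)))})

# Membership test over dimension names, as in create_time_series_dataset_classic.
if "sample" in ds.sizes:
    ds = ds.rename({"sample": "time"})

# Length lookup by dimension name, as in _get_time_index.
n_time = ds.sizes["time"]
print(n_time)  # 16
```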