logger = logging.Logger(logging.root.level)


class DarcyDataset(PTDataset):
    """
    DarcyDataset stores data generated according to Darcy's Law.

@@ -237,12 +239,13 @@ Source code for neuralop.data.datasets.darcy
                     encoding=encoding,
                     channel_dim=channel_dim,
                     input_subsampling_rate=subsampling_rate,
-                    output_subsampling_rate=subsampling_rate)
+                    output_subsampling_rate=subsampling_rate)
+
# legacy Small Darcy Flow example
example_data_root = get_project_root() / "neuralop/data/datasets/data"

def load_darcy_flow_small(n_train,
                          n_tests,
                          batch_size,
diff --git a/dev/_modules/neuralop/data/datasets/navier_stokes.html b/dev/_modules/neuralop/data/datasets/navier_stokes.html
new file mode 100644
index 0000000..f1ff92d
--- /dev/null
+++ b/dev/_modules/neuralop/data/datasets/navier_stokes.html
@@ -0,0 +1,331 @@
+
+neuralop.data.datasets.navier_stokes — neuraloperator 0.3.0 documentation
+
+Source code for neuralop.data.datasets.navier_stokes
+
+import logging
+import os
+from pathlib import Path
+from typing import Union, List
+
+from torch.utils.data import DataLoader
+
+from .pt_dataset import PTDataset
+from .web_utils import download_from_zenodo_record
+from neuralop.utils import get_project_root
+
+logger = logging.Logger(logging.root.level)
+
+
+class NavierStokesDataset(PTDataset):
+    """
+    NavierStokesDataset stores data generated according to the 2d
+    incompressible Navier-Stokes equations. Input and output are both
+    2d fields with one channel of data which describes the vorticity at each point.
+
+    Data source: https://zenodo.org/records/12825163
+
+    Attributes
+    ----------
+    train_db: torch.utils.data.Dataset of training examples
+    test_db: "" of test examples
+    data_processor: neuralop.datasets.DataProcessor to process data examples
+        optional, default is None
+    """
+    def __init__(self,
+                 root_dir: Union[Path, str],
+                 n_train: int,
+                 n_tests: List[int],
+                 batch_size: int,
+                 test_batch_sizes: List[int],
+                 train_resolution: int,
+                 test_resolutions: int = [16, 32],
+                 encode_input: bool = False,
+                 encode_output: bool = True,
+                 encoding="channel-wise",
+                 channel_dim=1,
+                 subsampling_rate=None,
+                 download: bool = True):
+
+        """NavierStokesDataset
+
+        Parameters
+        ----------
+        root_dir : Union[Path, str]
+            root at which to download data files
+        dataset_name : str
+            prefix of pt data files to store/access
+        n_train : int
+            number of train instances
+        n_tests : List[int]
+            number of test instances per test dataset
+        batch_size : int
+            batch size of training set
+        test_batch_sizes : List[int]
+            batch size of test sets
+        train_resolution : int
+            resolution of data for training set
+        test_resolutions : List[int], optional
+            resolution of data for testing sets, by default [16,32]
+        encode_input : bool, optional
+            whether to normalize inputs in provided DataProcessor,
+            by default False
+        encode_output : bool, optional
+            whether to normalize outputs in provided DataProcessor,
+            by default True
+        encoding : str, optional
+            parameter for input/output normalization. Whether
+            to normalize by channel ("channel-wise") or
+            by pixel ("pixel-wise"), default "channel-wise"
+        input_subsampling_rate : int or List[int], optional
+            rate at which to subsample each input dimension, by default None
+        output_subsampling_rate : int or List[int], optional
+            rate at which to subsample each output dimension, by default None
+        channel_dim : int, optional
+            dimension of saved tensors to index data channels, by default 1
+        """
+        # convert root dir to Path
+        if isinstance(root_dir, str):
+            root_dir = Path(root_dir)
+        if not root_dir.exists():
+            root_dir.mkdir(parents=True)
+
+        # Zenodo record ID for Navier-Stokes dataset
+        zenodo_record_id = "12825163"
+
+        # List of resolutions needed for dataset object
+        resolutions = set(test_resolutions + [train_resolution])
+
+        # We store data at these resolutions on the Zenodo archive
+        available_resolutions = [128, 1024]
+        for res in resolutions:
+            assert res in available_resolutions, f"Error: resolution {res} not available"
+
+        # download Navier-Stokes data from the Zenodo archive if requested
+        if download:
+            files_to_download = []
+            already_downloaded_files = [x for x in root_dir.iterdir()]
+            for res in resolutions:
+                if f"nsforcing_train_{res}.pt" not in already_downloaded_files or \
+                    f"nsforcing_test_{res}.pt" not in already_downloaded_files:
+                    files_to_download.append(f"nsforcing_{res}.tgz")
+            download_from_zenodo_record(record_id=zenodo_record_id,
+                                        root=root_dir,
+                                        files_to_download=files_to_download)
+
+        # once downloaded/if files already exist, init PTDataset
+        super().__init__(root_dir=root_dir,
+                         n_train=n_train,
+                         n_tests=n_tests,
+                         dataset_name="nsforcing",
+                         batch_size=batch_size,
+                         test_batch_sizes=test_batch_sizes,
+                         train_resolution=train_resolution,
+                         test_resolutions=test_resolutions,
+                         encode_input=encode_input,
+                         encode_output=encode_output,
+                         encoding=encoding,
+                         channel_dim=channel_dim,
+                         input_subsampling_rate=subsampling_rate,
+                         output_subsampling_rate=subsampling_rate)
+
+
+example_data_root = get_project_root() / "neuralop/datasets/data"
+# load navier stokes pt for backwards compatibility
+def load_navier_stokes_pt(n_train,
+                          n_tests,
+                          batch_size,
+                          test_batch_sizes,
+                          data_root=example_data_root,
+                          train_resolution=128,
+                          test_resolutions=[128],
+                          encode_input=False,
+                          encode_output=True,
+                          encoding="channel-wise",
+                          channel_dim=1,
+                          subsampling_rate=None,):
+
+    dataset = NavierStokesDataset(root_dir=data_root,
+                                  n_train=n_train,
+                                  n_tests=n_tests,
+                                  batch_size=batch_size,
+                                  test_batch_sizes=test_batch_sizes,
+                                  train_resolution=train_resolution,
+                                  test_resolutions=test_resolutions,
+                                  encode_input=encode_input,
+                                  encode_output=encode_output,
+                                  encoding=encoding,
+                                  channel_dim=channel_dim,
+                                  subsampling_rate=subsampling_rate)
+
+    # return dataloaders for backwards compat
+    train_loader = DataLoader(dataset.train_db,
+                              batch_size=batch_size,
+                              num_workers=0,
+                              pin_memory=True,
+                              persistent_workers=False,)
+
+    test_loaders = {}
+    for res, test_bsize in zip(test_resolutions, test_batch_sizes):
+        test_loaders[res] = DataLoader(dataset.test_dbs[res],
+                                       batch_size=test_bsize,
+                                       shuffle=False,
+                                       num_workers=0,
+                                       pin_memory=True,
+                                       persistent_workers=False,)
+
+    return train_loader, test_loaders, dataset.data_processor
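A usage sketch for the legacy loader above (illustrative only; the sample counts and batch sizes are arbitrary, and the data files are assumed to be available under the default ``example_data_root``, otherwise the dataset will try to download them)::

    train_loader, test_loaders, data_processor = load_navier_stokes_pt(
        n_train=100,
        n_tests=[50],
        batch_size=16,
        test_batch_sizes=[16],
        train_resolution=128,
        test_resolutions=[128],
    )
    batch = next(iter(train_loader))           # a batch is typically a dict with 'x' and 'y' tensors
    batch = data_processor.preprocess(batch)   # normalize before passing to a model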
\ No newline at end of file
diff --git a/dev/_sources/auto_examples/plot_FNO_darcy.rst.txt b/dev/_sources/auto_examples/plot_FNO_darcy.rst.txt
index d43d4c5..f01dc51 100644
--- a/dev/_sources/auto_examples/plot_FNO_darcy.rst.txt
+++ b/dev/_sources/auto_examples/plot_FNO_darcy.rst.txt
@@ -248,13 +248,13 @@ Training the model
)
### SCHEDULER ###
-
+
### LOSSES ###
- * Train:
+ * Train:
- * Test: {'h1': , 'l2': }
+ * Test: {'h1': , 'l2': }
@@ -311,22 +311,22 @@ Then train the model on our small Darcy-Flow dataset:
Training on 1000 samples
Testing on [50, 50] samples on resolutions [16, 32].
Raw outputs of shape torch.Size([32, 1, 16, 16])
- [0] time=2.63, avg_loss=0.6321, train_err=19.7545
+ [0] time=2.54, avg_loss=0.6321, train_err=19.7545
Eval: 16_h1=0.3228, 16_l2=0.2567, 32_h1=0.4432, 32_l2=0.2759
- [3] time=2.60, avg_loss=0.2116, train_err=6.6120
+ [3] time=2.53, avg_loss=0.2116, train_err=6.6120
Eval: 16_h1=0.2058, 16_l2=0.1716, 32_h1=0.3869, 32_l2=0.2037
- [6] time=2.66, avg_loss=0.1687, train_err=5.2734
+ [6] time=2.52, avg_loss=0.1687, train_err=5.2734
Eval: 16_h1=0.1864, 16_l2=0.1414, 32_h1=0.3874, 32_l2=0.1798
- [9] time=2.62, avg_loss=0.1457, train_err=4.5546
+ [9] time=2.53, avg_loss=0.1457, train_err=4.5546
Eval: 16_h1=0.1864, 16_l2=0.1451, 32_h1=0.4279, 32_l2=0.1923
- [12] time=2.61, avg_loss=0.1348, train_err=4.2138
+ [12] time=2.52, avg_loss=0.1348, train_err=4.2138
Eval: 16_h1=0.1892, 16_l2=0.1436, 32_h1=0.4446, 32_l2=0.1909
- [15] time=2.56, avg_loss=0.1176, train_err=3.6743
+ [15] time=2.53, avg_loss=0.1176, train_err=3.6743
Eval: 16_h1=0.1565, 16_l2=0.1118, 32_h1=0.3807, 32_l2=0.1519
- [18] time=2.54, avg_loss=0.0866, train_err=2.7047
+ [18] time=2.52, avg_loss=0.0866, train_err=2.7047
Eval: 16_h1=0.1576, 16_l2=0.1159, 32_h1=0.4055, 32_l2=0.1698
- {'train_err': 2.8488178942352533, 'avg_loss': 0.0911621726155281, 'avg_lasso_loss': None, 'epoch_train_time': 2.5474441709999383}
+ {'train_err': 2.8488178942352533, 'avg_loss': 0.0911621726155281, 'avg_lasso_loss': None, 'epoch_train_time': 2.529337541000018}
@@ -476,7 +476,7 @@ are other ways to scale the outputs of the FNO to train a true super-resolution
.. rst-class:: sphx-glr-timing
- **Total running time of the script:** (0 minutes 53.259 seconds)
+ **Total running time of the script:** (0 minutes 51.863 seconds)
.. _sphx_glr_download_auto_examples_plot_FNO_darcy.py:
diff --git a/dev/_sources/auto_examples/plot_SFNO_swe.rst.txt b/dev/_sources/auto_examples/plot_SFNO_swe.rst.txt
index 406e09c..d84a85f 100644
--- a/dev/_sources/auto_examples/plot_SFNO_swe.rst.txt
+++ b/dev/_sources/auto_examples/plot_SFNO_swe.rst.txt
@@ -239,13 +239,13 @@ Creating the losses
)
### SCHEDULER ###
-
+
### LOSSES ###
- * Train:
+ * Train:
- * Test: {'l2': }
+ * Test: {'l2': }
@@ -302,22 +302,22 @@ Train the model on the spherical SWE dataset
Training on 200 samples
Testing on [50, 50] samples on resolutions [(32, 64), (64, 128)].
Raw outputs of shape torch.Size([4, 3, 32, 64])
- [0] time=3.59, avg_loss=2.6227, train_err=10.4908
- Eval: (32, 64)_l2=2.0467, (64, 128)_l2=2.4205
- [3] time=3.57, avg_loss=0.3785, train_err=1.5141
- Eval: (32, 64)_l2=0.6473, (64, 128)_l2=2.3253
- [6] time=3.49, avg_loss=0.2692, train_err=1.0770
- Eval: (32, 64)_l2=0.5009, (64, 128)_l2=2.3432
- [9] time=3.47, avg_loss=0.2131, train_err=0.8524
- Eval: (32, 64)_l2=0.3992, (64, 128)_l2=2.3173
- [12] time=3.50, avg_loss=0.1812, train_err=0.7249
- Eval: (32, 64)_l2=0.3265, (64, 128)_l2=2.3299
- [15] time=3.48, avg_loss=0.1573, train_err=0.6291
- Eval: (32, 64)_l2=0.3441, (64, 128)_l2=2.3306
- [18] time=3.52, avg_loss=0.1410, train_err=0.5639
- Eval: (32, 64)_l2=0.3440, (64, 128)_l2=2.3065
+ [0] time=3.56, avg_loss=2.6136, train_err=10.4543
+ Eval: (32, 64)_l2=1.8840, (64, 128)_l2=2.4460
+ [3] time=3.51, avg_loss=0.4108, train_err=1.6431
+ Eval: (32, 64)_l2=0.6960, (64, 128)_l2=2.2584
+ [6] time=3.49, avg_loss=0.2774, train_err=1.1095
+ Eval: (32, 64)_l2=0.6552, (64, 128)_l2=2.2398
+ [9] time=3.49, avg_loss=0.2443, train_err=0.9772
+ Eval: (32, 64)_l2=0.6225, (64, 128)_l2=2.2648
+ [12] time=3.49, avg_loss=0.1959, train_err=0.7834
+ Eval: (32, 64)_l2=0.5752, (64, 128)_l2=2.2719
+ [15] time=3.53, avg_loss=0.1631, train_err=0.6525
+ Eval: (32, 64)_l2=0.5845, (64, 128)_l2=2.2840
+ [18] time=3.49, avg_loss=0.1435, train_err=0.5739
+ Eval: (32, 64)_l2=0.5651, (64, 128)_l2=2.2851
- {'train_err': 0.5455278062820434, 'avg_loss': 0.13638195157051086, 'avg_lasso_loss': None, 'epoch_train_time': 3.472705569000027}
+ {'train_err': 0.5775461041927338, 'avg_loss': 0.14438652604818344, 'avg_lasso_loss': None, 'epoch_train_time': 3.4856522659999882}
@@ -388,7 +388,7 @@ In practice we would train a Neural Operator on one or multiple GPUs
.. rst-class:: sphx-glr-timing
- **Total running time of the script:** (1 minutes 25.850 seconds)
+ **Total running time of the script:** (1 minutes 25.808 seconds)
.. _sphx_glr_download_auto_examples_plot_SFNO_swe.py:
diff --git a/dev/_sources/auto_examples/plot_UNO_darcy.rst.txt b/dev/_sources/auto_examples/plot_UNO_darcy.rst.txt
index f18271b..1782175 100644
--- a/dev/_sources/auto_examples/plot_UNO_darcy.rst.txt
+++ b/dev/_sources/auto_examples/plot_UNO_darcy.rst.txt
@@ -345,13 +345,13 @@ Creating the losses
)
### SCHEDULER ###
-
+
### LOSSES ###
- * Train:
+ * Train:
- * Test: {'h1': , 'l2': }
+ * Test: {'h1': , 'l2': }
@@ -410,22 +410,22 @@ Actually train the model on our small Darcy-Flow dataset
Training on 1000 samples
Testing on [50, 50] samples on resolutions [16, 32].
Raw outputs of shape torch.Size([32, 1, 16, 16])
- [0] time=10.29, avg_loss=0.7399, train_err=23.1232
- Eval: 16_h1=0.3704, 16_l2=0.2892, 32_h1=0.7984, 32_l2=0.5579
- [3] time=10.12, avg_loss=0.2458, train_err=7.6809
- Eval: 16_h1=0.2079, 16_l2=0.1581, 32_h1=0.6426, 32_l2=0.4875
- [6] time=10.30, avg_loss=0.2391, train_err=7.4705
- Eval: 16_h1=0.2303, 16_l2=0.1860, 32_h1=0.6388, 32_l2=0.4976
- [9] time=10.24, avg_loss=0.2134, train_err=6.6696
- Eval: 16_h1=0.2252, 16_l2=0.1626, 32_h1=0.6335, 32_l2=0.4734
- [12] time=10.14, avg_loss=0.1867, train_err=5.8336
- Eval: 16_h1=0.2220, 16_l2=0.1662, 32_h1=0.6159, 32_l2=0.4444
- [15] time=10.34, avg_loss=0.1618, train_err=5.0563
- Eval: 16_h1=0.1846, 16_l2=0.1355, 32_h1=0.6092, 32_l2=0.4437
- [18] time=10.16, avg_loss=0.1543, train_err=4.8216
- Eval: 16_h1=0.1876, 16_l2=0.1375, 32_h1=0.6006, 32_l2=0.4470
+ [0] time=10.09, avg_loss=0.6439, train_err=20.1217
+ Eval: 16_h1=0.3112, 16_l2=0.2451, 32_h1=0.7407, 32_l2=0.5562
+ [3] time=10.11, avg_loss=0.2395, train_err=7.4830
+ Eval: 16_h1=0.2119, 16_l2=0.1621, 32_h1=0.7009, 32_l2=0.5500
+ [6] time=10.06, avg_loss=0.2402, train_err=7.5053
+ Eval: 16_h1=0.2200, 16_l2=0.1712, 32_h1=0.6949, 32_l2=0.5358
+ [9] time=10.05, avg_loss=0.2237, train_err=6.9917
+ Eval: 16_h1=0.2063, 16_l2=0.1511, 32_h1=0.6638, 32_l2=0.4938
+ [12] time=10.14, avg_loss=0.1872, train_err=5.8492
+ Eval: 16_h1=0.2248, 16_l2=0.1629, 32_h1=0.6854, 32_l2=0.4926
+ [15] time=10.10, avg_loss=0.1493, train_err=4.6657
+ Eval: 16_h1=0.2186, 16_l2=0.1646, 32_h1=0.6619, 32_l2=0.4929
+ [18] time=10.09, avg_loss=0.1492, train_err=4.6611
+ Eval: 16_h1=0.2014, 16_l2=0.1530, 32_h1=0.6626, 32_l2=0.4440
- {'train_err': 3.9057141728699207, 'avg_loss': 0.12498285353183747, 'avg_lasso_loss': None, 'epoch_train_time': 10.18016356399994}
+ {'train_err': 4.119179047644138, 'avg_loss': 0.13181372952461243, 'avg_lasso_loss': None, 'epoch_train_time': 10.129040809000003}
@@ -499,7 +499,7 @@ In practice we would train a Neural Operator on one or multiple GPUs
.. rst-class:: sphx-glr-timing
- **Total running time of the script:** (3 minutes 27.939 seconds)
+ **Total running time of the script:** (3 minutes 25.226 seconds)
.. _sphx_glr_download_auto_examples_plot_UNO_darcy.py:
diff --git a/dev/_sources/auto_examples/plot_count_flops.rst.txt b/dev/_sources/auto_examples/plot_count_flops.rst.txt
index 06bf91b..3c4fe84 100644
--- a/dev/_sources/auto_examples/plot_count_flops.rst.txt
+++ b/dev/_sources/auto_examples/plot_count_flops.rst.txt
@@ -80,7 +80,7 @@ This output is organized as a defaultdict object that counts the FLOPS used in e
.. code-block:: none
- defaultdict(. at 0x7f90bb157670>, {'': defaultdict(, {'convolution.default': 2982150144, 'bmm.default': 138412032}), 'lifting': defaultdict(, {'convolution.default': 562036736}), 'lifting.fcs.0': defaultdict(, {'convolution.default': 25165824}), 'lifting.fcs.1': defaultdict(, {'convolution.default': 536870912}), 'fno_blocks': defaultdict(, {'convolution.default': 2147483648, 'bmm.default': 138412032}), 'fno_blocks.fno_skips.0': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.fno_skips.0.conv': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.convs.0': defaultdict(, {'bmm.default': 34603008}), 'fno_blocks.channel_mlp.0': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.channel_mlp.0.fcs.0': defaultdict(, {'convolution.default': 134217728}), 'fno_blocks.channel_mlp.0.fcs.1': defaultdict(, {'convolution.default': 134217728}), 'fno_blocks.fno_skips.1': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.fno_skips.1.conv': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.convs.1': defaultdict(, {'bmm.default': 34603008}), 'fno_blocks.channel_mlp.1': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.channel_mlp.1.fcs.0': defaultdict(, {'convolution.default': 134217728}), 'fno_blocks.channel_mlp.1.fcs.1': defaultdict(, {'convolution.default': 134217728}), 'fno_blocks.fno_skips.2': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.fno_skips.2.conv': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.convs.2': defaultdict(, {'bmm.default': 34603008}), 'fno_blocks.channel_mlp.2': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.channel_mlp.2.fcs.0': defaultdict(, {'convolution.default': 134217728}), 'fno_blocks.channel_mlp.2.fcs.1': defaultdict(, {'convolution.default': 134217728}), 'fno_blocks.fno_skips.3': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.fno_skips.3.conv': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.convs.3': defaultdict(, {'bmm.default': 34603008}), 'fno_blocks.channel_mlp.3': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.channel_mlp.3.fcs.0': defaultdict(, {'convolution.default': 134217728}), 'fno_blocks.channel_mlp.3.fcs.1': defaultdict(, {'convolution.default': 134217728}), 'projection': defaultdict(, {'convolution.default': 272629760}), 'projection.fcs.0': defaultdict(, {'convolution.default': 268435456}), 'projection.fcs.1': defaultdict(, {'convolution.default': 4194304})})
+ defaultdict(. at 0x7ff340775550>, {'': defaultdict(, {'convolution.default': 2982150144, 'bmm.default': 138412032}), 'lifting': defaultdict(, {'convolution.default': 562036736}), 'lifting.fcs.0': defaultdict(, {'convolution.default': 25165824}), 'lifting.fcs.1': defaultdict(, {'convolution.default': 536870912}), 'fno_blocks': defaultdict(, {'convolution.default': 2147483648, 'bmm.default': 138412032}), 'fno_blocks.fno_skips.0': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.fno_skips.0.conv': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.convs.0': defaultdict(, {'bmm.default': 34603008}), 'fno_blocks.channel_mlp.0': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.channel_mlp.0.fcs.0': defaultdict(, {'convolution.default': 134217728}), 'fno_blocks.channel_mlp.0.fcs.1': defaultdict(, {'convolution.default': 134217728}), 'fno_blocks.fno_skips.1': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.fno_skips.1.conv': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.convs.1': defaultdict(, {'bmm.default': 34603008}), 'fno_blocks.channel_mlp.1': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.channel_mlp.1.fcs.0': defaultdict(, {'convolution.default': 134217728}), 'fno_blocks.channel_mlp.1.fcs.1': defaultdict(, {'convolution.default': 134217728}), 'fno_blocks.fno_skips.2': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.fno_skips.2.conv': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.convs.2': defaultdict(, {'bmm.default': 34603008}), 'fno_blocks.channel_mlp.2': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.channel_mlp.2.fcs.0': defaultdict(, {'convolution.default': 134217728}), 'fno_blocks.channel_mlp.2.fcs.1': defaultdict(, {'convolution.default': 134217728}), 'fno_blocks.fno_skips.3': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.fno_skips.3.conv': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.convs.3': defaultdict(, {'bmm.default': 34603008}), 'fno_blocks.channel_mlp.3': defaultdict(, {'convolution.default': 268435456}), 'fno_blocks.channel_mlp.3.fcs.0': defaultdict(, {'convolution.default': 134217728}), 'fno_blocks.channel_mlp.3.fcs.1': defaultdict(, {'convolution.default': 134217728}), 'projection': defaultdict(, {'convolution.default': 272629760}), 'projection.fcs.0': defaultdict(, {'convolution.default': 268435456}), 'projection.fcs.1': defaultdict(, {'convolution.default': 4194304})})
@@ -125,7 +125,7 @@ To check the maximum FLOPS used during the forward pass, let's create a recursiv
.. rst-class:: sphx-glr-timing
- **Total running time of the script:** (0 minutes 4.230 seconds)
+ **Total running time of the script:** (0 minutes 3.919 seconds)
.. _sphx_glr_download_auto_examples_plot_count_flops.py:
diff --git a/dev/_sources/auto_examples/plot_darcy_flow.rst.txt b/dev/_sources/auto_examples/plot_darcy_flow.rst.txt
index d799bfb..1c09a44 100644
--- a/dev/_sources/auto_examples/plot_darcy_flow.rst.txt
+++ b/dev/_sources/auto_examples/plot_darcy_flow.rst.txt
@@ -163,7 +163,7 @@ Visualizing the data
.. rst-class:: sphx-glr-timing
- **Total running time of the script:** (0 minutes 0.407 seconds)
+ **Total running time of the script:** (0 minutes 0.408 seconds)
.. _sphx_glr_download_auto_examples_plot_darcy_flow.py:
diff --git a/dev/_sources/auto_examples/plot_darcy_flow_spectrum.rst.txt b/dev/_sources/auto_examples/plot_darcy_flow_spectrum.rst.txt
index 72d9c2d..21a7871 100644
--- a/dev/_sources/auto_examples/plot_darcy_flow_spectrum.rst.txt
+++ b/dev/_sources/auto_examples/plot_darcy_flow_spectrum.rst.txt
@@ -219,7 +219,7 @@ Loading the Navier-Stokes dataset in 128x128 resolution
.. rst-class:: sphx-glr-timing
- **Total running time of the script:** (0 minutes 0.279 seconds)
+ **Total running time of the script:** (0 minutes 0.277 seconds)
.. _sphx_glr_download_auto_examples_plot_darcy_flow_spectrum.py:
diff --git a/dev/_sources/auto_examples/plot_incremental_FNO_darcy.rst.txt b/dev/_sources/auto_examples/plot_incremental_FNO_darcy.rst.txt
index b6947f0..daef0d5 100644
--- a/dev/_sources/auto_examples/plot_incremental_FNO_darcy.rst.txt
+++ b/dev/_sources/auto_examples/plot_incremental_FNO_darcy.rst.txt
@@ -240,15 +240,15 @@ Set up the losses
)
### SCHEDULER ###
-
+
### LOSSES ###
### INCREMENTAL RESOLUTION + GRADIENT EXPLAINED ###
- * Train:
+ * Train:
- * Test: {'h1': , 'l2': }
+ * Test: {'h1': , 'l2': }
@@ -335,15 +335,15 @@ Train the model
Eval: 16_h1=0.8889, 16_l2=0.7005, 32_h1=1.1195, 32_l2=0.7407
[2] time=0.22, avg_loss=0.6497, train_err=9.2815
Eval: 16_h1=0.6372, 16_l2=0.4883, 32_h1=0.6967, 32_l2=0.5000
- [3] time=0.23, avg_loss=0.5559, train_err=7.9411
+ [3] time=0.22, avg_loss=0.5559, train_err=7.9411
Eval: 16_h1=0.6112, 16_l2=0.4348, 32_h1=0.7432, 32_l2=0.4530
- [4] time=0.23, avg_loss=0.4852, train_err=6.9312
+ [4] time=0.22, avg_loss=0.4852, train_err=6.9312
Eval: 16_h1=0.5762, 16_l2=0.4037, 32_h1=0.7138, 32_l2=0.4262
- [5] time=0.23, avg_loss=0.4393, train_err=6.2764
+ [5] time=0.22, avg_loss=0.4393, train_err=6.2764
Eval: 16_h1=0.5515, 16_l2=0.3826, 32_h1=0.7143, 32_l2=0.4146
[6] time=0.23, avg_loss=0.4039, train_err=5.7703
Eval: 16_h1=0.5421, 16_l2=0.3832, 32_h1=0.7289, 32_l2=0.4221
- [7] time=0.23, avg_loss=0.3626, train_err=5.1807
+ [7] time=0.22, avg_loss=0.3626, train_err=5.1807
Eval: 16_h1=0.5418, 16_l2=0.3902, 32_h1=0.7402, 32_l2=0.4312
[8] time=0.23, avg_loss=0.3563, train_err=5.0894
Eval: 16_h1=0.5598, 16_l2=0.3874, 32_h1=0.7716, 32_l2=0.4260
@@ -352,28 +352,28 @@ Train the model
Incre Res Update: change index to 1
Incre Res Update: change sub to 1
Incre Res Update: change res to 16
- [10] time=0.29, avg_loss=0.4253, train_err=6.0757
+ [10] time=0.28, avg_loss=0.4253, train_err=6.0757
Eval: 16_h1=0.4069, 16_l2=0.2959, 32_h1=0.4904, 32_l2=0.2928
- [11] time=0.28, avg_loss=0.3745, train_err=5.3500
+ [11] time=0.27, avg_loss=0.3745, train_err=5.3500
Eval: 16_h1=0.3820, 16_l2=0.2869, 32_h1=0.4769, 32_l2=0.3026
[12] time=0.28, avg_loss=0.3405, train_err=4.8636
Eval: 16_h1=0.3404, 16_l2=0.2598, 32_h1=0.4410, 32_l2=0.2731
- [13] time=0.28, avg_loss=0.3090, train_err=4.4136
+ [13] time=0.27, avg_loss=0.3090, train_err=4.4136
Eval: 16_h1=0.3231, 16_l2=0.2452, 32_h1=0.4245, 32_l2=0.2586
- [14] time=0.28, avg_loss=0.2896, train_err=4.1368
+ [14] time=0.27, avg_loss=0.2896, train_err=4.1368
Eval: 16_h1=0.3130, 16_l2=0.2380, 32_h1=0.4161, 32_l2=0.2522
[15] time=0.28, avg_loss=0.2789, train_err=3.9843
Eval: 16_h1=0.3072, 16_l2=0.2324, 32_h1=0.4151, 32_l2=0.2455
[16] time=0.28, avg_loss=0.2690, train_err=3.8434
Eval: 16_h1=0.3042, 16_l2=0.2305, 32_h1=0.4100, 32_l2=0.2425
- [17] time=0.28, avg_loss=0.2637, train_err=3.7674
+ [17] time=0.27, avg_loss=0.2637, train_err=3.7674
Eval: 16_h1=0.2954, 16_l2=0.2229, 32_h1=0.4023, 32_l2=0.2354
- [18] time=0.29, avg_loss=0.2557, train_err=3.6533
+ [18] time=0.28, avg_loss=0.2557, train_err=3.6533
Eval: 16_h1=0.2756, 16_l2=0.2105, 32_h1=0.3780, 32_l2=0.2269
- [19] time=0.29, avg_loss=0.2395, train_err=3.4208
+ [19] time=0.28, avg_loss=0.2395, train_err=3.4208
Eval: 16_h1=0.2735, 16_l2=0.2106, 32_h1=0.3738, 32_l2=0.2303
- {'train_err': 3.4207682268960133, 'avg_loss': 0.23945377588272096, 'avg_lasso_loss': None, 'epoch_train_time': 0.28630295900006786, '16_h1': tensor(0.2735), '16_l2': tensor(0.2106), '32_h1': tensor(0.3738), '32_l2': tensor(0.2303)}
+ {'train_err': 3.4207682268960133, 'avg_loss': 0.23945377588272096, 'avg_lasso_loss': None, 'epoch_train_time': 0.27744663099997524, '16_h1': tensor(0.2735), '16_l2': tensor(0.2106), '32_h1': tensor(0.3738), '32_l2': tensor(0.2303)}
@@ -447,7 +447,7 @@ In practice we would train a Neural Operator on one or multiple GPUs
.. rst-class:: sphx-glr-timing
- **Total running time of the script:** (0 minutes 7.140 seconds)
+ **Total running time of the script:** (0 minutes 6.999 seconds)
.. _sphx_glr_download_auto_examples_plot_incremental_FNO_darcy.py:
diff --git a/dev/_sources/auto_examples/sg_execution_times.rst.txt b/dev/_sources/auto_examples/sg_execution_times.rst.txt
index 5d8d254..3f99cd8 100644
--- a/dev/_sources/auto_examples/sg_execution_times.rst.txt
+++ b/dev/_sources/auto_examples/sg_execution_times.rst.txt
@@ -6,7 +6,7 @@
Computation times
=================
-**05:59.105** total execution time for 8 files **from auto_examples**:
+**05:54.500** total execution time for 8 files **from auto_examples**:
.. container::
@@ -33,25 +33,25 @@ Computation times
- Time
- Mem (MB)
* - :ref:`sphx_glr_auto_examples_plot_UNO_darcy.py` (``plot_UNO_darcy.py``)
- - 03:27.939
+ - 03:25.226
- 0.0
* - :ref:`sphx_glr_auto_examples_plot_SFNO_swe.py` (``plot_SFNO_swe.py``)
- - 01:25.850
+ - 01:25.808
- 0.0
* - :ref:`sphx_glr_auto_examples_plot_FNO_darcy.py` (``plot_FNO_darcy.py``)
- - 00:53.259
+ - 00:51.863
- 0.0
* - :ref:`sphx_glr_auto_examples_plot_incremental_FNO_darcy.py` (``plot_incremental_FNO_darcy.py``)
- - 00:07.140
+ - 00:06.999
- 0.0
* - :ref:`sphx_glr_auto_examples_plot_count_flops.py` (``plot_count_flops.py``)
- - 00:04.230
+ - 00:03.919
- 0.0
* - :ref:`sphx_glr_auto_examples_plot_darcy_flow.py` (``plot_darcy_flow.py``)
- - 00:00.407
+ - 00:00.408
- 0.0
* - :ref:`sphx_glr_auto_examples_plot_darcy_flow_spectrum.py` (``plot_darcy_flow_spectrum.py``)
- - 00:00.279
+ - 00:00.277
- 0.0
* - :ref:`sphx_glr_auto_examples_checkpoint_FNO_darcy.py` (``checkpoint_FNO_darcy.py``)
- 00:00.000
diff --git a/dev/_sources/install.rst.txt b/dev/_sources/install.rst.txt
index 6f5efdd..a05d6ab 100644
--- a/dev/_sources/install.rst.txt
+++ b/dev/_sources/install.rst.txt
@@ -7,14 +7,14 @@ Once installed, you can import it as ``neuralop``::
import neuralop
+~~~~~~~~~~~~~~~~~~~~~~~~~~
Pre-requisites
===============
You will need to have Python 3 installed, as well as NumPy, Scipy, PyTorch, TensorLy and TensorLy-Torch.
If you are starting with Python or generally want a pain-free experience, we recommend that you
-install the `Anaconda distribiution `_. It comes ready to use with all prerequisite packages.
-
+install the `Anaconda distribution `_. It comes ready to use with all prerequisite packages.
Installing with pip
=================================
@@ -28,16 +28,19 @@ To install via pip, simply run, in your terminal::
(the `-U` is optional, use it if you want to update the package).
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
Building ``neuraloperator`` from source
========================================
-First clone the repository and cd there::
+First ensure that you are in an environment with Python 3, NumPy, Scipy, PyTorch, TensorLy and TensorLy-Torch.
+Then clone the repository and cd there::
git clone https://github.com/neuraloperator/neuraloperator
cd neuraloperator
-Then can install the requirements ::
+Then, install the requirements ::
pip install -r requirements.txt
@@ -46,6 +49,23 @@ Then install the package (here in editable mode with `-e`, or equivalently `--ed
pip install -e .
+~~~~~~~~~~~~~~~~~~~~
+
+.. _open3d_dependency:
+Fast 3D spatial computing with Open3D
+=======================================
+
+To accelerate spatial computing for 3D applications, we include
+`Open3D `_ as an optional dependency. Open3D includes
+utilities for reading 3D mesh files and fast 3D neighbor search. To install::
+
+ pip install open3d
+
+Note that Open3D is only
+compatible with specific builds of PyTorch and CUDA. Check the sub-package
+`Open3D-ML `_ documentation for more details.
+
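+For instance, a minimal sketch of reading a mesh and running a k-nearest-neighbor
+search with the core Open3D API (the mesh filename below is a placeholder)::
+
+    import open3d as o3d
+
+    mesh = o3d.io.read_triangle_mesh("car.obj")                 # placeholder mesh file
+    pcd = mesh.sample_points_uniformly(number_of_points=2048)   # mesh -> point cloud
+    tree = o3d.geometry.KDTreeFlann(pcd)                        # KD-tree for fast search
+    k, idx, dist2 = tree.search_knn_vector_3d(pcd.points[0], 10)
+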
+~~~~~~~~~~~~~~~~~~~~~
Running the tests
=================
diff --git a/dev/_sources/modules/api.rst.txt b/dev/_sources/modules/api.rst.txt
index b39132a..a6ccb07 100644
--- a/dev/_sources/modules/api.rst.txt
+++ b/dev/_sources/modules/api.rst.txt
@@ -339,6 +339,37 @@ We also ship a small dataset for testing:
load_darcy_flow_small
+We provide downloadable datasets for Darcy-Flow, Navier-Stokes, and Car-CFD.
+
+.. automodule:: neuralop.data.datasets.darcy
+ :no-members:
+ :no-inherited-members:
+
+.. autosummary::
+ :toctree: generated
+ :template: class.rst
+
+ DarcyDataset
+
+.. automodule:: neuralop.data.datasets.navier_stokes
+ :no-members:
+ :no-inherited-members:
+
+.. autosummary::
+ :toctree: generated
+ :template: class.rst
+
+ NavierStokesDataset
+
+.. automodule:: neuralop.data.datasets.car_cfd_dataset
+ :no-members:
+ :no-inherited-members:
+
+.. autosummary::
+ :toctree: generated
+ :template: class.rst
+
+ CarCFDDataset
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
DataProcessors
diff --git a/dev/_sources/modules/generated/neuralop.data.datasets.car_cfd_dataset.CarCFDDataset.rst.txt b/dev/_sources/modules/generated/neuralop.data.datasets.car_cfd_dataset.CarCFDDataset.rst.txt
new file mode 100644
index 0000000..9000086
--- /dev/null
+++ b/dev/_sources/modules/generated/neuralop.data.datasets.car_cfd_dataset.CarCFDDataset.rst.txt
@@ -0,0 +1,11 @@
+:mod:`neuralop.data.datasets.car_cfd_dataset`.CarCFDDataset
+==================================================================
+
+.. currentmodule:: neuralop.data.datasets.car_cfd_dataset
+
+.. autoclass:: CarCFDDataset
+ :members:
+
+.. raw:: html
+
+
\ No newline at end of file
diff --git a/dev/_sources/modules/generated/neuralop.data.datasets.darcy.DarcyDataset.rst.txt b/dev/_sources/modules/generated/neuralop.data.datasets.darcy.DarcyDataset.rst.txt
new file mode 100644
index 0000000..9c0cfb6
--- /dev/null
+++ b/dev/_sources/modules/generated/neuralop.data.datasets.darcy.DarcyDataset.rst.txt
@@ -0,0 +1,11 @@
+:mod:`neuralop.data.datasets.darcy`.DarcyDataset
+=======================================================
+
+.. currentmodule:: neuralop.data.datasets.darcy
+
+.. autoclass:: DarcyDataset
+ :members:
+
+.. raw:: html
+
+
\ No newline at end of file
diff --git a/dev/_sources/modules/generated/neuralop.data.datasets.navier_stokes.NavierStokesDataset.rst.txt b/dev/_sources/modules/generated/neuralop.data.datasets.navier_stokes.NavierStokesDataset.rst.txt
new file mode 100644
index 0000000..159d6b7
--- /dev/null
+++ b/dev/_sources/modules/generated/neuralop.data.datasets.navier_stokes.NavierStokesDataset.rst.txt
@@ -0,0 +1,11 @@
+:mod:`neuralop.data.datasets.navier_stokes`.NavierStokesDataset
+======================================================================
+
+.. currentmodule:: neuralop.data.datasets.navier_stokes
+
+.. autoclass:: NavierStokesDataset
+ :members:
+
+.. raw:: html
+
+
\ No newline at end of file
diff --git a/dev/_sources/sg_execution_times.rst.txt b/dev/_sources/sg_execution_times.rst.txt
index 7178903..6589866 100644
--- a/dev/_sources/sg_execution_times.rst.txt
+++ b/dev/_sources/sg_execution_times.rst.txt
@@ -6,7 +6,7 @@
Computation times
=================
-**05:59.105** total execution time for 8 files **from all galleries**:
+**05:54.500** total execution time for 8 files **from all galleries**:
.. container::
@@ -33,25 +33,25 @@ Computation times
- Time
- Mem (MB)
* - :ref:`sphx_glr_auto_examples_plot_UNO_darcy.py` (``../../examples/plot_UNO_darcy.py``)
- - 03:27.939
+ - 03:25.226
- 0.0
* - :ref:`sphx_glr_auto_examples_plot_SFNO_swe.py` (``../../examples/plot_SFNO_swe.py``)
- - 01:25.850
+ - 01:25.808
- 0.0
* - :ref:`sphx_glr_auto_examples_plot_FNO_darcy.py` (``../../examples/plot_FNO_darcy.py``)
- - 00:53.259
+ - 00:51.863
- 0.0
* - :ref:`sphx_glr_auto_examples_plot_incremental_FNO_darcy.py` (``../../examples/plot_incremental_FNO_darcy.py``)
- - 00:07.140
+ - 00:06.999
- 0.0
* - :ref:`sphx_glr_auto_examples_plot_count_flops.py` (``../../examples/plot_count_flops.py``)
- - 00:04.230
+ - 00:03.919
- 0.0
* - :ref:`sphx_glr_auto_examples_plot_darcy_flow.py` (``../../examples/plot_darcy_flow.py``)
- - 00:00.407
+ - 00:00.408
- 0.0
* - :ref:`sphx_glr_auto_examples_plot_darcy_flow_spectrum.py` (``../../examples/plot_darcy_flow_spectrum.py``)
- - 00:00.279
+ - 00:00.277
- 0.0
* - :ref:`sphx_glr_auto_examples_checkpoint_FNO_darcy.py` (``../../examples/checkpoint_FNO_darcy.py``)
- 00:00.000
diff --git a/dev/auto_examples/plot_FNO_darcy.html b/dev/auto_examples/plot_FNO_darcy.html
index e35c86b..6d4f602 100644
--- a/dev/auto_examples/plot_FNO_darcy.html
+++ b/dev/auto_examples/plot_FNO_darcy.html
@@ -301,13 +301,13 @@ Training the model
@@ -451,7 +451,7 @@ Zero-shot super-evaluation
-