Commit 3d07a44

Github action: auto-update.
github-actions[bot] committed Oct 12, 2024
1 parent ed928ac commit 3d07a44
Showing 88 changed files with 2,390 additions and 1,362 deletions.
@@ -4,7 +4,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"\n# Training a neural operator on Darcy-Flow - Author Robert Joseph George\nIn this example, we demonstrate how to use the small Darcy-Flow example we ship with the package on Incremental FNO and Incremental Resolution\n"
"\n# Training an FNO with incremental meta-learning\nIn this example, we demonstrate how to use the small Darcy-Flow \nexample we ship with the package to demonstrate the Incremental FNO\nmeta-learning algorithm\n"
]
},
{
111 changes: 88 additions & 23 deletions dev/_downloads/1a3050d57a180b92b424ce128dfe1d36/plot_FNO_darcy.py
@@ -1,19 +1,21 @@
"""
Training a TFNO on Darcy-Flow
Training an FNO on Darcy-Flow
=============================
In this example, we demonstrate how to use the small Darcy-Flow example we ship with the package
to train a Tensorized Fourier-Neural Operator
In this example, we demonstrate how to use the small `Darcy-Flow example <../auto_examples/plot_darcy_flow.html>`_ we ship with the package
to train a Fourier Neural Operator.
Note that this dataset is much smaller than one we would use in practice. The small Darcy-flow is an example built to
be trained on a CPU in a few seconds, whereas normally we would train on one or multiple GPUs.
"""

# %%
#


import torch
import matplotlib.pyplot as plt
import sys
from neuralop.models import TFNO
from neuralop.models import FNO
from neuralop import Trainer
from neuralop.training import AdamW
from neuralop.data.datasets import load_darcy_flow_small
@@ -24,7 +26,7 @@


# %%
# Loading the Navier-Stokes dataset in 128x128 resolution
# Let's load the small Darcy-flow dataset.
train_loader, test_loaders, data_processor = load_darcy_flow_small(
        n_train=1000, batch_size=32,
        test_resolutions=[16, 32], n_tests=[100, 50],
@@ -34,15 +36,22 @@


# %%
# We create a tensorized FNO model
# We create a simple FNO model

model = TFNO(n_modes=(16, 16), in_channels=1, hidden_channels=32, projection_channels=64, factorization='tucker', rank=0.42)
model = FNO(n_modes=(16, 16),
            in_channels=1,
            out_channels=1,
            hidden_channels=32,
            projection_channel_ratio=2)
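
# Note: ``projection_channel_ratio`` is assumed to scale ``hidden_channels`` to
# set the width of the projection block; that reading is consistent with this
# commit, where ``projection_channel_ratio=2`` with ``hidden_channels=32``
# replaces the old ``projection_channels=64`` (2 * 32 = 64).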
model = model.to(device)

n_params = count_model_params(model)
print(f'\nOur model has {n_params} parameters.')
sys.stdout.flush()

# %%
# Training setup
# ----------------

# %%
# Create the optimizer
@@ -53,7 +62,7 @@


# %%
# Creating the losses
# Then create the losses
l2loss = LpLoss(d=2, p=2)
h1loss = H1Loss(d=2)
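
# %%
# A hedged aside on these losses (check the neuralop docs for the exact
# reduction): with ``d=2, p=2``, ``LpLoss`` is typically used as a relative
# L2 error over the two spatial dimensions,
#
#     ||u - v||_2 / ||v||_2,
#
# while ``H1Loss`` also penalizes the error in first derivatives,
#
#     ( ||u - v||_2^2 + ||grad(u - v)||_2^2 )^(1/2),
#
# normalized analogously.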

@@ -62,7 +71,8 @@


# %%

# Training the model
# ---------------------

print('\n### MODEL ###\n', model)
print('\n### OPTIMIZER ###\n', optimizer)
@@ -74,7 +84,7 @@


# %%
# Create the trainer
# Create the trainer:
trainer = Trainer(model=model, n_epochs=20,
                  device=device,
                  data_processor=data_processor,
@@ -85,7 +95,7 @@


# %%
# Actually train the model on our small Darcy-Flow dataset
# Then train the model on our small Darcy-Flow dataset:

trainer.train(train_loader=train_loader,
              test_loaders=test_loaders,
@@ -95,18 +105,61 @@
              training_loss=train_loss,
              eval_losses=eval_losses)

# %%
# .. _plot_preds:
# Visualizing predictions
# ------------------------
# Let's take a look at what our model's predicted outputs look like.
# Again note that in this example, we train on a very small resolution for
# a very small number of epochs.
# In practice, we would train at a larger resolution, on many more samples.

test_samples = test_loaders[16].dataset

fig = plt.figure(figsize=(7, 7))
for index in range(3):
    data = test_samples[index]
    data = data_processor.preprocess(data, batched=False)
    # Input x
    x = data['x']
    # Ground-truth
    y = data['y']
    # Model prediction
    out = model(x.unsqueeze(0))

    ax = fig.add_subplot(3, 3, index*3 + 1)
    ax.imshow(x[0], cmap='gray')
    if index == 0:
        ax.set_title('Input x')
    plt.xticks([], [])
    plt.yticks([], [])

    ax = fig.add_subplot(3, 3, index*3 + 2)
    ax.imshow(y.squeeze())
    if index == 0:
        ax.set_title('Ground-truth y')
    plt.xticks([], [])
    plt.yticks([], [])

    ax = fig.add_subplot(3, 3, index*3 + 3)
    ax.imshow(out.squeeze().detach().numpy())
    if index == 0:
        ax.set_title('Model prediction')
    plt.xticks([], [])
    plt.yticks([], [])

fig.suptitle('Inputs, ground-truth output and prediction (16x16).', y=0.98)
plt.tight_layout()
fig.show()


# %%
# Plot the prediction, and compare with the ground-truth
# Note that we trained on a very small resolution for
# a very small number of epochs
# In practice, we would train at larger resolution, on many more samples.
#
# However, for practicity, we created a minimal example that
# i) fits in just a few Mb of memory
# ii) can be trained quickly on CPU
#
# In practice we would train a Neural Operator on one or multiple GPUs
# .. _zero_shot:
# Zero-shot super-resolution
# ---------------------------
# In addition to training and making predictions on the same input size,
# the FNO's invariance to the discretization of input data means we
# can natively make predictions on higher-resolution inputs and get higher-resolution outputs.

test_samples = test_loaders[32].dataset

@@ -142,6 +195,18 @@
    plt.xticks([], [])
    plt.yticks([], [])

fig.suptitle('Inputs, ground-truth output and prediction.', y=0.98)
fig.suptitle('Inputs, ground-truth output and prediction (32x32).', y=0.98)
plt.tight_layout()
fig.show()

# %%
# We only trained the model on data at a resolution of 16x16, and with no modifications
# or special handling, we were able to perform inference on higher-resolution input data
# and get higher-resolution predictions! In practice, we often want to evaluate neural operators
# at multiple resolutions to track a model's zero-shot super-resolution performance throughout
# training. That's why many of our datasets, including the small Darcy-flow we showcased,
# are parameterized with a list of `test_resolutions` to choose from.
#
# However, as you can see, these predictions are noisier than we would expect for a model evaluated
# at the same resolution at which it was trained. Leveraging the FNO's discretization-invariance, there
# are other ways to scale the outputs of the FNO to train a true super-resolution capability.
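
# %%
# As a minimal sketch of that multi-resolution evaluation (assuming, as in the
# cells above, that ``test_loaders`` is a dict keyed by resolution, and that
# ``data_processor.preprocess``/``postprocess`` behave as the Trainer uses
# them; both assumptions are worth verifying against your neuralop version):

model.eval()
with torch.no_grad():
    for res, loader in test_loaders.items():
        running_l2, n_batches = 0.0, 0
        for batch in loader:
            # Normalize inputs the same way training did
            batch = data_processor.preprocess(batch)
            out = model(batch['x'])
            # Undo output normalization before scoring
            out, batch = data_processor.postprocess(out, batch)
            running_l2 += l2loss(out, batch['y']).item()
            n_batches += 1
        print(f'resolution {res}: mean test L2 loss {running_l2 / n_batches:.4f}')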
@@ -1,17 +1,16 @@
"""
Training a TFNO on Darcy-Flow
=============================
Checkpointing and loading training states
=========================================
In this example, we demonstrate how to use the small Darcy-Flow example we ship with the package
to train a Tensorized Fourier-Neural Operator
In this example, we demonstrate the Trainer's saving and loading functionality, which makes it easy to checkpoint and resume training states.
"""

# %%
#
import torch
import matplotlib.pyplot as plt
import sys
from neuralop.models import TFNO
from neuralop.models import FNO
from neuralop import Trainer
from neuralop.training import AdamW
from neuralop.data.datasets import load_darcy_flow_small
@@ -31,9 +30,16 @@


# %%
# We create a tensorized FNO model
# We create an FNO model

model = FNO(n_modes=(16, 16),
            in_channels=1,
            out_channels=1,
            hidden_channels=32,
            projection_channel_ratio=2,
            factorization='tucker',
            rank=0.42)

model = TFNO(n_modes=(16, 16), in_channels=1, hidden_channels=32, projection_channels=64, factorization='tucker', rank=0.42)
model = model.to(device)

n_params = count_model_params(model)
@@ -94,6 +100,7 @@
              save_dir="./checkpoints")


# .. _resume_from_dir:
# resume training from saved checkpoint at epoch 10

trainer = Trainer(model=model, n_epochs=20,
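                  # (continuation reconstructed from the matching notebook cell
                  # later in this diff)
                  device=device,
                  data_processor=data_processor,
                  wandb_log=False,
                  eval_interval=3,
                  use_distributed=False,
                  verbose=True)

# Passing ``resume_from_dir`` points ``train`` at the checkpoint directory
# saved above, so training resumes from the stored epoch-10 state.
trainer.train(train_loader=train_loader,
              test_loaders={},
              optimizer=optimizer,
              scheduler=scheduler,
              regularizer=False,
              training_loss=train_loss,
              resume_from_dir="./checkpoints")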
@@ -4,7 +4,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"\n# Training a TFNO on Darcy-Flow\n\nIn this example, we demonstrate how to use the small Darcy-Flow example we ship with the package\nto train a Tensorized Fourier-Neural Operator\n"
"\n# Checkpointing and loading training states\n\nIn this example, we demonstrate the Trainer's saving and loading functionality, which makes it easy to checkpoint and resume training states. \n"
]
},
{
@@ -15,7 +15,7 @@
},
"outputs": [],
"source": [
"import torch\nimport matplotlib.pyplot as plt\nimport sys\nfrom neuralop.models import TFNO\nfrom neuralop import Trainer\nfrom neuralop.training import AdamW\nfrom neuralop.data.datasets import load_darcy_flow_small\nfrom neuralop.utils import count_model_params\nfrom neuralop import LpLoss, H1Loss\n\ndevice = 'cpu'"
"import torch\nimport matplotlib.pyplot as plt\nimport sys\nfrom neuralop.models import FNO\nfrom neuralop import Trainer\nfrom neuralop.training import AdamW\nfrom neuralop.data.datasets import load_darcy_flow_small\nfrom neuralop.utils import count_model_params\nfrom neuralop import LpLoss, H1Loss\n\ndevice = 'cpu'"
]
},
{
@@ -40,7 +40,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"We create a tensorized FNO model\n\n"
"We create an FNO model\n\n"
]
},
{
@@ -51,7 +51,7 @@
},
"outputs": [],
"source": [
"model = TFNO(n_modes=(16, 16), in_channels=1, hidden_channels=32, projection_channels=64, factorization='tucker', rank=0.42)\nmodel = model.to(device)\n\nn_params = count_model_params(model)\nprint(f'\\nOur model has {n_params} parameters.')\nsys.stdout.flush()"
"model = FNO(n_modes=(16, 16),\n in_channels=1, \n out_channels=1, \n hidden_channels=32, \n projection_channel_ratio=2, \n factorization='tucker', \n rank=0.42)\n\nmodel = model.to(device)\n\nn_params = count_model_params(model)\nprint(f'\\nOur model has {n_params} parameters.')\nsys.stdout.flush()"
]
},
{
@@ -134,7 +134,7 @@
},
"outputs": [],
"source": [
"trainer.train(train_loader=train_loader,\n test_loaders={},\n optimizer=optimizer,\n scheduler=scheduler, \n regularizer=False, \n training_loss=train_loss, \n save_every=1,\n save_dir=\"./checkpoints\")\n\n\n# resume training from saved checkpoint at epoch 10\n\ntrainer = Trainer(model=model, n_epochs=20,\n device=device,\n data_processor=data_processor,\n wandb_log=False,\n eval_interval=3,\n use_distributed=False,\n verbose=True)\n\ntrainer.train(train_loader=train_loader,\n test_loaders={},\n optimizer=optimizer,\n scheduler=scheduler, \n regularizer=False, \n training_loss=train_loss,\n resume_from_dir=\"./checkpoints\")"
"trainer.train(train_loader=train_loader,\n test_loaders={},\n optimizer=optimizer,\n scheduler=scheduler, \n regularizer=False, \n training_loss=train_loss, \n save_every=1,\n save_dir=\"./checkpoints\")\n\n\n# .. resume_from_dir:\n# resume training from saved checkpoint at epoch 10\n\ntrainer = Trainer(model=model, n_epochs=20,\n device=device,\n data_processor=data_processor,\n wandb_log=False,\n eval_interval=3,\n use_distributed=False,\n verbose=True)\n\ntrainer.train(train_loader=train_loader,\n test_loaders={},\n optimizer=optimizer,\n scheduler=scheduler, \n regularizer=False, \n training_loss=train_loss,\n resume_from_dir=\"./checkpoints\")"
]
}
],
Binary file modified dev/_downloads/5e60095ce99919773daa83384f767e02/plot_SFNO_swe.zip
@@ -15,7 +15,7 @@
},
"outputs": [],
"source": [
"from copy import deepcopy\nimport torch\nfrom torchtnt.utils.flops import FlopTensorDispatchMode\n\nfrom neuralop.models import FNO\n\ndevice = 'cpu'\n\nfno = FNO(n_modes=(64,64), \n in_channels=3, \n out_channels=1, \n hidden_channels=64, \n projection_channels=64)\n\nbatch_size = 4\nmodel_input = torch.randn(batch_size, 3, 128, 128)\n\n\nwith FlopTensorDispatchMode(fno) as ftdm:\n # count forward flops\n res = fno(model_input).mean()\n fno_forward_flops = deepcopy(ftdm.flop_counts)\n \n ftdm.reset()\n res.backward()\n fno_backward_flops = deepcopy(ftdm.flop_counts)"
"from copy import deepcopy\nimport torch\nfrom torchtnt.utils.flops import FlopTensorDispatchMode\n\nfrom neuralop.models import FNO\n\ndevice = 'cpu'\n\nfno = FNO(n_modes=(64,64), \n in_channels=1, \n out_channels=1, \n hidden_channels=64, \n projection_channel_ratio=1)\n\nbatch_size = 4\nmodel_input = torch.randn(batch_size, 1, 128, 128)\n\n\nwith FlopTensorDispatchMode(fno) as ftdm:\n # count forward flops\n res = fno(model_input).mean()\n fno_forward_flops = deepcopy(ftdm.flop_counts)\n \n ftdm.reset()\n res.backward()\n fno_backward_flops = deepcopy(ftdm.flop_counts)"
]
},
{
@@ -22,7 +22,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"Loading the Navier-Stokes dataset in 128x128 resolution\n\n"
"Loading the Spherical Shallow Water Equations in multiple resolutions\n\n"
]
},
{
@@ -40,7 +40,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"We create a tensorized FNO model\n\n"
"We create a spherical FNO model\n\n"
]
},
{
@@ -51,7 +51,7 @@
},
"outputs": [],
"source": [
"model = SFNO(n_modes=(32, 32), in_channels=3, out_channels=3, hidden_channels=32, projection_channels=64, factorization='dense')\nmodel = model.to(device)\n\nn_params = count_model_params(model)\nprint(f'\\nOur model has {n_params} parameters.')\nsys.stdout.flush()"
"model = SFNO(n_modes=(32, 32),\n in_channels=3,\n out_channels=3,\n hidden_channels=32,\n projection_channel_ratio=2,\n factorization='dense')\nmodel = model.to(device)\n\nn_params = count_model_params(model)\nprint(f'\\nOur model has {n_params} parameters.')\nsys.stdout.flush()"
]
},
{
@@ -123,7 +123,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"Actually train the model on our small Darcy-Flow dataset\n\n"
"Train the model on the spherical SWE dataset\n\n"
]
},
{
15 changes: 10 additions & 5 deletions dev/_downloads/be42c4c413e9b89016fa3a4984cb9758/plot_SFNO_swe.py
@@ -23,15 +23,20 @@
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# %%
# Loading the Navier-Stokes dataset in 128x128 resolution
# Loading the Spherical Shallow Water Equations at multiple resolutions
train_loader, test_loaders = load_spherical_swe(n_train=200, batch_size=4, train_resolution=(32, 64),
        test_resolutions=[(32, 64), (64, 128)], n_tests=[50, 50], test_batch_sizes=[10, 10],)


# %%
# We create a tensorized FNO model

model = SFNO(n_modes=(32, 32), in_channels=3, out_channels=3, hidden_channels=32, projection_channels=64, factorization='dense')
# We create a spherical FNO model

model = SFNO(n_modes=(32, 32),
             in_channels=3,
             out_channels=3,
             hidden_channels=32,
             projection_channel_ratio=2,
             factorization='dense')
model = model.to(device)

n_params = count_model_params(model)
@@ -79,7 +84,7 @@


# %%
# Actually train the model on our small Darcy-Flow dataset
# Train the model on the spherical SWE dataset

trainer.train(train_loader=train_loader,
              test_loaders=test_loaders,
Binary file modified dev/_downloads/cefc537c5730a6b3e916b83c1fd313d6/plot_UNO_darcy.zip
@@ -19,13 +19,13 @@
device = 'cpu'

fno = FNO(n_modes=(64,64),
          in_channels=3,
          in_channels=1,
          out_channels=1,
          hidden_channels=64,
          projection_channels=64)
          projection_channel_ratio=1)

batch_size = 4
model_input = torch.randn(batch_size, 3, 128, 128)
model_input = torch.randn(batch_size, 1, 128, 128)


with FlopTensorDispatchMode(fno) as ftdm:
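
# %%
# A hedged follow-on: ``ftdm.flop_counts`` from torchtnt (used in the notebook
# version of this example to produce ``fno_forward_flops`` and
# ``fno_backward_flops``) is assumed to map module names to per-operator FLOP
# counts, with the empty string keying the model as a whole.

total_fwd = sum(fno_forward_flops[''].values())
total_bwd = sum(fno_backward_flops[''].values())
print(f'forward FLOPs: {total_fwd:,}, backward FLOPs: {total_bwd:,}')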