Tensorflow 1.x backend: multiple outputs extension of DeepONet #1410

Closed
wants to merge 45 commits
Changes from 3 commits
Commits
45 commits
9a7e38b  Tensorflow 1.x backend: multiple outputs extension of DeepONet (vl-dud, Jul 31, 2023)
ae48af0  Codacy Pylint fix (vl-dud, Aug 3, 2023)
76b7964  move vanilla deeponet building into a separate method (vl-dud, Aug 4, 2023)
c0e06a5  Remove unwanted method (vl-dud, Aug 23, 2023)
8338b81  Change `output_count` to `num_outputs`; format via Black (vl-dud, Aug 26, 2023)
1515c4b  add DeepONet building strategies (vl-dud, Sep 15, 2023)
ba8d2a0  Add docs for the strategy argument (vl-dud, Sep 18, 2023)
5087fc2  Format comments (vl-dud, Sep 18, 2023)
44dae05  Use maximum 88 characters per line (vl-dud, Sep 18, 2023)
4f23bf8  rename merge to merge_branch_trunk (vl-dud, Sep 20, 2023)
6f75d99  rename merge to merge_branch_trunk (vl-dud, Oct 3, 2023)
367905f  Change default deeponet strategy (vl-dud, Oct 4, 2023)
1d233c8  Change strategy to multi_output_strategy (vl-dud, Oct 9, 2023)
9fe3572  Codacy Pylint fix (vl-dud, Oct 9, 2023)
cb2b3fc  Update deeponet.py for tf2 multiple outputs (mitchelldaneker, Oct 9, 2023)
e7b7e5d  Update deeponet.py (mitchelldaneker, Oct 10, 2023)
9f41776  Update deeponet.py (mitchelldaneker, Oct 10, 2023)
97c3641  Add files via upload (mitchelldaneker, Oct 10, 2023)
85e5984  Update triple.py (mitchelldaneker, Oct 10, 2023)
b33f812  Merge remote-tracking branch 'origin/master' into deeponet-multiple-o… (vl-dud, Oct 13, 2023)
25bf219  Add DeepONet strategy classes to __init__.py (vl-dud, Oct 13, 2023)
44bfd0a  Update __init__.py (mitchelldaneker, Oct 13, 2023)
d11ab3a  Update deeponet.py (mitchelldaneker, Oct 13, 2023)
10ed010  Update __init__.py (mitchelldaneker, Oct 13, 2023)
3ccd772  Update deeponet.py (mitchelldaneker, Oct 13, 2023)
64ab358  Update antiderivative_aligned_UQ.py (mitchelldaneker, Oct 13, 2023)
68b2733  Update deeponet.py (mitchelldaneker, Oct 13, 2023)
569f94e  Revert "Add DeepONet strategy classes to __init__.py" (vl-dud, Oct 16, 2023)
7c4f750  Hide deeponet strategy classes (vl-dud, Oct 16, 2023)
ee3eccc  Update triple.py (mitchelldaneker, Oct 16, 2023)
91a07e9  Update deeponet.py (mitchelldaneker, Oct 16, 2023)
1eda936  Merge pull request #3 from mitchelldaneker/multiple-outputs-deeponet-tf2 (vl-dud, Oct 19, 2023)
4c8c40e  Format a code with Black (vl-dud, Oct 19, 2023)
bed66e0  Codacy Pylint fix (vl-dud, Oct 19, 2023)
7d938c5  Codacy Pylint fix (vl-dud, Oct 19, 2023)
509f42c  Update triple.py (mitchelldaneker, Oct 19, 2023)
5f67bdd  Update deeponet.py (mitchelldaneker, Oct 19, 2023)
b9bf993  Update triple.py (mitchelldaneker, Oct 19, 2023)
5d66929  Update deeponet.py (mitchelldaneker, Oct 20, 2023)
f3bb8d2  Update deeponet.py (mitchelldaneker, Oct 20, 2023)
8102950  Update deeponet.py (mitchelldaneker, Oct 20, 2023)
54662db  Merge pull request #6 from mitchelldaneker/tf_multiple_outputs (vl-dud, Oct 20, 2023)
2459f80  Update deeponet.py (mitchelldaneker, Oct 20, 2023)
b79e0e0  Update triple.py (mitchelldaneker, Oct 20, 2023)
226ddac  Merge pull request #7 from mitchelldaneker/tf2_multiple_outputs (vl-dud, Oct 20, 2023)
5 changes: 4 additions & 1 deletion deepxde/data/pde_operator.py
@@ -72,6 +72,8 @@ def __init__(

def losses(self, targets, outputs, loss_fn, inputs, model, aux=None):
f = []
if hasattr(model.net, 'prepare_multiple_outputs'):
outputs = model.net.prepare_multiple_outputs(outputs)
if self.pde.pde is not None:
f = self.pde.pde(inputs[1], outputs, model.net.auxiliary_vars)
if not isinstance(f, (list, tuple)):
@@ -237,7 +239,8 @@ def _losses(self, outputs, loss_fn, inputs, model, num_func):
losses = []
for i in range(num_func):
out = outputs[i][:, None]

if hasattr(model.net, 'prepare_multiple_outputs'):
out = model.net.prepare_multiple_outputs(out)
f = []
if self.pde.pde is not None:
f = self.pde.pde(inputs[1], out, model.net.auxiliary_vars[i][:, None])
80 changes: 60 additions & 20 deletions deepxde/nn/tensorflow_compat_v1/deeponet.py
@@ -42,6 +42,7 @@ def __init__(
stacked=False,
trainable_branch=True,
trainable_trunk=True,
output_count=1
):
super().__init__()
if isinstance(trainable_trunk, (list, tuple)):
@@ -50,6 +51,7 @@ def __init__(

self.layer_size_func = layer_sizes_branch
self.layer_size_loc = layer_sizes_trunk
self.output_count = output_count
if isinstance(activation, dict):
self.activation_branch = activations.get(activation["branch"])
self.activation_trunk = activations.get(activation["trunk"])
@@ -94,13 +96,33 @@ def _feed_dict_inputs(self, inputs):
inputs = [np.tile(self._X_func_default, (n, 1)), inputs]
return dict(zip([self.X_func, self.X_loc], inputs))

def prepare_multiple_outputs(self, outputs):
if self.output_count > 1:
return tf.reshape(outputs, (tf.shape(outputs)[0], tf.shape(outputs)[2]))
return outputs

@timing
def build(self):
print("Building DeepONet...")
self.X_func = tf.placeholder(config.real(tf), [None, self.layer_size_func[0]])
self.X_loc = tf.placeholder(config.real(tf), [None, self.layer_size_loc[0]])
self._inputs = [self.X_func, self.X_loc]

if self.output_count == 1:
self.y = self._build_vanilla_deeponet()
else:
ys = []
for _ in range(self.output_count):
ys.append(self._build_vanilla_deeponet())
self.y = tf.stack(ys, axis=2)

if self._output_transform is not None:
self.y = self._output_transform(self._inputs, self.y)

self.target = tf.placeholder(config.real(tf), [None, self.output_count])
self.built = True

def _build_vanilla_deeponet(self):
# Branch net to encode the input function
y_func = self.X_func
if callable(self.layer_size_func[1]):
@@ -162,18 +184,17 @@ def build(self):
raise AssertionError(
"Output sizes of branch net and trunk net do not match."
)
self.y = tf.einsum("bi,bi->b", y_func, y_loc)
self.y = tf.expand_dims(self.y, axis=1)
y = tf.einsum(
"bi,bi->b",
y_func,
y_loc
)
y = tf.expand_dims(y, axis=1)
# Add bias
if self.use_bias:
b = tf.Variable(tf.zeros(1, dtype=config.real(tf)))
self.y += b

if self._output_transform is not None:
self.y = self._output_transform(self._inputs, self.y)

self.target = tf.placeholder(config.real(tf), [None, 1])
self.built = True
y += b
return y

def _dense(
self,
@@ -195,7 +216,7 @@ def _dense(
)

def _stacked_dense(
self, inputs, units, stack_size, activation=None, use_bias=True, trainable=True
self, inputs, units, stack_size, activation=None, use_bias=True, trainable=True
):
"""Stacked densely-connected NN layer.

@@ -268,10 +289,12 @@ def __init__(
activation,
kernel_initializer,
regularization=None,
output_count=1
):
super().__init__()
self.layer_size_func = layer_size_branch
self.layer_size_loc = layer_size_trunk
self.output_count = output_count
if isinstance(activation, dict):
self.activation_branch = activations.get(activation["branch"])
self.activation_trunk = activations.get(activation["trunk"])
@@ -294,14 +317,33 @@ def outputs(self):
def targets(self):
return self.target

def prepare_multiple_outputs(self, outputs):
if self.output_count > 1:
return tf.reshape(outputs, (tf.shape(outputs)[0], tf.shape(outputs)[2]))
return outputs

@timing
def build(self):
print("Building DeepONetCartesianProd...")
self.X_func = tf.placeholder(config.real(tf), [None, self.layer_size_func[0]])
self.X_loc = tf.placeholder(config.real(tf), [None, self.layer_size_loc[0]])
self._inputs = [self.X_func, self.X_loc]

# Branch net to encode the input function
if self.output_count == 1:
self.y = self._build_vanilla_deeponet()
else:
ys = []
for _ in range(0, self.output_count):
ys.append(self._build_vanilla_deeponet())
self.y = tf.stack(ys, axis=2)

if self._output_transform is not None:
self.y = self._output_transform(self._inputs, self.y)

self.target = tf.placeholder(config.real(tf), [None, None])
self.built = True

def _build_vanilla_deeponet(self):
y_func = self.X_func
if callable(self.layer_size_func[1]):
# User-defined network
@@ -336,18 +378,16 @@ def build(self):
kernel_regularizer=self.regularizer,
)

# Dot product
if y_func.shape[-1] != y_loc.shape[-1]:
raise AssertionError(
"Output sizes of branch net and trunk net do not match."
)
self.y = tf.einsum("bi,ni->bn", y_func, y_loc)
y = tf.einsum(
"bi,ni->bn",
y_func,
y_loc
)
# Add bias
b = tf.Variable(tf.zeros(1, dtype=config.real(tf)))
self.y += b

if self._output_transform is not None:
self.y = self._output_transform(self._inputs, self.y)

self.target = tf.placeholder(config.real(tf), [None, None])
self.built = True
y += b
return y
72 changes: 72 additions & 0 deletions examples/operator/poisson_aligned_2d.py
@@ -0,0 +1,72 @@
"""
Poisson-like 2D problem
Backend supported: tensorflow.compat.v1
"""
import numpy as np
import deepxde as dde
from deepxde.backend import tf
import matplotlib.pyplot as plt


# Two target variables: A and B
# Equations: dA_xx = f, dB_tt = f
def equation(x, y, f):
A = y[:, 0:1]
B = y[:, 1:2]
dA_xx = dde.grad.hessian(y, x, component=0, i=0, j=0)
dB_tt = dde.grad.hessian(y, x, component=1, i=1, j=1)
return [
dA_xx - f,
dB_tt - f
]


# Define space/time geometry
geomtime = dde.geometry.GeometryXTime(
dde.geometry.Interval(0, 1),
dde.geometry.TimeDomain(0, 1)
)

# Boundary conditions for A and B
A_bc = dde.icbc.DirichletBC(
    geomtime,
    lambda x: 0,
    lambda x, on_boundary: on_boundary and np.isclose(x[0], 0),
    component=0
)
B_bc = dde.icbc.DirichletBC(
    geomtime,
    lambda x: 0,
    lambda x, on_boundary: on_boundary and np.isclose(x[0], 1),
    component=1
)

space = dde.data.GRF2D()
evaluation_points = geomtime.uniform_points(10)

data = dde.data.PDEOperatorCartesianProd(
dde.data.TimePDE(
geomtime,
equation,
[A_bc, B_bc],
num_domain=1000,
num_boundary=10
),
space,
evaluation_points,
num_function=10
)

# Define DeepONet with two outputs
net = dde.nn.DeepONetCartesianProd(
[evaluation_points.shape[0], 100, 100],
[geomtime.dim, 100, 100],
activation="tanh",
kernel_initializer="Glorot normal",
output_count=2
)

# Train model
model = dde.Model(data, net)
model.compile("adam", lr=0.001)
model.train(iterations=5000)
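A possible continuation (not part of the PR) that uses the otherwise-unused matplotlib import to inspect the trained operator. The prediction shape (1, n_points, 2) is an assumption based on the multi-output stacking above; space.random and space.eval_batch are the standard deepxde function-space calls for sampling a branch input.

# Hedged sketch, not part of the PR: evaluate one sampled input function
# and visualize both predicted outputs A and B over the space-time domain.
features = space.random(1)
v = space.eval_batch(features, evaluation_points)  # branch input, shape (1, n_sensors)
x = geomtime.uniform_points(400)                   # trunk input, shape (n_points, 2)
y_pred = model.predict((v, x))                     # assumed shape (1, n_points, 2)

plt.figure(figsize=(8, 3))
plt.subplot(1, 2, 1)
plt.scatter(x[:, 0], x[:, 1], c=y_pred[0, :, 0], s=5)
plt.colorbar()
plt.title("A")
plt.subplot(1, 2, 2)
plt.scatter(x[:, 0], x[:, 1], c=y_pred[0, :, 1], s=5)
plt.colorbar()
plt.title("B")
plt.tight_layout()
plt.show()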