From 8aa264e7744b22179d64752c4aea655be490cba9 Mon Sep 17 00:00:00 2001 From: H Gazula Date: Fri, 8 Mar 2024 07:58:45 -0500 Subject: [PATCH 01/17] resolved https://github.com/neuronets/nobrainer/issues/285 --- nobrainer/processing/segmentation.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/nobrainer/processing/segmentation.py b/nobrainer/processing/segmentation.py index 66e1768d..48729749 100644 --- a/nobrainer/processing/segmentation.py +++ b/nobrainer/processing/segmentation.py @@ -39,6 +39,7 @@ def fit( opt_args=None, loss=losses.dice, metrics=metrics.dice, + callbacks=None ): """Train a segmentation model""" # TODO: check validity of datasets @@ -82,7 +83,12 @@ def _compile(): _compile() self.model_.summary() - callbacks = [] + if callbacks is not None and not isinstance(callbacks, list): + raise AttributeError('Callbacks must be either of type list or None') + + if callbacks is None: + callbacks = [] + if self.checkpoint_tracker: callbacks.append(self.checkpoint_tracker) self.model_.fit( From 9d55484883e27e24bf6f45311439ad3c373b732d Mon Sep 17 00:00:00 2001 From: H Gazula Date: Sun, 10 Mar 2024 10:41:23 -0400 Subject: [PATCH 02/17] Resolved https://github.com/neuronets/nobrainer/issues/283 --- nobrainer/models/attention_unet.py | 81 +++++ .../models/attention_unet_with_inception.py | 319 ++++++++++++++++++ 2 files changed, 400 insertions(+) create mode 100644 nobrainer/models/attention_unet.py create mode 100644 nobrainer/models/attention_unet_with_inception.py diff --git a/nobrainer/models/attention_unet.py b/nobrainer/models/attention_unet.py new file mode 100644 index 00000000..879d3b22 --- /dev/null +++ b/nobrainer/models/attention_unet.py @@ -0,0 +1,81 @@ +"""Model definition for Attention U-Net. +Adapted from https://github.com/nikhilroxtomar/Semantic-Segmentation-Architecture/blob/main/TensorFlow/attention-unet.py +""" + +import tensorflow as tf +import tensorflow.keras.layers as L +from tensorflow.keras import layers +from tensorflow.keras.models import Model + + +def conv_block(x, num_filters): + x = L.Conv3D(num_filters, 3, padding="same")(x) + x = L.BatchNormalization()(x) + x = L.Activation("relu")(x) + + x = L.Conv3D(num_filters, 3, padding="same")(x) + x = L.BatchNormalization()(x) + x = L.Activation("relu")(x) + + return x + + +def encoder_block(x, num_filters): + x = conv_block(x, num_filters) + p = L.MaxPool3D()(x) + return x, p + + +def attention_gate(g, s, num_filters): + Wg = L.Conv3D(num_filters, 1, padding="same")(g) + Wg = L.BatchNormalization()(Wg) + + Ws = L.Conv3D(num_filters, 1, padding="same")(s) + Ws = L.BatchNormalization()(Ws) + + out = L.Activation("relu")(Wg + Ws) + out = L.Conv3D(num_filters, 1, padding="same")(out) + out = L.Activation("sigmoid")(out) + + return out * s + + +def decoder_block(x, s, num_filters): + x = L.UpSampling3D()(x) + s = attention_gate(x, s, num_filters) + x = L.Concatenate()([x, s]) + x = conv_block(x, num_filters) + return x + + +def attention_unet(n_classes, input_shape): + """Inputs""" + inputs = L.Input(input_shape) + + """ Encoder """ + s1, p1 = encoder_block(inputs, 64) + s2, p2 = encoder_block(p1, 128) + s3, p3 = encoder_block(p2, 256) + + b1 = conv_block(p3, 512) + + """ Decoder """ + d1 = decoder_block(b1, s3, 256) + d2 = decoder_block(d1, s2, 128) + d3 = decoder_block(d2, s1, 64) + + """ Outputs """ + outputs = L.Conv3D(n_classes, 1, padding="same")(d3) + + final_activation = "sigmoid" if n_classes == 1 else "softmax" + outputs = layers.Activation(final_activation)(outputs) + + """ Model 
""" + return Model(inputs=inputs, outputs=outputs, name="Attention U-Net") + + +if __name__ == "__main__": + n_classes = 50 + input_shape = (256, 256, 256, 3) + model = attention_unet(n_classes, input_shape) + model.summary() diff --git a/nobrainer/models/attention_unet_with_inception.py b/nobrainer/models/attention_unet_with_inception.py new file mode 100644 index 00000000..f0d6ed20 --- /dev/null +++ b/nobrainer/models/attention_unet_with_inception.py @@ -0,0 +1,319 @@ +"""Attention U-net with inception layers. +Adapted from https://github.com/robinvvinod/unet +""" + +import tensorflow.keras.backend as K +from tensorflow.keras import layers +from tensorflow.keras.models import Model + +from hyperparameters import alpha + +K.set_image_data_format("channels_last") + + +def expend_as(tensor, rep): + # Anonymous lambda function to expand the specified axis by a factor of argument, rep. + # If tensor has shape (512,512,N), lambda will return a tensor of shape (512,512,N*rep), if specified axis=2 + + my_repeat = layers.Lambda( + lambda x, repnum: K.repeat_elements(x, repnum, axis=4), + arguments={"repnum": rep}, + )(tensor) + return my_repeat + + +def conv3d_block( + input_tensor, + n_filters, + kernel_size=3, + batchnorm=True, + strides=1, + dilation_rate=1, + recurrent=1, +): + # A wrapper of the Keras Conv3D block to serve as a building block for downsampling layers + # Includes options to use batch normalization, dilation and recurrence + + conv = layers.Conv3D( + filters=n_filters, + kernel_size=kernel_size, + strides=strides, + kernel_initializer="he_normal", + padding="same", + dilation_rate=dilation_rate, + )(input_tensor) + if batchnorm: + conv = layers.BatchNormalization()(conv) + output = layers.LeakyReLU(alpha=alpha)(conv) + + for _ in range(recurrent - 1): + conv = layers.Conv3D( + filters=n_filters, + kernel_size=kernel_size, + strides=1, + kernel_initializer="he_normal", + padding="same", + dilation_rate=dilation_rate, + )(output) + if batchnorm: + conv = layers.BatchNormalization()(conv) + res = layers.LeakyReLU(alpha=alpha)(conv) + output = layers.Add()([output, res]) + + return output + + +def AttnGatingBlock(x, g, inter_shape): + shape_x = K.int_shape(x) + shape_g = K.int_shape(g) + + # Getting the gating signal to the same number of filters as the inter_shape + phi_g = layers.Conv3D( + filters=inter_shape, kernel_size=1, strides=1, padding="same" + )(g) + + # Getting the x signal to the same shape as the gating signal + theta_x = layers.Conv3D( + filters=inter_shape, + kernel_size=3, + strides=( + shape_x[1] // shape_g[1], + shape_x[2] // shape_g[2], + shape_x[3] // shape_g[3], + ), + padding="same", + )(x) + + # Element-wise addition of the gating and x signals + add_xg = layers.add([phi_g, theta_x]) + add_xg = layers.Activation("relu")(add_xg) + + # 1x1x1 convolution + psi = layers.Conv3D(filters=1, kernel_size=1, padding="same")(add_xg) + psi = layers.Activation("sigmoid")(psi) + shape_sigmoid = K.int_shape(psi) + + # Upsampling psi back to the original dimensions of x signal + upsample_sigmoid_xg = layers.UpSampling3D( + size=( + shape_x[1] // shape_sigmoid[1], + shape_x[2] // shape_sigmoid[2], + shape_x[3] // shape_sigmoid[3], + ) + )(psi) + + # Expanding the filter axis to the number of filters in the original x signal + upsample_sigmoid_xg = expend_as(upsample_sigmoid_xg, shape_x[4]) + + # Element-wise multiplication of attention coefficients back onto original x signal + attn_coefficients = layers.multiply([upsample_sigmoid_xg, x]) + + # Final 1x1x1 convolution to 
consolidate attention signal to original x dimensions + output = layers.Conv3D( + filters=shape_x[4], kernel_size=1, strides=1, padding="same" + )(attn_coefficients) + output = layers.BatchNormalization()(output) + return output + + +def transpose_block( + input_tensor, + skip_tensor, + n_filters, + kernel_size=3, + strides=1, + batchnorm=True, + recurrent=1, +): + # A wrapper of the Keras Conv3DTranspose block to serve as a building block for upsampling layers + + shape_x = K.int_shape(input_tensor) + shape_xskip = K.int_shape(skip_tensor) + + conv = layers.Conv3DTranspose( + filters=n_filters, + kernel_size=kernel_size, + padding="same", + strides=( + shape_xskip[1] // shape_x[1], + shape_xskip[2] // shape_x[2], + shape_xskip[3] // shape_x[3], + ), + kernel_initializer="he_normal", + )(input_tensor) + conv = layers.LeakyReLU(alpha=alpha)(conv) + + act = conv3d_block( + conv, + n_filters=n_filters, + kernel_size=kernel_size, + strides=1, + batchnorm=batchnorm, + dilation_rate=1, + recurrent=recurrent, + ) + output = layers.Concatenate(axis=4)([act, skip_tensor]) + return output + + +# Use the functions provided in layers3D to build the network +def inception_block( + input_tensor, + n_filters, + kernel_size=3, + strides=1, + batchnorm=True, + recurrent=1, + layers_list=[], +): + # Inception-style convolutional block similar to InceptionNet + # The first convolution follows the function arguments, while subsequent inception convolutions follow the parameters in + # argument, layers + + # layers is a nested list containing the different secondary inceptions in the format of (kernel_size, dil_rate) + + # E.g => layers=[ [(3,1),(3,1)], [(5,1)], [(3,1),(3,2)] ] + # This will implement 3 sets of secondary convolutions + # Set 1 => 3x3 dil = 1 followed by another 3x3 dil = 1 + # Set 2 => 5x5 dil = 1 + # Set 3 => 3x3 dil = 1 followed by 3x3 dil = 2 + + res = conv3d_block( + input_tensor, + n_filters=n_filters, + kernel_size=kernel_size, + strides=strides, + batchnorm=batchnorm, + dilation_rate=1, + recurrent=recurrent, + ) + + temp = [] + for layer in layers_list: + local_res = res + for conv in layer: + incep_kernel_size = conv[0] + incep_dilation_rate = conv[1] + local_res = conv3d_block( + local_res, + n_filters=n_filters, + kernel_size=incep_kernel_size, + strides=1, + batchnorm=batchnorm, + dilation_rate=incep_dilation_rate, + recurrent=recurrent, + ) + temp.append(local_res) + + temp = layers.concatenate(temp) + res = conv3d_block( + temp, + n_filters=n_filters, + kernel_size=1, + strides=1, + batchnorm=batchnorm, + dilation_rate=1, + ) + + shortcut = conv3d_block( + input_tensor, + n_filters=n_filters, + kernel_size=1, + strides=strides, + batchnorm=batchnorm, + dilation_rate=1, + ) + if batchnorm: + shortcut = layers.BatchNormalization()(shortcut) + + output = layers.Add()([shortcut, res]) + return output + + +def attention_unet_with_inception( + n_classes, input_shape, batch_size=None, n_filters=16, batchnorm=True +): + # contracting path + + inputs = layers.Input(shape=input_shape, batch_size=batch_size) + + c0 = inception_block( + inputs, + n_filters=n_filters, + batchnorm=batchnorm, + strides=1, + recurrent=2, + layers_list=[[(3, 1), (3, 1)], [(3, 2)]], + ) # 512x512x512 + + c1 = inception_block( + c0, + n_filters=n_filters * 2, + batchnorm=batchnorm, + strides=2, + recurrent=2, + layers_list=[[(3, 1), (3, 1)], [(3, 2)]], + ) # 256x256x256 + + c2 = inception_block( + c1, + n_filters=n_filters * 4, + batchnorm=batchnorm, + strides=2, + recurrent=2, + layers_list=[[(3, 1), (3, 1)], [(3, 
2)]], + ) # 128x128x128 + + c3 = inception_block( + c2, + n_filters=n_filters * 8, + batchnorm=batchnorm, + strides=2, + recurrent=2, + layers_list=[[(3, 1), (3, 1)], [(3, 2)]], + ) # 64x64x64 + + # bridge + + b0 = inception_block( + c3, + n_filters=n_filters * 16, + batchnorm=batchnorm, + strides=2, + recurrent=2, + layers_list=[[(3, 1), (3, 1)], [(3, 2)]], + ) # 32x32x32 + + # expansive path + + attn0 = AttnGatingBlock(c3, b0, n_filters * 16) + u0 = transpose_block( + b0, attn0, n_filters=n_filters * 8, batchnorm=batchnorm, recurrent=2 + ) # 64x64x64 + + attn1 = AttnGatingBlock(c2, u0, n_filters * 8) + u1 = transpose_block( + u0, attn1, n_filters=n_filters * 4, batchnorm=batchnorm, recurrent=2 + ) # 128x128x128 + + attn2 = AttnGatingBlock(c1, u1, n_filters * 4) + u2 = transpose_block( + u1, attn2, n_filters=n_filters * 2, batchnorm=batchnorm, recurrent=2 + ) # 256x256x256 + + u3 = transpose_block( + u2, c0, n_filters=n_filters, batchnorm=batchnorm, recurrent=2 + ) # 512x512x512 + + outputs = layers.Conv3D(filters=1, kernel_size=1, strides=1)(u3) + + final_activation = "sigmoid" if n_classes == 1 else "softmax" + outputs = layers.Activation(final_activation)(outputs) + + model = Model(inputs=[inputs], outputs=[outputs]) + return model + + +if __name__ == "__main__": + model = attention_unet_with_inception(n_classes=1, input_shape=(256, 256, 256, 1)) + model.summary() From cff83686bf9c8af23b86d5e21067c5985578ddb1 Mon Sep 17 00:00:00 2001 From: H Gazula Date: Mon, 11 Mar 2024 17:29:39 -0400 Subject: [PATCH 03/17] add tests for u-net models and resolved https://github.com/neuronets/nobrainer/issues/291 --- .../dwc.py | 0 nobrainer/models/__init__.py | 4 ++++ nobrainer/models/attention_unet.py | 4 ++-- nobrainer/models/attention_unet_with_inception.py | 10 ++++------ nobrainer/models/tests/models_test.py | 12 ++++++++++++ 5 files changed, 22 insertions(+), 8 deletions(-) rename nobrainer/{distrubuted_learning => distributed_learning}/dwc.py (100%) diff --git a/nobrainer/distrubuted_learning/dwc.py b/nobrainer/distributed_learning/dwc.py similarity index 100% rename from nobrainer/distrubuted_learning/dwc.py rename to nobrainer/distributed_learning/dwc.py diff --git a/nobrainer/models/__init__.py b/nobrainer/models/__init__.py index 8bc4d125..34da1027 100644 --- a/nobrainer/models/__init__.py +++ b/nobrainer/models/__init__.py @@ -1,3 +1,5 @@ +from .attention_unet import attention_unet +from .attention_unet_with_inception import attention_unet_with_inception from .autoencoder import autoencoder from .dcgan import dcgan from .highresnet import highresnet @@ -29,6 +31,8 @@ def get(name): "progressivegan": progressivegan, "progressiveae": progressiveae, "dcgan": dcgan, + "attention_unet": attention_unet, + "attention_unet_with_inception": attention_unet_with_inception, } try: diff --git a/nobrainer/models/attention_unet.py b/nobrainer/models/attention_unet.py index 879d3b22..8d7bd5f9 100644 --- a/nobrainer/models/attention_unet.py +++ b/nobrainer/models/attention_unet.py @@ -3,8 +3,8 @@ """ import tensorflow as tf -import tensorflow.keras.layers as L from tensorflow.keras import layers +import tensorflow.keras.layers as L from tensorflow.keras.models import Model @@ -71,7 +71,7 @@ def attention_unet(n_classes, input_shape): outputs = layers.Activation(final_activation)(outputs) """ Model """ - return Model(inputs=inputs, outputs=outputs, name="Attention U-Net") + return Model(inputs=inputs, outputs=outputs, name="Attention_U-Net") if __name__ == "__main__": diff --git 
a/nobrainer/models/attention_unet_with_inception.py b/nobrainer/models/attention_unet_with_inception.py index f0d6ed20..fa002ea6 100644 --- a/nobrainer/models/attention_unet_with_inception.py +++ b/nobrainer/models/attention_unet_with_inception.py @@ -2,12 +2,10 @@ Adapted from https://github.com/robinvvinod/unet """ -import tensorflow.keras.backend as K from tensorflow.keras import layers +import tensorflow.keras.backend as K from tensorflow.keras.models import Model -from hyperparameters import alpha - K.set_image_data_format("channels_last") @@ -44,7 +42,7 @@ def conv3d_block( )(input_tensor) if batchnorm: conv = layers.BatchNormalization()(conv) - output = layers.LeakyReLU(alpha=alpha)(conv) + output = layers.LeakyReLU(alpha=0.1)(conv) for _ in range(recurrent - 1): conv = layers.Conv3D( @@ -57,7 +55,7 @@ def conv3d_block( )(output) if batchnorm: conv = layers.BatchNormalization()(conv) - res = layers.LeakyReLU(alpha=alpha)(conv) + res = layers.LeakyReLU(alpha=0.1)(conv) output = layers.Add()([output, res]) return output @@ -141,7 +139,7 @@ def transpose_block( ), kernel_initializer="he_normal", )(input_tensor) - conv = layers.LeakyReLU(alpha=alpha)(conv) + conv = layers.LeakyReLU(alpha=0.1)(conv) act = conv3d_block( conv, diff --git a/nobrainer/models/tests/models_test.py b/nobrainer/models/tests/models_test.py index 68f4b985..d8a3534c 100644 --- a/nobrainer/models/tests/models_test.py +++ b/nobrainer/models/tests/models_test.py @@ -4,6 +4,8 @@ from nobrainer.bayesian_utils import default_mean_field_normal_fn +from ..attention_unet import attention_unet +from ..attention_unet_with_inception import attention_unet_with_inception from ..autoencoder import autoencoder from ..bayesian_vnet import bayesian_vnet from ..bayesian_vnet_semi import bayesian_vnet_semi @@ -241,3 +243,13 @@ def test_vox2vox(): pred_shape = (1, 2, 2, 2, 1) out = vox_discriminator(inputs=[y, x]) assert out.shape == pred_shape + + +def test_attention_unet(): + model_test(attention_unet, n_classes=1, input_shape=(1, 64, 64, 64, 1)) + + +def test_attention_unet_with_inception(): + model_test( + attention_unet_with_inception, n_classes=1, input_shape=(1, 64, 64, 64, 1) + ) From c67bd9ebefde9dda0aaf4a12345d179d6ae69c95 Mon Sep 17 00:00:00 2001 From: Harsha Date: Tue, 12 Mar 2024 13:49:54 -0400 Subject: [PATCH 04/17] resolved https://github.com/neuronets/nobrainer/issues/278 --- nobrainer/dataset.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/nobrainer/dataset.py b/nobrainer/dataset.py index 791737fd..30c5be53 100644 --- a/nobrainer/dataset.py +++ b/nobrainer/dataset.py @@ -121,6 +121,9 @@ def from_tfrecords( ) block_length = len([0 for _ in first_shard]) + if not n_volumes: + n_volumes = block_length * len(files) + dataset = dataset.interleave( map_func=lambda x: tf.data.TFRecordDataset( x, compression_type=compression_type From eb6d11e83d50a7ed9d200b885d76bb3df18f3011 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 12 Mar 2024 18:14:10 +0000 Subject: [PATCH 05/17] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- nobrainer/processing/segmentation.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/nobrainer/processing/segmentation.py b/nobrainer/processing/segmentation.py index 48729749..faf1a7b7 100644 --- a/nobrainer/processing/segmentation.py +++ b/nobrainer/processing/segmentation.py @@ -39,7 +39,7 @@ def fit( opt_args=None, loss=losses.dice, 
metrics=metrics.dice, - callbacks=None + callbacks=None, ): """Train a segmentation model""" # TODO: check validity of datasets @@ -84,11 +84,11 @@ def _compile(): self.model_.summary() if callbacks is not None and not isinstance(callbacks, list): - raise AttributeError('Callbacks must be either of type list or None') - + raise AttributeError("Callbacks must be either of type list or None") + if callbacks is None: callbacks = [] - + if self.checkpoint_tracker: callbacks.append(self.checkpoint_tracker) self.model_.fit( From b3e2a8a7d3468c9848a109f0c5d47842e8903a55 Mon Sep 17 00:00:00 2001 From: H Gazula Date: Tue, 12 Mar 2024 19:49:53 -0400 Subject: [PATCH 06/17] resolved https://github.com/neuronets/nobrainer/issues/289 --- nobrainer/models/__init__.py | 2 + nobrainer/models/tests/models_test.py | 5 + nobrainer/models/unetr.py | 384 ++++++++++++++++++++++++++ 3 files changed, 391 insertions(+) create mode 100644 nobrainer/models/unetr.py diff --git a/nobrainer/models/__init__.py b/nobrainer/models/__init__.py index 34da1027..d03fb7bb 100644 --- a/nobrainer/models/__init__.py +++ b/nobrainer/models/__init__.py @@ -7,6 +7,7 @@ from .progressiveae import progressiveae from .progressivegan import progressivegan from .unet import unet +from .unetr import unetr def get(name): @@ -33,6 +34,7 @@ def get(name): "dcgan": dcgan, "attention_unet": attention_unet, "attention_unet_with_inception": attention_unet_with_inception, + "unetr": unetr, } try: diff --git a/nobrainer/models/tests/models_test.py b/nobrainer/models/tests/models_test.py index d8a3534c..3daeda40 100644 --- a/nobrainer/models/tests/models_test.py +++ b/nobrainer/models/tests/models_test.py @@ -16,6 +16,7 @@ from ..progressivegan import progressivegan from ..unet import unet from ..unet_lstm import unet_lstm +from ..unetr import unetr from ..vnet import vnet from ..vox2vox import Vox_ensembler, vox_gan @@ -253,3 +254,7 @@ def test_attention_unet_with_inception(): model_test( attention_unet_with_inception, n_classes=1, input_shape=(1, 64, 64, 64, 1) ) + + +def test_unetr(): + model_test(unetr, n_classes=1, input_shape=(1, 96, 96, 96, 1)) diff --git a/nobrainer/models/unetr.py b/nobrainer/models/unetr.py new file mode 100644 index 00000000..56019d79 --- /dev/null +++ b/nobrainer/models/unetr.py @@ -0,0 +1,384 @@ +"""UNETR implementation in Tensorflow 2.0. 
+ +Adapted from https://github.com/nikhilroxtomar/Semantic-Segmentation-Architecture/blob/main/TensorFlow/unetr.py +""" +import math + +import tensorflow as tf + + +class SingleDeconv3DBlock(tf.keras.layers.Layer): + def __init__(self, filters): + super(SingleDeconv3DBlock, self).__init__() + self.block = tf.keras.layers.Conv3DTranspose( + filters=filters, + kernel_size=2, + strides=2, + padding="valid", + output_padding=None, + ) + + def call(self, inputs): + return self.block(inputs) + + +class SingleConv3DBlock(tf.keras.layers.Layer): + def __init__(self, filters, kernel_size): + super(SingleConv3DBlock, self).__init__() + self.kernel = kernel_size + self.res = tuple(map(lambda i: (i - 1) // 2, self.kernel)) + self.block = tf.keras.layers.Conv3D( + filters=filters, kernel_size=kernel_size, strides=1, padding="same" + ) + + def call(self, inputs): + return self.block(inputs) + + +class Conv3DBlock(tf.keras.layers.Layer): + def __init__(self, filters, kernel_size=(3, 3, 3)): + super(Conv3DBlock, self).__init__() + self.a = tf.keras.Sequential( + [ + SingleConv3DBlock(filters, kernel_size=kernel_size), + tf.keras.layers.BatchNormalization(), + tf.keras.layers.Activation("relu"), + ] + ) + + def call(self, inputs): + return self.a(inputs) + + +class Deconv3DBlock(tf.keras.layers.Layer): + def __init__(self, filters, kernel_size=(3, 3, 3)): + super(Deconv3DBlock, self).__init__() + self.a = tf.keras.Sequential( + [ + SingleDeconv3DBlock(filters=filters), + SingleConv3DBlock(filters=filters, kernel_size=kernel_size), + tf.keras.layers.BatchNormalization(), + tf.keras.layers.Activation("relu"), + ] + ) + + def call(self, inputs): + return self.a(inputs) + + +class SelfAttention(tf.keras.layers.Layer): + def __init__(self, num_heads, embed_dim, dropout): + super(SelfAttention, self).__init__() + + self.num_attention_heads = num_heads + self.attention_head_size = int(embed_dim / num_heads) + self.all_head_size = self.num_attention_heads * self.attention_head_size + + self.query = tf.keras.layers.Dense(self.all_head_size) + self.key = tf.keras.layers.Dense(self.all_head_size) + self.value = tf.keras.layers.Dense(self.all_head_size) + + self.out = tf.keras.layers.Dense(embed_dim) + self.attn_dropout = tf.keras.layers.Dropout(dropout) + self.proj_dropout = tf.keras.layers.Dropout(dropout) + + self.softmax = tf.keras.layers.Softmax() + + self.vis = False + + def transpose_for_scores(self, x): + new_x_shape = list( + x.shape[:-1] + (self.num_attention_heads, self.attention_head_size) + ) + new_x_shape[0] = -1 + y = tf.reshape(x, new_x_shape) + return tf.transpose(y, perm=[0, 2, 1, 3]) + + def call(self, hidden_states): + mixed_query_layer = self.query(hidden_states) + mixed_key_layer = self.key(hidden_states) + mixed_value_layer = self.value(hidden_states) + + query_layer = self.transpose_for_scores(mixed_query_layer) + key_layer = self.transpose_for_scores(mixed_key_layer) + value_layer = self.transpose_for_scores(mixed_value_layer) + attention_scores = query_layer @ tf.transpose(key_layer, perm=[0, 1, 3, 2]) + attention_scores = attention_scores / math.sqrt(self.attention_head_size) + attention_probs = self.softmax(attention_scores) + weights = attention_probs if self.vis else None + attention_probs = self.attn_dropout(attention_probs) + + context_layer = attention_probs @ value_layer + context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3]) + new_context_layer_shape = list(context_layer.shape[:-2] + (self.all_head_size,)) + new_context_layer_shape[0] = -1 + context_layer = 
tf.reshape(context_layer, new_context_layer_shape) + attention_output = self.out(context_layer) + attention_output = self.proj_dropout(attention_output) + return attention_output, weights + + +class Mlp(tf.keras.layers.Layer): + def __init__(self, output_features, drop=0.0): + super(Mlp, self).__init__() + self.a = tf.keras.layers.Dense(units=output_features, activation=tf.nn.gelu) + self.b = tf.keras.layers.Dropout(drop) + + def call(self, inputs): + x = self.a(inputs) + return self.b(x) + + +class PositionwiseFeedForward(tf.keras.layers.Layer): + def __init__(self, d_model=768, d_ff=2048, dropout=0.1): + super(PositionwiseFeedForward, self).__init__() + self.a = tf.keras.layers.Dense(units=d_ff) + self.b = tf.keras.layers.Dense(units=d_model) + self.c = tf.keras.layers.Dropout(dropout) + + def call(self, inputs): + return self.b(self.c(tf.nn.relu(self.a(inputs)))) + + +##embeddings, projection_dim=embed_dim +class PatchEmbedding(tf.keras.layers.Layer): + def __init__(self, cube_size, patch_size, embed_dim): + super(PatchEmbedding, self).__init__() + self.num_of_patches = int( + (cube_size[0] * cube_size[1] * cube_size[2]) + / (patch_size * patch_size * patch_size) + ) + self.patch_size = patch_size + self.size = patch_size + self.embed_dim = embed_dim + + self.projection = tf.keras.layers.Dense(embed_dim) + + self.clsToken = tf.Variable( + tf.keras.initializers.GlorotNormal()(shape=(1, 512, embed_dim)), + trainable=True, + ) + + self.positionalEmbedding = tf.keras.layers.Embedding( + self.num_of_patches, embed_dim + ) + self.patches = None + self.lyer = tf.keras.layers.Conv3D( + filters=self.embed_dim, + kernel_size=self.patch_size, + strides=self.patch_size, + padding="valid", + ) + # embedding - basically is adding numerical embedding to the layer along with an extra dim + + def call(self, inputs): + patches = self.lyer(inputs) + patches = tf.reshape( + patches, (tf.shape(inputs)[0], -1, self.size * self.size * 3) + ) + patches = self.projection(patches) + positions = tf.range(0, self.num_of_patches, 1)[tf.newaxis, ...] 
+ positionalEmbedding = self.positionalEmbedding(positions) + patches = patches + positionalEmbedding + + return patches, positionalEmbedding + + +##transformerblock +class TransformerLayer(tf.keras.layers.Layer): + def __init__(self, embed_dim, num_heads, dropout, cube_size, patch_size): + super(TransformerLayer, self).__init__() + + self.attention_norm = tf.keras.layers.LayerNormalization(epsilon=1e-6) + + self.mlp_norm = tf.keras.layers.LayerNormalization(epsilon=1e-6) + + # embed_dim/no-of_heads + self.mlp_dim = int( + (cube_size[0] * cube_size[1] * cube_size[2]) + / (patch_size * patch_size * patch_size) + ) + + self.mlp = PositionwiseFeedForward(embed_dim, 2048) + self.attn = SelfAttention(num_heads, embed_dim, dropout) + + def call(self, x, training=True): + h = x + x = self.attention_norm(x) + x, weights = self.attn(x) + x = x + h + h = x + + x = self.mlp_norm(x) + x = self.mlp(x) + + x = x + h + + return x, weights + + +class TransformerEncoder(tf.keras.layers.Layer): + def __init__( + self, + embed_dim, + num_heads, + cube_size, + patch_size, + num_layers=12, + dropout=0.1, + extract_layers=[3, 6, 9, 12], + ): + super(TransformerEncoder, self).__init__() + # embed_dim, num_heads ,dropout, cube_size, patch_size + self.embeddings = PatchEmbedding(cube_size, patch_size, embed_dim) + self.extract_layers = extract_layers + self.encoders = [ + TransformerLayer(embed_dim, num_heads, dropout, cube_size, patch_size) + for _ in range(num_layers) + ] + + def call(self, inputs, training=True): + extract_layers = [] + x = inputs + x, _ = self.embeddings(x) + + for depth, layer in enumerate(self.encoders): + x, _ = layer(x, training=training) + if depth + 1 in self.extract_layers: + extract_layers.append(x) + + return extract_layers + + +class UNETR(tf.keras.Model): + def __init__( + self, + img_shape=(96, 96, 96), + input_dim=3, + output_dim=3, + embed_dim=768, + patch_size=16, + num_heads=12, + dropout=0.1, + ): + super(UNETR, self).__init__() + self.input_dim = input_dim + self.output_dim = output_dim + self.embed_dim = embed_dim + self.img_shape = img_shape + self.patch_size = patch_size + self.num_heads = num_heads + self.dropout = dropout + self.num_layers = 12 + self.ext_layers = [3, 6, 9, 12] + + self.patch_dim = [int(x / patch_size) for x in img_shape] + self.transformer = TransformerEncoder( + self.embed_dim, + self.num_heads, + self.img_shape, + self.patch_size, + self.num_layers, + self.dropout, + self.ext_layers, + ) + + # U-Net Decoder + self.decoder0 = tf.keras.Sequential( + [Conv3DBlock(32, (3, 3, 3)), Conv3DBlock(64, (3, 3, 3))] + ) + + self.decoder3 = tf.keras.Sequential( + [Deconv3DBlock(512), Deconv3DBlock(256), Deconv3DBlock(128)] + ) + + self.decoder6 = tf.keras.Sequential([Deconv3DBlock(512), Deconv3DBlock(256)]) + + self.decoder9 = Deconv3DBlock(512) + + self.decoder12_upsampler = SingleDeconv3DBlock(512) + + self.decoder9_upsampler = tf.keras.Sequential( + [ + Conv3DBlock(512), + Conv3DBlock(512), + Conv3DBlock(512), + SingleDeconv3DBlock(256), + ] + ) + + self.decoder6_upsampler = tf.keras.Sequential( + [Conv3DBlock(256), Conv3DBlock(256), SingleDeconv3DBlock(128)] + ) + + self.decoder3_upsampler = tf.keras.Sequential( + [Conv3DBlock(128), Conv3DBlock(128), SingleDeconv3DBlock(64)] + ) + + self.decoder0_header = tf.keras.Sequential( + [Conv3DBlock(64), Conv3DBlock(64), SingleConv3DBlock(output_dim, (1, 1, 1))] + ) + + def call(self, x): + z = self.transformer(x) + z0, z3, z6, z9, z12 = x, z[0], z[1], z[2], z[3] + z3 = tf.reshape( + tf.transpose(z3, perm=[0, 2, 1]), 
[-1, *self.patch_dim, self.embed_dim] + ) + z6 = tf.reshape( + tf.transpose(z6, perm=[0, 2, 1]), [-1, *self.patch_dim, self.embed_dim] + ) + z9 = tf.reshape( + tf.transpose(z9, perm=[0, 2, 1]), [-1, *self.patch_dim, self.embed_dim] + ) + z12 = tf.reshape( + tf.transpose(z12, perm=[0, 2, 1]), [-1, *self.patch_dim, self.embed_dim] + ) + z12 = self.decoder12_upsampler(z12) + z9 = self.decoder9(z9) + z9 = self.decoder9_upsampler(tf.concat([z9, z12], 4)) + z6 = self.decoder6(z6) + z6 = self.decoder6_upsampler(tf.concat([z6, z9], 4)) + z3 = self.decoder3(z3) + z3 = self.decoder3_upsampler(tf.concat([z3, z6], 4)) + z0 = self.decoder0(z0) + output = self.decoder0_header(tf.concat([z0, z3], 4)) + return output + + # def model(self): + # x = tf.keras.layers.Input(shape=(96, 96, 96, 3)) + # return tf.keras.Model(inputs=[x], outputs=self.call(x)) + + +def unetr( + n_classes=1, + input_shape=(96, 96, 96, 3), + embed_dim=768, + patch_size=16, + num_heads=12, + dropout=0.1, +): + *img_shape, input_dim = input_shape + + input = tf.keras.layers.Input([*img_shape, input_dim], name="input_image") + + z = UNETR( + img_shape=img_shape, + input_dim=input_dim, + output_dim=n_classes, + embed_dim=embed_dim, + patch_size=patch_size, + num_heads=num_heads, + dropout=dropout, + )(input) + + final_activation = "sigmoid" if n_classes == 1 else "softmax" + output = tf.keras.layers.Activation(final_activation)(z) + + return tf.keras.Model(inputs=[input], outputs=[output]) + + +if __name__ == "__main__": + input_shape = (96, 96, 96, 3) + sub1 = unetr(input_shape=input_shape, n_classes=1) + sub1.summary() From 5a3d807ec15bda749902c2f97002af0c5279d5a7 Mon Sep 17 00:00:00 2001 From: Harsha Date: Mon, 18 Mar 2024 13:58:19 -0400 Subject: [PATCH 07/17] resolved https://github.com/neuronets/nobrainer/issues/296 --- nobrainer/processing/segmentation.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nobrainer/processing/segmentation.py b/nobrainer/processing/segmentation.py index faf1a7b7..63648d90 100644 --- a/nobrainer/processing/segmentation.py +++ b/nobrainer/processing/segmentation.py @@ -40,6 +40,7 @@ def fit( loss=losses.dice, metrics=metrics.dice, callbacks=None, + verbose=1, ): """Train a segmentation model""" # TODO: check validity of datasets @@ -100,6 +101,7 @@ def _compile(): if dataset_validate else None, callbacks=callbacks, + verbose=verbose ) return self From 7d00dc1ca56dc8af38dbe6fa425a96a034678dfb Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 18 Mar 2024 17:58:32 +0000 Subject: [PATCH 08/17] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- nobrainer/processing/segmentation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nobrainer/processing/segmentation.py b/nobrainer/processing/segmentation.py index 63648d90..c04ee906 100644 --- a/nobrainer/processing/segmentation.py +++ b/nobrainer/processing/segmentation.py @@ -101,7 +101,7 @@ def _compile(): if dataset_validate else None, callbacks=callbacks, - verbose=verbose + verbose=verbose, ) return self From ab341eea02429f60018601bea0d694e42524a917 Mon Sep 17 00:00:00 2001 From: Harsha Date: Mon, 18 Mar 2024 15:45:10 -0400 Subject: [PATCH 09/17] resolved https://github.com/neuronets/nobrainer/issues/297 --- nobrainer/models/__init__.py | 3 ++- nobrainer/models/tests/models_test.py | 5 +++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/nobrainer/models/__init__.py b/nobrainer/models/__init__.py index 
d03fb7bb..14e696ab 100644 --- a/nobrainer/models/__init__.py +++ b/nobrainer/models/__init__.py @@ -8,7 +8,7 @@ from .progressivegan import progressivegan from .unet import unet from .unetr import unetr - +from .bayesian_meshnet import variational_meshnet def get(name): """Return callable that creates a particular `tf.keras.Model`. @@ -35,6 +35,7 @@ def get(name): "attention_unet": attention_unet, "attention_unet_with_inception": attention_unet_with_inception, "unetr": unetr, + "variational_meshnet": variational_meshnet, } try: diff --git a/nobrainer/models/tests/models_test.py b/nobrainer/models/tests/models_test.py index 3daeda40..afd58c62 100644 --- a/nobrainer/models/tests/models_test.py +++ b/nobrainer/models/tests/models_test.py @@ -19,6 +19,7 @@ from ..unetr import unetr from ..vnet import vnet from ..vox2vox import Vox_ensembler, vox_gan +from ..bayesian_meshnet import variational_meshnet def model_test(model_cls, n_classes, input_shape, kwds={}): @@ -258,3 +259,7 @@ def test_attention_unet_with_inception(): def test_unetr(): model_test(unetr, n_classes=1, input_shape=(1, 96, 96, 96, 1)) + + +def test_variational_meshnet(): + model_test(variational_meshnet, n_classes=1, input_shape=(1, 128, 128, 128, 1)) From 1171229cff194c6789bb8d2da1b8a2f62e349774 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 18 Mar 2024 19:45:24 +0000 Subject: [PATCH 10/17] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- nobrainer/models/__init__.py | 3 ++- nobrainer/models/tests/models_test.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/nobrainer/models/__init__.py b/nobrainer/models/__init__.py index 14e696ab..5e0092b5 100644 --- a/nobrainer/models/__init__.py +++ b/nobrainer/models/__init__.py @@ -1,6 +1,7 @@ from .attention_unet import attention_unet from .attention_unet_with_inception import attention_unet_with_inception from .autoencoder import autoencoder +from .bayesian_meshnet import variational_meshnet from .dcgan import dcgan from .highresnet import highresnet from .meshnet import meshnet @@ -8,7 +9,7 @@ from .progressivegan import progressivegan from .unet import unet from .unetr import unetr -from .bayesian_meshnet import variational_meshnet + def get(name): """Return callable that creates a particular `tf.keras.Model`. 
diff --git a/nobrainer/models/tests/models_test.py b/nobrainer/models/tests/models_test.py index afd58c62..f9a6da35 100644 --- a/nobrainer/models/tests/models_test.py +++ b/nobrainer/models/tests/models_test.py @@ -7,6 +7,7 @@ from ..attention_unet import attention_unet from ..attention_unet_with_inception import attention_unet_with_inception from ..autoencoder import autoencoder +from ..bayesian_meshnet import variational_meshnet from ..bayesian_vnet import bayesian_vnet from ..bayesian_vnet_semi import bayesian_vnet_semi from ..brainsiam import brainsiam @@ -19,7 +20,6 @@ from ..unetr import unetr from ..vnet import vnet from ..vox2vox import Vox_ensembler, vox_gan -from ..bayesian_meshnet import variational_meshnet def model_test(model_cls, n_classes, input_shape, kwds={}): From 87278e2de6f94d914e6070539d02577f2e4f1c03 Mon Sep 17 00:00:00 2001 From: Harsha Date: Tue, 19 Mar 2024 09:17:49 -0400 Subject: [PATCH 11/17] restricting tf to 2.15.1 --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 0d2e9348..081a3ef5 100644 --- a/setup.cfg +++ b/setup.cfg @@ -38,7 +38,7 @@ install_requires = numpy scikit-image tensorflow-probability ~= 0.22.0 - tensorflow ~= 2.13 + tensorflow >=2.13, <= 2.15.1 tensorflow-addons ~= 0.21.0 psutil zip_safe = False From 29db9115515bcddc4d920deafd97c0d7ee04d585 Mon Sep 17 00:00:00 2001 From: Harsha Date: Tue, 19 Mar 2024 09:24:55 -0400 Subject: [PATCH 12/17] fix formatting issues --- nobrainer/models/attention_unet.py | 1 - nobrainer/models/attention_unet_with_inception.py | 15 ++++++++++----- nobrainer/models/unetr.py | 6 +++--- 3 files changed, 13 insertions(+), 9 deletions(-) diff --git a/nobrainer/models/attention_unet.py b/nobrainer/models/attention_unet.py index 8d7bd5f9..1bad990b 100644 --- a/nobrainer/models/attention_unet.py +++ b/nobrainer/models/attention_unet.py @@ -2,7 +2,6 @@ Adapted from https://github.com/nikhilroxtomar/Semantic-Segmentation-Architecture/blob/main/TensorFlow/attention-unet.py """ -import tensorflow as tf from tensorflow.keras import layers import tensorflow.keras.layers as L from tensorflow.keras.models import Model diff --git a/nobrainer/models/attention_unet_with_inception.py b/nobrainer/models/attention_unet_with_inception.py index fa002ea6..0485a619 100644 --- a/nobrainer/models/attention_unet_with_inception.py +++ b/nobrainer/models/attention_unet_with_inception.py @@ -11,7 +11,8 @@ def expend_as(tensor, rep): # Anonymous lambda function to expand the specified axis by a factor of argument, rep. 
- # If tensor has shape (512,512,N), lambda will return a tensor of shape (512,512,N*rep), if specified axis=2 + # If tensor has shape (512,512,N), lambda will return a tensor of shape + # (512,512,N*rep), if specified axis=2 my_repeat = layers.Lambda( lambda x, repnum: K.repeat_elements(x, repnum, axis=4), @@ -29,7 +30,8 @@ def conv3d_block( dilation_rate=1, recurrent=1, ): - # A wrapper of the Keras Conv3D block to serve as a building block for downsampling layers + # A wrapper of the Keras Conv3D block to serve as a building block for + # downsampling layers # Includes options to use batch normalization, dilation and recurrence conv = layers.Conv3D( @@ -123,7 +125,8 @@ def transpose_block( batchnorm=True, recurrent=1, ): - # A wrapper of the Keras Conv3DTranspose block to serve as a building block for upsampling layers + # A wrapper of the Keras Conv3DTranspose block to serve as a building block + # for upsampling layers shape_x = K.int_shape(input_tensor) shape_xskip = K.int_shape(skip_tensor) @@ -165,10 +168,12 @@ def inception_block( layers_list=[], ): # Inception-style convolutional block similar to InceptionNet - # The first convolution follows the function arguments, while subsequent inception convolutions follow the parameters in + # The first convolution follows the function arguments, while subsequent + # inception convolutions follow the parameters in # argument, layers - # layers is a nested list containing the different secondary inceptions in the format of (kernel_size, dil_rate) + # layers is a nested list containing the different secondary inceptions in + # the format of (kernel_size, dil_rate) # E.g => layers=[ [(3,1),(3,1)], [(5,1)], [(3,1),(3,2)] ] # This will implement 3 sets of secondary convolutions diff --git a/nobrainer/models/unetr.py b/nobrainer/models/unetr.py index 56019d79..30861617 100644 --- a/nobrainer/models/unetr.py +++ b/nobrainer/models/unetr.py @@ -1,6 +1,6 @@ """UNETR implementation in Tensorflow 2.0. 
-Adapted from https://github.com/nikhilroxtomar/Semantic-Segmentation-Architecture/blob/main/TensorFlow/unetr.py +Adapted from https://www.kaggle.com/code/usharengaraju/tensorflow-unetr-w-b """ import math @@ -140,7 +140,7 @@ def call(self, inputs): return self.b(self.c(tf.nn.relu(self.a(inputs)))) -##embeddings, projection_dim=embed_dim +# embeddings, projection_dim=embed_dim class PatchEmbedding(tf.keras.layers.Layer): def __init__(self, cube_size, patch_size, embed_dim): super(PatchEmbedding, self).__init__() @@ -184,7 +184,7 @@ def call(self, inputs): return patches, positionalEmbedding -##transformerblock +# transformerblock class TransformerLayer(tf.keras.layers.Layer): def __init__(self, embed_dim, num_heads, dropout, cube_size, patch_size): super(TransformerLayer, self).__init__() From 80257e626de121d4767c23eebbba51b76d05365a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 19 Mar 2024 13:27:02 +0000 Subject: [PATCH 13/17] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- nobrainer/models/attention_unet_with_inception.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nobrainer/models/attention_unet_with_inception.py b/nobrainer/models/attention_unet_with_inception.py index 0485a619..81809317 100644 --- a/nobrainer/models/attention_unet_with_inception.py +++ b/nobrainer/models/attention_unet_with_inception.py @@ -125,7 +125,7 @@ def transpose_block( batchnorm=True, recurrent=1, ): - # A wrapper of the Keras Conv3DTranspose block to serve as a building block + # A wrapper of the Keras Conv3DTranspose block to serve as a building block # for upsampling layers shape_x = K.int_shape(input_tensor) @@ -168,11 +168,11 @@ def inception_block( layers_list=[], ): # Inception-style convolutional block similar to InceptionNet - # The first convolution follows the function arguments, while subsequent + # The first convolution follows the function arguments, while subsequent # inception convolutions follow the parameters in # argument, layers - # layers is a nested list containing the different secondary inceptions in + # layers is a nested list containing the different secondary inceptions in # the format of (kernel_size, dil_rate) # E.g => layers=[ [(3,1),(3,1)], [(5,1)], [(3,1),(3,2)] ] From 4d67696b42991127eae6d725ec8e926d4d4e8224 Mon Sep 17 00:00:00 2001 From: H Gazula Date: Tue, 19 Mar 2024 18:04:03 -0400 Subject: [PATCH 14/17] skip testing big models --- nobrainer/models/attention_unet.py | 2 +- nobrainer/models/tests/models_test.py | 6 ++++++ 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/nobrainer/models/attention_unet.py b/nobrainer/models/attention_unet.py index 1bad990b..7b155daf 100644 --- a/nobrainer/models/attention_unet.py +++ b/nobrainer/models/attention_unet.py @@ -1,6 +1,6 @@ """Model definition for Attention U-Net. 
Adapted from https://github.com/nikhilroxtomar/Semantic-Segmentation-Architecture/blob/main/TensorFlow/attention-unet.py -""" +""" # noqa: E501 from tensorflow.keras import layers import tensorflow.keras.layers as L diff --git a/nobrainer/models/tests/models_test.py b/nobrainer/models/tests/models_test.py index f9a6da35..c0e8f030 100644 --- a/nobrainer/models/tests/models_test.py +++ b/nobrainer/models/tests/models_test.py @@ -1,3 +1,5 @@ +import os + import numpy as np import pytest import tensorflow as tf @@ -21,6 +23,8 @@ from ..vnet import vnet from ..vox2vox import Vox_ensembler, vox_gan +IN_GITHUB_ACTIONS = os.getenv("GITHUB_ACTIONS") == "true" + def model_test(model_cls, n_classes, input_shape, kwds={}): """Tests for models.""" @@ -257,9 +261,11 @@ def test_attention_unet_with_inception(): ) +@pytest.mark.skipif(IN_GITHUB_ACTIONS, reason="Cannot test in GitHub Actions") def test_unetr(): model_test(unetr, n_classes=1, input_shape=(1, 96, 96, 96, 1)) +@pytest.mark.skipif(IN_GITHUB_ACTIONS, reason="Cannot test in GitHub Actions") def test_variational_meshnet(): model_test(variational_meshnet, n_classes=1, input_shape=(1, 128, 128, 128, 1)) From da5e0bc7e8fa32b27daeae554bfdb2bf11fa7f09 Mon Sep 17 00:00:00 2001 From: H Gazula Date: Thu, 21 Mar 2024 01:41:44 -0400 Subject: [PATCH 15/17] resolved https://github.com/neuronets/nobrainer/issues/302 --- nobrainer/models/tests/models_test.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/nobrainer/models/tests/models_test.py b/nobrainer/models/tests/models_test.py index c0e8f030..45cffbdc 100644 --- a/nobrainer/models/tests/models_test.py +++ b/nobrainer/models/tests/models_test.py @@ -266,6 +266,10 @@ def test_unetr(): model_test(unetr, n_classes=1, input_shape=(1, 96, 96, 96, 1)) -@pytest.mark.skipif(IN_GITHUB_ACTIONS, reason="Cannot test in GitHub Actions") def test_variational_meshnet(): - model_test(variational_meshnet, n_classes=1, input_shape=(1, 128, 128, 128, 1)) + model_test( + variational_meshnet, + n_classes=1, + input_shape=(1, 128, 128, 128, 1), + kwds={"filters": 4}, + ) From 2caa2e638329c080e7f23f969c958ca9e3aa7bec Mon Sep 17 00:00:00 2001 From: H Gazula Date: Fri, 22 Mar 2024 14:31:50 -0400 Subject: [PATCH 16/17] resolved https://github.com/neuronets/nobrainer/issues/303 --- nobrainer/models/__init__.py | 44 ++++++++++++++++++---------- nobrainer/processing/segmentation.py | 23 +++++++++++++-- 2 files changed, 48 insertions(+), 19 deletions(-) diff --git a/nobrainer/models/__init__.py b/nobrainer/models/__init__.py index 5e0092b5..8dd75b44 100644 --- a/nobrainer/models/__init__.py +++ b/nobrainer/models/__init__.py @@ -1,3 +1,5 @@ +from pprint import pprint + from .attention_unet import attention_unet from .attention_unet_with_inception import attention_unet_with_inception from .autoencoder import autoencoder @@ -10,6 +12,22 @@ from .unet import unet from .unetr import unetr +__all__ = ["get", "list_available_models"] + +_models = { + "highresnet": highresnet, + "meshnet": meshnet, + "unet": unet, + "autoencoder": autoencoder, + "progressivegan": progressivegan, + "progressiveae": progressiveae, + "dcgan": dcgan, + "attention_unet": attention_unet, + "attention_unet_with_inception": attention_unet_with_inception, + "unetr": unetr, + "variational_meshnet": variational_meshnet, +} + def get(name): """Return callable that creates a particular `tf.keras.Model`. 
@@ -25,24 +43,18 @@ def get(name): if not isinstance(name, str): raise ValueError("Model name must be a string.") - models = { - "highresnet": highresnet, - "meshnet": meshnet, - "unet": unet, - "autoencoder": autoencoder, - "progressivegan": progressivegan, - "progressiveae": progressiveae, - "dcgan": dcgan, - "attention_unet": attention_unet, - "attention_unet_with_inception": attention_unet_with_inception, - "unetr": unetr, - "variational_meshnet": variational_meshnet, - } - try: - return models[name.lower()] + return _models[name.lower()] except KeyError: - avail = ", ".join(models.keys()) + avail = ", ".join(_models.keys()) raise ValueError( "Unknown model: '{}'. Available models are {}.".format(name, avail) ) + + +def available_models(): + return list(_models) + +def list_available_models(): + pprint(available_models()) + diff --git a/nobrainer/processing/segmentation.py b/nobrainer/processing/segmentation.py index c04ee906..ff0519df 100644 --- a/nobrainer/processing/segmentation.py +++ b/nobrainer/processing/segmentation.py @@ -5,6 +5,7 @@ from .base import BaseEstimator from .. import losses, metrics +from ..models import available_models, list_available_models logging.getLogger().setLevel(logging.INFO) @@ -23,12 +24,25 @@ def __init__( self.base_model = base_model.__name__ else: self.base_model = base_model + + if self.base_model and self.base_model not in available_models(): + raise ValueError( + "Unknown model: '{}'. Available models are {}.".format( + self.base_model, available_models() + ) + ) + self.model_ = None self.model_args = model_args or {} self.block_shape_ = None self.volume_shape_ = None self.scalar_labels_ = None + def add_model(self, base_model, model_args=None): + """Add a segmentation model""" + self.base_model = base_model + self.model_args = model_args or {} + def fit( self, dataset_train, @@ -97,9 +111,9 @@ def _compile(): epochs=epochs, steps_per_epoch=dataset_train.get_steps_per_epoch(), validation_data=dataset_validate.dataset if dataset_validate else None, - validation_steps=dataset_validate.get_steps_per_epoch() - if dataset_validate - else None, + validation_steps=( + dataset_validate.get_steps_per_epoch() if dataset_validate else None + ), callbacks=callbacks, verbose=verbose, ) @@ -119,3 +133,6 @@ def predict(self, x, batch_size=1, normalizer=None): batch_size=batch_size, normalizer=normalizer, ) + @classmethod + def list_available_models(cls): + list_available_models() From d271fc9ea8f85a087025d5b3810fb3adf06fc2fb Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 22 Mar 2024 18:32:09 +0000 Subject: [PATCH 17/17] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- nobrainer/models/__init__.py | 2 +- nobrainer/processing/segmentation.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/nobrainer/models/__init__.py b/nobrainer/models/__init__.py index 8dd75b44..a4842bc7 100644 --- a/nobrainer/models/__init__.py +++ b/nobrainer/models/__init__.py @@ -55,6 +55,6 @@ def get(name): def available_models(): return list(_models) + def list_available_models(): pprint(available_models()) - diff --git a/nobrainer/processing/segmentation.py b/nobrainer/processing/segmentation.py index ff0519df..7ba9a32a 100644 --- a/nobrainer/processing/segmentation.py +++ b/nobrainer/processing/segmentation.py @@ -133,6 +133,7 @@ def predict(self, x, batch_size=1, normalizer=None): batch_size=batch_size, normalizer=normalizer, ) + @classmethod 
def list_available_models(cls): list_available_models()
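
Taken together, this series registers several new architectures (attention_unet, attention_unet_with_inception, unetr, variational_meshnet), adds available_models/list_available_models to the model registry, and threads callbacks and verbose arguments through Segmentation.fit. The sketch below is illustrative only: the registry calls and model factory signatures come from the diffs above, while the dataset object and the commented-out fit call are assumptions about the surrounding nobrainer API that these patches do not show.

    from nobrainer.models import available_models, get
    from nobrainer.processing.segmentation import Segmentation

    # Registry helpers introduced in PATCH 16/17: enumerate registered factories.
    print(available_models())          # now includes "attention_unet", "unetr", "variational_meshnet", ...
    Segmentation.list_available_models()

    # Instantiate one of the newly registered models directly through the registry.
    model = get("attention_unet")(n_classes=1, input_shape=(64, 64, 64, 1))
    model.summary()

    # The estimator now rejects non-list callbacks and forwards a Keras verbosity
    # flag. The fit call is commented out because `dataset_train` is a placeholder
    # for a nobrainer dataset whose construction is outside this patch series.
    trainer = Segmentation(get("unetr"))
    # trainer.fit(
    #     dataset_train,  # placeholder: a nobrainer dataset object, not built here
    #     callbacks=[tf.keras.callbacks.EarlyStopping(patience=3)],  # needs `import tensorflow as tf`
    #     verbose=1,
    # )

The same callbacks/verbose pattern applies to any base model returned by the registry, since Segmentation.fit simply validates the list and passes it to the underlying Keras Model.fit.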