Commit

Update code format.
fchollet committed Jan 26, 2024
1 parent e149772 commit a44c051
Showing 45 changed files with 315 additions and 205 deletions.
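
All of the hunks below are mechanical reformatting with no behavior change. The pattern is consistent with Black's 2024 stable style (assuming Black ≥ 24.1 — the commit message itself only says "Update code format."): exactly one blank line after a module docstring, and over-long assignments or conditional expressions handled by parenthesizing the right-hand side instead of splitting the assignment target or leaving a bare multi-line conditional inside a call or comprehension. A minimal runnable sketch of the two patterns, with hypothetical stand-in values:

```python
# Hypothetical stand-in values, just to make the sketch runnable.
num_segments = 4
expected_bias_shape = (4,)
data_shape = [0, 8, 8]

# Old style: the subscripted target was split across lines so the long
# trailing comment stayed within the line-length limit.
data_shape[
    0
] = num_segments  # Replace first dimension (which corresponds to segments)

# New style: the target stays on one line and the right-hand side is
# wrapped in parentheses instead.
data_shape[0] = (
    num_segments  # Replace first dimension (which corresponds to segments)
)

# Conditional expressions get the same treatment: explicit wrapping
# parentheses instead of a bare multi-line conditional.
expected_num_trainable_weights = (
    2 if expected_bias_shape is not None else 1
)
```

If the Black assumption holds, the whole diff should be reproducible by running `black .` at the repository root with a 24.x release.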
1 change: 0 additions & 1 deletion benchmarks/layer_benchmark/conv_benchmark.py
@@ -12,7 +12,6 @@
 ```
 """

-
 from absl import app
 from absl import flags
1 change: 0 additions & 1 deletion benchmarks/layer_benchmark/pooling_benchmark.py
@@ -12,7 +12,6 @@
 ```
 """

-
 from absl import app
 from absl import flags
1 change: 1 addition & 0 deletions benchmarks/torch_ctl_benchmark/conv_model_benchmark.py
@@ -4,6 +4,7 @@
 vanilla torch way, and we compare the performance between building model with
 Keras and torch.
 """
+
 import numpy as np
 import torch
 import torch.nn as nn
1 change: 1 addition & 0 deletions benchmarks/torch_ctl_benchmark/dense_model_benchmark.py
@@ -4,6 +4,7 @@
 vanilla torch way, and we compare the performance between building model with
 Keras and torch.
 """
+
 import numpy as np
 import torch
 import torch.nn as nn
1 change: 1 addition & 0 deletions guides/distributed_training_with_jax.py
@@ -6,6 +6,7 @@
 Description: Guide to multi-GPU/TPU training for Keras models with JAX.
 Accelerator: GPU
 """
+
 """
 ## Introduction
1 change: 1 addition & 0 deletions guides/distributed_training_with_tensorflow.py
@@ -6,6 +6,7 @@
 Description: Guide to multi-GPU training for Keras models with TensorFlow.
 Accelerator: GPU
 """
+
 """
 ## Introduction
1 change: 1 addition & 0 deletions guides/distributed_training_with_torch.py
@@ -6,6 +6,7 @@
 Description: Guide to multi-GPU training for Keras models with PyTorch.
 Accelerator: GPU
 """
+
 """
 ## Introduction
1 change: 1 addition & 0 deletions guides/functional_api.py
@@ -6,6 +6,7 @@
 Description: Complete guide to the functional API.
 Accelerator: GPU
 """
+
 """
 ## Setup
 """
1 change: 1 addition & 0 deletions guides/making_new_layers_and_models_via_subclassing.py
@@ -6,6 +6,7 @@
 Description: Complete guide to writing `Layer` and `Model` objects from scratch.
 Accelerator: None
 """
+
 """
 ## Introduction
1 change: 1 addition & 0 deletions guides/sequential_model.py
@@ -6,6 +6,7 @@
 Description: Complete guide to the Sequential model.
 Accelerator: GPU
 """
+
 """
 ## Setup
1 change: 1 addition & 0 deletions guides/transfer_learning.py
@@ -6,6 +6,7 @@
 Description: Complete guide to transfer learning & fine-tuning in Keras.
 Accelerator: GPU
 """
+
 """
 ## Setup
 """
1 change: 1 addition & 0 deletions guides/understanding_masking_and_padding.py
@@ -6,6 +6,7 @@
 Description: Complete guide to using mask-aware sequence layers in Keras.
 Accelerator: None
 """
+
 """
 ## Setup
 """
1 change: 1 addition & 0 deletions guides/writing_a_custom_training_loop_in_jax.py
@@ -6,6 +6,7 @@
 Description: Writing low-level training & evaluation loops in JAX.
 Accelerator: None
 """
+
 """
 ## Setup
 """
1 change: 1 addition & 0 deletions guides/writing_a_custom_training_loop_in_tensorflow.py
@@ -6,6 +6,7 @@
 Description: Writing low-level training & evaluation loops in TensorFlow.
 Accelerator: None
 """
+
 """
 ## Setup
 """
1 change: 1 addition & 0 deletions guides/writing_a_custom_training_loop_in_torch.py
@@ -6,6 +6,7 @@
 Description: Writing low-level training & evaluation loops in PyTorch.
 Accelerator: None
 """
+
 """
 ## Setup
 """
1 change: 1 addition & 0 deletions guides/writing_your_own_callbacks.py
@@ -6,6 +6,7 @@
 Description: Complete guide to writing new Keras callbacks.
 Accelerator: GPU
 """
+
 """
 ## Introduction
1 change: 1 addition & 0 deletions keras/backend/jax/distribution_lib.py
@@ -5,6 +5,7 @@
 This is just a prototype and we might want to unify it
 with other backends in the future.
 """
+
 import jax
 import numpy as np
12 changes: 6 additions & 6 deletions keras/backend/numpy/math.py
@@ -16,9 +16,9 @@ def segment_sum(data, segment_ids, num_segments=None, sorted=False):
     valid_segment_ids = segment_ids[valid_indices]

     data_shape = list(valid_data.shape)
-    data_shape[
-        0
-    ] = num_segments  # Replace first dimension (which corresponds to segments)
+    data_shape[0] = (
+        num_segments  # Replace first dimension (which corresponds to segments)
+    )

     if sorted:
         result = np.zeros(data_shape, dtype=valid_data.dtype)
@@ -43,9 +43,9 @@ def segment_max(data, segment_ids, num_segments=None, sorted=False):
     valid_segment_ids = segment_ids[valid_indices]

     data_shape = list(valid_data.shape)
-    data_shape[
-        0
-    ] = num_segments  # Replace first dimension (which corresponds to segments)
+    data_shape[0] = (
+        num_segments  # Replace first dimension (which corresponds to segments)
+    )

     if sorted:
         result = np.zeros(data_shape, dtype=valid_data.dtype)
1 change: 1 addition & 0 deletions keras/backend/tensorflow/distribution_lib.py
@@ -5,6 +5,7 @@
 This is just a prototype and we might want to unify it
 with other backends in the future.
 """
+
 import tensorflow as tf
 from tensorflow.experimental import dtensor
8 changes: 5 additions & 3 deletions keras/backend/tensorflow/numpy.py
@@ -576,9 +576,11 @@ def concatenate(xs, axis=0):
         return tf.sparse.concat(axis=axis, sp_inputs=xs)
     else:
         xs = [
-            convert_to_tensor(x, sparse=False)
-            if isinstance(x, tf.SparseTensor)
-            else x
+            (
+                convert_to_tensor(x, sparse=False)
+                if isinstance(x, tf.SparseTensor)
+                else x
+            )
             for x in xs
         ]
     xs = tf.nest.map_structure(convert_to_tensor, xs)
8 changes: 5 additions & 3 deletions keras/backend/tensorflow/rnn.py
@@ -244,9 +244,11 @@ def _get_input_tensor(time):
         for i, inp in enumerate(flattened_inputs)
     )
     input_ta = tuple(
-        ta.unstack(input_)
-        if not go_backwards
-        else ta.unstack(tf.reverse(input_, [0]))
+        (
+            ta.unstack(input_)
+            if not go_backwards
+            else ta.unstack(tf.reverse(input_, [0]))
+        )
        for ta, input_ in zip(input_ta, flattened_inputs)
    )
8 changes: 5 additions & 3 deletions keras/backend/torch/rnn.py
@@ -167,9 +167,11 @@ def _get_input_tensor(time):
     # flattened tensor.

     input_ta = tuple(
-        list(torch.unbind(input_))
-        if not go_backwards
-        else list(torch.unbind(torch.flip(input_, [0])))
+        (
+            list(torch.unbind(input_))
+            if not go_backwards
+            else list(torch.unbind(torch.flip(input_, [0])))
+        )
        for input_ in flattened_inputs
    )
1 change: 1 addition & 0 deletions keras/export/export_lib_test.py
@@ -1,4 +1,5 @@
 """Tests for inference-only model/layer exporting utilities."""
+
 import os

 import numpy as np
1 change: 0 additions & 1 deletion keras/layers/convolutional/base_separable_conv.py
@@ -1,6 +1,5 @@
 """Keras abstract base layer for separable convolution."""

-
 from keras import activations
 from keras import constraints
 from keras import initializers
6 changes: 3 additions & 3 deletions keras/layers/convolutional/conv_test.py
@@ -206,9 +206,9 @@ def np_conv3d(
         (*new_kenel_size_tuple, ch_in, ch_out),
         dtype=kernel_weights.dtype,
     )
-    new_kernel_weights[
-        ::h_dilation, ::w_dilation, ::d_dilation
-    ] = kernel_weights
+    new_kernel_weights[::h_dilation, ::w_dilation, ::d_dilation] = (
+        kernel_weights
+    )
     kernel_weights = new_kernel_weights
     h_kernel, w_kernel, d_kernel = kernel_weights.shape[:3]
6 changes: 3 additions & 3 deletions keras/layers/convolutional/conv_transpose_test.py
@@ -238,9 +238,9 @@ def np_conv3d_transpose(
         (*new_kenel_size_tuple, ch_out, ch_in),
         dtype=kernel_weights.dtype,
     )
-    new_kernel_weights[
-        ::h_dilation, ::w_dilation, ::d_dilation
-    ] = kernel_weights
+    new_kernel_weights[::h_dilation, ::w_dilation, ::d_dilation] = (
+        kernel_weights
+    )
     kernel_weights = new_kernel_weights
     h_kernel, w_kernel, d_kernel = kernel_weights.shape[:3]
6 changes: 3 additions & 3 deletions keras/layers/core/einsum_dense_test.py
@@ -254,9 +254,9 @@ def test_einsum_dense_basics(
             },
             input_shape=input_shape,
             expected_output_shape=expected_output_shape,
-            expected_num_trainable_weights=2
-            if expected_bias_shape is not None
-            else 1,
+            expected_num_trainable_weights=(
+                2 if expected_bias_shape is not None else 1
+            ),
             expected_num_non_trainable_weights=0,
             expected_num_seed_generators=0,
             expected_num_losses=0,
8 changes: 4 additions & 4 deletions keras/layers/core/lambda_layer.py
@@ -217,10 +217,10 @@ def from_config(cls, config, custom_objects=None, safe_mode=None):
             )
             config["output_shape"] = fn
         else:
-            config[
-                "output_shape"
-            ] = serialization_lib.deserialize_keras_object(
-                fn_config, custom_objects=custom_objects
+            config["output_shape"] = (
+                serialization_lib.deserialize_keras_object(
+                    fn_config, custom_objects=custom_objects
+                )
             )
         if "arguments" in config:
             config["arguments"] = serialization_lib.deserialize_keras_object(
1 change: 1 addition & 0 deletions keras/layers/layer.py
@@ -15,6 +15,7 @@
 - RNG seed tracking
 - activity regularization
 """
+
 import collections
 import inspect
 import warnings
6 changes: 3 additions & 3 deletions keras/layers/normalization/batch_normalization_test.py
@@ -132,9 +132,9 @@ def test_correctness(
         self.assertNotAllClose(unmasked_out, masked_out)

     @parameterized.product(
-        synchronized=(False, True)
-        if backend.backend == "tensorflow"
-        else (False,),
+        synchronized=(
+            (False, True) if backend.backend == "tensorflow" else (False,)
+        ),
     )
     def test_input_fully_masked(self, synchronized):
         norm = layers.BatchNormalization(
32 changes: 20 additions & 12 deletions keras/layers/reshaping/cropping2d.py
@@ -103,12 +103,16 @@ def compute_output_shape(self, input_shape):
             return (
                 input_shape[0],
                 input_shape[1],
-                input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
-                if input_shape[2] is not None
-                else None,
-                input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
-                if input_shape[3] is not None
-                else None,
+                (
+                    input_shape[2] - self.cropping[0][0] - self.cropping[0][1]
+                    if input_shape[2] is not None
+                    else None
+                ),
+                (
+                    input_shape[3] - self.cropping[1][0] - self.cropping[1][1]
+                    if input_shape[3] is not None
+                    else None
+                ),
             )
         else:
             if (
@@ -125,12 +129,16 @@ def compute_output_shape(self, input_shape):
             )
             return (
                 input_shape[0],
-                input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
-                if input_shape[1] is not None
-                else None,
-                input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
-                if input_shape[2] is not None
-                else None,
+                (
+                    input_shape[1] - self.cropping[0][0] - self.cropping[0][1]
+                    if input_shape[1] is not None
+                    else None
+                ),
+                (
+                    input_shape[2] - self.cropping[1][0] - self.cropping[1][1]
+                    if input_shape[2] is not None
+                    else None
+                ),
                 input_shape[3],
             )
8 changes: 5 additions & 3 deletions keras/layers/reshaping/flatten_test.py
@@ -46,9 +46,11 @@ def test_flatten(self, sparse):
             init_kwargs={},
             input_data=inputs,
             input_sparse=True,
-            expected_output=expected_output_channels_last
-            if backend.config.image_data_format() == "channels_last"
-            else expected_output_channels_first,
+            expected_output=(
+                expected_output_channels_last
+                if backend.config.image_data_format() == "channels_last"
+                else expected_output_channels_first
+            ),
             expected_output_sparse=sparse,
             run_training_check=not sparse,
         )
6 changes: 3 additions & 3 deletions keras/layers/rnn/conv_lstm1d_test.py
@@ -44,9 +44,9 @@ def test_basics(self):
                 "return_sequences": True,
             },
             input_shape=(3, 2, 8, 3) if channels_last else (3, 2, 3, 8),
-            expected_output_shape=(3, 2, 6, 5)
-            if channels_last
-            else (3, 2, 5, 6),
+            expected_output_shape=(
+                (3, 2, 6, 5) if channels_last else (3, 2, 5, 6)
+            ),
             expected_num_trainable_weights=3,
             expected_num_non_trainable_weights=0,
             supports_masking=True,
… (12 more changed files not shown)
