From 3eaecd13c981b50c22a15bffd77c56cbd72db6b2 Mon Sep 17 00:00:00 2001
From: HuYongtao
Date: Fri, 5 Jul 2024 16:22:13 +0800
Subject: [PATCH] fix(lint): fix lint warning

---
 example/wide_n_deep/follower.py | 3 ++-
 example/wide_n_deep/leader.py   | 5 +++--
 fedlearner/trainer/estimator.py | 3 ++-
 fedlearner/trainer/run_hooks.py | 2 +-
 4 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/example/wide_n_deep/follower.py b/example/wide_n_deep/follower.py
index ab0591c1b..dc77ff58a 100644
--- a/example/wide_n_deep/follower.py
+++ b/example/wide_n_deep/follower.py
@@ -122,7 +122,8 @@ def model_fn(model, features, labels, mode):
         train_op = model.minimize(
             optimizer, act1_f, grad_loss=gact1_f, global_step=global_step)
         final_ops = final_fn(model=model, tensor_name='reflux_embedding',
-                             is_send=False, assignee=peer_embeddings, shape=[num_slot,fid_size,embed_size])
+                             is_send=False, assignee=peer_embeddings,
+                             shape=[num_slot, fid_size, embed_size])
         embedding_hook = tf.train.FinalOpsHook(final_ops=final_ops)
         return model.make_spec(mode, loss=tf.math.reduce_mean(act1_f),
                                training_chief_hooks=[embedding_hook],
diff --git a/example/wide_n_deep/leader.py b/example/wide_n_deep/leader.py
index 781f2ebce..1a052079c 100644
--- a/example/wide_n_deep/leader.py
+++ b/example/wide_n_deep/leader.py
@@ -69,7 +69,7 @@ def final_fn(model, tensor_name, is_send, tensor=None, shape=None):
     if is_send:
         assert tensor, "Please specify tensor to send"
         if DEBUG_PRINT:
-            ops.append(tf.print(tensor))
+            ops.append(tf.print(tensor))
         ops.append(model.send_no_deps(tensor_name, tensor))
     return ops
 
@@ -161,7 +161,8 @@ def model_fn(model, features, labels, mode):
             {"loss" : loss}, every_n_iter=10)
         metric_hook = flt.GlobalStepMetricTensorHook(tensor_dict={"loss": loss},
                                                      every_steps=10)
-        final_ops = final_fn(model=model, tensor_name='reflux_embedding',is_send=True,tensor=embeddings)
+        final_ops = final_fn(model=model, tensor_name='reflux_embedding',
+                             is_send=True, tensor=embeddings)
         embedding_hook = tf.train.FinalOpsHook(final_ops=final_ops)
         optimizer = tf.train.GradientDescentOptimizer(0.1)
 
diff --git a/fedlearner/trainer/estimator.py b/fedlearner/trainer/estimator.py
index 360be6848..6a0332216 100644
--- a/fedlearner/trainer/estimator.py
+++ b/fedlearner/trainer/estimator.py
@@ -87,7 +87,8 @@ def send_no_deps(self, name, tensor):
         self._sends.append((name, tensor, False))
         return send_op
 
-    def recv_no_deps(self, name, dtype=tf.float32, require_grad=False, shape=None):
+    def recv_no_deps(self,
+                     name, dtype=tf.float32, require_grad=False, shape=None):
         receive_op = self._bridge.receive_op(name, dtype)
         if shape:
             receive_op = tf.ensure_shape(receive_op, shape)
diff --git a/fedlearner/trainer/run_hooks.py b/fedlearner/trainer/run_hooks.py
index ec8131b3d..df674c6e0 100644
--- a/fedlearner/trainer/run_hooks.py
+++ b/fedlearner/trainer/run_hooks.py
@@ -243,4 +243,4 @@ def _parse_op_label(self, label):
             inputs = []
         else:
             inputs = inputs.split(', ')
-        return nn, op, inputs
\ No newline at end of file
+        return nn, op, inputs