From 2ea3d64f47216c8b7e9f6561587e99ee7636b92a Mon Sep 17 00:00:00 2001
From: ChanBong
Date: Mon, 13 Nov 2023 20:50:01 +0000
Subject: [PATCH] fix docstring issues in torch.utils.tensorboard (#113336)

Fixes #112637

Fixed all the issues listed.

### Error Counts

|File | Count Before | Count now|
|---- | ---- | ---- |
|`torch/utils/tensorboard/_proto_graph.py` | 9 | 0|
|`torch/utils/tensorboard/_pytorch_graph.py` | 27 | 14|
|`torch/utils/tensorboard/_utils.py` | 5 | 2|
|`torch/utils/tensorboard/summary.py` | 27 | 12|
|`torch/utils/tensorboard/writer.py` | 42 | 4|
|`torch/utils/tensorboard/_caffe2_graph.py` | 19 | 0|
|`torch/utils/hipify/constants.py` | 2 | 0|

Pull Request resolved: https://github.com/pytorch/pytorch/pull/113336
Approved by: https://github.com/ezyang
---
 torch/utils/hipify/constants.py           |  3 +-
 torch/utils/tensorboard/_caffe2_graph.py  | 30 +++++-----
 torch/utils/tensorboard/_proto_graph.py   | 19 +++---
 torch/utils/tensorboard/_pytorch_graph.py | 21 +++----
 torch/utils/tensorboard/_utils.py         |  7 ++-
 torch/utils/tensorboard/summary.py        | 25 ++++----
 torch/utils/tensorboard/writer.py         | 70 +++++++++++++----------
 7 files changed, 98 insertions(+), 77 deletions(-)

diff --git a/torch/utils/hipify/constants.py b/torch/utils/hipify/constants.py
index 20d62b2fb290c..fb56e7a77a3ed 100644
--- a/torch/utils/hipify/constants.py
+++ b/torch/utils/hipify/constants.py
@@ -1,4 +1,5 @@
-""" Constants for annotations in the mapping.
+"""Constants for annotations in the mapping.
+
 The constants defined here are used to annotate the mapping tuples
 in cuda_to_hip_mappings.py. They are based on
 https://github.com/ROCm-Developer-Tools/HIP/blob/master/hipify-clang/src/Statistics.h
diff --git a/torch/utils/tensorboard/_caffe2_graph.py b/torch/utils/tensorboard/_caffe2_graph.py
index 2aa162af7ad5c..536746026052d 100644
--- a/torch/utils/tensorboard/_caffe2_graph.py
+++ b/torch/utils/tensorboard/_caffe2_graph.py
@@ -41,6 +41,7 @@ def _make_unique_name(seen: Set[str], name: str, min_version: int = 0):
 def _rename_tensorflow_style(shapes, blob_name_tracker, ops):
     """
     Convert some of the common names in Caffe2 to tensorflow.
+
     NOTE: The common names in both Caffe2 and Tensorflow are currently
     hardcoded, if either side changes at some point, then this code should
     change as well.
@@ -81,6 +82,7 @@ def f(name):
 def _convert_to_ssa(shapes, blob_name_tracker, ops):
     """
     Convert an operator graph to SSA (i.e. out-of-place).
+
     i.e. blobs will be renamed so that each blob is produced only once.

     Args:
@@ -215,8 +217,8 @@ def g(name):
 def _add_gradient_scope(shapes, blob_name_tracker, ops):
     """
-    For all operators or blobs with name containing "_grad", add a
-    "GRADIENTS/" scope.
+    For all operators or blobs with name containing "_grad", add a "GRADIENTS/" scope.
+
     Note: breaks graph execution since the blob -> gradient mapping is
     hardcoded.
@@ -241,8 +243,7 @@ def f(name):
 def _replace_colons(shapes, blob_name_tracker, ops, repl):
     """
-    `:i` has a special meaning in Tensorflow. This function replaces all colons
-    with $ to avoid any possible conflicts.
+    `:i` has a special meaning in Tensorflow. This function replaces all colons with $ to avoid any possible conflicts.

     Args:
         shapes: Dictionary mapping blob names to their shapes/dimensions.
@@ -266,6 +267,7 @@ def f(name):
 def _fill_missing_operator_names(ops):
     """
     Give missing operators a name.
+
     We expect C2 operators to be generally unnamed. This gives them a scope
     (inferred from their outputs) and a name after their type. Duplicates will
     be postfixed by an index.
@@ -323,8 +325,7 @@ def _tf_device(device_option):
 def _add_tf_shape(attr_dict, ints):
     """
-    Converts a list of ints to a TensorShapeProto representing the dimensions of
-    a blob/object.
+    Convert a list of ints to a TensorShapeProto representing the dimensions of a blob/object.

     Args:
         attr_dict: Dictionary to update (usually attributes of a Node)
@@ -343,8 +344,7 @@ def _add_tf_shape(attr_dict, ints):
 def _set_tf_attr(attr_dict, arg):
     """
-    Add attributes to a node. Key is the arg.name, and values can be shape,
-    floats, strings, ints or an empty list.
+    Add attributes to a node. Key is the arg.name, and values can be shape, floats, strings, ints or an empty list.

     Args:
         attr_dict: Dictionary to update (usually attributes of a Node)
@@ -388,7 +388,7 @@ def _set_tf_attr(attr_dict, arg):
 def _operator_to_node(shapes, op):
     """
-    Converts an operator to a node in a TF graph.
+    Convert an operator to a node in a TF graph.

     Args:
         shapes: Dictionary mapping blob names to their shapes/dimensions.
@@ -477,7 +477,7 @@ def _operator_to_node_simp(op, inter_blobs, seen):
 def _blob_to_node(producing_ops, shapes, name):
     """
-    Converts a blob (operator input or output) to a node in a TF graph.
+    Convert a blob (operator input or output) to a node in a TF graph.

     Args:
         producing_ops: Dictionary of blob name to list of
@@ -512,7 +512,7 @@ def _blob_to_node(producing_ops, shapes, name):
 def _clear_debug_info(ops, perform_clear):
     """
-    Removes debug information from operators, they are copious.
+    Remove debug information from operators; it is copious.

     Args:
         ops: List of Caffe2 operators
@@ -536,6 +536,7 @@ def _clear_debug_info(ops, perform_clear):
 def _check_if_forward(blob):
     """
     Blobs with names containing '_m' or 'grad' are part of the backward pass.
+
     This function references facebookresearch/Detectron/detectron/utils/net.py.

     Args:
@@ -636,7 +637,7 @@ def _operators_to_graph_def(
     custom_rename=None,
 ):
     """
-    Main function to convert set of operators to a graph.
+    Convert a set of operators to a graph.

     Args:
         shapes: Dictionary mapping blob names to their shapes/dimensions.
@@ -760,8 +761,9 @@ def _try_get_shapes(nets):
 def model_to_graph_def(model, **kwargs):
     """
-    Convert a Caffe2 model to a Tensorflow graph. This function extracts
-    'param_init_net' and 'net' from the model and passes it to nets_to_graph()
+    Convert a Caffe2 model to a Tensorflow graph.
+
+    This function extracts 'param_init_net' and 'net' from the model and passes it to nets_to_graph()
     for further processing.

     Args:
diff --git a/torch/utils/tensorboard/_proto_graph.py b/torch/utils/tensorboard/_proto_graph.py
index 0700e0ffab35b..3c0d15723d245 100644
--- a/torch/utils/tensorboard/_proto_graph.py
+++ b/torch/utils/tensorboard/_proto_graph.py
@@ -5,10 +5,11 @@
 def attr_value_proto(dtype, shape, s):
-    """Creates a dict of objects matching
-    https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/proto/attr_value.proto
-    specifically designed for a NodeDef. The values have been
-    reverse engineered from standard TensorBoard logged data.
+    """Create a dict of objects matching a NodeDef's attr field.
+
+    Follows https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/proto/attr_value.proto
+    specifically designed for a NodeDef. The values have been reverse engineered from
+    standard TensorBoard logged data.
     """
     attr = {}
     if s is not None:
@@ -20,8 +21,9 @@ def attr_value_proto(dtype, shape, s):
 def tensor_shape_proto(outputsize):
-    """Creates an object matching
-    https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/proto/tensor_shape.proto
+    """Create an object matching a tensor_shape field.
+
+    Follows https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/proto/tensor_shape.proto .
     """
     return TensorShapeProto(dim=[TensorShapeProto.Dim(size=d) for d in outputsize])
@@ -35,8 +37,9 @@ def node_proto(
     outputsize=None,
     attributes="",
 ):
-    """Creates an object matching
-    https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/proto/node_def.proto
+    """Create an object matching a NodeDef.
+
+    Follows https://github.com/tensorflow/tensorboard/blob/master/tensorboard/compat/proto/node_def.proto .
     """
     if input is None:
         input = []
diff --git a/torch/utils/tensorboard/_pytorch_graph.py b/torch/utils/tensorboard/_pytorch_graph.py
index ccfe5d81af1ff..f4274199ffd33 100644
--- a/torch/utils/tensorboard/_pytorch_graph.py
+++ b/torch/utils/tensorboard/_pytorch_graph.py
@@ -119,8 +119,7 @@ def __init__(self, node_cpp):
 class GraphPy:
-    """Helper class to convert torch.nn.Module to GraphDef proto and visualization
-    with TensorBoard.
+    """Helper class to convert torch.nn.Module to GraphDef proto for visualization with TensorBoard.

     GraphDef generation operates in two passes:
@@ -213,10 +212,7 @@ def populate_namespace_from_OP_to_IO(self):
         ]

     def to_proto(self):
-        """
-        Converts graph representation of GraphPy object to TensorBoard
-        required format.
-        """
+        """Convert graph representation of GraphPy object to TensorBoard required format."""
         # TODO: compute correct memory usage and CPU time once
         # PyTorch supports it
         nodes = []
@@ -234,9 +230,9 @@ def to_proto(self):
 def parse(graph, trace, args=None, omit_useless_nodes=True):
-    """This method parses an optimized PyTorch model graph and produces
-    a list of nodes and node stats for eventual conversion to TensorBoard
-    protobuf format.
+    """Parse an optimized PyTorch model graph and produce a list of nodes and node stats.
+
+    Useful for eventual conversion to TensorBoard protobuf format.

     Args:
       graph (PyTorch module): The model graph to be parsed.
@@ -318,8 +314,7 @@ def parse_traced_name(module):
 def graph(model, args, verbose=False, use_strict_trace=True):
     """
-    This method processes a PyTorch model and produces a `GraphDef` proto
-    that can be logged to TensorBoard.
+    Process a PyTorch model and produce a `GraphDef` proto that can be logged to TensorBoard.

     Args:
       model (PyTorch module): The model to be parsed.
@@ -363,7 +358,7 @@ def graph(model, args, verbose=False, use_strict_trace=True):
 @contextlib.contextmanager
 def _set_model_to_eval(model):
-    """A context manager to temporarily set the training mode of ``model`` to eval."""
+    """Context manager to temporarily set the training mode of ``model`` to eval."""
     if not isinstance(model, torch.jit.ScriptFunction):
         originally_training = model.training
         model.train(False)
@@ -380,6 +375,6 @@ def _set_model_to_eval(model):
 def _node_get(node: torch._C.Node, key: str):
-    """Gets attributes of a node which is polymorphic over return type."""
+    """Get attributes of a node which is polymorphic over return type."""
     sel = node.kindOf(key)
     return getattr(node, sel)(key)
diff --git a/torch/utils/tensorboard/_utils.py b/torch/utils/tensorboard/_utils.py
index 2b959726fd262..dabe57b6842ec 100644
--- a/torch/utils/tensorboard/_utils.py
+++ b/torch/utils/tensorboard/_utils.py
@@ -38,8 +38,11 @@ def render_to_rgb(figure):
 def _prepare_video(V):
     """
-    Converts a 5D tensor [batchsize, time(frame), channel(color), height, width]
-    into 4D tensor with dimension [time(frame), new_width, new_height, channel].
+    Convert a 5D tensor into a 4D tensor.
+
+    Conversion is done from [batchsize, time(frame), channel(color), height, width] (5D tensor)
+    to [time(frame), new_width, new_height, channel] (4D tensor).
+
     A batch of images is spread to a grid, which forms a frame.
     e.g. Video with batchsize 16 will have a 4x4 grid.
     """
diff --git a/torch/utils/tensorboard/summary.py b/torch/utils/tensorboard/summary.py
index aaf510ba187f7..8211f6e0c8024 100644
--- a/torch/utils/tensorboard/summary.py
+++ b/torch/utils/tensorboard/summary.py
@@ -161,7 +161,8 @@ def _draw_single_box(
 def hparams(hparam_dict=None, metric_dict=None, hparam_domain_discrete=None):
-    """Outputs three `Summary` protocol buffers needed by hparams plugin.
+    """Output three `Summary` protocol buffers needed by hparams plugin.
+
     `Experiment` keeps the metadata of an experiment, such as the name of the
     hyperparameters and the name of the metrics.
     `SessionStartInfo` keeps key-value pairs of the hyperparameters
@@ -349,7 +350,8 @@ def hparams(hparam_dict=None, metric_dict=None, hparam_domain_discrete=None):
 def scalar(name, tensor, collections=None, new_style=False, double_precision=False):
-    """Outputs a `Summary` protocol buffer containing a single scalar value.
+    """Output a `Summary` protocol buffer containing a single scalar value.
+
     The generated Summary has a Tensor.proto containing the input Tensor.

     Args:
       name: A name for the generated node. Will also serve as the series name in
@@ -429,7 +431,8 @@ def tensor_proto(tag, tensor):
 def histogram_raw(name, min, max, num, sum, sum_squares, bucket_limits, bucket_counts):
     # pylint: disable=line-too-long
-    """Outputs a `Summary` protocol buffer with a histogram.
+    """Output a `Summary` protocol buffer with a histogram.
+
     The generated
     [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
     has one summary value containing a histogram for `values`.
@@ -461,7 +464,8 @@ def histogram_raw(name, min, max, num, sum, sum_squares, bucket_limits, bucket_c
 def histogram(name, values, bins, max_bins=None):
     # pylint: disable=line-too-long
-    """Outputs a `Summary` protocol buffer with a histogram.
+    """Output a `Summary` protocol buffer with a histogram.
+
     The generated
     [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto)
     has one summary value containing a histogram for `values`.
@@ -536,7 +540,8 @@ def make_histogram(values, bins, max_bins=None):
 def image(tag, tensor, rescale=1, dataformats="NCHW"):
-    """Outputs a `Summary` protocol buffer with images.
+    """Output a `Summary` protocol buffer with images.
+
     The summary has up to `max_images` summary values containing images. The
     images are built from `tensor` which must be 3-D with shape `[height, width,
     channels]` and where `channels` can be:
@@ -574,7 +579,7 @@ def image(tag, tensor, rescale=1, dataformats="NCHW"):
 def image_boxes(
     tag, tensor_image, tensor_boxes, rescale=1, dataformats="CHW", labels=None
 ):
-    """Outputs a `Summary` protocol buffer with images."""
+    """Output a `Summary` protocol buffer with images."""
     tensor_image = make_np(tensor_image)
     tensor_image = convert_to_HWC(tensor_image, dataformats)
     tensor_boxes = make_np(tensor_boxes)
@@ -606,7 +611,7 @@ def draw_boxes(disp_image, boxes, labels=None):
 def make_image(tensor, rescale=1, rois=None, labels=None):
-    """Convert a numpy representation of an image to Image protobuf"""
+    """Convert a numpy representation of an image to Image protobuf."""
     from PIL import Image

     height, width, channel = tensor.shape
@@ -859,7 +864,7 @@ def compute_curve(labels, predictions, num_thresholds=None, weights=None):
 def _get_tensor_summary(
     name, display_name, description, tensor, content_type, components, json_config
 ):
-    """Creates a tensor summary with summary metadata.
+    """Create a tensor summary with summary metadata.

     Args:
       name: Uniquely identifiable name of the summary op. Could be replaced by
@@ -916,7 +921,7 @@ def _get_tensor_summary(
 def _get_json_config(config_dict):
-    """Parses and returns JSON string from python dictionary."""
+    """Parse and return a JSON string from a Python dictionary."""
     json_config = "{}"
     if config_dict is not None:
         json_config = json.dumps(config_dict, sort_keys=True)
@@ -927,7 +932,7 @@ def _get_json_config(config_dict):
 def mesh(
     tag, vertices, colors, faces, config_dict, display_name=None, description=None
 ):
-    """Outputs a merged `Summary` protocol buffer with a mesh/point cloud.
+    """Output a merged `Summary` protocol buffer with a mesh/point cloud.

     Args:
       tag: A name for this summary operation.
diff --git a/torch/utils/tensorboard/writer.py b/torch/utils/tensorboard/writer.py
index 188bd7aab0493..bd8c5d188dc50 100644
--- a/torch/utils/tensorboard/writer.py
+++ b/torch/utils/tensorboard/writer.py
@@ -1,5 +1,4 @@
-"""Provides an API for writing protocol buffers to event files to be
-consumed by TensorBoard for visualization."""
+"""Provide an API for writing protocol buffers to event files to be consumed by TensorBoard for visualization."""

 import os
 import time
@@ -51,7 +50,8 @@ class FileWriter:
     """

     def __init__(self, log_dir, max_queue=10, flush_secs=120, filename_suffix=""):
-        """Creates a `FileWriter` and an event file.
+        """Create a `FileWriter` and an event file.
+
         On construction the writer creates a new event file in `log_dir`.
         The other arguments to the constructor control the asynchronous
         writes to the event file.
@@ -77,11 +77,12 @@ def __init__(self, log_dir, max_queue=10, flush_secs=120, filename_suffix=""):
         )

     def get_logdir(self):
-        """Returns the directory where event file will be written."""
+        """Return the directory where event file will be written."""
         return self.event_writer.get_logdir()

     def add_event(self, event, step=None, walltime=None):
-        """Adds an event to the event file.
+        """Add an event to the event file.
+
         Args:
             event: An `Event` protocol buffer.
             step: Number.
Optional global step value for training process @@ -97,7 +98,8 @@ def add_event(self, event, step=None, walltime=None): self.event_writer.add_event(event) def add_summary(self, summary, global_step=None, walltime=None): - """Adds a `Summary` protocol buffer to the event file. + """Add a `Summary` protocol buffer to the event file. + This method wraps the provided summary in an `Event` protocol buffer and adds it to the event file. @@ -112,7 +114,7 @@ def add_summary(self, summary, global_step=None, walltime=None): self.add_event(event, global_step, walltime) def add_graph(self, graph_profile, walltime=None): - """Adds a `Graph` and step stats protocol buffer to the event file. + """Add a `Graph` and step stats protocol buffer to the event file. Args: graph_profile: A `Graph` and step stats protocol buffer. @@ -131,7 +133,7 @@ def add_graph(self, graph_profile, walltime=None): self.add_event(event, None, walltime) def add_onnx_graph(self, graph, walltime=None): - """Adds a `Graph` protocol buffer to the event file. + """Add a `Graph` protocol buffer to the event file. Args: graph: A `Graph` protocol buffer. @@ -143,6 +145,7 @@ def add_onnx_graph(self, graph, walltime=None): def flush(self): """Flushes the event file to disk. + Call this method to make sure that all pending events have been written to disk. """ @@ -150,12 +153,14 @@ def flush(self): def close(self): """Flushes the event file to disk and close the file. + Call this method when you do not need the summary writer anymore. """ self.event_writer.close() def reopen(self): """Reopens the EventFileWriter. + Can be called after `close()` to add more events in the same directory. The events will go into a new events file. Does nothing if the EventFileWriter was not closed. @@ -164,8 +169,7 @@ def reopen(self): class SummaryWriter: - """Writes entries directly to event files in the log_dir to be - consumed by TensorBoard. + """Writes entries directly to event files in the log_dir to be consumed by TensorBoard. The `SummaryWriter` class provides a high-level API to create an event file in a given directory and add summaries and events to it. The class updates the @@ -183,8 +187,7 @@ def __init__( flush_secs=120, filename_suffix="", ): - """Creates a `SummaryWriter` that will write out events and summaries - to the event file. + """Create a `SummaryWriter` that will write out events and summaries to the event file. Args: log_dir (str): Save directory location. Default is @@ -257,9 +260,11 @@ def __init__( def _check_caffe2_blob(self, item): """ - Caffe2 users have the option of passing a string representing the name of - a blob in the workspace instead of passing the actual Tensor/array containing - the numeric values. Thus, we need to check if we received a string as input + Check if the input is a string representing a Caffe2 blob name. + + Caffe2 users have the option of passing a string representing the name of a blob + in the workspace instead of passing the actual Tensor/array containing the numeric values. + Thus, we need to check if we received a string as input instead of an actual Tensor/array, and if so, we need to fetch the Blob from the workspace corresponding to that name. Fetching can be done with the following: @@ -271,7 +276,7 @@ def _check_caffe2_blob(self, item): return isinstance(item, str) def _get_file_writer(self): - """Returns the default FileWriter instance. Recreates it if closed.""" + """Return the default FileWriter instance. 
Recreates it if closed.""" if self.all_writers is None or self.file_writer is None: self.file_writer = FileWriter( self.log_dir, self.max_queue, self.flush_secs, self.filename_suffix @@ -292,7 +297,7 @@ def _get_file_writer(self): return self.file_writer def get_logdir(self): - """Returns the directory where event files will be written.""" + """Return the directory where event files will be written.""" return self.log_dir def add_hparams( @@ -391,7 +396,7 @@ def add_scalar( self._get_file_writer().add_summary(summary, global_step, walltime) def add_scalars(self, main_tag, tag_scalar_dict, global_step=None, walltime=None): - """Adds many scalar data to summary. + """Add many scalar data to summary. Args: main_tag (str): The parent name for the tags @@ -534,7 +539,7 @@ def add_histogram_raw( global_step=None, walltime=None, ): - """Adds histogram with raw data. + """Add histogram with raw data. Args: tag (str): Data identifier @@ -1022,7 +1027,8 @@ def add_pr_curve( weights=None, walltime=None, ): - """Adds precision recall curve. + """Add precision recall curve. + Plotting a precision-recall curve lets you understand your model's performance under different threshold settings. With this function, you provide the ground truth labeling (T/F) and prediction confidence @@ -1074,7 +1080,7 @@ def add_pr_curve_raw( weights=None, walltime=None, ): - """Adds precision recall curve with raw data. + """Add precision recall curve with raw data. Args: tag (str): Data identifier @@ -1110,8 +1116,7 @@ def add_pr_curve_raw( def add_custom_scalars_multilinechart( self, tags, category="default", title="untitled" ): - """Shorthand for creating multilinechart. Similar to ``add_custom_scalars()``, but the only necessary argument - is *tags*. + """Shorthand for creating multilinechart. Similar to ``add_custom_scalars()``, but the only necessary argument is *tags*. Args: tags (list): list of tags that have been used in ``add_scalar()`` @@ -1129,8 +1134,10 @@ def add_custom_scalars_multilinechart( def add_custom_scalars_marginchart( self, tags, category="default", title="untitled" ): - """Shorthand for creating marginchart. Similar to ``add_custom_scalars()``, but the only necessary argument - is *tags*, which should have exactly 3 elements. + """Shorthand for creating marginchart. + + Similar to ``add_custom_scalars()``, but the only necessary argument is *tags*, + which should have exactly 3 elements. Args: tags (list): list of tags that have been used in ``add_scalar()`` @@ -1147,9 +1154,11 @@ def add_custom_scalars_marginchart( self._get_file_writer().add_summary(custom_scalars(layout)) def add_custom_scalars(self, layout): - """Create special chart by collecting charts tags in 'scalars'. Note that this function can only be called once - for each SummaryWriter() object. Because it only provides metadata to tensorboard, the function can be called - before or after the training loop. + """Create special chart by collecting charts tags in 'scalars'. + + NOTE: This function can only be called once for each SummaryWriter() object. + + Because it only provides metadata to tensorboard, the function can be called before or after the training loop. Args: layout (dict): {categoryName: *charts*}, where *charts* is also a dictionary @@ -1178,7 +1187,9 @@ def add_mesh( global_step=None, walltime=None, ): - """Add meshes or 3D point clouds to TensorBoard. The visualization is based on Three.js, + """Add meshes or 3D point clouds to TensorBoard. 
+ + The visualization is based on Three.js, so it allows users to interact with the rendered object. Besides the basic definitions such as vertices, faces, users can further provide camera parameter, lighting condition, etc. Please check https://threejs.org/docs/index.html#manual/en/introduction/Creating-a-scene for @@ -1235,6 +1246,7 @@ def add_mesh( def flush(self): """Flushes the event file to disk. + Call this method to make sure that all pending events have been written to disk. """
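
For readers skimming the hunks above: the recurring change is the standard Python docstring layout that docstring linters such as pydocstyle check for (the per-file error counts in the table at the top are presumably from such a linter; the PR text does not name the tool): a one-line summary in the imperative mood ending with a period, a blank line, then the details, with the closing quotes on their own line. A minimal sketch with a hypothetical helper, not taken from the patch:

```python
def add_tensor_summary(tag, tensor):
    """Adds a summary for `tensor`.
    The summary is appended to the current event file."""
    # Typical pydocstyle findings for the docstring above:
    #   D401 - first line should be in imperative mood ("Add", not "Adds")
    #   D205 - 1 blank line required between summary line and description
    #   D209 - multi-line docstring closing quotes should be on a separate line


def add_tensor_summary_fixed(tag, tensor):
    """Add a summary for `tensor`.

    The summary is appended to the current event file.
    """
```

Running a checker such as `pydocstyle torch/utils/tensorboard/` before and after a change like this is one way to reproduce per-file counts like those in the table above.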