diff --git a/userbenchmark/dynamo/dynamobench/_dynamo/testing.py b/userbenchmark/dynamo/dynamobench/_dynamo/testing.py
index 96191ea66..02d86d4bf 100644
--- a/userbenchmark/dynamo/dynamobench/_dynamo/testing.py
+++ b/userbenchmark/dynamo/dynamobench/_dynamo/testing.py
@@ -57,8 +57,8 @@ def collect_results(model, prediction, loss, example_inputs):
     #         f"High loss value alert - {loss:.2f}. Can result in unstable gradients."
     #     )

-    grads = dict()
-    params = dict()
+    grads = {}
+    params = {}
     for name, param in model.named_parameters():
         if isinstance(model, eval_frame.OptimizedModule):
             name = remove_optimized_module_prefix(name)
@@ -71,7 +71,7 @@ def collect_results(model, prediction, loss, example_inputs):
         params[name] = param_copy
     results.append(grads)
     results.append(params)
-    buffers = dict()
+    buffers = {}
     for name, buffer in model.named_buffers():
         if isinstance(model, eval_frame.OptimizedModule):
             name = remove_optimized_module_prefix(name)
diff --git a/userbenchmark/dynamo/dynamobench/_dynamo/utils.py b/userbenchmark/dynamo/dynamobench/_dynamo/utils.py
index d9a3e6186..7c813f44b 100644
--- a/userbenchmark/dynamo/dynamobench/_dynamo/utils.py
+++ b/userbenchmark/dynamo/dynamobench/_dynamo/utils.py
@@ -78,7 +78,7 @@
         np.random: tnp.random,
     }
 else:
-    NP_SUPPORTED_MODULES = tuple()
+    NP_SUPPORTED_MODULES = ()

     NP_TO_TNP_MODULE = {}
 from torch._subclasses.fake_tensor import FakeTensor, is_fake, maybe_get_fake_mode
@@ -463,8 +463,8 @@ class ExactWeakKeyDictionary:
     """Similar to weakref.WeakKeyDictionary, but use `is`/`id` rather than `==` to compare equality"""

     def __init__(self):
-        self.values = dict()
-        self.refs = dict()
+        self.values = {}
+        self.refs = {}

     def __getitem__(self, key):
         return self.values[id(key)]
@@ -1144,10 +1144,10 @@ def check_numpy_ndarray_args(args, kwargs):
     )


-dict_keys: Type[KeysView[Any]] = type(dict().keys())
-dict_values: Type[ValuesView[Any]] = type(dict().values())
+dict_keys: Type[KeysView[Any]] = type({}.keys())
+dict_values: Type[ValuesView[Any]] = type({}.values())
 odict_values: Type[ValuesView[Any]] = type(collections.OrderedDict().values())
-tuple_iterator: Type[Iterator[Any]] = type(iter(tuple()))
+tuple_iterator: Type[Iterator[Any]] = type(iter(()))
 tuple_iterator_len = tuple_iterator.__length_hint__  # type: ignore[attr-defined]
 object_new = object.__new__

@@ -1610,7 +1610,7 @@ def disable_cache_limit():
 guard_failures: DefaultDict[Any, List[Any]] = collections.defaultdict(list)

 # Keep a record of graph break reasons for logging
-graph_break_reasons: List["torch._dynamo.output_graph.GraphCompileReason"] = list()
+graph_break_reasons: List["torch._dynamo.output_graph.GraphCompileReason"] = []

 # keep record of compiled code, if we are in "error if recompile"
 # to track code that dynamo has compiled previously
diff --git a/userbenchmark/dynamo/dynamobench/common.py b/userbenchmark/dynamo/dynamobench/common.py
index 65b503de9..ea4fe73f5 100644
--- a/userbenchmark/dynamo/dynamobench/common.py
+++ b/userbenchmark/dynamo/dynamobench/common.py
@@ -1167,7 +1167,7 @@ def try_script(model, example_inputs):


 class AOTInductorModelCache:
-    cache = dict()
+    cache = {}

     @classmethod
     def load(cls, model, example_inputs, device):
diff --git a/userbenchmark/dynamo/dynamobench/huggingface.py b/userbenchmark/dynamo/dynamobench/huggingface.py
index 0994a4637..46492cc2d 100755
--- a/userbenchmark/dynamo/dynamobench/huggingface.py
+++ b/userbenchmark/dynamo/dynamobench/huggingface.py
@@ -86,7 +86,7 @@ def process_hf_reformer_output(out):
 # combination of models supported by HF Fx parser and some manually supplied
 # models. For these models, we already know the largest batch size that can fit
 # on A100 GPUs - 40 GB.
-BATCH_SIZE_KNOWN_MODELS = dict()
+BATCH_SIZE_KNOWN_MODELS = {}


 # Get the list of models and their batch sizes
@@ -619,7 +619,7 @@ def refresh_model_names_and_batch_sizes():
     """
     import transformers.utils.fx as hf_fx

-    family = dict()
+    family = {}
     lm_seen = set()
     family_seen = set()
     for cls_name in hf_fx._SUPPORTED_MODELS:
diff --git a/userbenchmark/dynamo/dynamobench/timm_models.py b/userbenchmark/dynamo/dynamobench/timm_models.py
index 1460ad6b7..650af586d 100755
--- a/userbenchmark/dynamo/dynamobench/timm_models.py
+++ b/userbenchmark/dynamo/dynamobench/timm_models.py
@@ -36,7 +36,7 @@ def pip_install(package):
 from timm.data import resolve_data_config
 from timm.models import create_model

-TIMM_MODELS = dict()
+TIMM_MODELS = {}
 filename = os.path.join(os.path.dirname(__file__), "timm_models_list.txt")

 with open(filename) as fh:
@@ -174,7 +174,7 @@ def get_family_name(name):
     return name.split("_")[0]

 def populate_family(models):
-    family = dict()
+    family = {}
     for model_name in models:
         family_name = get_family_name(model_name)
         if family_name not in family:
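
Note (not part of the patch): every hunk above is a behavior-preserving swap of a constructor call for the equivalent literal; the literal avoids a global name lookup and a function call, and cannot be affected by shadowing of the names dict/tuple/list. A minimal standalone sketch illustrating the equivalence and the cost difference, using only the standard library (nothing here is taken from the patched files):

    import dis
    import timeit

    # The literal forms build the same (empty) objects as the constructor calls.
    assert {} == dict() and () == tuple() and [] == list()

    # dict() compiles to a name load plus a call, while {} is a single BUILD_MAP.
    dis.dis("dict()")
    dis.dis("{}")

    # For empty containers the literal is typically measurably faster.
    print(timeit.timeit("dict()"))
    print(timeit.timeit("{}"))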