From 5b3ae92215512023567885fd00930acaaae880aa Mon Sep 17 00:00:00 2001 From: "Li, Guizi" Date: Fri, 5 Apr 2019 00:35:35 +0800 Subject: [PATCH 01/62] Add lm-1b FP32 inference benchmarking scripts (#254) * add lm-1b * fix per reviewer comments * fix typo * minor fix * remove unused documents and others * minor fix * Fix formatting issue in the tf_model_args.txt * correct the lm-1b unit test * remove step in benchmarks/launch_benchmark.py --- benchmarks/README.md | 1 + benchmarks/common/tensorflow/start.sh | 14 +++ benchmarks/language_modeling/__init__.py | 19 ++++ .../language_modeling/tensorflow/__init__.py | 19 ++++ .../tensorflow/lm-1b/README.md | 92 +++++++++++++++++++ .../tensorflow/lm-1b/__init__.py | 19 ++++ .../tensorflow/lm-1b/inference/__init__.py | 19 ++++ .../lm-1b/inference/fp32/__init__.py | 19 ++++ .../lm-1b/inference/fp32/model_init.py | 76 +++++++++++++++ .../unit/common/tensorflow/tf_model_args.txt | 1 + 10 files changed, 279 insertions(+) create mode 100644 benchmarks/language_modeling/__init__.py create mode 100644 benchmarks/language_modeling/tensorflow/__init__.py create mode 100644 benchmarks/language_modeling/tensorflow/lm-1b/README.md create mode 100644 benchmarks/language_modeling/tensorflow/lm-1b/__init__.py create mode 100644 benchmarks/language_modeling/tensorflow/lm-1b/inference/__init__.py create mode 100644 benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/__init__.py create mode 100644 benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/model_init.py diff --git a/benchmarks/README.md b/benchmarks/README.md index d48642837..e3fda63ef 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -28,6 +28,7 @@ dependencies to be installed: | Image Recognition | TensorFlow | [SqueezeNet](https://arxiv.org/pdf/1602.07360.pdf) | Inference | [FP32](image_recognition/tensorflow/squeezenet/README.md#fp32-inference-instructions) | | Image Segmentation | TensorFlow | [Mask R-CNN](https://arxiv.org/pdf/1703.06870.pdf) | Inference | [FP32](image_segmentation/tensorflow/maskrcnn/README.md#fp32-inference-instructions) | | Image Segmentation | TensorFlow | [UNet](https://arxiv.org/pdf/1505.04597.pdf) | Inference | [FP32](image_segmentation/tensorflow/unet/README.md#fp32-inference-instructions) | +| Language Modeling | TensorFlow | [LM-1B](https://arxiv.org/pdf/1602.02410.pdf) | Inference | [FP32](language_modeling/tensorflow/lm-1b/README.md#fp32-inference-instructions) | | Language Translation | TensorFlow | [GNMT](https://arxiv.org/pdf/1609.08144.pdf) | Inference | [FP32](language_translation/tensorflow/gnmt/README.md#fp32-inference-instructions) | | Language Translation | TensorFlow | [Transformer Language](https://arxiv.org/pdf/1706.03762.pdf)| Inference | [FP32](language_translation/tensorflow/transformer_language/README.md#fp32-inference-instructions) | | Language Translation | TensorFlow | [Transformer_LT_Official ](https://arxiv.org/pdf/1706.03762.pdf)| Inference | [FP32](language_translation/tensorflow/transformer_lt_official/README.md#fp32-inference-instructions) | diff --git a/benchmarks/common/tensorflow/start.sh b/benchmarks/common/tensorflow/start.sh index bc7fd699c..60500ba3e 100755 --- a/benchmarks/common/tensorflow/start.sh +++ b/benchmarks/common/tensorflow/start.sh @@ -397,6 +397,18 @@ function inception_resnet_v2() { fi } +# language modeling lm-1b +function lm-1b() { + if [ ${PRECISION} == "fp32" ]; then + CMD="${CMD} $(add_steps_args)" + + PYTHONPATH=${PYTHONPATH} CMD=${CMD} run_model + else + echo "PRECISION=${PRECISION} is not 
supported for ${MODEL_NAME}"
+    exit 1
+  fi
+}
+
 # Mask R-CNN model
 function maskrcnn() {
   if [ ${PRECISION} == "fp32" ]; then
@@ -803,6 +815,8 @@ elif [ ${MODEL_NAME} == "inceptionv4" ]; then
   inceptionv4
 elif [ ${MODEL_NAME} == "inception_resnet_v2" ]; then
   inception_resnet_v2
+elif [ ${MODEL_NAME} == "lm-1b" ]; then
+  lm-1b
 elif [ ${MODEL_NAME} == "maskrcnn" ]; then
   maskrcnn
 elif [ ${MODEL_NAME} == "mobilenet_v1" ]; then
diff --git a/benchmarks/language_modeling/__init__.py b/benchmarks/language_modeling/__init__.py
new file mode 100644
index 000000000..d9c4123de
--- /dev/null
+++ b/benchmarks/language_modeling/__init__.py
@@ -0,0 +1,19 @@
+#
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: EPL-2.0
+#
diff --git a/benchmarks/language_modeling/tensorflow/__init__.py b/benchmarks/language_modeling/tensorflow/__init__.py
new file mode 100644
index 000000000..d9c4123de
--- /dev/null
+++ b/benchmarks/language_modeling/tensorflow/__init__.py
@@ -0,0 +1,19 @@
+#
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: EPL-2.0
+#
diff --git a/benchmarks/language_modeling/tensorflow/lm-1b/README.md b/benchmarks/language_modeling/tensorflow/lm-1b/README.md
new file mode 100644
index 000000000..525ff352b
--- /dev/null
+++ b/benchmarks/language_modeling/tensorflow/lm-1b/README.md
@@ -0,0 +1,92 @@
+# LM-1B
+
+This document has instructions for how to run the LM-1B benchmark for the
+following modes/platforms:
+* [FP32 inference](#fp32-inference-instructions)
+
+Benchmarking instructions and scripts for model training and inference for
+other platforms are coming later.
+
+## FP32 Inference Instructions
+
+1. Clone [mlperf/inference](https://github.com/mlperf/inference.git) and
+check out the `setInter` branch (note that the clone creates a directory
+named `inference`):
+```
+git clone https://github.com/mlperf/inference.git
+cd inference
+git checkout setInter
+cd ..
+```
+
+To prepare the checkpoint and dataset, run:
+```
+python inference/cloud/language_modeling/benchmark.py
+```
+
+2. Clone this [intelai/models](https://github.com/IntelAI/models)
+repository:
+
+```
+git clone https://github.com/IntelAI/models.git
+```
+
+3. Next, navigate to the `benchmarks` directory in your local clone of
+the [intelai/models](https://github.com/IntelAI/models) repo (from step 2).
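+For example, from the directory where you cloned the repo in step 2:
+
+```
+cd models/benchmarks
+```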
+
+The `launch_benchmark.py` script in the `benchmarks` directory is
+used for starting a benchmarking run in an optimized TensorFlow docker
+container. It has arguments to specify which model, framework, mode,
+precision, and docker image to use, along with the checkpoint directory.
+
+Set the `--model-source-dir` argument to the
+`/inference/cloud/language_modeling` directory of the clone from step 1.
+Before benchmarking, ensure that you have run the script from step 1 to
+prepare the checkpoint files and the dataset.
+
+LM-1B can be run to benchmark either latency or throughput. Use one of
+the following examples, depending on your use case.
+
+For latency (using `--socket-id 0` and `--batch-size 1`):
+
+```
+python launch_benchmark.py \
+    --model-name lm-1b \
+    --precision fp32 \
+    --mode inference \
+    --framework tensorflow \
+    --batch-size 1 \
+    --socket-id 0 \
+    --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \
+    --model-source-dir /inference/cloud/language_modeling
+```
+
+For throughput (using `--socket-id 0` and `--batch-size 1024`):
+
+```
+python launch_benchmark.py \
+    --model-name lm-1b \
+    --precision fp32 \
+    --mode inference \
+    --framework tensorflow \
+    --batch-size 1024 \
+    --socket-id 0 \
+    --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \
+    --model-source-dir /inference/cloud/language_modeling \
+    -- steps=4
+```
+
+Note that the `--verbose` flag can be added to any of the above commands
+to get additional debug output.
+
+4. By default, the log file is saved to the
+`models/benchmarks/common/tensorflow/logs` directory. The user can specify a
+different directory using `--output-dir`.
+
+Example log tail when benchmarking for latency or throughput:
+```
+Running warmup...
+Running benchmark...
+Number samples: 4234
+Longest latency was: 2.9153692722320557 seconds. Average latency was:2.891982913017273
+Perplexity: 40.110043230980665, target is 40.209 .
+Ran inference with batch size 1024
+```
diff --git a/benchmarks/language_modeling/tensorflow/lm-1b/__init__.py b/benchmarks/language_modeling/tensorflow/lm-1b/__init__.py
new file mode 100644
index 000000000..d9c4123de
--- /dev/null
+++ b/benchmarks/language_modeling/tensorflow/lm-1b/__init__.py
@@ -0,0 +1,19 @@
+#
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: EPL-2.0
+#
diff --git a/benchmarks/language_modeling/tensorflow/lm-1b/inference/__init__.py b/benchmarks/language_modeling/tensorflow/lm-1b/inference/__init__.py
new file mode 100644
index 000000000..d9c4123de
--- /dev/null
+++ b/benchmarks/language_modeling/tensorflow/lm-1b/inference/__init__.py
@@ -0,0 +1,19 @@
+#
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/__init__.py b/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/model_init.py b/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/model_init.py new file mode 100644 index 000000000..77d903020 --- /dev/null +++ b/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/model_init.py @@ -0,0 +1,76 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: EPL-2.0 +# + +import os +from argparse import ArgumentParser + +from common.base_model_init import BaseModelInitializer +from common.base_model_init import set_env_var + + +class ModelInitializer(BaseModelInitializer): + """Model initializer for LM-1B FP32 inference""" + + def __init__(self, args, custom_args, platform_util=None): + super(ModelInitializer, self).__init__(args, custom_args, platform_util) + + self.cmd = self.get_numactl_command(self.args.socket_id) + + self.set_num_inter_intra_threads() + + # Set the KMP env vars + self.set_kmp_vars(kmp_blocktime="0", kmp_affinity="granularity=fine,compact,1,0") + + set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) + + if self.args.socket_id != -1: + if self.args.num_cores != -1: + self.cmd += "--physcpubind=0-" + \ + (str(self.args.num_cores - 1)) + " " + self.cmd += self.python_exe + " " + + run_script = os.path.join(self.args.model_source_dir, + "benchmark.py") + + # Model args + arg_parser = ArgumentParser(description='process custom_args') + + arg_parser.add_argument('-S', '--steps', help='Number of steps', + dest="steps", + default="100") + self.args = arg_parser.parse_args(self.custom_args, + namespace=self.args) + + # Model parameter control + cmd_args = " -b=" + str(self.args.batch_size) + \ + " -I=" + str(self.args.steps) + \ + " --inter=" + \ + str(self.args.num_inter_threads) + \ + " --intra=" + \ + str(self.args.num_intra_threads) + + self.cmd = self.cmd + run_script + cmd_args + + def run(self): + original_dir = os.getcwd() + os.chdir(self.args.model_source_dir) + self.run_command(self.cmd) + + os.chdir(original_dir) diff --git a/tests/unit/common/tensorflow/tf_model_args.txt b/tests/unit/common/tensorflow/tf_model_args.txt index 6381db35a..79d76806f 100755 --- a/tests/unit/common/tensorflow/tf_model_args.txt +++ b/tests/unit/common/tensorflow/tf_model_args.txt @@ -53,6 +53,7 @@ run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model- run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_lt_official --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --file=/dataset/newstest2014.en --reference=/dataset/newstest2014.de --vocab_file=/dataset/vocab.txt --in_graph=/in_graph/fp32_graphdef.pb,numactl --cpunodebind=0 --membind=0 python /workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=1 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_lt_official --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=64 --socket-id=0 --benchmark-only --verbose --file=/dataset/newstest2014.en --reference=/dataset/newstest2014.de --vocab_file=/dataset/vocab.txt --in_graph=/in_graph/fp32_graphdef.pb,numactl --cpunodebind=0 --membind=0 python /workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=64 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt run_tf_benchmark.py 
--framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 -l python /workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_dir /dataset --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 1 +python common/tensorflow/run_tf_benchmark.py --framework=tensorflow --use-case=language_modeling --model-name=lm-1b --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models,numactl --cpunodebind=0 --membind=0 python /workspace/models/benchmark.py -b=1 -I=100 --inter=1 --intra=28 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 -l python /workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_dir /dataset --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 100 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --checkpoint=/checkpoints --in-graph=/in_graph/mobilenet_v1_1.0_224_frozen.pb --data-location=/dataset,python /workspace/intelai_models/inference/fp32/accuracy.py --batch_size=100 --num_inter_threads=2 --input_graph=/in_graph/mobilenet_v1_1.0_224_frozen.pb --num_intra_threads=56 --data_location=/dataset run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=256 --socket-id 0 --accuracy-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=256 --inference_only --accuracy_only From d8f901449e287fa02061c2d289121b14fd038023 Mon Sep 17 00:00:00 2001 From: Dina Suehiro Jones Date: Thu, 4 Apr 2019 10:49:07 -0700 Subject: [PATCH 02/62] Allow overwriting the KMP_* env vars in SSD-MobileNet Int8 script (#267) --- .../ssd-mobilenet/inference/int8/run_frozen_graph_ssdmob.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/models/object_detection/tensorflow/ssd-mobilenet/inference/int8/run_frozen_graph_ssdmob.py b/models/object_detection/tensorflow/ssd-mobilenet/inference/int8/run_frozen_graph_ssdmob.py index 
89b890ab1..90a1d1fd0 100644
--- a/models/object_detection/tensorflow/ssd-mobilenet/inference/int8/run_frozen_graph_ssdmob.py
+++ b/models/object_detection/tensorflow/ssd-mobilenet/inference/int8/run_frozen_graph_ssdmob.py
@@ -36,10 +36,6 @@ import argparse
 from tensorflow.python.client import timeline
 
-os.environ["KMP_BLOCKTIME"] = "0"
-os.environ["KMP_SETTINGS"] = "1"
-os.environ["KMP_AFFINITY"] = "granularity=fine,verbose,compact,1,0"
-
 parser = argparse.ArgumentParser()
 parser.add_argument('-g', '--graph', help='Path to input graph to run', type=str, required=True)
 parser.add_argument('-d', '--dataset', help='Full Path to input dataset to run', type=str, required=True)

From 8802bc680393686bbf1372ce0ec65c58e759c412 Mon Sep 17 00:00:00 2001
From: Dina Suehiro Jones
Date: Thu, 4 Apr 2019 15:00:11 -0700
Subject: [PATCH 03/62] Add Contribute.md doc with instructions on adding a new
 model (#266)

Also, updated the launch script documentation with instructions on using
the `--debug` flag.
---
 Contribute.md                              | 176 +++++++++++++++++++++
 README.md                                  |   5 +-
 add_model_init.png                         | Bin 0 -> 14105 bytes
 add_readme.png                             | Bin 0 -> 15909 bytes
 benchmarks_directory_structure.png         | Bin 0 -> 11588 bytes
 docs/general/tensorflow/LaunchBenchmark.md |  95 +++++++++++
 models_directory_structure.png             | Bin 0 -> 11038 bytes
 7 files changed, 275 insertions(+), 1 deletion(-)
 create mode 100644 Contribute.md
 create mode 100644 add_model_init.png
 create mode 100644 add_readme.png
 create mode 100644 benchmarks_directory_structure.png
 create mode 100644 models_directory_structure.png

diff --git a/Contribute.md b/Contribute.md
new file mode 100644
index 000000000..84e1b2f64
--- /dev/null
+++ b/Contribute.md
@@ -0,0 +1,176 @@
+# Contributing to the Model Zoo for Intel® Architecture
+
+## Adding benchmarking scripts for a new TensorFlow model
+
+### Code updates
+
+In order to add a new model to the zoo, there are a few things that are
+required:
+
+1. Set up the directory structure to allow the
+   [launch script](/docs/general/tensorflow/LaunchBenchmark.md) to find
+   your model. This involves creating folders for:
+   `/benchmarks/<use case>/<framework>/<model name>/<mode>/<precision>`.
+   Note that you will need to add `__init__.py` files in each new
+   directory that you add, in order for Python to find the code.
+
+   ![Benchmarks Directory Structure](benchmarks_directory_structure.png)
+
+2. Next, in the leaf folder that was created in the previous step, you
+   will need to create a `model_init.py` file:
+
+   ![Add model init](add_model_init.png)
+
+   This file is used to initialize the best known configuration for the
+   model, and then start executing inference or training. When the
+   [launch script](/docs/general/tensorflow/LaunchBenchmark.md) is run,
+   it will look for the appropriate `model_init.py` file to use
+   according to the model name, framework, mode, and precision that are
+   specified by the user.
+
+   The contents of the `model_init.py` file will vary by framework. For
+   TensorFlow models, we typically use the
+   [base model init class](/benchmarks/common/base_model_init.py) that
+   includes functions for doing common tasks such as setting up the best
+   known environment variables (like `KMP_BLOCKTIME`, `KMP_SETTINGS`,
+   `KMP_AFFINITY`, and `OMP_NUM_THREADS`), num intra threads, and num
+   inter threads. The `model_init.py` file also sets up the string that
+   will ultimately be used to run inference or model training, which
+   normally includes the use of `numactl` and sending all of the
+   appropriate arguments to the model's script.
+   Also, if your model requires any non-standard arguments (arguments
+   that are not part of the
+   [launch script flags](/docs/general/tensorflow/LaunchBenchmark.md#launch_benchmarkpy-flags)),
+   the `model_init.py` file is where you would define and parse those
+   args (a minimal `model_init.py` sketch is shown at the end of this
+   section).
+
+3. [start.sh](/benchmarks/common/tensorflow/start.sh) is a shell script
+   that is called by the `launch_benchmark.py` script in the docker
+   container. This script installs dependencies that are required by
+   the model, sets up the `PYTHONPATH` environment variable, and then
+   calls the [run_tf_benchmark.py](/benchmarks/common/tensorflow/run_tf_benchmark.py)
+   script with the appropriate args. That run script will end up calling
+   the `model_init.py` file that you have defined in the previous step.
+
+   To add support for a new model in the `start.sh` script, you will
+   need to add a function with the same name as your model. Note that
+   this function name should match the `<model name>` folder from the
+   first step where you set up the directories for your model. In this
+   function, add commands to install any third-party dependencies within
+   an `if [ ${NOINSTALL} != "True" ]; then` conditional block. The
+   purpose of the `NOINSTALL` flag is to be able to skip the installs
+   for quicker iteration when running on bare metal or debugging. If
+   your model requires the `PYTHONPATH` environment variable to be set
+   up to find model code or dependencies, that should be done in the
+   model's function. Next, set up the command that will be run. The
+   standard launch script args are already added to the `CMD` variable,
+   so your model function will only need to add on more args if you have
+   model-specific args defined in your `model_init.py`. Lastly, call the
+   `run_model` function with the `PYTHONPATH` and the `CMD` string.
+
+   Below is a sample template of a `start.sh` model function that
+   installs dependencies from a `requirements.txt` file, sets up the
+   `PYTHONPATH` to find model source files, adds on a custom steps flag
+   to the run command, and then runs the model:
+   ```bash
+   function <model_name>() {
+     if [ ${PRECISION} == "fp32" ]; then
+       if [ ${NOINSTALL} != "True" ]; then
+         pip install -r ${MOUNT_EXTERNAL_MODELS_SOURCE}/requirements.txt
+       fi
+
+       export PYTHONPATH=${PYTHONPATH}:${MOUNT_EXTERNAL_MODELS_SOURCE}
+       CMD="${CMD} $(add_steps_args)"
+       PYTHONPATH=${PYTHONPATH} CMD=${CMD} run_model
+     else
+       echo "PRECISION=${PRECISION} is not supported for ${MODEL_NAME}"
+       exit 1
+     fi
+   }
+   ```
+
+Optional step:
+* If there is CPU-optimized model code that has not been upstreamed to
+  the original repository, then it can be added to the
+  [models](/models) directory in the zoo repo. As with the first step
+  in the previous section, the directory structure should be set up like:
+  `/models/<use case>/<framework>/<model name>/<mode>/<precision>`:
+
+  ![Models Directory Structure](models_directory_structure.png)
+
+  If there are model files that can be shared by multiple modes or
+  precisions, they can be placed in the higher-level directory. For
+  example, if a file could be shared by both `FP32` and `Int8`
+  precisions, then it could be placed in the directory at:
+  `/models/<use case>/<framework>/<model name>/<mode>` (omitting the
+  `<precision>` directory). Note that if this is being done, you need to
+  ensure that the license that is associated with the original model
+  repository is compatible with the license of the model zoo.
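+
+To make step 2 above more concrete, here is a minimal sketch of what a
+`model_init.py` for FP32 inference could look like. It is only a sketch:
+it uses the helper methods from the
+[base model init class](/benchmarks/common/base_model_init.py) described
+above, and the run script name `run_inference.py`, its flags, and the
+KMP values are placeholders that you would replace with your model's own:
+
+```python
+import os
+
+from common.base_model_init import BaseModelInitializer
+from common.base_model_init import set_env_var
+
+
+class ModelInitializer(BaseModelInitializer):
+    """Sketch of a model initializer for FP32 inference"""
+
+    def __init__(self, args, custom_args, platform_util=None):
+        super(ModelInitializer, self).__init__(args, custom_args,
+                                               platform_util)
+
+        # Set the best known environment configuration for the model
+        # (the exact KMP values will vary from model to model)
+        self.set_kmp_vars(kmp_blocktime="1",
+                          kmp_affinity="granularity=fine,compact,1,0")
+        self.set_num_inter_intra_threads()
+        set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads)
+
+        # Build the command string: optional numactl prefix, the python
+        # executable, the model's run script, and the script's args
+        # ("run_inference.py" and its flags are placeholders)
+        run_script = os.path.join(self.args.model_source_dir,
+                                  "run_inference.py")
+        self.cmd = self.get_numactl_command(self.args.socket_id) + \
+            self.python_exe + " " + run_script + \
+            " --batch_size=" + str(self.args.batch_size) + \
+            " --inter=" + str(self.args.num_inter_threads) + \
+            " --intra=" + str(self.args.num_intra_threads)
+
+    def run(self):
+        self.run_command(self.cmd)
+```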
+
+### Debugging
+
+There are a couple of options for debugging and quicker iteration when
+developing new scripts:
+* Use the `--debug` flag in the `launch_benchmark.py` script, which will
+  give you a shell into the docker container. See the
+  [debugging section](/docs/general/tensorflow/LaunchBenchmark.md#debugging)
+  of the launch script documentation for more information on using this
+  flag.
+* Run the launch script on bare metal (without a docker container). The
+  launch script documentation also has a
+  [section](/docs/general/tensorflow/LaunchBenchmark.md#alpha-feature-running-on-bare-metal)
+  with instructions on how to do this. Note that when running without
+  docker, you are responsible for installing all dependencies on your
+  system before running the launch script. If you are using this option
+  during development, be sure to also test _with_ a docker container to
+  ensure that the `start.sh` script dependency installation is working
+  properly for your model.
+
+### Documentation updates
+
+1. Create a `README.md` file in the
+   `/benchmarks/<use case>/<framework>/<model name>` directory:
+
+   ![Add README file](add_readme.png)
+
+   This README file should describe all of the steps necessary to run
+   the model, including downloading and preprocessing the dataset,
+   downloading the pretrained model, cloning repositories, and running
+   the benchmarking script with the appropriate arguments. Most models
+   have best known settings for throughput and latency performance
+   testing as well as testing accuracy. The README file should specify
+   how to set these configs using the `launch_benchmark.py` script.
+
+2. Update the table in the [benchmarks README](/benchmarks/README.md)
+   with a link to the model that you are adding. Note that the models
+   in this table are ordered alphabetically by use case, framework, and
+   model name. The model name should link to the original paper for the
+   model. The benchmarking instructions column should link to the README
+   file that you created in the previous step.
+
+### Testing
+
+1. After you've completed the above steps, run the model according to
+   the instructions in the README file for the new model. Ensure that
+   the performance and accuracy metrics are on par with what you would
+   expect.
+
+2. Add unit tests to cover the new model.
+   * For TensorFlow models, there is a
+     [parameterized test](/tests/unit/common/tensorflow/test_run_tf_benchmarks.py#L80)
+     that checks the flow running from `run_tf_benchmark.py` to the
+     inference command that is executed by the `model_init.py` file. The
+     test ensures that the inference command has all of the expected
+     arguments.
+
+     To add a new parameterized instance of the test for your
+     new model, update the [tf_model_args.txt](/tests/unit/common/tensorflow/tf_model_args.txt)
+     file. This file has comma-separated values where each row has two
+     items: (1) the `run_tf_benchmark.py` command with the appropriate
+     flags to run the model and (2) the expected inference or training
+     command that should get run by the `model_init.py` file (see the
+     example row after this list).
+   * If any launch script or base class files were changed, then
+     additional unit tests should be added.
+   * Unit tests and style checks are run when you post a GitHub PR, and
+     the tests must be passing before the PR is merged.
+   * For information on how to run the unit tests and style checks
+     locally, see the [tests documentation](/tests/README.md).
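+
+As a shortened, hypothetical illustration of the `tf_model_args.txt`
+format (modeled on the lm-1b row added earlier in this patch series),
+the `run_tf_benchmark.py` command comes before the comma and the
+expected final command comes after it; the `<...>` pieces are
+placeholders:
+```
+run_tf_benchmark.py --framework=tensorflow --use-case=<use case> --model-name=<model name> --precision=fp32 --mode=inference --batch-size=1 --socket-id=0 --benchmark-only,numactl --cpunodebind=0 --membind=0 python <run script> --batch_size=1
+```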
diff --git a/README.md b/README.md
index d369ba9e4..e409c28b7 100644
--- a/README.md
+++ b/README.md
@@ -29,4 +29,7 @@ We hope this structure is intuitive and helps you find what you are looking for;
 
 ![Repo Structure](repo_structure.png)
 
-*Note: For model quantization and optimization tools, see [https://github.com/IntelAI/tools](https://github.com/IntelAI/tools)*.
\ No newline at end of file
+*Note: For model quantization and optimization tools, see [https://github.com/IntelAI/tools](https://github.com/IntelAI/tools)*.
+
+## How to Contribute
+If you would like to add a new benchmarking script, please use [this guide](/Contribute.md).
\ No newline at end of file
diff --git a/add_model_init.png b/add_model_init.png
new file mode 100644
index 0000000000000000000000000000000000000000..6bacd1bb67252070894e20ef9dd53c7e9432d1df
GIT binary patch
literal 14105
[... base85-encoded binary image data omitted ...]
ztM~T1*@k>p-hllWB(3a5ROM}*Gd29>LC?hJ!L&U@W2WcZy{KH9uNhpog( z1Y7Wf3@;(D7E+|AugXkfHdjCSiG&z?&lO~PM9=vyjdy;;mYE7OSFDavRdsP2jP!sk zz4!I?eLS_fId!|~xDOtFBPBGOj2jbrn3nn5=9``+J=c=;DRHzbL;Tvn(DQ4wv?R>?o+;?n=j|6h0hd&hs1H2pV8VTpg2{I@&*At^2JSAl<1=wH_QXB04(ES|K$ zf0J@XvSG?Uf#9J2Q*tyZapi``6MU zL18X6zdd$O_x##}suq9v%(JjEUxo@+EOftN!T;ZM_XUYBn4J)7Yq+pYQ%`$lVn#hF(9sm-g!u z0tFd=_vk&HZQ;!W?-S_q(Cfy46M5^`tQIgBl=%gMNOLJU?522eXTR+?f9F(jz+vA@ zmhpxsI&J3*L0kJ_cT#SP9rNaMd-ET+CXB7uT-;`xOSZ?2r`5%~KjMogp0*Yp!C9qRS)U(ui7r>QEUX;2EQaR%Y;sX8 zR!J;MyeB?7@!mK9$!pda+xC1{L*UZ^B;?QK#)5qDXr6>mwBUuHXPI`c20j)4iI`7U zI3}H}4H|T{_i^88tv@~Z;acKi*!Aq<=3y^m*_3ry?)MA8hQ|fU?al7<8h6)=cTPLg zb%yPM&x{n(d5lZ_7DHbD$dmb9#gYcy#)&FQfArp{E;VqwqO9C`tTZhzPdfsOut@UR z_V8QvSDR6Pn7ei{UU*$cHpg-bIN z8WbMyr1n}PhJ*-P{STS*?IjDKI?h!~&J}MTe@k6xwneOn=e^U|o_JrTeR&Zhchhw( z#m3fSo`(k&OL_a2RYOZ_5M@wj*JF}p7YO!Yq*PH+!Aj>d&!G6;5ggEDQBygm&Zca}Rfyi$^K#`XQh zusazY+`&~j_qhjwt1ngk+c4g%;#MY_HaON<7n(IajpRx-a(?gcO%5G|+mu5OC<_## z@ZNOjmuN&T9#Rf|U-pfVAQJfq%umb6P-%KP!Tdl}?)JohVbGKMvr_aI{*>`*%bbFP z6w0wk!h2%6`wQ&>k|dmNoKv>_ir)q^g}xXbk+NwFCkS-(QGU%7vghRCNhlNaBvlwm z=eNwh1)*{U||J+_5&q4-ompy})P}tMo%5-8DxpV)_aaPAihyvg0 zE#N;H6a;i(wFqwiw901wjw3Xp*AV*;{sX=dfdh#b3^Lgi)Rp<-i@~obifW{RAa)Rg z$a8Xse~$0c?Ov1wXZHLIXcN0DR!t^d-^IkI9LLI$lzYBIO{_imyO^!><_KS>VVs41A zn#5@MKv6a%Fg6&k9jP_UlNwS4rJF1qrIUx7K$UQz37OqgP`YR&CphL=xu_}(hZ1$u z#UA@4xVT3(y+6G~a)PI~!A!pyZsP)V^}vl(Fn0XS@TwmlLZSkHmw_BkS8i=#m6C_! z;V4az)(Tr@QEIxfVT}>@8P-bRrahicc?%V_{neXnESI%DK-Kg+sFO&T;eEJ2F1!FHLfVu#&Dc)WN))V@iwf1^`BVPAR&$h!o(4im;l>p(l3-1+CqWg(joGuCZtZQ%8kk0d*ErN*f4VFy;gviDQ;`M?PJR!0trDBN-(yu*7 zS`)ts^e_6FQ@s;GENk<$dihI?h>S#DBzzl$QvGuy4hNIfl6n$y^23>L_hwDp1}UgeI<3#tFu{Z_Oq{RzF^r*^xsjgrA(v~fPk_`} zQF>8wju*W>NYIo;LQhjl((%_W$@}+_4D|7P#mTaEJfQxe7i_DT2yMC;gkh(0xiY@t zg3X_AHw? zVyB6&T>rf}i9beQI{yL=3{9a@eol_@qeCIePZP+Mgi}9yLMa4<4~4ACjCx^2kS#yL zXqnY{g4+CeV@V9CMb%^qm#kMy-zUcSupSI`ErGsSq#sp~ho9kecXPn59o6H)k#HMI zupY$mWvYKTQV}>vOT$U&j<^s<;L?VYD~oY`&j&F0gb|_u>AP^W@Owco#|4A0bB?5%~Xk~cAal~ zcB5>|oAs5Qt*jrwQ8?1L%x4N{qYR|wR{z=VZ0|?Vq3JUcdGu49X$@RtHG$3}>!Rk> zt2T!Bp2O*UruC8A;}wec#9Wu$#%;I(v_M5jEr>)Aj25dBKb@?k>3wh7z)nab9NBVV z=OWyE{rRcW&qgkptIgubE$B*%^H{Xs{cwg#pN$`pgu3~mS6m)qPiN}cP@>TA`D7q7 zIIOeQN}(PAsgR!z3#%mb_R34z6BjNZ$R>{#KqhK);y>$Q=h~_4-dND1a&_m{D~fE9 zy}I&Y7r(0luuzM_-Q`f&dgGG2(|9?F%*n9gCwa|##y@@~Agj`lMLb`Kq}pw;;nlqc z2;*q_NbBH3%ws}8OY^is5E=>%1F6X)XzKy=wD38BKAM8Zh*g;j%N@g&BKiolZwo+> zPiar6k<)me{PwL%mA~wL1whT%w^tln>Bd&JlpH%+-FqrF;CPW8S*V8sfc{J_wQ|G~ zuTHiUJMaETHQ;vz@6R-F9J3+4Ha~3Su?$z(Qn5ZD#v*3W%F4=$^qp3Zqkk12PH3L; z=(edf!Vv=JcV(zQdRDTf49~v%? 
z-!=9@Ie{`bDXfFDhugS%&}+%}3p&Q$YxJ#pr4t$(bh*i~ci zEvs<8f$2X4Nf6hPKLA+6HP2s0D=}pL;^AN?$7rP}fTnH!PWCi`=~4@H-Ut&vr!X%6 zPEI;0^-~-7K~yzCOZb`kX0pE4yjc7^js|8h6s>@3l88X9C$CfjP-IXYqzUPV7^xI0 zL}drT9ThA}8g;RF2?#vX^fIg(L>v#VrG>FRe+FCO!iDm-3DdcG?g zYUIEPz3H+BpV^+S#uV3EwFip2O!7LahBuJXz5JddPHaMP!DkhGsN8J$^#zThsj2B3 zmGbWp1*#UDWJ?s7@*EG8$t|8A2BGx38n zbP_cFXv!)m(d+9hI9BiW^7HpFH2B59!TlvZOlYIilDCr8rWDC<_|j8HrecZ+$2NAu zHerzf>>_fJS26Kq1U*?A#d6_S1V0q-e;>Afy?CaIp3p%5ndjhxK|iaao5xTGIs6hh zkZ=$T#s)t}!I!*&bl!S{m9mZxr1(_^7}O#O$m*{bL8HO)e=9kRzm*&izK-r8vITIa zz4wuiK7MyP)Dzw8FPi!DFh}grIJ3(GrWUEq3sX32<-6HsOKANBZ*SOiNGMY*gEX$0exW35AK;wi?uZdvdh;Ez%j(Od2t% z#nVyV-{^I+5l=+O%85_OYeNLtK|mW)SCmXTf+dt+X%Idm)es38qP~0HL|V&YnN@Tq ztau-59sYMMwgONJM=+>2nX!z^pwxe#TsQf$h}+*27OSs!Pv|cQ9xer^-JYv+cO>>u&YCoSUT-CN17u+2s*6V81lS?$H4I!{2vu< zh`7xfortufxgWjr_n=i~!*U0)`S<5g98hx0%b*>hahU1Z+{T>1k?TkXWRYLvd>x#O z;n{)*pghTE@m&2BBfW)+F)3-vUO33J%D~fY?gu;emF45QdWE9+bCyL=X=<*v`Uq@d z;X?M%UIX-6jNEW~1RlW)X#h$++4kIhC8t_6)tI^NoG`Kc3*5to&^if`vU4| z8JF>nz*QBfR*K`uI7UOx=X(h>xUI@nl}p5){CJa6k7FpOo4tIE4OCWe-CQ1JDt%ib zZPwpV_vh7?oBa^Iph5|=e-w>PNRB{0r8_<+mb*#mWQ+AwgTJf4SLb~v{;v2PnO$$Z z#zW5U-sg#Wv4Z$Cg`{?wBk}Sgrpm9XwbB`5*$EisMuaim`)*U?z7tM=Eoqjh@o-|m zO;r!#{@csB240|uD8-OOtd_x-#QyqjRc}dqIFblRrvLwgk7sJ5g65nKU%uMv=bfWS zs?Iini2b(W7)zh29Tc!ByK;nLD*-4B zHYz9wrZ8A$4WlsIz7+XbrXeD|DB9|K$hrTHDPK5>X615%Um>y**?k?BN78ZWGiRju zybqOdm54Mwv=xk-X9rfuc>hig@F>pAjU|nRi~~#3{v*EZDEhLbl1vb+z#&FkML zT!?|crj0L`D2N}GY&-5PXeQ)TD%dibp4FwYvI@{I2zM=3C734#M3~<5k)NhLCTb1< z^?iwPkpNSp037P~ZRWH*IR9g65!7O0e$+%BE)IkNZ+m8@aO8U+L@d4A%lL{1`9PB5 z@a2P$muu|E3d_flbZ4mzfIK+?7=RCjN0c>-xz8*AfQj!Qz|y_Og^mq#imbDn~hx@_k%!MF|@f?BUNUX{uSh%(M*ILScvg;d+Mwr_YhNCjfaA5qfi| zT8%H;l2qS#sLQx2{jM}C^BRR?T`Dc8A;f?&TAJ5kD0{?K)idy7L_^{KHs9;&f+Tb7X9}|RTnK|PQyyLnU(Ae%{%x@nDmxC8OnPk@0$JPQ%sML@22iwd}>bq zFkAHSYZq?6La5)ISB&^4fO?R2&^twAnviP17&DTL-Y7+|kAdBW%ueWZ&;TRi`zTL3 zpl|Vat%>L|P>vNm$qR_yILW*l%D&Gt1s@rW-(Bp?kEasQ2Iw!bN-Kbic|do32|&&R zMqZ2yoD`xl+zX;K^hu1ui01Ffst^1CBrbV6@ve?oUe5nf=+6`XD4de?H*YG3MmIc$ z3vBT)5}2$zf=>Vp7sQ91AIBRg&kIg?{-W9J`CFcdFI9#W`mWhSSt2nH??y{B|AN-0 zhrf;iZiS>{29#lUfwN?c)LHJh-YW(;F|ssxH4|MJb#UfW>9cA3g-^|{GYTxKNpBfs z0uKxr94uQsQFnM0+^dP$G0eQUoYg$W)w+(3?AKJfNv2-3gxQ?n1{uZ;{{o(fCzdXh zy&p6pQImD{s!zvC39BqVq;C9qr=2W)ghSll9(1k`5RT;@iygtZ8857C+`phMfJ(To z9&JFg#Ta(u6*@5785S0PiYnvB5?bGG#fmC`UM%rpEU1->6M1x8vX_*U9L|C%h#i1Z z1f9dk8v*zU;MBPf;`MG}rGznTlVv)LKg`!$Hi|3BYJK)GP0vj#$u{HkM#8hlc zCk#nm*Nqw=_Kw)ozxCU#Z2cob0jp1a_<@2$F*=CuDmc3GQVCl`l>5~M1pkPiyJ_>=r*?R2QcMM?PR1ljz z^>rx?Fc=z%ZCY_gJd>u)C4Zl%OwUe(n}X}57KOnpu8P{b`*)7zsfk~nw>b?beiK(-ggW}De5{Do)OU4&d`gn z2TygGJF$`EBhv%FfS;)!z+BUgOu#WeEQ;`Y8qNA^73mAo&@XYo z_eKETqkqHbibDgf(QbR(md2B#;Zc1VCp(+2&O?-2n44vqwpWLfw)!oc)c-Z-FG7md zSeC(Hwm|MCAU+E_bE_F6g0Pxd-^X1*D<@gRs)>Wa{n!N_Dit>zn|q&KJfPNp)Wam$ zJG+>n>oyOtyPW9y#W4i)1@wxb4RjS6*G2kYo|$kRsJ@_=%GeriG>Xt*+%GnP9PWW* zGQ#{u&8|Kj6^oYjCw3k>Tb3&nV%WAAX&#rz7}K~^piidmi8`SdIgu?4j8eGDlhu}* zvn?J&FAeo7Q0FiY0`iHrcxd4L;WB{Tz2)xHSFMBcPSBrK4=N8?d@@{yN^Tuu8=esO zV@P)0BjojzVwB&Hs=?aAWMn~+=?GjNZ#)r@HiB@E+^3NotPYPI7|6EXZ zH9C(O8tx0}&HxzpTv+bCk}m)JN56CR5&#THtgAGx7=qkjYz2Y(NwzGWOP*Nm;{|wb zvM-hI*92N&Ivyhz`mQOHjp@RA>a#))vvmY;MDV8MW=^*TZ@!X))}_f)^k`81Vk@#r zANr)qmusuM#~4`5(3p6&KIaK}*UUP3MDWKkuO{hJ zrGjgisA5UI9(p8T-()C!s#hXMF_dzZ6mq94x&fC!=hy>7q< zJdN`J^}3oq6LVx${}n}q%3(yDOPXwkZ4uNit{G#Xz8(Ua11j84K|=&51Fr=(F8-|8 z36c2Y4aSP`Qxb$6_E0&d_DCmPK<$V=G~h)NgTmZX0917FB*;*Pee|Y z=>ElZMdMs}Ln!$yVl~Lsi>#~H%ic3!<&j>7ife_f57I#JhrSTYY4I3UF5SPi!Gv7) zevZQ50)9UomWEuPyh-bnfJ+tTU+IsF~bj7jn) zW{~a^wa*dvU>{B>oC6wZ8?nEBV^_w@M_`Q{56KPX%c{25(KrLp!}n~*^mDTl;V@W4dAOl>A1w*^I2z4uH7@I)04p 
zC!?ARzGUoL1wbC%m>$_|r|yp9M8VOiKt`gc!-+QRX9tQ`8}0IIG75eas9zOVd5S4cUc)(AGzr;Sn1rQt|aZ zpUIj4dQKV?PtK)4t#&@=Lrk0v;HP7Eg!Gy`eKZP$k07M%nS1r}W!G`o&SGzpjPx>N z;3ks?lRqAd%ybA^$fU+f<_C6!fMFL%)sn$~30w{IWrBPh9s%9BX4e8iGzUmq$vV7? zSQ+KkTN?{JRMpxHn+cniB5w_Mfbu+sSemvnXCPW}Gy(-6QKUx(C_ApkUvtkSeIhJh z^!qzuCC{@`aRyR-Ps4OTjWIAo}a{pNV3^ae*4?JVs(FPOCxXGLIZv8 z+YoLV*r{8H6X;EU6*}Z5(m4g{f;xp^>zJ8J=oS+OHqmN8GhWLm$>}5C)l6D>n$M|7 z1PO9x6QI>?@Z}$(K38>eqC82`POPAK{%Uf;Tp+%7lh3EW7DT(=EPadZ>hwRfbHoYP zu{qtI*hx{8ovu&d>Ml}FxR5jn^|Ztt{SuG-Lc%T{jP0{NXuHAmCSin}6TUNsnt+sG zhNlG_uh%(@y;Udk81YPE=LqT;S)#eI;frn%Vq=S{`Ld1n1$T4W@Qj)fE)Xa@AjwDI z6sV`7aWYG&43wC*ozg_AmxFt31opYTq0!uwRWq99(_P%Q?M-gCh{k77+gmB z8tBlGNzJMx()MU85b%nmihG=S>YYAP4GDC!u!tyQ)c6a0@ZbYa54VYfP)Zlt` zE84BscgdzOD|%`W%hAzMA7CTZvM#QC4{m;)W(mj#YB3#9uvEe6emrQm(RE2r{L_}T z=x=}&C*OX?zCa1LHo^4AAFVLp_*^z>Q?=w*p1d)YbB>qttOAv=L=Uu zv$lCawR0tOhBA26gQSzX@Gu(|3ChW-jwab|wsOO_0|k2#`LN?MkZw}viNBqyqTqBo z1o0;j^KHM9Gz+F+z|rHp1z#YC+2Mg4;@JvPUfZkTIPrngVC{&{AA`5nE>n!V{`Y6Vd!u zk|^b~uC=gVbM(^Q7Y5|Ik**1&4`ve_NVdj{g+-uY=guR&YcvB81KsZF8CTIsAgmh{ z3ys8|6!Wb#Qo{ysTDH=+`Y%bp8467_ysGlF+r*sLimPfjS&u(GFkzpqG8b>gL;p;0 zt21FCc4XYi9{ZXrWREq5v~3s!7=rTWyFWu;Uu9QtKBp)O4n7M8>H$%^ZXCzsE+fle zA%B3y|D>q%am%SMg$equM$LU%$Y9Fk>&q65I;Ghn2R2Gxwf{!5iO-JJaW6L~qA|-S zt2#_Q2Su^Rkf*<=jwh`uzx7IB1q10g%N@TT&c~N%>f7kmMFiymHCjxIC-1f_0}W7J zS+PCeo#~e}@dpz&uHP}O;pBvsM4WiTiMHa*)rzJPrrjJ%s|)u>#16n2{{?AfL=obc z{Wcrlcuz%&R6Vg*oD(bq3N}7xn6=vw%H&Pk;C&Xe*!_AhWX>kc{-ak65q&0kBT!d< z@?a#8h3`5|R%T7b;YNY4_=jU8OqFmSeDK3)ta8$~2{>bB`rLOQoH3ctla`}s(Zx+6U{xq(3 z6~;e4p2M)%8HZm+ zFb8jqmXQ29G*~k|E znw>1M3X_(6+F-t1{`v2S6zGZFg%NUv5t$AN>jsLufVQG0TEoCHdVrrYOOBpqXz;b^ za;zy?9L;+l3@iOSp=XIPK{-i?4yz41cc-8m_uBp0WR>ln4r9Lqc&-@%q&U_Au@mM^ zNRF7B-A@mvBGe(Q+@AJC#Ey%=8x(8tzJG24p08{P zATJq$4_M^tK1MyU$cBr9g3QSh#g+ax+yD$1AN5^hzbwdf=ax?5jN1(U4+dG)@b+i9EhG9lG_;TR$;3KEI=${NdC^%=|Tn*r6AyV$WJOMScs8u z#>ee}U6klaencfH=r@K^zRDoYN4zCT^bS?PX7{yu+0g?O>a-)A>m0r|N&wA!c~2`1 zu_}~kmZX5K!U|02YzKP00b0cw#lt9EdJ|E1{yJz8>M6kyxn!+t%Df78#{5y=n>aQY z4V`^z!Z0dV^f89_J$jS39%6csmnx$1mw=SJEJAz~*qrheMFcSR`I4rJ=wI?`fczC8 zPZ~>*R5zr&iMC7rpr}dy-~D4CTK-?U%KwbwG35=TqCnZKaS{+aTqWEiz?e!ET&Y!G z0M-H)MU?cPrqn|!*EezkahZ8e=&vES|A!&HjR8ZxkPo}ZcOhQ~m;mFTBo>K-({6%@ z4Nrjk+5kgy^kjF_N6+C{A^-XuG0VHk4A@23cbycEK64nA_0eF3N;3^XUI*A2u=~m` zMO~#bt`b5291XyK52MyPcq*xwWl@}GasSwn+@mJefiDBmU2u~Bav0_Ci^bPw#>8Y< zix-#@{%=z>;C8@kU22=?NFv^IOWSsssSx$DtdJQ!Cm;aFr#BI!G?E}=gT2p#N!FS$ z@vTAQkN{){zuF8(j{VIw+Q0Rjli@H{K>cv2RuMi#TM@B9

PQi59i#t8jtAG?yHW zdm{+C@B3wmj*kM4c;KtUOMwt1x9;k0I8tkKp27!NO>M{is^)w38aDk!<{??!JitIpl;|b9AK%3rjRgpl$RhI6+SSRCR VZ&g*)|GxLDuB@$8tzZ-We*l|k?J@uW literal 0 HcmV?d00001 diff --git a/add_readme.png b/add_readme.png new file mode 100644 index 0000000000000000000000000000000000000000..4899a9fa3a7bc648de4b73c504fb7db5174c60c0 GIT binary patch literal 15909 zcmZv@1y~$GwAg4cE25PWQ#eshF)5267dzF~lEC?>CLt&Oy%sHL zM^6pA|9vqHHA)hEN&{u_3Ia6A5;T!knrpq8B!r?qB73u`%lhB(#!XO1$9DI2;PLZx z2PRY%zAW`v*do*@{zTbaf6(MmsU=+xrkEH!7Bm7SX|2*gTml;67rKJr-j9fYO6=VyOIQ4V1$FzuOoECW`h#fPacaak!u@LA zRw`K^zuUS~)9DZ8Bt&;c#-+5&AC13!qR$>dM$)XE`iv_wc3Mo?`aU_AOmHkf-)!=o zqI+?A%Y5128u=r+il2<{_tS!?3cgMjuq7yG9SEtv!e*L*&X*Vp8S#Ey4gJwS(Qa=4EB#BQkSD}Dr3CLCMC zGrcx`BRn+V_oJ*>l40C7N1*3&`zX$g1p|)-OTGikAP37&i8>063kQXr0*(9>4?Da6 z?wIXkGWT%-jXpC7Il_$&wc9g*&Tz6NuivA-$s5t^97B~fwJqVQ3z?%<<NrH7&uDxrqP6Y5wMlU-;n*9Aoq! ziN)EOS%woK5BQ%Ezr?#|p>7eL&}i7<%Lo1HiG*;y!u4GUNl|CQ;aymBF$wxecHXz+ zh@dSG9`D-!c5FusLdK+lqM|_?!fFuTzzTi`BQGOFWv6hk}hxtAx zU|@%x3k}yI{Awt|Y>(YYiPehVO1PZV9ZNIW_WQ=r?T0A#?Ru>m)+OSzP>2O?1I)RE z163=YKa@X?Kej)~dU9a$;4UGbdoB|zTxP^XA4~t&k&A)F-OsLh?IIiO^v83eaVvwtm%OWHrnj%;SI0ul_6Kxc3DP^#H zhrSOb4!sW1?V*1Vb&V)hc~9?~`azzkRJ-)O2Foe+Db*=Uwh|9@*Qb(E>T3lzqDS!K z`;N?)*kbX9;s;F)Fv>WB0;nw8E%`b_q3pAIVC9$=zb9rbZQ)u!Nl&%JFIMG8b;Ih0wK_#!3Kz9&^#aP9Cc+ZJ zhJoErRCi22XSjb8TM>s63njuN1`-o+J=q{lWgV&R&o9mXSzw)iI_2x+&r_Z$Bv?qE zT|brlO#E5p^VR2gE+(!Sn?LMWQAFWWqaPeBNi8gL0Y?PO; z7x(Nzwu`o#OHVaZKQMkw|JbV8EPRziEU_rVEo)Mp#6c&0pwPh%cwe2{I*E zYMD=5GA`~?z3=>0pWkCrpD*Pcy#?Pb9vIbA7ZmkZ=e6kx7J&=a88)?FC)^!@E$g!q zK4-lCumc2x=qGU$AtTy7aVtp@VK&KkqOUwvoPL%{@4Yxxc(?2}J_uQUFnhmlSt*}5 z7N}WVnNvCI(Pf)+ipc5S1lQDJg>E%7)iSwy^mxR5gvs&+Uj)AcKOMh~*)_u{BRivu zS+K^8$(pH1J8}`-er4^ufo1nZr#pgI)HB*u!Z?w)BbbLdM<|Cl$HXwovHiks9bsLU zQ`w4cW<#jS@rYN@x{JB%)eG~S`&{xucb#{&*R|elM?i~j!>v>Rfxp(p!6V?p@?yw) z?c&FUl6(Hex22BnkJa(j@juFzAbpEN+>4$SXU4pphNbI%Z4eeMQT|osDe(9GZ_}e~(hxW}L_GLrL_4@)_zXl5jQ1!waEoy0@PdeJ z$W9pjQ4x_27#$N!7VfKy=S6DEvyh^%=4tg=_1E?EyE?n`zspyDpUCAyCfe6hH7CDt(yd7?Fxa#f1{d-jjQqyXkd+uc0q&09z39KmAT! 
zKlvy3&`h&%e+*z3+Vv`?X88v#W!Q z9osrB@4C2=?LVX3qUhk?1`@m&D;FsgHUIu)Fw^4SGZMh|)DxJu-${1X1G|vP%qW}uD8^A;+*BqFxxP`wP(H6 zY&~70VqSP%ejT*#KRz%)?45PK(!8(OQUBr0w@+U-jH<)e7h1mGE zmm3QY`y+=?+YCw!*4d(rzXdRy%t!L*tanV9WU888wY&YST3?K^XI=hTCul1ASbbAR zzizyC=G?wzbMA6!^D}qRw{@c8>}4t>C)26H*|{aCIjdGX-=^bX66X6hK%lZHc3OdhLOV2}3SxLac*^$-U()j~~)yL5V zK)z5=LOue(p(Dh@oYKe9!O2~~N0|CQcL)H-Z>QO)DgSeehrKYhp0XOHq_Z1@lAD!_ zm7Q7yg_4p|$j#D9KtoFAzl#H(gsE*jJX{3W*u1^HS-m+~o!zY2IQaSb+1TH+y?@UF z+`;1R>*QhX!{X#l^Iw(x?|P&l?iOygE*`edPLyx;ntyQi^bn?|erxFe{QTE*LVRrh zZ%a<@|J^O1gKTeC*f?0(+5S)6KvAK$vjS?iJ`e{zDO*Q~lRMCc$a`)Mq5s_f|GDyi zTl^m-_5Wu{_WxP(e_Z+Rl0s~69rzy|`mfRY&sku&L{Nm-{?E{hpkVnLazjCJ*vdqcUv}I-FDXp(`)~CI_6q0Syn5vu z@=#%mi(w)8!(B%OqL+n*g;4e(#WYS}ietrK!AY&slopkJ4ioF~cEGVA?9p$C`cp1}wbipncO{7$T9|lVIWlj1RvSoO z)Znr!(dlzlEbID$&8PQReBM8~PO&0STof)AR8q`r@-3^@q}%S`yAwPLULgjV0N<_m z^8QM!W`wBhe5E$0tjQiL5)uqKwHaTd=FGj+rjAa==D**&gSW}SFX5kgomD6TZ?VaQ{kUY; znyjXZEAsu~$o-`GU?QercM)>2J}P7 z-dn~9Cw1!g1_|Gt%!LO%>>_#%+%4*w9yV|J?4z$W+kQe})D@>BfQcFpni0NN+Z~8> zhmV5Y*ri~8yq;it97+5dWxrfIcQE#;1Dv&$Ovn*-Df+Mj`*XewHSl3Kx*0No-|4!K zz%(#lrdX+4XL37$$~L=f8tkZx0lEiz4M!UurjjR+p24jDq3)Z}%Wl>`I^3=8RvT%{%F5<;`ok-|F4nkg=ihZ+_B<1!T64a4 z^mC)|MIuz$haQx~6qXYG`#@vc+cEZu9viYexzF78d`{sA=zPpP&~w>f zaibOwevcEZ*Qc|(A6?$ghf{fqz&raA^!ij2K)B0+8Zw!~n~Xs&Bcz!tk@&#;SXWZcyDY%nq z;wKNESxspc6Cw#2K{Bb*Vi55BIsN(RSJ$N%-yTUhIn>B_I@7x5SM6&2ZHuR%$7@K> z2$u=6jPX~qTTP8dOL_mzLLZ-DaAm1Ux8Kif%3&lLOg_v$70VNuErufygr)32vdLUN2Jg)5%2l{ryJ(UBV(Enrv@I4K*qIM ztkQmoP9@j$)zzrc;}CE^LNkxh4zqnfe_vttlzeR-_&UMX`&(-(?{*}aLN!_B3CyO+ z9Pxi4Sj+o2 zE3Aa`@n?12Mx4nvA6LJ}!%W}d-@vNVw6daO6Ry{f#`x-SI{)H*I!`-L5JPDe0kkh0 z@qF1^Agg?Vw+yCg+NhuSyJ}s4U8_cT?jy;LJ~FIlXUACQi-<z4>7KzI$3!yDSfk?_|B-Ac4nLOw;{&*0kO7 z+}f>bE}yubE&;Qi+S&Qi>t*l1X@wY1@4|x_(SHYrsRn)zpJ#y8fxi6IQ_gfXhzhNl zCj5=^(;5=Yl2qRz*VgC7j~b{jVIkzu>R!Us*UU%}3^2v#=4b}@Y+ro1 z0X%5+KkSZrl^K+UvSMk*$uUBxFy?U~VE-(h8ZAYvS(+5nOS4Gpl4K9YbSA}?YFdM$ z2Mrl_V0c_J!ugm8QJ8Y2eP|2tyiq?7IhVRxE`7dFnD40>0Veca zS}OQP$GTaqzQtQA6@HEJEAT{N+66SkOkn1RqXe5uqr-nMH#4|OzUKJP1^-^6m80h7 z`bpaZ907~MMfCGhq41pMR<}#uZkWC3?Ly8 z4zQ&SkiM-XXS2$D%0etGh_2kIHWrdQaCPuoXqjJFh(zBH=Y8$+A}{VIIeIKE-3K>x zDFC3H7giJmeZIwaDT<13J0#+=!72bH57J9ZAVL$Zk~K?H+SKwmZH&^_?`fJ7u18ce z8?~pHRj}b0AnCbTwlPjE_J<>`+M$mq$0Na5(U{3Y5T`&g^0LKB(qxZ0hdn^NGUy55 z-*tG~Fq?eYvYh{%uS&uqV!XpryS-S?G-z|0+O5*9Qvsp_WI;1)asUm~6q3PWs8Ox+ z6=?!0;_^o&uHkN|4$ASuV2U+R*nw-I)>sgRA0BSWk8grZNyrS4C}rK;-gkwj9#8RZ zAGp**WDU%=Li>8i5Clh2A(0K3%+RiAJmWAld3)dE*C$HYMCsk0w*h3fDs>O+xrgmL z7&dR{>h&_%T9U?5uLF}ACPT|h+8sWgy}R!N`rm#$Fah&c&*KqEt2fX;CF`OSvy6T} zvIAVoij`(t+?P6g(h4>}1?vnE7*&aBvPrw_V(A&lFkKqTBG?b@dw6^%AjVDeV93^dXo8pP9Hiye!>iq|1WH z<5&>$5YKl*BXQ~vLIfs=xOK)p(yr2xbHaL{WO3s$YQ;F7UM0-ob0#~kw_*=eO(Xl7 z4MR!moDM1*1_?<rDtWG7UASzTzI(J_l*rJgUg^g z5lTa%&5#(_s4g?@){i|JU$EjXF1~(;uKPoa@N547EWR;VJ)vXIbywke>wdt#gdR|9p+#g+$Ts?4B$4FYfz4ACrMX-X| zXGQv@wf+h*RKlY9do7>W&0L3BrcpfYd>;bIikF!1>oAEk`}9g;G+}yyu+u+S46~K* zCQw@_Vqz8WwW6hwt+*VR(B@SQ=8m`Ud4kG2EK})%wq3==ZuID&VO%?AkZdqK1fS>X z$F}J9y zX4FNBjOtA|r)!Y}wjCIK553;6dJO|#nIodcS+c^&^rjz>Sy_h`(5}DvSn`W`r2AetR*3jthq*;Dx z%6|dq%aDR+1pcIE4rxmOccCLP%^NW&fW5(0eNy`gCMcrtV1fA|VzT3I)qElTXMzvU znB$DHc)D@@!{mDICF4jR(PwpxySx$^8rAXvQkW&tA9;-o((x*ZY25znYLIxOpl;wR zp?(-+!pOps$bTbdfGmN9lx1@?oi}k$ct@sg#INa<oB9BuGY_Ej4b2m4gbwcK@FpRUu?2)N8=F%gdf{jO#xfG zE9V~R*HN4aqq+eA^DI9DYz0z&H#l|uG<3m}#wzXV^4!3l&(i&oVJM&suzwOoO>u`n zzymuj(;j~oOfBzAXcRG?hMAANyu7)B&Bf?hRf2&ZLqd6B#L~Q?_E&@Siq*mbm=><3 z^WVC~W$iHj(?Afg-5ZJpfDw(=f%@*ZZbp>a$D3pS^oZ)~P!!acyUk8d8a2IYjH&8L zW>WdBrZeIhMlRi2aFH4di*!1tA49}*^oXK5-;2k)-Vi<-EFE0--3~Tp**v`6wxZn 
zs)N+7k~?IM4*BuJ#3*?P9nR-!s1Qc^rv#wn4Azl~m`!a!Ne2#bYH7%EJ%SF4* z7|%u#Pz{5iM^G0wvKh%Ahz!OBb*)3Zi$CkdBqd#M7*P3Vlm>vBukH1G}tv9i)grI!BU zeYq9Um`pDGStf~89^l$k^|L$Q?g42a{FTk9T@2tvB2-Q{M^m;ykh$HBrkDqW1zRAL zdR(k=S7}yYShqNDi#(l_BqNEgCX(>tB>4eKlECqDleKEtR6zE7$VoIsP$4od-E7xo z&xG}K2{T_5v9Il5k&vA8$!yW>ruVjMc8ZLLM{{>T7E8eIenc+cYs)aQAPEVW@594x zzG4ND*Hjk(?H4Acx@TT^NU{I80k6TO+YuO(PWrb$>#tJLmz}rph!cR2l|0zxjUR zaa@zarB|_}jrs>HdYhxkY@;6}yiWE1fGzPq(##J{)w%ES1hzvl*yot=X8zOsM-BwQ5B%NGFruq-?wDG=5hHM8N+1K7i%rH0DAD)4!mzD2Ern@EFbu< z|7G_G8T|a&(pM4|gXWl3Pc(XnK zv1*6eNXw=J)--L~+QKxocSz^Yi0H&p0OV-XWq;GhS{)~5&X(&c0AXhy5MkN}`@o%! z>pW|no=k7*$5M^{U{KbW*xvrWujOI%`akvoi3V))raV9t8T*CKOAAb{op1Q8Msjy& z%huK&P;VTAsTF1Uza=8k;)T_oKta!w+0cToG6l5bwFcZnbvk7oRT^b+WUHm6?-W#3 z>6J!6${;+QSk6vHr5TRbk(jS!C-xuG*Ax%4_365|=kpMp68I!D$fX>%P_oH7+ z__f$AHg@S&X~kv|SCRhin-oJHusA9PG0j^(@g-%6TknIrHuA) zB8jZ3=qp!UM}tTKr?m14#Xq_FqEk8Oa{&1E{pDNyG6#ogL28-LVM;! zAuZiFN~?3OCx;CqPa?s6#nf*^>pL1r_H& z|A3zCDZ2EMNX%UfOfD<|!H0gbV72$m*np1dta{eA9aN%TBBQ?D6X^Rx#w3%~c&}F? z5@mJJ+yWB-d}{jo`e~`D{b#`k`?Y$BB8q97n;Gb-E`=QfDy?1uBBS(N`@(`UP7VR)mUa3~bjb&R+~wINL$8us zh(oKO)*lA%{{8Xwe%sd|S=VwJu4{iy7+Tgm)kH}Ntr8MZ;-ceC0BQ3>jk0-c8BeP0 zkoyhng_uDk5fEfn)1FC+9y78e?ymoRRt^~sS`#Zp*SWlpIU5P4|Bd!NI8xGL!6}K0 zrETJ6(c+3zJ|KoNZ+4Z~{-fyR zwBkK=?Uv4$dT$IIaQ61WJj0K?7r*c{6*=JT#j;ieic{vJ;2rUi82?;ut4?IH9bf)2 z|MV-q?{&QEb72h{ZhJq0k+Fa|&j!A+pZwK6f`rbw_CNg!eHnWdn`IKt!c%sI&<%`L5iy8Jc z@MXV#QTOS5K%E<#lRD1y{a7=cTnpLU70bxxHp6<@?Bqa2Ry|1J3ET%+j_vw#2a2?MQl%caMn z+ZYfxt)|o?y`=i#gC_;*P1(K9^~&ewr(v{S#7rTUm|O$c`9a|;=(nC9m_D2GKeLs6 zc&`mk`^m{@;0M0wR?t(VD!X@Hsi$(EwD2a4*6^Vr;c+MgY>}gwDsX#Wd^DM#bkSFs z&~x&I2hRL6>5VdDer6qUc1a8YhH|>sePgy_E*%<_HgW8J%gIgbw0865Ic{41uQ4VZ~`jS;MKW z;f>kVFb9Padoq}URfV+U^9s6&Dt?k050)Q%bsXvVR&2x^>=_+~db!A?^`(zu&ZkLs z2Qx)#^)z7_tMi9zvLWM|= zWZDC&f-CA@z%j#C3HN-LKWpTRv!ovM*Np&jPw){tUj@u0@EO6ht{>_K78n{c#Hu3SHd-7{O44Mujy15v3;Ma|x-;1sotNURK|WmgR?N%+5-{NJ*Zt84(bo zp$Sja2W$^}-jL8@8f7qyj3x^_UV@QK0%5-b##7bpt;ayy8NjXr%D+`B*xS}>b-YT) zg5=_u)V*|BnEskj71~sKZIqY6gu+YBY4GnA4k97L*O~T;D1YXPeKP>Skm*pM%h+kK!Pdi8UiG(;!Z&{5DyP75UbIONfhAs zz4v{GN04D*5Lu~JiDPbu*$Yetjy59zQ(m|-0c5sUmT>f}uGh(X5a}ZRgrNa23lA0D zN@?5r5@4G!f1oDt*5}AtM^*OF)5Q7v|`t2TWPft#hjy_pt71c zzt9zGi(qf~@^JQ}a&XJ%J3M@BeAArv`ik{VvL}BG8I{2}*n>C)faSjhvM40ZDaKJ7 zQDd;bgEBa(oi&YY9q;qBCDS&E1onEG@Dyy4^J|CV6ETB?)e?O5INz?0mn5iF+fYe=2G5GJ}@i)WT7c2^)Uwoy|%`=o~ zP`gqiNo3ks6}pc4E%%Hc8R_%y{>r53S*aVv##Y-=bv#9c{8=Q~YD-?ZreDzTWa0?# z99}IymkF~CxJ#4~ae5mP(%*J=waGp&53Uu6;)R1?^_=2=M9yMe>C#c*1yL zesWWR>#pdaOYtjI3yzd!zZDPe;`s1<=;=Rt$HVSAkk~cJ`#$l$H2qzTA(D@z;-Ss} zLTG)?FD0YAhcNH+q#>IH6;TbrxU)%P^$(ToPf0Qz&pCSvGG382Mu8Gyf{oP?)R7qq zi^~+tVj93qdM=54hdzRY#FB2|p!5eD!b{{brhuxw(5RMxoQCe=51wpEEY;)^AvZ(XuMRv`EoDD@TV- zEAbyo0;p-W{cqu}n0urH{}H+^*~}~*X{fk7KW5<(ik~pVl|N%a44qO!AXX<%g2d#K zl`NZrYGgO$S*8A3QcJXM|9h)-5BBeUKT&y(*p(y6x_Fp-UJ)4J6AUugOsBWwnR2|H zQQ>cN50du_Ws}L3X=IZu4Sm#q;$p(BGh%0H@Y)iG&L5XR8e~z`)`2lX4zp5*CSV|y$ z2sgpQzJSM|Aw@BCW-a{+$GLoPUEq!RvFt1-=tQB zrJ2zN!)5HU5@qbR^3e=D4@g5v=cfs*tDSigf4sqzfvP_SRg5%0=~L7ge^vklL%-)& zqg7h~?vqKV*n-@nfdmaBz3Kv2JkU9}Y$6jGFV4AEF_T~Oa=r#2mecS*CS|a^hqbli zGcn`xj~)Kpz>S;Na5T9I=lx>BL)YIN44E?H8q}$c$BQE*DtXG>)x>7yR)?@5xL-1U z3n;S1jU$^5Gt6xbDo>=uDf4|5MWVi9L_0MFSS4nJ+Ub|->TjAF8d*>Bkt3iyBo%d6 zsn>{`T`?yj3{9UOx@lx!sdgVp8i>>?950RctK0?p22dkeqB?o)H4pGCIi1-c*`7j9 z)Kjd9krVq4xuR5*cXygG-0n$xF^$I|lH_tWr2~It0r8|&nF)jo=SEH0b;W;`LN89~ z@WvF@%hThKVfe9uh@b1Dwn;r7K6YDp@O^O8_P;#hq4s*Q4`B4;Wb>OcmL2 zzmU1p=*?N+yjMWlX`9r_cICxPa?L-~#;4}ANpx&VNsI7|ynXNRja~xBH%|r$-+{R_ zMEZ$aQzJ9Rsg;?yG*V@04lfJJ&xfB$)NvPZi7T_Y99Gzm%3z6TeGbjIUhV4 
z<;6~6U+a&37Cy)sZ4Z&40sZCD7-qTbt_FAAufVgE&&pxb($cE?`3YPADJ|<%q_(9m zuduj~d){KYQhF{8sZ_DIc0Sguz9xP_K1EK{GXE?ZED{Q81+4P~zELJY&!192q6cmL zqTSd#Wg8qtODdW!+RtOIRFN`2F?fKOSM-*G0bqnnzVSRVz`wds(>s>2DOJ$~qMSRo zUi+NfVO3!=1`r@%2op{uXwnVmwuAo#8hrENKqq7mNp?Yz>5A~n2hiG=w%LnoqCghl zxx9%n{|l8_H-y%yyyFEVs}7EajQCkO{p#*8Jp=VWlwoZ8s4!O4DZzknZ*@fPiwqMc zmh#5i#9>StL2#2YVqW`GG8>qWd0igqzZeOn>1RxNlrFZ~^(4Jr6CB_yE!*^-CFeNn zw(9jN%i4=VLK4o4W>K6npyPtB#yy{*W1M7~5ZAx?Hocy_BOTMK)S&Hr{?nocuq zYd>2U>UY3|rY0t>U7(h-8%A$Pr131f0J;&^V|jgI~zG7(1&d;Ou}d4}~0;WImM*mSZ^iu6=UlK;~2~qd+l$x?En(DUu=bd_b4s4S0W^`*wP- zn~b|J+}*zFYzg6M^Tsyr#dX#YEo~uUkQM?htq<+Q zNHaN&a0Zbd#9N0hJ^#;IBSf(ZfVwwx;zc>!_A;0v50r@dfY*ey4a95v56zme*~L8MFi>%)VG)3+eMiLucDoTrXU zfW3G<@SfNK6>RS=pv&u`eZT(;#`op;ruV0Vdqg>LSj-eC_{>=r`XOylSPDs%Ijp1-P~5D@Y( zvbDASV+myRnD=6ikN`f!QWZoYoo-e`+~wa?BW<%Nclr^_6DI9SS{vXGoge?CC|@KK z^eP2p$Fy`b6tw;$lBFw!!h{n_4M$%y0`i3|AUDL+_ojS#t;OE=A`zWGMgdmvK1)1A z0(iqZ{5<%Nk z`en;ixbFB{*3==eNGW)oNXlz2vKF~vH zm(kZD@xU*h*T~*E)h}H+JH1;xGfggy_kR5BxB5DPHQ}7I^Q>{NZ@9|snR)8kqmI(I zi5dIBA$8eZm7f(VdE*VvyDFqiQEwBmdUw%K2MR!yg+DcDzLfr+e(~8f zaH40^adwgfsY6SzvdnK3_u<=q!D6-g1#gB#pc6+2B!O(vP?CCVSFq_^_Iq8@7!t9A zLP+jixmi7>@Gh?n%w=58J zff3oyVps0g;mSv-5fRVUU){KDP=7qR8;1WfyxBu`9&-8&WF)N^;c6+iM}N5gxfo9b zDY^yH|14E_99ICAdG!V3h**t(6EHH!mlY|X53oilWEh40ZUI`G+lT`HCL?=;v!n|p zLxf+jNj$k;<``n(Frf9D5HZ$C8Dv+_2NYLj?r4r{)gnK5pL{C1?NYTqLOQEM{9S8) z>#*G;j4M1qs>McG)Q5!j*E>aWfM>%L23JE(t_?`gAhH%$gZ9cnB=-rCXa8YEEr5_U zZZ}5%s*NCji~#@@DoJz#y_0niD3bV( zA88feU*`Jp>{pcq!1yyEvG#K#LhrtFIHf9Vb+f#jhCV1T1>X8SYQi)wgDy{g)YpF$ zQ|q6^tCMm|Hi@#+tVnj#55@p5j7lL`mOt`4Xu+Kp2x7$_zQ=Lofj0VVP{{?c^lAZ* zp-+|`M-R?&id-*%-?L_Mk=VGv`2Q^`(!wb+`}}B#B>;IIv!i&UhTQRF+17igT?0Y< z>PnT;7Y-J!{HCxV%4{k06-G_aBD*LL{>y{g9UIL1#%BW6%u4QxBpT9Yc zUTOq#*7a;2@-j!Kk0Y}3+`W3u&nQ$v+_{QZGzK!32Q#T4a z0uqS6WtZPW(c*e_UqNzlE`&E+u)=U025es&Xp-{WN|QCU`XG6E0UQb)%57a-DZ6mGjn1^ts+q0z8g&6$pUVB~ zk$K@F^#XFVxbHjT?CzTyH5S!4+wZtZs1z*UJ+Vo+c1h=bSWSdGC1w!Nvm;s!gNXrr zT&Vu^Qut6c31E#$R&tjzhlxSG(G$w{kIj@=3P__)-^A^a64Ri;dzQWcyj|utXP2Y* zkWP^mr=$dS9(yxx%{k$ssOxt%WLB?F;M?e0n>M%lP93-*lj1A}%Ec0p_O5%OGfHu3O3rE{SY4-m( mE43OZt+s~>gD<}G4|+t~vQOt;OZe?OF!ItWQneChVgCz^{q$P^ literal 0 HcmV?d00001 diff --git a/benchmarks_directory_structure.png b/benchmarks_directory_structure.png new file mode 100644 index 0000000000000000000000000000000000000000..1bf56d91260b51aefa7196075252023d66c4b66f GIT binary patch literal 11588 zcma*N1yGz#(=LoI!QF!<5Zv88xVzgD+!Ng0A;BfMySqCC4-UcI-QnDM-uM5`^Vg|U zb+(FS@0sc8o@;t~y06_ZMfuOj2m}Za5D>^xl48oh|0CdU`W*;(FZJQ9f`C96w-6Oo zloAysRdlp7v#2;kreK) zK#E@1R7CG)$@xHz5Dgn!O`5X+f+U=S#8-`Xs4^4g($)KOg`*C3o(QK5U6xIs;( z@#Jd0>pJ*?n87}Yr(A1D2)~{UNmvNQbVWhd7+Rq#MPsD~%qubDctpk9T?B5vcpasm zaTyt`1EN$qc`p<(>vr5T@!Z_+{l0&4$HyfT`uM}t0p~5Va?6L7SB6=u7l}PwkpP3g zThYZ#KKXmUg%dfYR$p3VNJ~(7Y?IXP$n%d+DTDCuR4T?&F$IPXa!4CkV$z9thds3n ze|?m3%1Nl7&VMbF+LbIkq+!{L_a@6M|CPxYDVMy>r}UgIl?WaPl~?1XYub{NYx4+KbRq;Rc6|v|rpq`l&EGjJSraC}_!i$1aZC z`vOb*QqzXFqs{G3S~w>`vt|VP#OC;2m<|mJ77dzs9hzDanwu1H2oe(p0zDQI{xJf2 za_i$Bx4XbmTCOB_+)DIA3ACUUcs)bk3d_F=+Nlc?t*Sx0|k&ctj!?6%qicOP5 zWeN4{S!ZHH!ZZoE=nFL5WYU#oFyl7kFL>{SrV?Y(ztVSY9L%&fh zIu@}eYryh=@WAju_aI!3@rvo)z~gdCr)7Xi3LNdC?=Bxa>6zU~b;xKESYaZLnIF*E zYClJJMbVFD>^tis8OZE)*?77jfj9Ms?Dj(wlcWrVFNMz!6b-BmWawe-d8ZU*E`3cZ zf#%lN-xt;Q(nq<8Y%J&ym?zKj$t})U3O`RRk42gOfc${$fIdZ*gS_=e?hyH*MVpNe?!EWV%l1_zs_BZL1ilr(ZxmA6B z5q=%7_D8ZC+QSL?+>w}*&%`*`NBx&9T zxO4T>QFD4ZtqON7r&T{Y^s9b~*@gUpZ5Q?m?x^$*epP>|eS`+FLEAycb*l$Bff$oh zWpP*|v<9rfDENm2!FV)CcLWWD(Re9@{rKe^C9LkIvMk?NuBS z=UxLfrr;`+TK$K`2dNkNN3Zj{bA#Qr_r5SNa9FT)aF#Fwu!(R2C@csVFtaeou)J`L 
z@U|%3!GS^6D9xjDCQgg9$JvVWli=)fqxh=is>`ZR8|oX==LL)B$CHaXW`gM^JfHar z$)LolJ~W8&g|o%{ih8C%z{SQ_L%(5L5^%S0)Wr`1QjslBPT zrLtMNCHN8MWBkYKkK?ksiLUfgpQPe{Cr2toD+t8$@p0OpHZ;Fbk5dOxyX>Y+=&+rQ zcxF6kT}{v_>UC+^^I@l@#=2A8Ab#>!8OE14DLj!S+?@=G#|FcjMVK!=SI{w%(eGmlS(@4+HqDq*S)jy9t(odsjosMIQivG=8H&b@z0 z7w68vu#(Eo`!xT-Mdvh@P1aoqG`u@Zu;c8k*-}xZ=vlZwb~Ro)^wh-Uy>TIa7||C| z!PubPW_wZlob{ZUXqaf9R7c0I+hS>P#xhsaKAOE_F{-1ES4pKg)yd+z)2)`LmQw1i zXVchXdeh1dZ9j2x$5~_kb(HB{A$M0H}8f1<=OgW7nc*_cK&`3 zfj6qhY7e{fKabM2Bhy|@UT#l6Ph-~BZhtH*%)&XNV3FKwQ&b#s<9SupFV(j^#-3(J zvD-)Gn62B7Y(#&6{7hhFV|h2nd>9_%pBtXb=q>;9FkW3{LO-|~J^z!5zxwLqiK!C5 zYG;#ok?_LgO!DsqH=1iznIB(fh7GKJig|2YTQ(ziQ=+~|eL){ebaI_@GBsl!a9;%O;cMdytiJh}IP1orM$ouWDA9sfX1Z>wv*T9`V2aK-LUfPep z*0pY&LiP&b{6ReWzr3d2lrJ4ktIknx1iy9ExPIMKSQD&7r9ipqOza@`s(mfLT`qBv zKhqBW5JJ*9;VpQXy`OtK7?8OlQ12yw|7>l#f%}|PQ<-Rx))D08!IjF-L$e{ty_E~$ zPmul-r7MjFqDX50coGuA;Ut!(k;oBIvY5nw#epY&a&u>Admctc7Z(=>7gh#4$1jY`+}zxZOe~BnEcCz# zdM7tqXCqg7Tc;2IndJYQM-1#_;%H&-Y++|h`gdLZL~4N?sv733nWu&kIURwy&VJ6p2veaPJ0rb@Zom!Fm1eHIbwhm9tn@C%i| z>ZP<4$;m=T%Q5!-Knk5h_Z90O&C#55#NUFXFV)}ktJPl0 z_lOotO-9<{c>L~lY}N~ksq9vXa}JA?Z?GDzY>C-^uOV>(x+#o~Hy*u4E+X8W%5tvx;| z=~MJ-l~KTo+g`?WsRrXiKchF8M+Sr*eu|Ycrk&>_S@7I@23-YVme#~T}_HiK)voL zkJ+!}K4vrUVhz}FjD3EHwz4g!wW{r;VVSLx_q0hgkZ^E@;%DaBSM`p|}t3 zA3Cq6WgK5_=11i?S3f%K{*qc**WAsspY|}8o@6BnybkuZ*To|Iyh9NzdRf^S?uRn@a3Z)}( z=v5ILiR0J>D+WJ9&f79EZ1Ju=M7KR2mKwisH)z*ewg_e>!J9PQE?L#T`+78ojRzBH zcr;h;Px#op_w$E}JhwQo{12t-<=+!%HKa!q>Dk+xr>Y@81Vd)?zK!#q4SlDOxn6J@ z=l(onjDd&>jcH$Y@aS{i4R?9w^QIGwgm0X4vtj@Gus6E+C9`}W00*a>spojHcIV}8 zEs1r4PetGJLfocW@L?PCV70YTKyj{I=M$4IOx^R#)7|{ntIE1MRh`F;-jLBQKj^P_ z@SoDNL=ytwP!9uccGK)QhMVb_VELl3>CJyo%9-wsZzo>@cz?5Y=$$U~db$l$$#59J zZ;3uV^Mi(SCR#PboIKQm)+WIu;IvI|;=n^`n37JJZ1-@fEg7}*+#(D+k%%Io82|E3 zgfQ^?cZ_1SQg!T%!`ae8RDS2usWI^gJ#}@;u{6$kxSk{fZ;P7hj00jp@3w+|=pmT+ zFUJejEG7d;!q2{t(ARzrQ{uQOA9Zm;p(<7loubUa(Y&YT%5^1cueXhIs(%vAh2^@q zFw04_Dez!=NM6t3wCc>`UbFMXH5O=pR~i_6JNrX#z9B2biTMDBKj|g$z|`S+HO5&_ zd;IBbkk~_(q3vAcWH%Sr2uk}bh4sr}Q=`dXw12kW&4%T)1HK)sCqAv2mrlK9zQ$66 zjZXKs4(NgCZmA!;)=A?})ju8zDsQlNls;a8y^F%-7TuIq& zmQ>$v&(;|4O{nPp8o=<7w)R}mpgqT6ERf$tcSiDd|G683lg~^SeKM-QZ@fS*&G{Fn z^#aogk8xy#zOZGNKrIQQcI10*q;n~eut?Ivh#18B6^SHJaaO3`>Q&>&sFMjja#0;_mt=Atyf1xf}{A~9dHNagT!BWC?xW)uX~ zU=XAIn0bxVkSIY1%IK^MUyv!wiIfMBp?{1$2#+_ty*^=+K;w#VK{1JG_$jt_$C8C7 z`Fd4@IP;L&!qb_Un7GapkEkW19et`Q^EO_yDtFYG_Ww^TWYafXpFr0mn+%bgzSatH?m+hxOzfUp=DTWLIGA}DI%c(_L)pR(>V)hk;*+tVX z@-#6f_PSOQ&sp&}YY(RvAB76B<#N}k^j0O0g_HBGEH$DxBB`Re?@h)u5i(Tk3S#jt z5o+=^-Nz}D#1C*4mx)owTvgfjb6~0Keb~UV6XDo{QwMDsLG1cQx*Zl5#uHdv?y~S8 z1oD`%ClRsB%7&m2VZA`nQ4a+*IZP43(89xn01@$tXEAU%6<6O?_!YQ5Q)L|bOY#De zF9PDLd%Lopjsw45@Cdq&b`@;;UH4%sn3sl!#Ap5#7lEhw?@t~UZiFRjz&J>nf6NKpi0dzwLnJ)t|yl?`}I%AwY?j#5mt9nf>I=tF z37OD;58nk37(fzOJs-ttD1HRaQbb;kTgruo?*PFY&o(wz7!=zad9k^^*)#UgXlu7p z!q_QAgs!3S`7YY~i}^CG4;p%36TDryOHxf$DyDKjA2k|ZT+js{V;hbhW*aduG26A@ zBzi-lRBHEVWhwTyxn03{w;**>p&cc%S0pFjt@#+Vo1HgHgJ{+@{4@z^nV8M<)BPRT zox)MA1eur-ap=%ImvKnUgFQ%xF!S^CSuB53`9VJ*bFO5O1WL33M@MfUny9qG%Dy>( zkWhO=!0S=%HCxHx_CfhLUCQ; zdwae*JVTI0(mgXQBIPw~L)dfRN@6tV(AAq+kDDowBV?w+y`SrWMUg;ZP>mtx$0)+b zL(pB}uw5zQB5;L9267Z28=+`i^KMnsHiZC8B@dg!e5gK(BJ$P0R&N5#AJGz+|* z9auTkJA{s;461N*-S0w<`zfeC8?6^pU%lzL85|~@A|ow*EfEsH0~Lt~S+4i?`tp_N zEnSh}WT}3JEdYe9?7~lKTp*uOf(mn=OaAL8zs}_9K)^3}`T$$5^uzfIy;Av8`~CnO z5>L|&^7}s?XYjg@Oz6RUOdxhH0z4*952G^5yhM_xDe~T`5LA*135#hAYLoc$&dBy^ z90W>`nwgE(iXuaSJ90VH0IH9sxe*i77>+-+(l^I{E`!SLUHCc_OQxJFflNVcd~7-J4C02iM%LC21QK%icZA4<9J1F) z$I_PzCNUPcBzg|cTSm{6Xn(D4qzlco-3$!7(4Zey{YI5!T6UL;y98TKL4}va)nk7V 
za=~UEQb5rksBw3?3f}#dBHh}Rhf6uk&~a}(0cEJ`HA>%TKKeE(iYg^89z-mf7g^{$ z_>t&Qb^!pT{W(bTgiUm}qUsvOa_7h7hJG4K6g-FDX zK*Xwt)s$X`(|MwI*sPVl@g{tc!ymIOnH+*30>rFBw6uOvZf*E#X_qDrL1T(#=|SkZ z4-&Z)jVB{ZRMB=yc)pJrG9>2xEIr5qr?Wsif_^9#c$~yXKLz}aB5eh~;t;mQ{QM6oP?#z^d60Hxy3NX+sc>)Fl|qpbNg|8=wcs)N&bpX!SW3zh zkvs>8Hai>!-Tk9?#=4%v@ebq-WjF_$;bGOfP#TDukOZaqj$5ICm(K{dHEn2-!s<8p zJYNjLt{OBA<$DXx!=szTT_mdoc=^>nKR;Z}!r>6}l7jIvbv?cKlim?x(VSA@6O``E zg@2$NAXR%N)fe)&@2wR=z@xeB!^2+l$l$Qi79k@wNJnE1CXJJ{)?x++{@|-(1g{HP_WPe9;EjRGt|eL> zFlfuH%-w!RuxTD2Gy{}GrZjR{oQ0`N^@eVer_6evr9v9$SH-*+#rjv&a;k<-Ocse@0&xOL ze)@E^6gOPY@mF>2xaTW4lsicExQtRFEj5H-2{T*m&(DP>I^Y$kwm=ZTquyYOH`@S_T zBC!}m$^pb;|Iu1<{grb%NyK-YXnA-BuLlY~({{6lg8sPVyXGY(Z_5c&@$kjHA`5*) z6r##~)pzQK9>;$SFMqf(Ff;Ey>$~Fon$(sG+~ul8fQ!;A{U@&SJ8{;toQ1`QkV zY^7rO!~g_W;F`Tb3!PFWr9n9V+FSHHFvfAA-Bf8P8IR;UmKsJw8FS^S$Ut9Tn;dMMU!SfZBl9JNvAN8+iC_dCu z;pc@aAC(u|ySEDs`@_R7a^=#eCpEWJb?B^=XuE9fl{5qy1>^yA{@Vm(8Ha~^{ew~f zbm^86V0udc8*#mtwtFsWA(JQ-o9Z(0av+;8D$T?t`2>64Vw7%4yI!-@Zi>z+8>#mF zF98D7;MWn~T%U%i@8$NKJv(lJ1Dsghveb)$B8YSQphz`NVUce+j} z>xLz`ssa}kR&AyA5>FjDkNXmzyDiQTM+2y;$*G?D!Z5)-f$+-O+B8{0>mHRPKF`$y zK92{5j}GSVJ-T6W_vhzHT-PC+k1ATX^ON+yi-w>Kr+oQYT(|_#FMRjw0scC7<&A$Z zJZUgU!UrBIT2Fbp;R+PEcd!5&M+3&d;%)WJyKpUCTC@~Xo4Lp@b z1NkgxOA@F^w2z)nTNQvhjxy8pGHjjpf`WoVn|S?;qQG_(s>mKq6=6Cp*eSNMyKe!E z{6XAPpm#c729}edW3t0(Po5pmlgR7ga_2F~8INq!1*mvg&3;Yh5OX_dG9F_6`3TX*1L7R_n#MEez3d7`}jX7KyB~$=KLXl?evs{w|uBU)#3*W-ibeU?LJ< zU!IO`Yrv!1H}eKb05c|!OkBQDqE>3UGm_wVzAmg)V;VM){mE~#OpDcK(Ru60$9%&u zSuzH0ag1{y6$MDP)@N&jPjhZdh7zOsQ zWZ7ur3sBy07}rG7QWgL^3LnJla!LmVgZ)qGTGHMzabZr5iQxfc++sfcdydI=@jHC_ z0{GQdm-*RtkGoX8B?|FI9QynFi!E@MFXWPlf%BAj7@KBybL<-am-M)WHGnHPDM@oA zg`%&8myG!tz#izAX~elu;wn2gmGjAJpXxP1KX zM0UCEoLIi-WIWjwHj8Po);y=Z@mU5tAa?}=Dc+#uQoal>q!YfXSfmm2X$A{8Jl*Aw zo8v`4E+VmdXs`Ekx8!7h2S7?QQC>wVt5UrM!V&B%!;3np2% zAf4{dji?zJ=Pww?w>SfXGPA26%i}`xp(#PE#dD{I9TD#Wns!sIOxL^oYFsajiPBP8 z^jtTBEcpmIZR7RZ-G^-&R|}N(@$*2=tjy>)Lmy&_lyVb#FMt}Xd7LH&;5sH#bFl-e zjp)L-@G9UZ;V`w6{Ww^EZ4E^I0NAi2>nf=brziq;nm7{eC4j_Ku4up2mNV86IIcbY zJYlv_Wt7@D%!w-lL`Xz6lkZ>fI%{)w`udQy=PLACEoGM4+?>`BY}PvT2-qz@0#par zZgPykqhxFUeONy|z**&vBrj0YUzX~%HYD3NskC^W6jtxbJdkr&&T3B8gNJm1Sh>4& zAxVPpp37dyK(~tv%S1i+7j{;)Sp`u{9#l3>SEj8 zS>eOS2IK+Dn!oW)i~6~6+9p;IEe-kSkJ_l*9%(=&9Dlt*-nmwNe{Sl28C7q){smID?>0yG@t9{MBmm_44oms9= z)Q(CWmO>_wPBTN!&ue(%ZTDta4mbDX#L&G?N28mo>l*Lser~_}^8*$UXWwJ=TxyK6 z>+1xv(G|dPm->`D7!NkDJGENOP;wuY)$%EfEgC9>C6nsFTB>yf`V{H2;y1Xqs=gP< zSxiW`Z;RY_%W>v@pcVr+{M8cYWGegY~!@AV;H_4lBtdB%;c2v_@zXP9WMm!dv4d|G>v{14xLjHrJ- z`WY}?;yoRMCDCfDAwKZXLS&9ccWxJCMTACI3>kgq+tfV@ihtMj+aCdB1TBkf&tBQ_ zSkl7U6PIn!=^ZxAgvX)#FwTFcYfxBF{rEjBj72HSR?5a=F}t>mTT^D_j%1EbTL($~ zww=1hBxmSP66^4vi)2+8MC}Qt0L&f`xT_Z$i5g~-;_>#R^vL^lO7kBBsV8gPSn(T= z1zB3_ba8A}KOBwFn#zC?twib(@Qyyl)vTrJ!B&gi%>}9k%1!*_$9@Ivd9IrYb)FM) zncb9P&E%Iu43wBfrn=kzION?)U$T3@>a{OeJe6%7fB>mXMwud{_$%-s;=&9l|Khcc z5KaQCg!#0+B9lnG?~VE=HFb=f+Gf7fm;PW`At{2|i*F-yTDnPSK8bmiv*p*+o?VH* z;VyH<^1C+3Y;aK9lU@lNL&QgK2WzM2BZ~q znI%BQk1-g?G?aCi8t2GjaVc`ETPr`oEUIzq+o)J1b2R1WF8*8)N$M)`&B-Kyco{HN zv@FtZZy9gd9Q%k_cC-LT)d-&~Rx@BCfm5Wsjhc9VeJ~?8-8X%-^5c_&d@SXdu*AuY~~+Q`;QGjDWP(~=lKv(vq`eDVz8H$5MKhW4B+S>vH#D; zIBG~{y0~J5W&bL0eM0{Ix7}WgwtLm_({PKj#RCnr&mk5?2U%*ZpDQ z{5-Ov?AqE>G-jDI)2SS`csPYtBhG?8EyJqKPy*wp{UBZ2j;U;i-YOWc2(%-Nx*X=w z7`#NX$3M&+TRcWRFx2$)35oPNho!!Y#+v>e*j$9~>vCXYNto$aKfXhfnQVj_EEA?B zxOxsXf=2<8(sCgnDV@UG`-~JJxi_AXUsfi|VZD&Wo`M`qv9IGdKmceMkHN!P0ZfO?(K<%eJjt+*6p&F|bWwF8Ol zctnWgxW<~_7N?0=`T?_5CN%@p*#;iad#9h1mqtOojUH6Zdv$dM^xu#oen_O8?kRAl(BNio9qU^QU4$BiDN?{HVj1f}p)voW 
literal 0
HcmV?d00001

diff --git a/docs/general/tensorflow/LaunchBenchmark.md b/docs/general/tensorflow/LaunchBenchmark.md
index 082bcbc8f..d787ff7a8 100644
--- a/docs/general/tensorflow/LaunchBenchmark.md
+++ b/docs/general/tensorflow/LaunchBenchmark.md
@@ -106,6 +106,101 @@ optional arguments:
   --debug              Launches debug mode which doesn't execute start.sh
 ```
 
+## Debugging
+
+The `--debug` flag in the `launch_benchmark.py` script gives you a
+shell into the docker container with the volumes mounted for any
+dataset, pretrained model, model source code, etc. that has been
+provided by the other flags. It does not execute the `start.sh` script,
+and is intended as a way to set up an environment for quicker iteration
+when debugging and doing development. From the shell, you can manually
+execute the `start.sh` script and choose not to re-install dependencies
+on each re-run, so that the script takes less time to run.
+
+Below is an example showing how to use the `--debug` flag:
+
+1. Run the model using your model's `launch_benchmark.py` command, but
+   add on the `--debug` flag, which will take you to a shell. If you
+   list the files in the directory at that prompt, you will see the
+   `start.sh` file:
+
+   ```
+   $ python launch_benchmark.py \
+        --in-graph /home/<user>/resnet50_fp32_pretrained_model.pb \
+        --model-name resnet50 \
+        --framework tensorflow \
+        --precision fp32 \
+        --mode inference \
+        --batch-size=1 \
+        --socket-id 0 \
+        --data-location /home/<user>/Imagenet_Validation \
+        --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \
+        --debug
+
+   # ls
+   __init__.py  logs  run_tf_benchmark.py  start.sh
+   ```
+
+2. Flags that were passed to the launch script are set as environment
+   variables in the container:
+
+   ```
+   # env
+   EXTERNAL_MODELS_SOURCE_DIRECTORY=None
+   IN_GRAPH=/in_graph/resnet50_fp32_pretrained_model.pb
+   WORKSPACE=/workspace/benchmarks/common/tensorflow
+   MODEL_NAME=resnet50
+   PRECISION=fp32
+   BATCH_SIZE=1
+   MOUNT_EXTERNAL_MODELS_SOURCE=/workspace/models
+   DATASET_LOCATION=/dataset
+   BENCHMARK_ONLY=True
+   ACCURACY_ONLY=False
+   ...
+   ```
+3. Run the `start.sh` script, which will set up the `PYTHONPATH`, install
+   dependencies, and then run the model:
+   ```
+   # bash start.sh
+   ...
+   Iteration 48: 0.011513 sec
+   Iteration 49: 0.011664 sec
+   Iteration 50: 0.011802 sec
+   Average time: 0.011650 sec
+   Batch size = 1
+   Latency: 11.650 ms
+   Throughput: 85.833 images/sec
+   Ran inference with batch size 1
+   Log location outside container: /benchmark_resnet50_inference_fp32_20190403_212048.log
+   ```
+
+4. Code changes that are made locally will also be made in the container
+   (and vice versa), since the directories are mounted in the docker
+   container. Once code changes are made, you can rerun the start
+   script, this time setting the `NOINSTALL` variable, since dependencies were
already installed in the previous run. You can also change the
+   environment variable values for other settings, like the batch size.
+
+   ```
+   # NOINSTALL=True
+   # BATCH_SIZE=128
+   # bash start.sh
+   ...
+   Iteration 48: 0.631819 sec
+   Iteration 49: 0.625606 sec
+   Iteration 50: 0.618813 sec
+   Average time: 0.625285 sec
+   Batch size = 128
+   Throughput: 204.707 images/sec
+   Ran inference with batch size 128
+   Log location outside container: /benchmark_resnet50_inference_fp32_20190403_212310.log
+   ```
+
+5. Once you are done with the session, exit out of the docker container:
+   ```
+   # exit
+   ```
+
 ## Alpha feature: Running on bare metal
 
 We recommend using [Docker](https://www.docker.com) to run the

diff --git a/models_directory_structure.png b/models_directory_structure.png
new file mode 100644
index 0000000000000000000000000000000000000000..906cfdf02a8ef0ac25d9250986c5f6e73373371d
GIT binary patch
literal 11038
[... base85-encoded binary patch data omitted ...]
literal 0
HcmV?d00001
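The `NOINSTALL` shortcut documented above works because `start.sh` guards its dependency installation on that variable. A minimal sketch of the pattern, assuming only the `NOINSTALL` convention from the docs above (the install commands shown are illustrative, not the exact steps in `start.sh`, which vary per model):

```
# Sketch: skip dependency installation when NOINSTALL=True is already set
# in the environment (e.g. on a second run inside the same container).
if [ "${NOINSTALL}" != "True" ]; then
    pip install -r requirements.txt   # illustrative install step only
fi
```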
Date: Fri, 5 Apr 2019 16:07:36 -0700
Subject: [PATCH 04/62] Add note about user set env vars on
 bare metal (#268)

---
 docs/general/tensorflow/LaunchBenchmark.md | 8 ++++++++
 1 file changed, 8 insertions(+)

diff --git a/docs/general/tensorflow/LaunchBenchmark.md b/docs/general/tensorflow/LaunchBenchmark.md
index d787ff7a8..f9e79c87b 100644
--- a/docs/general/tensorflow/LaunchBenchmark.md
+++ b/docs/general/tensorflow/LaunchBenchmark.md
@@ -268,3 +268,11 @@ the following command can be used:
     --batch-size=1 \
     --socket-id 0
 ```
+
+> When running on bare metal, be aware of environment variables that you
+have set on your system. The model zoo scripts intentionally do not
+overwrite environment variables that have already been set, such as
+`OMP_NUM_THREADS`. The same is true when running in a docker container,
+but since a new docker container instance is started with each run, you
+won't have previously set environment variables, as you may have on
+bare metal.

From a7dc810a980e37ef25e72043686a7b0e52a06ef0 Mon Sep 17 00:00:00 2001
From: mjkyung
Date: Tue, 9 Apr 2019 10:51:49 -0700
Subject: [PATCH 05/62] ssd-mobilenet int8 inference data-location for
 accuracy to take full file path including the file name as an input (#271)

* Change usage for data location to use the full file path

* Update the unit test commands
---
 .../object_detection/tensorflow/ssd-mobilenet/README.md        | 6 +++---
 .../tensorflow/ssd-mobilenet/inference/int8/model_init.py      | 2 +-
 tests/unit/common/tensorflow/tf_model_args.txt                 | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md b/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md
index c6688f159..d2c96dd9a 100644
--- a/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md
+++ b/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md
@@ -123,8 +123,8 @@ python launch_benchmark.py \
     --batch-size 1
 ```
 
-Or for accuracy where the `--data-location` is the path the directory
-where your `coco_val.record` file is located:
+Or for accuracy where the `--data-location` is the path to
+the tf record file that you generated in step 2:
 ```
 python launch_benchmark.py \
     --model-name ssd-mobilenet \
     --mode inference \
     --precision int8 \
     --framework tensorflow \
     --socket-id 0 \
     --docker-image intelaipg/intel-optimized-tensorflow:latest-prs-b5d67b7-avx2-devel-mkl \
     --model-source-dir /home/<user>/tensorflow/models \
-    --data-location /home/<user>/coco/output \
+    --data-location /home/<user>/coco/output/coco_val.record \
     --in-graph /home/<user>/ssdmobilenet_int8_pretrained_model.pb \
     --accuracy-only \
     --batch-size 1
diff --git a/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/model_init.py b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/model_init.py
index 4fdfb3a06..5959abaf2 100644
--- a/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/model_init.py
+++ b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/model_init.py
@@ -64,7 +64,7 @@ def __init__(self, args, custom_args=[], platform_util=None):
             accuracy_script = os.path.join(
                 self.args.intelai_models, self.args.mode,
                 self.args.precision, "coco_int8.sh")
-            self.command_prefix = "sh {} {} {}/coco_val.record".format(
+            self.command_prefix = "sh {} {} {}".format(
                 accuracy_script, self.args.input_graph,
                 self.args.data_location)
diff --git a/tests/unit/common/tensorflow/tf_model_args.txt b/tests/unit/common/tensorflow/tf_model_args.txt
index 79d76806f..4dabf304f 100755
--- a/tests/unit/common/tensorflow/tf_model_args.txt
+++ b/tests/unit/common/tensorflow/tf_model_args.txt
@@ -32,7 +32,7 @@
run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precis run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name faster_rcnn --checkpoint /checkpoints --intelai-models . --model-source-dir . --socket-id 0 --verbose --config_file=pipeline.config,numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --num_inter_threads 1 --num_intra_threads 28 --pipeline_config_path /checkpoints/pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/log/eval run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name faster_rcnn --checkpoint /checkpoints --intelai-models . --model-source-dir . --socket-id 0 --verbose --config_file=pipeline.config --num-inter-threads 4 --num-intra-threads 16,numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --num_inter_threads 4 --num_intra_threads 16 --pipeline_config_path /checkpoints/pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/log/eval run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=-1 --accuracy-only --verbose --checkpoint=/checkpoints --data-location=/dataset --in-graph=/in_graph/frozen_inference_graph.pb,sh /workspace/intelai_models/inference/fp32/coco_accuracy.sh /in_graph/frozen_inference_graph.pb /dataset/coco_val.record /workspace/models -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --data-location=/dataset --verbose --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb --accuracy-only --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb,sh /workspace/intelai_models/inference/int8/coco_int8.sh /in_graph/ssdmobilenet_int8_pretrained_model.pb /dataset/coco_val.record +run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb --data-location=/dataset, sh /workspace/intelai_models/inference/int8/coco_int8.sh /in_graph/ssdmobilenet_int8_pretrained_model.pb /dataset run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --batch-size=1 --socket-id 0 --data-location=/dataset --verbose --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb --benchmark-only --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/run_frozen_graph_ssdmob.py -g /in_graph/ssdmobilenet_int8_pretrained_model.pb -n 5000 -d /dataset -x --num-inter-threads 2 --num-intra-threads 28 run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=fp32 --mode=inference 
--model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 --benchmark-only --verbose --in-graph=/in_graph/frozen_inference_graph.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/infer_detections.py --input_tfrecord_paths=/dataset --output_tfrecord_path=/SSD-mobilenet-out.tfrecord --inference_graph=/in_graph/frozen_inference_graph.pb --discard_image_pixels=True --num_inter_threads=2 --num_intra_threads=28 run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 --accuracy-only --verbose --in-graph=/in_graph/frozen_inference_graph.pb --benchmark-dir=/workspace/benchmarks --data-location=/dataset,sh /workspace/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/ssdmobilenet_accuracy.sh /in_graph/frozen_inference_graph.pb /dataset From e922a5383d765a31f6de163765f0492335402afd Mon Sep 17 00:00:00 2001 From: Dina Suehiro Jones Date: Wed, 10 Apr 2019 10:02:35 -0700 Subject: [PATCH 06/62] Fix links to inference and preprocessing files for ResNet50 and ResNet101 (#273) * Fix links that were moved to be shared between int8 and fp32 --- docs/general/tensorflow/LaunchBenchmark.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/docs/general/tensorflow/LaunchBenchmark.md b/docs/general/tensorflow/LaunchBenchmark.md index f9e79c87b..e52482ade 100644 --- a/docs/general/tensorflow/LaunchBenchmark.md +++ b/docs/general/tensorflow/LaunchBenchmark.md @@ -23,11 +23,11 @@ Below the general description is an [index of links](#model-scripts-for-tensorfl * Image Recognition * ResNet50: [init](/benchmarks/image_recognition/tensorflow/resnet50/inference/fp32/model_init.py) | - [inference](/models/image_recognition/tensorflow/resnet50/fp32/eval_image_classifier_inference.py) | - [preprocessing](/models/image_recognition/tensorflow/resnet50/fp32/preprocessing.py) + [inference](/models/image_recognition/tensorflow/resnet50/inference/eval_image_classifier_inference.py) | + [preprocessing](/models/image_recognition/tensorflow/resnet50/inference/preprocessing.py) * ResNet101: [init](/benchmarks/image_recognition/tensorflow/resnet101/inference/fp32/model_init.py) | - [inference](/models/image_recognition/tensorflow/resnet101/fp32/benchmark.py) | - [preprocessing](/models/image_recognition/tensorflow/resnet101/fp32/preprocessing.py) + [inference](/models/image_recognition/tensorflow/resnet101/inference/eval_image_classifier_inference.py) | + [preprocessing](/models/image_recognition/tensorflow/resnet101/inference/preprocessing.py) * InceptionV3: [init](/benchmarks/image_recognition/tensorflow/inceptionv3/inference/fp32/model_init.py) | [inference](/models/image_recognition/tensorflow/inceptionv3/fp32/eval_image_classifier_inference.py) | [preprocessing](/models/image_recognition/tensorflow/inceptionv3/fp32/preprocessing.py) From 690e26123715d39eb91a602eb878acc98fc9f513 Mon Sep 17 00:00:00 2001 From: mjkyung Date: Wed, 10 Apr 2019 15:17:55 -0700 Subject: [PATCH 07/62] fix a typo (#277) --- .../image_recognition/tensorflow/inception_resnet_v2/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md 
b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md
index 83577516f..26d2d2508 100644
--- a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md
+++ b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md
@@ -252,7 +252,7 @@ python launch_benchmark.py \
     --accuracy-only \
     --batch-size 100 \
     --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \
-    --in-graph /home/<user>/inception_resnet_v2_int8_pretrained_model.pb \
+    --in-graph /home/<user>/inception_resnet_v2_fp32_pretrained_model.pb \
     --data-location /home/<user>/datasets/ImageNet_TFRecords
 ```

From 5e8d35eeb61ef0e9f9c63a739b6e21a412cbf725 Mon Sep 17 00:00:00 2001
From: mjkyung
Date: Thu, 11 Apr 2019 11:03:51 -0700
Subject: [PATCH 08/62] Mobilenet V1 Int8 Inference (#264)

* add mobilenetv1 benchmark
* added README.md
* added unit tests and update readme
* minor fix
* code review updated
* updated readme
* minor fix
* fixed unit tests
* unit tests fix
* updated readme
* Arg update
* fix command examples
* Fix minor typos
* Add unit test commands, fix minor typos
* Fix hanging indent
* Fix hanging indent
* code review items fixed
* Add custom args
* Update unit test command and README.md
* change custom arg to use underscore not dash
* add unit test commands
* update the default values on custom args
* update the default values on custom args
* remove Inappropriate Intel licensing header
---
 benchmarks/README.md                          |   2 +-
 benchmarks/common/tensorflow/start.sh         |   9 +-
 .../inceptionv3/inference/int8/__init__.py    |   2 +-
 .../tensorflow/mobilenet_v1/README.md         | 146 ++++
 .../tensorflow/mobilenet_v1/__init__.py       |   2 +-
 .../mobilenet_v1/inference/__init__.py        |   2 +-
 .../mobilenet_v1/inference/int8/__init__.py   |  19 +
 .../mobilenet_v1/inference/int8/model_init.py |  99 +++
 .../mobilenet_v1/inference/int8/__init__.py   |  20 +
 .../mobilenet_v1/inference/int8/accuracy.py   | 130 ++++
 .../mobilenet_v1/inference/int8/benchmark.py  | 146 ++++
 .../mobilenet_v1/inference/int8/cnn_util.py   |  50 ++
 .../mobilenet_v1/inference/int8/datasets.py   | 195 ++++++
 .../inference/int8/preprocessing.py           | 637 ++++++++++++++++++
 .../unit/common/tensorflow/tf_model_args.txt  |   3 +
 15 files changed, 1456 insertions(+), 6 deletions(-)
 create mode 100644 benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/__init__.py
 create mode 100644 benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/model_init.py
 create mode 100644 models/image_recognition/tensorflow/mobilenet_v1/inference/int8/__init__.py
 create mode 100644 models/image_recognition/tensorflow/mobilenet_v1/inference/int8/accuracy.py
 create mode 100644 models/image_recognition/tensorflow/mobilenet_v1/inference/int8/benchmark.py
 create mode 100644 models/image_recognition/tensorflow/mobilenet_v1/inference/int8/cnn_util.py
 create mode 100644 models/image_recognition/tensorflow/mobilenet_v1/inference/int8/datasets.py
 create mode 100644 models/image_recognition/tensorflow/mobilenet_v1/inference/int8/preprocessing.py
 mode change 100755 => 100644 tests/unit/common/tensorflow/tf_model_args.txt

diff --git a/benchmarks/README.md b/benchmarks/README.md
index e3fda63ef..ad37797fc 100644
--- a/benchmarks/README.md
+++ b/benchmarks/README.md
@@ -22,7 +22,7 @@ dependencies to be installed:
 | Image Recognition | TensorFlow | [Inception ResNet V2](https://arxiv.org/pdf/1602.07261.pdf) | Inference | [Int8](image_recognition/tensorflow/inception_resnet_v2/README.md#int8-inference-instructions)
[FP32](image_recognition/tensorflow/inception_resnet_v2/README.md#fp32-inference-instructions) | | Image Recognition | TensorFlow | [Inception V3](https://arxiv.org/pdf/1512.00567.pdf) | Inference | [Int8](image_recognition/tensorflow/inceptionv3/README.md#int8-inference-instructions) [FP32](image_recognition/tensorflow/inceptionv3/README.md#fp32-inference-instructions) | | Image Recognition | TensorFlow | [Inception V4](https://arxiv.org/pdf/1602.07261.pdf) | Inference | [Int8](image_recognition/tensorflow/inceptionv4/README.md#int8-inference-instructions) [FP32](image_recognition/tensorflow/inceptionv4/README.md#fp32-inference-instructions) | -| Image Recognition | TensorFlow | [MobileNet V1](https://arxiv.org/pdf/1704.04861.pdf) | Inference | [FP32](image_recognition/tensorflow/mobilenet_v1/README.md#fp32-inference-instructions) | +| Image Recognition | TensorFlow | [MobileNet V1](https://arxiv.org/pdf/1704.04861.pdf) | Inference | [Int8](image_recognition/tensorflow/mobilenet_v1/README.md#int8-inference-instructions) [FP32](image_recognition/tensorflow/mobilenet_v1/README.md#fp32-inference-instructions) | | Image Recognition | TensorFlow | [ResNet 101](https://arxiv.org/pdf/1512.03385.pdf) | Inference | [Int8](image_recognition/tensorflow/resnet101/README.md#int8-inference-instructions) [FP32](image_recognition/tensorflow/resnet101/README.md#fp32-inference-instructions) | | Image Recognition | TensorFlow | [ResNet 50](https://arxiv.org/pdf/1512.03385.pdf) | Inference | [Int8](image_recognition/tensorflow/resnet50/README.md#int8-inference-instructions) [FP32](image_recognition/tensorflow/resnet50/README.md#fp32-inference-instructions) | | Image Recognition | TensorFlow | [SqueezeNet](https://arxiv.org/pdf/1602.07360.pdf) | Inference | [FP32](image_recognition/tensorflow/squeezenet/README.md#fp32-inference-instructions) | diff --git a/benchmarks/common/tensorflow/start.sh b/benchmarks/common/tensorflow/start.sh index 60500ba3e..88492f8c5 100755 --- a/benchmarks/common/tensorflow/start.sh +++ b/benchmarks/common/tensorflow/start.sh @@ -397,11 +397,11 @@ function inception_resnet_v2() { fi } -# language modeling lm-1b +# language modeling lm-1b function lm-1b() { if [ ${PRECISION} == "fp32" ]; then CMD="${CMD} $(add_steps_args)" - + PYTHONPATH=${PYTHONPATH} CMD=${CMD} run_model else echo "PRECISION=${PRECISION} is not supported for ${MODEL_NAME}" @@ -435,6 +435,11 @@ function mobilenet_v1() { if [ ${PRECISION} == "fp32" ]; then export PYTHONPATH=${PYTHONPATH}:${MOUNT_EXTERNAL_MODELS_SOURCE}:${MOUNT_EXTERNAL_MODELS_SOURCE}/research:${MOUNT_EXTERNAL_MODELS_SOURCE}/research/slim PYTHONPATH=${PYTHONPATH} CMD=${CMD} run_model + elif [ ${PRECISION} == "int8" ]; then + CMD="${CMD} $(add_arg "--input_height" ${input_height}) $(add_arg "--input_width" ${input_width}) \ + $(add_arg "--warmup_steps" ${warmup_steps}) $(add_arg "--steps" ${steps}) $(add_arg "--input_layer" ${input_layer}) \ + $(add_arg "--output_layer" ${output_layer})" + PYTHONPATH=${PYTHONPATH} CMD=${CMD} run_model else echo "PRECISION=${PRECISION} is not supported for ${MODEL_NAME}" exit 1 diff --git a/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/__init__.py b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/__init__.py index 87301fd64..139d705c0 100644 --- a/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/__init__.py +++ b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/__init__.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # -# 
Copyright (c) 2018 Intel Corporation
+# Copyright (c) 2019 Intel Corporation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md
index 5b2d8e64d..ddbd8858a 100644
--- a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md
+++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md
@@ -2,11 +2,157 @@
 This document has instructions for how to run MobileNet V1 for the
 following modes/precisions:
+* [Int8 inference](#int8-inference-instructions)
 * [FP32 inference](#fp32-inference-instructions)
 
 Benchmarking instructions and scripts for model training are coming
 later.
 
+
+## Int8 Inference Instructions
+
+1. Download the ImageNet dataset.
+
+   This step is required only for measuring accuracy; the benchmark runs
+   use synthetic data, so no dataset is needed for them.
+
+   Register and download the ImageNet dataset. Once you have the raw ImageNet dataset downloaded, we need to convert
+   it to the TFRecord format. The TensorFlow models repo provides
+   [scripts and instructions](https://github.com/tensorflow/models/tree/master/research/slim#an-automated-script-for-processing-imagenet-data)
+   to download, process and convert the ImageNet dataset to the TF records format. After converting the data, you should have a directory
+   of sharded files like the listing below; only the `validation-*` files are needed, and the `train-*` files can be discarded:
+   ```
+   $ ll /home/myuser/datasets/ImageNet_TFRecords
+   -rw-r--r--. 1 user 143009929 Jun 20 14:53 train-00000-of-01024
+   -rw-r--r--. 1 user 144699468 Jun 20 14:53 train-00001-of-01024
+   -rw-r--r--. 1 user 138428833 Jun 20 14:53 train-00002-of-01024
+   ...
+   -rw-r--r--. 1 user 143137777 Jun 20 15:08 train-01022-of-01024
+   -rw-r--r--. 1 user 143315487 Jun 20 15:08 train-01023-of-01024
+   -rw-r--r--. 1 user 52223858 Jun 20 15:08 validation-00000-of-00128
+   -rw-r--r--. 1 user 51019711 Jun 20 15:08 validation-00001-of-00128
+   -rw-r--r--. 1 user 51520046 Jun 20 15:08 validation-00002-of-00128
+   ...
+   -rw-r--r--. 1 user 52508270 Jun 20 15:09 validation-00126-of-00128
+   -rw-r--r--. 1 user 55292089 Jun 20 15:09 validation-00127-of-00128
+   ```
+2. Download the pretrained model:
+
+   ```
+   $ wget https://storage.cloud.google.com/intel-optimized-tensorflow/models/mobilenetv1_int8_pretrained_model.pb
+   ```
+
+3. Clone the [intelai/models](https://github.com/intelai/models) repo
+   and then run the benchmarking scripts for throughput, latency, or
+   accuracy. For `--data-location` in the accuracy run, use the ImageNet
+   validation data path from step 1.
+   Each benchmark run has user-configurable arguments, separated from the regular arguments by `--` at the end of the command.
+   Unless configured, these arguments will run with default values.
Below are the example commands for each benchmark case:
+
+   ```
+   $ git clone https://github.com/IntelAI/models.git
+
+   $ cd benchmarks
+   ```
+
+   For throughput (using `--benchmark-only`, `--socket-id 0` and `--batch-size 240`):
+   ```
+   python launch_benchmark.py \
+       --model-name mobilenet_v1 \
+       --precision int8 \
+       --mode inference \
+       --framework tensorflow \
+       --benchmark-only \
+       --batch-size 240 \
+       --socket-id 0 \
+       --in-graph /home/<user>/mobilenetv1_int8_pretrained_model.pb \
+       --docker-image intelaipg/intel-optimized-tensorflow:latest-prs-b5d67b7-avx2-devel-mkl \
+       -- input_height=224 input_width=224 warmup_steps=10 steps=50 \
+       input_layer="input" output_layer="MobilenetV1/Predictions/Reshape_1"
+   ```
+
+   For latency (using `--benchmark-only`, `--socket-id 0` and `--batch-size 1`):
+   ```
+   python launch_benchmark.py \
+       --model-name mobilenet_v1 \
+       --precision int8 \
+       --mode inference \
+       --framework tensorflow \
+       --benchmark-only \
+       --batch-size 1 \
+       --socket-id 0 \
+       --in-graph /home/<user>/mobilenetv1_int8_pretrained_model.pb \
+       --docker-image intelaipg/intel-optimized-tensorflow:latest-prs-b5d67b7-avx2-devel-mkl \
+       -- input_height=224 input_width=224 warmup_steps=10 steps=50 \
+       input_layer="input" output_layer="MobilenetV1/Predictions/Reshape_1"
+   ```
+
+   For accuracy (using your `--data-location`, `--accuracy-only` and
+   `--batch-size 100`):
+   ```
+   python launch_benchmark.py \
+       --model-name mobilenet_v1 \
+       --precision int8 \
+       --mode inference \
+       --framework tensorflow \
+       --accuracy-only \
+       --batch-size 100 \
+       --socket-id 0 \
+       --in-graph /home/<user>/mobilenetv1_int8_pretrained_model.pb \
+       --docker-image intelaipg/intel-optimized-tensorflow:latest-prs-b5d67b7-avx2-devel-mkl \
+       --data-location /home/<user>/imagenet_validation_dataset \
+       -- input_height=224 input_width=224 \
+       input_layer="input" output_layer="MobilenetV1/Predictions/Reshape_1"
+   ```
+
+   Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands
+   to get additional debug output or to change the default output location.
+
+4. The log file is saved to the `models/benchmarks/common/tensorflow/logs` directory,
+   or the directory specified by the `--output-dir` arg. Below are examples of
+   what the tail of your log file should look like for the different configs.
+
+   Example log tail when benchmarking for throughput:
+   ```
+   OMP: Info #250: KMP_AFFINITY: pid 682 tid 885 thread 55 bound to OS proc set 83
+   OMP: Info #250: KMP_AFFINITY: pid 682 tid 886 thread 56 bound to OS proc set 0
+   OMP: Info #250: KMP_AFFINITY: pid 682 tid 884 thread 54 bound to OS proc set 82
+   [Running warmup steps...]
+   steps = 10, 1830.24507317 images/sec
+   [Running benchmark steps...]
+   steps = 10, 1841.47811007 images/sec
+   steps = 20, 1848.84108679 images/sec
+   steps = 30, 1847.84668478 images/sec
+   steps = 40, 1849.15354305 images/sec
+   steps = 50, 1840.95611001 images/sec
+   Ran inference with batch size 240
+   Log location outside container: {--output-dir value}/benchmark_mobilenet_v1_inference_int8_20190409_222536.log
+   ```
+
+   Example log tail when benchmarking for latency:
+   ```
+   OMP: Info #250: KMP_AFFINITY: pid 681 tid 882 thread 53 bound to OS proc set 81
+   OMP: Info #250: KMP_AFFINITY: pid 681 tid 884 thread 55 bound to OS proc set 83
+   OMP: Info #250: KMP_AFFINITY: pid 681 tid 885 thread 56 bound to OS proc set 0
+   [Running warmup steps...]
+   steps = 10, 139.81945463 images/sec
+   [Running benchmark steps...]
+ steps = 10, 140.212074614 images/sec + steps = 20, 135.230332731 images/sec + steps = 30, 133.508530685 images/sec + steps = 40, 135.724816361 images/sec + steps = 50, 132.714339957 images/sec + Ran inference with batch size 1 + Log location outside container: {--output-dir value}/benchmark_mobilenet_v1_inference_int8_20190409_223122.log + ``` + + Example log tail when running for accuracy: + ``` + Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7009, 0.8933) + Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7011, 0.8933) + Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7013, 0.8933) + Ran inference with batch size 100 + Log location outside container: {--output-dir value}/benchmark_mobilenet_v1_inference_int8_20190409_223621.log + ``` + ## FP32 Inference Instructions 1. Download the ImageNet dataset and convert it to the TF records format diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/__init__.py b/benchmarks/image_recognition/tensorflow/mobilenet_v1/__init__.py index cf793ec6a..d9c4123de 100644 --- a/benchmarks/image_recognition/tensorflow/mobilenet_v1/__init__.py +++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/__init__.py @@ -1,7 +1,7 @@ # # -*- coding: utf-8 -*- # -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/__init__.py b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/__init__.py index cf793ec6a..d9c4123de 100644 --- a/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/__init__.py +++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/__init__.py @@ -1,7 +1,7 @@ # # -*- coding: utf-8 -*- # -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/__init__.py b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/__init__.py new file mode 100644 index 000000000..cf793ec6a --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/model_init.py b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/model_init.py new file mode 100644 index 000000000..0823604c0 --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/model_init.py @@ -0,0 +1,99 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import os + +from common.base_model_init import BaseModelInitializer +from common.base_model_init import set_env_var + + +class ModelInitializer(BaseModelInitializer): + """Model initializer for Mobilenet INT8 inference""" + + def __init__(self, args, custom_args=[], platform_util=None): + super(ModelInitializer, self).__init__(args, custom_args, platform_util) + self.cmd = self.get_numactl_command(self.args.socket_id) + "python " + + # Set KMP env vars, if they haven't already been set + self.set_kmp_vars() + + # Set the num_inter_threads and num_intra_threads + self.set_num_inter_intra_threads() + # Set env vars, if they haven't already been set + set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) + + self.parse_args() + + if self.args.benchmark_only: + run_script = os.path.join( + self.args.intelai_models, self.args.mode, + self.args.precision, "benchmark.py") + script_args_list = [ + "input_graph", "input_height", "input_width", "batch_size", + "input_layer", "output_layer", "num_inter_threads", + "num_intra_threads", "warmup_steps", "steps"] + if self.args.accuracy_only: + run_script = os.path.join( + self.args.intelai_models, self.args.mode, + self.args.precision, "accuracy.py") + script_args_list = [ + "input_graph", "data_location", "input_height", "input_width", + "batch_size", "input_layer", "output_layer", + "num_inter_threads", "num_intra_threads"] + + self.cmd = self.add_args_to_command(self.cmd + run_script, script_args_list) + + def parse_args(self): + if self.custom_args: + parser = argparse.ArgumentParser() + parser.add_argument( + "--input_height", default=224, + dest='input_height', type=int, help="input height") + parser.add_argument( + "--input_width", default=224, + dest='input_width', type=int, help="input width") + parser.add_argument( + '--warmup_steps', dest='warmup_steps', + help='number of warmup steps', + type=int, default=10) + parser.add_argument( + '--steps', dest='steps', + help='number of steps', + type=int, default=50) + parser.add_argument( + '--input_layer', dest='input_layer', + help='name of input layer', + type=str, default="input") + parser.add_argument( + '--output_layer', dest='output_layer', + help='name of output layer', + type=str, default="MobilenetV1/Predictions/Reshape_1") + + self.args = parser.parse_args(self.custom_args, + namespace=self.args) + + def run(self): + 
if self.cmd: + self.run_command(self.cmd) diff --git a/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/__init__.py b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/__init__.py new file mode 100644 index 000000000..159180624 --- /dev/null +++ b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/__init__.py @@ -0,0 +1,20 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + diff --git a/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/accuracy.py b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/accuracy.py new file mode 100644 index 000000000..69c3c003f --- /dev/null +++ b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/accuracy.py @@ -0,0 +1,130 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ==============================================================================
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import argparse
+import sys
+import os
+import time
+import numpy as np
+
+from google.protobuf import text_format
+import tensorflow as tf
+import preprocessing
+import datasets
+
+NUM_TEST_IMAGES = 50000
+
+def load_graph(model_file):
+  graph = tf.Graph()
+  graph_def = tf.GraphDef()
+
+  file_ext = os.path.splitext(model_file)[1]
+
+  with open(model_file, "rb") as f:
+    if file_ext == '.pbtxt':
+      text_format.Merge(f.read(), graph_def)
+    else:
+      graph_def.ParseFromString(f.read())
+  with graph.as_default():
+    tf.import_graph_def(graph_def, name='')
+
+  return graph
+
+if __name__ == "__main__":
+  parser = argparse.ArgumentParser()
+  parser.add_argument("--input_graph", default=None,
+                      help="graph/model to be executed")
+  parser.add_argument("--data_location", default=None,
+                      help="full path to the validation data")
+  parser.add_argument("--input_height", default=224,
+                      type=int, help="input height")
+  parser.add_argument("--input_width", default=224,
+                      type=int, help="input width")
+  parser.add_argument("--batch_size", default=32,
+                      type=int, help="batch size")
+  parser.add_argument("--input_layer", default="input",
+                      help="name of input layer")
+  parser.add_argument("--output_layer", default="MobilenetV1/Predictions/Reshape_1",
+                      help="name of output layer")
+  parser.add_argument(
+      '--num_inter_threads',
+      help='number threads across operators',
+      type=int, default=1)
+  parser.add_argument(
+      '--num_intra_threads',
+      help='number threads for an operator',
+      type=int, default=1)
+  args = parser.parse_args()
+
+  if args.input_graph:
+    model_file = args.input_graph
+  else:
+    sys.exit("Please provide a graph file.")
+  input_height = args.input_height
+  input_width = args.input_width
+  batch_size = args.batch_size
+  input_layer = args.input_layer
+  output_layer = args.output_layer
+  num_inter_threads = args.num_inter_threads
+  num_intra_threads = args.num_intra_threads
+  data_location = args.data_location
+  dataset = datasets.ImagenetData(data_location)
+  preprocessor = dataset.get_image_preprocessor()(
+      input_height, input_width, batch_size,
+      1,  # device count
+      tf.float32,  # data_type for input fed to the graph
+      train=False,  # doing inference
+      resize_method='bilinear')
+
+  images, labels = preprocessor.minibatch(dataset, subset='validation',
+                                          use_datasets=True, cache_data=False)
+  graph = load_graph(model_file)
+  input_tensor = graph.get_tensor_by_name(input_layer + ":0")
+  output_tensor = graph.get_tensor_by_name(output_layer + ":0")
+
+  config = tf.ConfigProto()
+  config.inter_op_parallelism_threads = num_inter_threads
+  config.intra_op_parallelism_threads = num_intra_threads
+
+  total_accuracy1, total_accuracy5 = (0.0, 0.0)
+  num_processed_images = 0
+  num_remaining_images = dataset.num_examples_per_epoch(subset='validation') \
+      - num_processed_images
+  # One session runs the preprocessing graph; a second session (sess_graph)
+  # runs the frozen inference graph.
+  with tf.Session() as sess:
+    sess_graph = tf.Session(graph=graph, config=config)
+    while num_remaining_images >= batch_size:
+      # Read and preprocess the data
+      np_images, np_labels = sess.run([images[0], labels[0]])
+      num_processed_images += batch_size
+      num_remaining_images -= batch_size
+      # Compute inference on the preprocessed data
+      predictions = sess_graph.run(output_tensor,
+                                   {input_tensor: np_images})
+      accuracy1 = tf.reduce_sum(
+          tf.cast(tf.nn.in_top_k(tf.constant(predictions),
+                                 tf.constant(np_labels), 1), tf.float32))
+
+      accuracy5 = tf.reduce_sum(
+
tf.cast(tf.nn.in_top_k(tf.constant(predictions), + tf.constant(np_labels), 5), tf.float32)) + np_accuracy1, np_accuracy5 = sess.run([accuracy1, accuracy5]) + total_accuracy1 += np_accuracy1 + total_accuracy5 += np_accuracy5 + print("Processed %d images. (Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \ + % (num_processed_images, total_accuracy1/num_processed_images, + total_accuracy5/num_processed_images)) diff --git a/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/benchmark.py b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/benchmark.py new file mode 100644 index 000000000..5ba410415 --- /dev/null +++ b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/benchmark.py @@ -0,0 +1,146 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import sys +import os +import time +import numpy as np + +from google.protobuf import text_format +import tensorflow as tf + +def load_graph(model_file): + graph = tf.Graph() + graph_def = tf.GraphDef() + + import os + file_ext = os.path.splitext(model_file)[1] + + with open(model_file, "rb") as f: + if file_ext == '.pbtxt': + text_format.Merge(f.read(), graph_def) + else: + graph_def.ParseFromString(f.read()) + with graph.as_default(): + tf.import_graph_def(graph_def, name='') + + return graph + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--input_graph", default=None, + help="graph/model to be executed") + parser.add_argument("--input_height", default=224, + type=int, help="input height") + parser.add_argument("--input_width", default=224, + type=int, help="input width") + parser.add_argument("--batch_size", default=32, + type=int, help="batch size") + parser.add_argument("--input_layer", default="input", + help="name of input layer") + parser.add_argument("--output_layer", default="MobilenetV1/Predictions/Reshape_1", + help="name of output layer") + parser.add_argument( + '--num_inter_threads', + help='number threads across operators', + type=int, default=1) + parser.add_argument( + '--num_intra_threads', + help='number threads for an operator', + type=int, default=1) + parser.add_argument("--warmup_steps", type=int, default=10, + help="number of warmup steps") + parser.add_argument("--steps", type=int, default=50, help="number of steps") + args = parser.parse_args() + + if args.input_graph: + model_file = args.input_graph + else: + sys.exit("Please provide a graph file.") + batch_size = args.batch_size + input_layer = args.input_layer + output_layer = args.output_layer + warmup_steps = args.warmup_steps + steps = args.steps + assert steps > 10, "Benchmark steps should be at least 10." 
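+  # input_height and input_width are parsed above (with 224 defaults) and are
+  # needed to build the synthetic input shape below.
+  input_height = args.input_height
+  input_width = args.input_width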
+ num_inter_threads = args.num_inter_threads + num_intra_threads = args.num_intra_threads + + input_shape = [batch_size, input_height, input_width, 3] + images = tf.truncated_normal( + input_shape, + dtype=tf.float32, + stddev=10, + name='synthetic_images') + + image_data = None + with tf.Session() as sess: + image_data = sess.run(images) + + graph = load_graph(model_file) + + input_tensor = graph.get_tensor_by_name(input_layer + ":0"); + output_tensor = graph.get_tensor_by_name(output_layer + ":0"); + + config = tf.ConfigProto() + config.inter_op_parallelism_threads = num_inter_threads + config.intra_op_parallelism_threads = num_intra_threads + + with tf.Session(graph=graph, config=config) as sess: + sys.stdout.flush() + print("[Running warmup steps...]") + for t in range(warmup_steps): + start_time = time.time() + sess.run(output_tensor, {input_tensor: image_data}) + elapsed_time = time.time() - start_time + if((t+1) % 10 == 0): + print("steps = {0}, {1} images/sec" + "".format(t+1, batch_size/elapsed_time)) + + print("[Running benchmark steps...]") + total_time = 0; + total_images = 0; + for t in range(steps): + start_time = time.time() + results = sess.run(output_tensor, {input_tensor: image_data}) + elapsed_time = time.time() - start_time + if((t+1) % 10 == 0): + print("steps = {0}, {1} images/sec" + "".format(t+1, batch_size/elapsed_time)); diff --git a/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/cnn_util.py b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/cnn_util.py new file mode 100644 index 000000000..32902d149 --- /dev/null +++ b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/cnn_util.py @@ -0,0 +1,50 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Utilities for CNN benchmarks.""" + +import tensorflow as tf + + +def tensorflow_version_tuple(): + v = tf.__version__ + major, minor, patch = v.split('.') + return (int(major), int(minor), patch) + + +def tensorflow_version(): + vt = tensorflow_version_tuple() + return vt[0] * 1000 + vt[1] + diff --git a/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/datasets.py b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/datasets.py new file mode 100644 index 000000000..8734044b5 --- /dev/null +++ b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/datasets.py @@ -0,0 +1,195 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Benchmark dataset utilities. +""" + +from abc import abstractmethod +import os + +import numpy as np +from six.moves import cPickle +from six.moves import xrange # pylint: disable=redefined-builtin +import tensorflow as tf + +from tensorflow.python.platform import gfile +import preprocessing + + +IMAGENET_NUM_TRAIN_IMAGES = 1281167 +IMAGENET_NUM_VAL_IMAGES = 50000 + + +def create_dataset(data_dir, data_name): + """Create a Dataset instance based on data_dir and data_name.""" + supported_datasets = { + 'imagenet': ImagenetData, + 'cifar10': Cifar10Data, + } + if not data_dir and not data_name: + # When using synthetic data, use synthetic imagenet images by default. + data_name = 'imagenet' + + if data_name is None: + for supported_name in supported_datasets: + if supported_name in data_dir: + data_name = supported_name + break + + if data_name is None: + raise ValueError('Could not identify name of dataset. ' + 'Please specify with --data_name option.') + + if data_name not in supported_datasets: + raise ValueError('Unknown dataset. 
Must be one of %s' % ', '.join(
+        [key for key in sorted(supported_datasets.keys())]))
+
+  return supported_datasets[data_name](data_dir)
+
+
+class Dataset(object):
+  """Abstract class for cnn benchmarks dataset."""
+
+  def __init__(self, name, height=None, width=None, depth=None, data_dir=None,
+               queue_runner_required=False, num_classes=1000):
+    self.name = name
+    self.height = height
+    self.width = width
+    self.depth = depth or 3
+
+    self.data_dir = data_dir
+    self._queue_runner_required = queue_runner_required
+    self._num_classes = num_classes
+
+  def tf_record_pattern(self, subset):
+    return os.path.join(self.data_dir, '%s-*-of-*' % subset)
+
+  def reader(self):
+    return tf.TFRecordReader()
+
+  @property
+  def num_classes(self):
+    return self._num_classes
+
+  @num_classes.setter
+  def num_classes(self, val):
+    self._num_classes = val
+
+  @abstractmethod
+  def num_examples_per_epoch(self, subset):
+    pass
+
+  def __str__(self):
+    return self.name
+
+  def get_image_preprocessor(self):
+    return None
+
+  def queue_runner_required(self):
+    return self._queue_runner_required
+
+  def use_synthetic_gpu_images(self):
+    return not self.data_dir
+
+
+class ImagenetData(Dataset):
+  """Configuration for Imagenet dataset."""
+
+  def __init__(self, data_dir=None):
+    super(ImagenetData, self).__init__('imagenet', 300, 300, data_dir=data_dir)
+
+  def num_examples_per_epoch(self, subset='train'):
+    if subset == 'train':
+      return IMAGENET_NUM_TRAIN_IMAGES
+    elif subset == 'validation':
+      return IMAGENET_NUM_VAL_IMAGES
+    else:
+      raise ValueError('Invalid data subset "%s"' % subset)
+
+  def get_image_preprocessor(self):
+    if self.use_synthetic_gpu_images():
+      return preprocessing.SyntheticImagePreprocessor
+    else:
+      return preprocessing.RecordInputImagePreprocessor
+
+
+class Cifar10Data(Dataset):
+  """Configuration for cifar 10 dataset.
+
+  It will load all the input images into memory.
+  """
+
+  def __init__(self, data_dir=None):
+    super(Cifar10Data, self).__init__('cifar10', 32, 32, data_dir=data_dir,
+                                      queue_runner_required=True,
+                                      num_classes=10)
+
+  def read_data_files(self, subset='train'):
+    """Reads from data file and returns images and labels in a numpy array."""
+    assert self.data_dir, ('Cannot call `read_data_files` when using synthetic '
+                           'data')
+    if subset == 'train':
+      filenames = [os.path.join(self.data_dir, 'data_batch_%d' % i)
+                   for i in xrange(1, 6)]
+    elif subset == 'validation':
+      filenames = [os.path.join(self.data_dir, 'test_batch')]
+    else:
+      raise ValueError('Invalid data subset "%s"' % subset)
+
+    inputs = []
+    for filename in filenames:
+      with gfile.Open(filename, 'r') as f:
+        inputs.append(cPickle.load(f))
+    # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
+    # input format.
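+    # Each unpickled batch is a dict whose 'data' entry is a 10000x3072
+    # uint8 array (rows of flattened 32x32x3 images) and whose 'labels'
+    # entry is a list of 10000 label integers.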
+ all_images = np.concatenate( + [each_input['data'] for each_input in inputs]).astype(np.float32) + all_labels = np.concatenate( + [each_input['labels'] for each_input in inputs]) + return all_images, all_labels + + def num_examples_per_epoch(self, subset='train'): + if subset == 'train': + return 50000 + elif subset == 'validation': + return 10000 + else: + raise ValueError('Invalid data subset "%s"' % subset) + + def get_image_preprocessor(self): + if self.use_synthetic_gpu_images(): + return preprocessing.SyntheticImagePreprocessor + else: + return preprocessing.Cifar10ImagePreprocessor diff --git a/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/preprocessing.py b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/preprocessing.py new file mode 100644 index 000000000..ef94d3e3d --- /dev/null +++ b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/preprocessing.py @@ -0,0 +1,637 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Image pre-processing utilities. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import math +from six.moves import xrange # pylint: disable=redefined-builtin +import tensorflow as tf + +from tensorflow.contrib.data.python.ops import batching +from tensorflow.contrib.data.python.ops import interleave_ops +from tensorflow.contrib.image.python.ops import distort_image_ops +from tensorflow.python.layers import utils +from tensorflow.python.ops import data_flow_ops +from tensorflow.python.platform import gfile +import cnn_util + +from tensorflow.python.ops import control_flow_ops + +def parse_example_proto(example_serialized): + """Parses an Example proto containing a training example of an image. + + The output of the build_image_data.py image preprocessing script is a dataset + containing serialized Example protocol buffers. 
Each Example proto contains + the following fields: + + image/height: 462 + image/width: 581 + image/colorspace: 'RGB' + image/channels: 3 + image/class/label: 615 + image/class/synset: 'n03623198' + image/class/text: 'knee pad' + image/object/bbox/xmin: 0.1 + image/object/bbox/xmax: 0.9 + image/object/bbox/ymin: 0.2 + image/object/bbox/ymax: 0.6 + image/object/bbox/label: 615 + image/format: 'JPEG' + image/filename: 'ILSVRC2012_val_00041207.JPEG' + image/encoded: + + Args: + example_serialized: scalar Tensor tf.string containing a serialized + Example protocol buffer. + + Returns: + image_buffer: Tensor tf.string containing the contents of a JPEG file. + label: Tensor tf.int32 containing the label. + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged as + [ymin, xmin, ymax, xmax]. + text: Tensor tf.string containing the human-readable label. + """ + # Dense features in Example proto. + feature_map = { + 'image/encoded': tf.FixedLenFeature([], dtype=tf.string, + default_value=''), + 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, + default_value=-1), + 'image/class/text': tf.FixedLenFeature([], dtype=tf.string, + default_value=''), + } + sparse_float32 = tf.VarLenFeature(dtype=tf.float32) + # Sparse features in Example proto. + feature_map.update( + {k: sparse_float32 for k in ['image/object/bbox/xmin', + 'image/object/bbox/ymin', + 'image/object/bbox/xmax', + 'image/object/bbox/ymax']}) + + features = tf.parse_single_example(example_serialized, feature_map) + label = tf.cast(features['image/class/label'], dtype=tf.int32) + + xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0) + ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0) + xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0) + ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0) + + # Note that we impose an ordering of (y, x) just to make life difficult. + bbox = tf.concat([ymin, xmin, ymax, xmax], 0) + + # Force the variable number of bounding boxes into the shape + # [1, num_boxes, coords]. + bbox = tf.expand_dims(bbox, 0) + bbox = tf.transpose(bbox, [0, 2, 1]) + + return features['image/encoded'], label, bbox, features['image/class/text'] + + +def get_image_resize_method(resize_method, batch_position=0): + """Get tensorflow resize method. + + If resize_method is 'round_robin', return different methods based on batch + position in a round-robin fashion. NOTE: If the batch size is not a multiple + of the number of methods, then the distribution of methods will not be + uniform. + + Args: + resize_method: (string) nearest, bilinear, bicubic, area, or round_robin. + batch_position: position of the image in a batch. NOTE: this argument can + be an integer or a tensor + Returns: + one of resize type defined in tf.image.ResizeMethod. + """ + resize_methods_map = { + 'nearest': tf.image.ResizeMethod.NEAREST_NEIGHBOR, + 'bilinear': tf.image.ResizeMethod.BILINEAR, + 'bicubic': tf.image.ResizeMethod.BICUBIC, + 'area': tf.image.ResizeMethod.AREA + } + + if resize_method != 'round_robin': + return resize_methods_map[resize_method] + + # return a resize method based on batch position in a round-robin fashion. 
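+  # The map's values are materialized into a list below so they can be
+  # indexed by position (dict views are not indexable on Python 3).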
+  resize_methods = list(resize_methods_map.values())
+
+  def lookup(index):
+    return resize_methods[index]
+
+  def resize_method_0():
+    return utils.smart_cond(batch_position % len(resize_methods) == 0,
+                            lambda: lookup(0), resize_method_1)
+
+  def resize_method_1():
+    return utils.smart_cond(batch_position % len(resize_methods) == 1,
+                            lambda: lookup(1), resize_method_2)
+
+  def resize_method_2():
+    return utils.smart_cond(batch_position % len(resize_methods) == 2,
+                            lambda: lookup(2), lambda: lookup(3))
+
+  # NOTE(jsimsa): Unfortunately, we cannot use a single recursive function here
+  # because TF would not be able to construct a finite graph.
+
+  return resize_method_0()
+
+
+def decode_jpeg(image_buffer, scope=None):  # , dtype=tf.float32):
+  """Decode a JPEG string into one 3-D float image Tensor.
+
+  Args:
+    image_buffer: scalar string Tensor.
+    scope: Optional scope for op_scope.
+  Returns:
+    3-D float Tensor with values ranging from [0, 1).
+  """
+  # with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
+  # with tf.name_scope(scope, 'decode_jpeg', [image_buffer]):
+  with tf.name_scope(scope or 'decode_jpeg'):
+    # Decode the string as an RGB JPEG.
+    # Note that the resulting image contains an unknown height and width
+    # that is set dynamically by decode_jpeg. In other words, the height
+    # and width of image is unknown at compile-time.
+    image = tf.image.decode_jpeg(image_buffer, channels=3)  # ,
+    # fancy_upscaling=False,
+    # dct_method='INTEGER_FAST')
+
+    # image = tf.Print(image, [tf.shape(image)], 'Image shape: ')
+    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
+
+    return image
+
+
+def preprocess_for_eval(image, height, width,
+                        central_fraction=0.875, scope=None):
+  """Prepare one image for evaluation.
+
+  If height and width are specified it would output an image with that size by
+  applying resize_bilinear.
+
+  If central_fraction is specified it would crop the central fraction of the
+  input image.
+
+  Args:
+    image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
+      [0, 1], otherwise it would be converted to tf.float32 assuming that the
+      range is [0, MAX], where MAX is the largest positive representable number
+      for the int(8/16/32) data type (see `tf.image.convert_image_dtype` for
+      details).
+    height: integer
+    width: integer
+    central_fraction: Optional Float, fraction of the image to crop.
+    scope: Optional scope for name_scope.
+  Returns:
+    3-D float Tensor of prepared image.
+  """
+  with tf.name_scope(scope, 'eval_image', [image, height, width]):
+    if image.dtype != tf.float32:
+      image = tf.image.convert_image_dtype(image, dtype=tf.float32)
+    # Crop the central region of the image with an area containing 87.5% of
+    # the original image.
+    if central_fraction:
+      image = tf.image.central_crop(image, central_fraction=central_fraction)
+
+    if height and width:
+      # Resize the image to the specified height and width.
+      image = tf.expand_dims(image, 0)
+      image = tf.image.resize_bilinear(image, [height, width],
+                                       align_corners=False)
+      image = tf.squeeze(image, [0])
+    image = tf.subtract(image, 0.5)
+    image = tf.multiply(image, 2.0)
+    return image
+
+
+def apply_with_random_selector(x, func, num_cases):
+  """Computes func(x, sel), with sel sampled from [0...num_cases-1].
+
+  Args:
+    x: input Tensor.
+    func: Python function to apply.
+    num_cases: Python int32, number of cases to sample sel from.
+
+  Returns:
+    The result of func(x, sel), where func receives the value of the
+    selector as a python integer, but sel is sampled dynamically.
+ """ + sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32) + # Pass the real x only to one of the func calls. + return control_flow_ops.merge([ + func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case) + for case in range(num_cases)])[0] + + +def distort_color(image, color_ordering=0, fast_mode=True, scope=None): + """Distort the color of a Tensor image. + + Each color distortion is non-commutative and thus ordering of the color ops + matters. Ideally we would randomly permute the ordering of the color ops. + Rather then adding that level of complication, we select a distinct ordering + of color ops for each preprocessing thread. + + Args: + image: 3-D Tensor containing single image in [0, 1]. + color_ordering: Python int, a type of distortion (valid values: 0-3). + fast_mode: Avoids slower ops (random_hue and random_contrast) + scope: Optional scope for name_scope. + Returns: + 3-D Tensor color-distorted image on range [0, 1] + Raises: + ValueError: if color_ordering not in [0, 3] + """ + with tf.name_scope(scope, 'distort_color', [image]): + if fast_mode: + if color_ordering == 0: + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + else: + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_brightness(image, max_delta=32. / 255.) + else: + if color_ordering == 0: + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + elif color_ordering == 1: + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + elif color_ordering == 2: + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + elif color_ordering == 3: + image = tf.image.random_hue(image, max_delta=0.2) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + image = tf.image.random_brightness(image, max_delta=32. / 255.) + else: + raise ValueError('color_ordering must be in [0, 3]') + + # The random_* ops do not necessarily clamp. + return tf.clip_by_value(image, 0.0, 1.0) + + +def distorted_bounding_box_crop(image, + bbox, + min_object_covered=0.1, + aspect_ratio_range=(0.75, 1.33), + area_range=(0.05, 1.0), + max_attempts=100, + scope=None): + """Generates cropped_image using a one of the bboxes randomly distorted. + + See `tf.image.sample_distorted_bounding_box` for more documentation. + + Args: + image: 3-D Tensor of image (it will be converted to floats in [0, 1]). + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged + as [ymin, xmin, ymax, xmax]. If num_boxes is 0 then it would use the whole + image. + min_object_covered: An optional `float`. Defaults to `0.1`. The cropped + area of the image must contain at least this fraction of any bounding box + supplied. + aspect_ratio_range: An optional list of `floats`. 
+      image must have an aspect ratio = width / height within this range.
+    area_range: An optional list of `floats`. The cropped area of the image
+      must contain a fraction of the supplied image within this range.
+    max_attempts: An optional `int`. Number of attempts at generating a cropped
+      region of the image of the specified constraints. After `max_attempts`
+      failures, return the entire image.
+    scope: Optional scope for name_scope.
+  Returns:
+    A tuple, a 3-D Tensor cropped_image and the distorted bbox
+  """
+  with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]):
+    # Each bounding box has shape [1, num_boxes, box coords] and
+    # the coordinates are ordered [ymin, xmin, ymax, xmax].
+
+    # A large fraction of image datasets contain a human-annotated bounding
+    # box delineating the region of the image containing the object of
+    # interest. We choose to create a new bounding box for the object which
+    # is a randomly distorted version of the human-annotated bounding box
+    # that obeys an allowed range of aspect ratios, sizes and overlap with
+    # the human-annotated bounding box. If no box is supplied, then we
+    # assume the bounding box is the entire image.
+    sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
+        tf.shape(image),
+        bounding_boxes=bbox,
+        min_object_covered=min_object_covered,
+        aspect_ratio_range=aspect_ratio_range,
+        area_range=area_range,
+        max_attempts=max_attempts,
+        use_image_if_no_bounding_boxes=True)
+    bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box
+
+    # Crop the image to the specified bounding box.
+    cropped_image = tf.slice(image, bbox_begin, bbox_size)
+    return cropped_image, distort_bbox
+
+
+def preprocess_for_train(image, height, width, bbox,
+                         batch_position,
+                         fast_mode=True,
+                         scope=None,
+                         add_image_summaries=True):
+  """Distort one image for training a network.
+
+  Distorting images provides a useful technique for augmenting the data
+  set during training in order to make the network invariant to aspects
+  of the image that do not affect the label.
+
+  Args:
+    image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
+      [0, 1], otherwise it would be converted to tf.float32 assuming that the
+      range is [0, MAX], where MAX is the largest positive representable number
+      for the int(8/16/32) data type (see `tf.image.convert_image_dtype` for
+      details).
+    height: integer
+    width: integer
+    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
+      where each coordinate is [0, 1) and the coordinates are arranged
+      as [ymin, xmin, ymax, xmax].
+    batch_position: position of the image in a batch, which affects how images
+      are distorted and resized. NOTE: this argument can be an integer or a
+      tensor
+    scope: Optional scope for op_scope.
+    add_image_summaries: Enable image summaries.
+  Returns:
+    3-D float Tensor of distorted image used for training with range [-1, 1].
+  """
+
+  with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]):
+    if bbox is None:
+      bbox = tf.constant([0.0, 0.0, 1.0, 1.0],
+                         dtype=tf.float32,
+                         shape=[1, 1, 4])
+    if image.dtype != tf.float32:
+      image = tf.image.convert_image_dtype(image, dtype=tf.float32)
+    # Each bounding box has shape [1, num_boxes, box coords] and
+    # the coordinates are ordered [ymin, xmin, ymax, xmax].
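+    # tf.image.draw_bounding_boxes expects a batch of images, hence the
+    # expand_dims to [1, height, width, channels] before drawing the
+    # summary images below.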
+    image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0),
+                                                  bbox)
+    if add_image_summaries:
+      tf.summary.image('image_with_bounding_boxes', image_with_box)
+
+    distorted_image, distorted_bbox = distorted_bounding_box_crop(image, bbox)
+    # Restore the shape since the dynamic slice based upon the bbox_size loses
+    # the third dimension.
+    distorted_image.set_shape([None, None, 3])
+    image_with_distorted_box = tf.image.draw_bounding_boxes(
+        tf.expand_dims(image, 0), distorted_bbox)
+    if add_image_summaries:
+      tf.summary.image('images_with_distorted_bounding_box',
+                       image_with_distorted_box)
+
+    # This resizing operation may distort the images because the aspect
+    # ratio is not respected. We select a resize method in a round robin
+    # fashion based on the thread number.
+    # Note that ResizeMethod contains 4 enumerated resizing methods.
+
+    # We select only 1 case for fast_mode bilinear.
+    num_resize_cases = 1 if fast_mode else 4
+    distorted_image = apply_with_random_selector(
+        distorted_image,
+        lambda x, method: tf.image.resize_images(x, [height, width], method),
+        num_cases=num_resize_cases)
+
+    if add_image_summaries:
+      tf.summary.image('cropped_resized_image',
+                       tf.expand_dims(distorted_image, 0))
+
+    # Randomly flip the image horizontally.
+    distorted_image = tf.image.random_flip_left_right(distorted_image)
+    # Randomly distort the colors. There are 1 or 4 ways to do it.
+    num_distort_cases = 1 if fast_mode else 4
+    distorted_image = apply_with_random_selector(
+        distorted_image,
+        lambda x, ordering: distort_color(x, ordering, fast_mode),
+        num_cases=num_distort_cases)
+
+    if add_image_summaries:
+      tf.summary.image('final_distorted_image',
+                       tf.expand_dims(distorted_image, 0))
+    distorted_image = tf.subtract(distorted_image, 0.5)
+    distorted_image = tf.multiply(distorted_image, 2.0)
+    return distorted_image
+
+
+# NOTE: this second distort_color definition shadows the color_ordering
+# variant defined above; it is the one resolved at call time.
+def distort_color(image, batch_position=0, distort_color_in_yiq=False,
+                  scope=None):
+  """Distort the color of the image.
+
+  Each color distortion is non-commutative and thus ordering of the color ops
+  matters. Ideally we would randomly permute the ordering of the color ops.
+  Rather than adding that level of complication, we select a distinct ordering
+  of color ops based on the position of the image in a batch.
+
+  Args:
+    image: float32 Tensor containing single image. Tensor values should be in
+      range [0, 1].
+    batch_position: the position of the image in a batch. NOTE: this argument
+      can be an integer or a tensor
+    distort_color_in_yiq: distort color of input images in YIQ space.
+    scope: Optional scope for op_scope.
+  Returns:
+    color-distorted image
+  """
+  with tf.name_scope(scope or 'distort_color'):
+
+    def distort_fn_0(image=image):
+      """Variant 0 of distort function."""
+      image = tf.image.random_brightness(image, max_delta=32. / 255.)
+      # if distort_color_in_yiq:
+      #   image = distort_image_ops.random_hsv_in_yiq(
+      #       image, lower_saturation=0.5, upper_saturation=1.5,
+      #       max_delta_hue=0.2 * math.pi)
+      # else:
+      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
+      image = tf.image.random_hue(image, max_delta=0.2)
+      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
+      return image
+
+    def distort_fn_1(image=image):
+      """Variant 1 of distort function."""
+      image = tf.image.random_brightness(image, max_delta=32. / 255.)
+ image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + #if distort_color_in_yiq: + # image = distort_image_ops.random_hsv_in_yiq( + # image, lower_saturation=0.5, upper_saturation=1.5, + # max_delta_hue=0.2 * math.pi) + #else: + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + return image + + image = utils.smart_cond(batch_position % 2 == 0, distort_fn_0, + distort_fn_1) + # The random_* ops do not necessarily clamp. + image = tf.clip_by_value(image, 0.0, 1.0) + return image + + +class RecordInputImagePreprocessor(object): + """Preprocessor for images with RecordInput format.""" + + def __init__(self, + height, + width, + batch_size, + num_splits, + dtype, + train, + distortions=False, + resize_method="bilinear", + shift_ratio=0, + summary_verbosity=1, + distort_color_in_yiq=False, + fuse_decode_and_crop=False): + self.height = height + self.width = width + self.batch_size = batch_size + self.num_splits = num_splits + self.dtype = dtype + self.train = train + self.resize_method = resize_method + self.shift_ratio = shift_ratio + self.distortions = distortions + self.distort_color_in_yiq = distort_color_in_yiq + self.fuse_decode_and_crop = fuse_decode_and_crop + if self.batch_size % self.num_splits != 0: + raise ValueError( + ('batch_size must be a multiple of num_splits: ' + 'batch_size %d, num_splits: %d') % + (self.batch_size, self.num_splits)) + self.batch_size_per_split = self.batch_size // self.num_splits + self.summary_verbosity = summary_verbosity + + def image_preprocess(self, image_buffer, bbox, batch_position): + """Preprocessing image_buffer as a function of its batch position.""" + if self.train: + image_buffer = tf.image.decode_jpeg( + image_buffer, channels=3, dct_method='INTEGER_FAST') + image = preprocess_for_train(image_buffer, self.height, self.width, bbox, + batch_position) + else: + image = tf.image.decode_jpeg( + image_buffer, channels=3, dct_method='INTEGER_FAST') + image = preprocess_for_eval(image, self.height, self.width) + return image + + def parse_and_preprocess(self, value, batch_position): + image_buffer, label_index, bbox, _ = parse_example_proto(value) + image = self.image_preprocess(image_buffer, bbox, batch_position) + return (label_index, image) + + def minibatch(self, dataset, subset, use_datasets, cache_data, + shift_ratio=-1): + if shift_ratio < 0: + shift_ratio = self.shift_ratio + with tf.name_scope('batch_processing'): + # Build final results per split. 
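+      # Two input paths follow: a tf.data pipeline (use_datasets=True) that
+      # interleaves TFRecord shards and batches examples per split, and a
+      # RecordInput path that parses records one at a time and deals them
+      # round-robin across the splits.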
+ images = [[] for _ in range(self.num_splits)] + labels = [[] for _ in range(self.num_splits)] + if use_datasets: + glob_pattern = dataset.tf_record_pattern(subset) + file_names = gfile.Glob(glob_pattern) + if not file_names: + raise ValueError('Found no files in --data_dir matching: {}' + .format(glob_pattern)) + ds = tf.data.TFRecordDataset.list_files(file_names) + ds = ds.apply( + interleave_ops.parallel_interleave( + tf.data.TFRecordDataset, cycle_length=10)) + if cache_data: + ds = ds.take(1).cache().repeat() + counter = tf.data.Dataset.range(self.batch_size) + counter = counter.repeat() + ds = tf.data.Dataset.zip((ds, counter)) + ds = ds.prefetch(buffer_size=self.batch_size) + ds = ds.shuffle(buffer_size=10000) + ds = ds.repeat() + ds = ds.apply( + batching.map_and_batch( + map_func=self.parse_and_preprocess, + batch_size=self.batch_size_per_split, + num_parallel_batches=self.num_splits)) + ds = ds.prefetch(buffer_size=self.num_splits) + ds_iterator = ds.make_one_shot_iterator() + for d in xrange(self.num_splits): + labels[d], images[d] = ds_iterator.get_next() + + else: + record_input = data_flow_ops.RecordInput( + file_pattern=dataset.tf_record_pattern(subset), + seed=301, + parallelism=64, + buffer_size=10000, + batch_size=self.batch_size, + shift_ratio=shift_ratio, + name='record_input') + records = record_input.get_yield_op() + records = tf.split(records, self.batch_size, 0) + records = [tf.reshape(record, []) for record in records] + for idx in xrange(self.batch_size): + value = records[idx] + (label, image) = self.parse_and_preprocess(value, idx) + split_index = idx % self.num_splits + labels[split_index].append(label) + images[split_index].append(image) + + for split_index in xrange(self.num_splits): + if not use_datasets: + images[split_index] = tf.parallel_stack(images[split_index]) + labels[split_index] = tf.concat(labels[split_index], 0) + images[split_index] = tf.cast(images[split_index], self.dtype) + depth = 3 + images[split_index] = tf.reshape( + images[split_index], + shape=[self.batch_size_per_split, self.height, self.width, depth]) + labels[split_index] = tf.reshape(labels[split_index], + [self.batch_size_per_split]) + return images, labels + diff --git a/tests/unit/common/tensorflow/tf_model_args.txt b/tests/unit/common/tensorflow/tf_model_args.txt old mode 100755 new mode 100644 index 4dabf304f..be46aea9a --- a/tests/unit/common/tensorflow/tf_model_args.txt +++ b/tests/unit/common/tensorflow/tf_model_args.txt @@ -52,6 +52,9 @@ run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model- "run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_language --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=32 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --decode_from_file=/checkpoints/newstest2015.en --reference=/checkpoints/newstest2015.de","numactl --cpunodebind=0 --membind=0 python /workspace/models/tensor2tensor/bin/t2t_decoder.py --problem=translate_ende_wmt32k --model=transformer --hparams_set=transformer_base_single_gpu --decode_hparams=beam_size=4,alpha=0.6,batch_size=32 --data_dir=/dataset --output_dir=/checkpoints --decode_from_file=/checkpoints/newstest2015.en --decode_to_file=/workspace/models/out_dir/output_infer --reference=/checkpoints/newstest2015.de --inter_op_parallelism_threads=1 
--intra_op_parallelism_threads=28" run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_lt_official --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --file=/dataset/newstest2014.en --reference=/dataset/newstest2014.de --vocab_file=/dataset/vocab.txt --in_graph=/in_graph/fp32_graphdef.pb,numactl --cpunodebind=0 --membind=0 python /workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=1 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_lt_official --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=64 --socket-id=0 --benchmark-only --verbose --file=/dataset/newstest2014.en --reference=/dataset/newstest2014.de --vocab_file=/dataset/vocab.txt --in_graph=/in_graph/fp32_graphdef.pb,numactl --cpunodebind=0 --membind=0 python /workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=64 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt +run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --data-location=/dataset --input_height=224 --input_width=224, numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/accuracy.py --input_height=224 --input_width=224 --num_intra_threads=28 --output_layer=MobilenetV1/Predictions/Reshape_1 --num_inter_threads=1 --batch_size=100 --input_graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --data_location=/dataset --input_layer=input +run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_height=224 --input_width=224 --warmup_steps=10 --steps=50, numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/benchmark.py --input_height=224 --input_width=224 --warmup_steps=10 --num_intra_threads=28 --output_layer=MobilenetV1/Predictions/Reshape_1 --num_inter_threads=1 --batch_size=1 --input_graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_layer=input --steps=50 +run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 
--batch-size=240 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_height=224 --input_width=224 --warmup_steps=10 --steps=50, numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/benchmark.py --input_height=224 --input_width=224 --warmup_steps=10 --num_intra_threads=28 --output_layer=MobilenetV1/Predictions/Reshape_1 --num_inter_threads=1 --batch_size=240 --input_graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_layer=input --steps=50 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 -l python /workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_dir /dataset --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 1 python common/tensorflow/run_tf_benchmark.py --framework=tensorflow --use-case=language_modeling --model-name=lm-1b --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models,numactl --cpunodebind=0 --membind=0 python /workspace/models/benchmark.py -b=1 -I=100 --inter=1 --intra=28 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 -l python /workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_dir /dataset --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 100 From 63c1a9c3a664cb8da45fcbe47c31362a0b3af916 Mon Sep 17 00:00:00 2001 From: Dina Suehiro Jones Date: Thu, 11 Apr 2019 11:40:51 -0700 Subject: [PATCH 09/62] Add deprecation warning for checkpoint argument (#278) --- benchmarks/common/base_benchmark_util.py | 4 +++- benchmarks/launch_benchmark.py | 3 +++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/benchmarks/common/base_benchmark_util.py b/benchmarks/common/base_benchmark_util.py index adb102c3c..3fbc4dc94 100644 --- a/benchmarks/common/base_benchmark_util.py +++ b/benchmarks/common/base_benchmark_util.py @@ -128,7 +128,9 @@ def _define_args(self): help="Specify the location of trained model checkpoint directory. " "If mode=training model/weights will be written to this " "location. If mode=inference assumes that the location points" - " to a model that has already been trained.", + " to a model that has already been trained. 
Note that using "
+                "checkpoint files for inference is being deprecated, in favor "
+                "of using frozen graphs.",
        dest="checkpoint", default=None, type=check_valid_folder)
    self._common_arg_parser.add_argument(
diff --git a/benchmarks/launch_benchmark.py b/benchmarks/launch_benchmark.py
index e3e982e70..993dc8785 100644
--- a/benchmarks/launch_benchmark.py
+++ b/benchmarks/launch_benchmark.py
@@ -86,6 +86,9 @@ def validate_args(self):
        if not self.args.benchmark_only and not self.args.accuracy_only:
            self.args.benchmark_only = True

+        if self.args.mode == "inference" and self.args.checkpoint:
+            print("Warning: The --checkpoint argument is being deprecated in favor of using frozen graphs.")
+
    def get_model_use_case(self, benchmark_scripts):
        """
        Infers the use case based on the directory structure for the specified model.

From 66256b56d05e6daf728d14f0d27eee258e021706 Mon Sep 17 00:00:00 2001
From: Dina Suehiro Jones
Date: Thu, 11 Apr 2019 14:25:55 -0700
Subject: [PATCH 10/62] Change Inception ResNet V2 FP32 to use the frozen
 graph for benchmarking (#276)

* Change inception resnet v2 FP32 to use the frozen graph for benchmarking

* fix test

* Fix file path
---
 .../tensorflow/inception_resnet_v2/README.md  |  60 ++--
 .../inference/fp32/model_init.py              |  20 +-
 .../eval_image_classifier.py                  | 277 ------------------
 .../unit/common/tensorflow/tf_model_args.txt  |   4 +-
 4 files changed, 31 insertions(+), 330 deletions(-)
 delete mode 100644 models/image_recognition/tensorflow/inception_resnet_v2/eval_image_classifier.py

diff --git a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md
index 26d2d2508..e547377ca 100644
--- a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md
+++ b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md
@@ -178,21 +178,12 @@ $ git clone git@github.com:IntelAI/models.git

This repository includes launch scripts for running benchmarks and an
optimized version of the Inception ResNet V2 model code.

-2. Download the pre-trained Inception ResNet V2 model files:
-
-For accuracy:
+2. Download the pre-trained Inception ResNet V2 model:

```
$ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/inception_resnet_v2_fp32_pretrained_model.pb
```

-For throughput and latency:
-
-```
-$ wget http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz
-$ mkdir -p checkpoints && tar -C ./checkpoints/ -zxf inception_resnet_v2_2016_08_30.tar.gz
-```
-
3. If you would like to run Inception ResNet V2 inference and test for
accuracy, you will need the full ImageNet dataset. Benchmarking for latency
and throughput does not require the ImageNet dataset.

@@ -234,7 +225,7 @@ precision, and docker image to use, along with your path to the ImageNet
TF Records that you generated in step 3.

Substitute in your own `--data-location` (from step 3, for accuracy
-only), `--checkpoint` pre-trained model checkpoint file path (from step 2).
+only), `--in-graph` frozen graph file path (from step 2).

Inception ResNet V2 can be run for accuracy, latency benchmarking, or
throughput benchmarking.
Use one of the following examples below, depending on @@ -267,9 +258,8 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --checkpoint /home//checkpoints \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ - --data-location /home//datasets/ImageNet_TFRecords + --in-graph /home//inception_resnet_v2_fp32_pretrained_model.pb \ + --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl ``` For throughput (using `--benchmark-only`, `--socket-id 0` and `--batch-size 128`): @@ -283,9 +273,8 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 128 \ --socket-id 0 \ - --checkpoint /home//checkpoints \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ - --data-location /home//datasets/ImageNet_TFRecords + --in-graph /home//inception_resnet_v2_fp32_pretrained_model.pb \ + --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl ``` Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands @@ -302,36 +291,31 @@ Example log tail when running for accuracy: Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.8036, 0.9526) Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.8036, 0.9525) Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.8037, 0.9525) -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 100 Log location outside container: {--output-dir value}/benchmark_inception_resnet_v2_inference_fp32_20190109_081637.log ``` Example log tail when benchmarking for latency: ``` -eval/Accuracy[0] -eval/Recall_5[0.01] -INFO:tensorflow:Finished evaluation at 2019-01-08-01:51:28 -self._total_images_per_sec = 69.7 -self._displayed_steps = 10 -Total images/sec = 7.0 -Latency ms/step = 143.4 -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu +Iteration 38: 0.052 sec +Iteration 39: 0.051 sec +Iteration 40: 0.051 sec +Average time: 0.050 sec +Batch size = 1 +Latency: 50.094 ms +Throughput: 19.963 images/sec Ran inference with batch size 1 -Log location outside container: {--output-dir value}/benchmark_inception_resnet_v2_inference_fp32_20190108_015057.log +Log location outside container: {--output-dir value}/benchmark_inception_resnet_v2_inference_fp32_20190410_205213.log ``` Example log tail when benchmarking for throughput: ``` -eval/Accuracy[0.00078125] -eval/Recall_5[0.00375] -INFO:tensorflow:Finished evaluation at 2019-01-08-01:59:37 -self._total_images_per_sec = 457.0 -self._displayed_steps = 10 -Total images/sec = 45.7 -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu +Iteration 38: 1.848 sec +Iteration 39: 1.799 sec +Iteration 40: 1.850 sec +Average time: 1.818 sec +Batch size = 128 +Throughput: 70.402 images/sec Ran inference with batch size 128 -Log location outside container: {--output-dir value}/benchmark_inception_resnet_v2_inference_fp32_20190108_015440.log +Log location outside container: {--output-dir value}/benchmark_inception_resnet_v2_inference_fp32_20190410_205628.log +``` diff --git a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/fp32/model_init.py b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/fp32/model_init.py index 045921acd..064bf7848 100644 --- a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/fp32/model_init.py +++ b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/fp32/model_init.py @@ -45,20 +45,14 @@ def __init__(self, args, 
custom_args=[], platform_util=None): if self.args.benchmark_only: run_script = os.path.join(self.args.intelai_models, - "eval_image_classifier.py") + "eval_image_classifier_benchmark.py") - cmd_args = " --dataset_name=imagenet" + \ - " --checkpoint_path=" + self.args.checkpoint + \ - " --eval_dir=" + self.args.checkpoint + \ - " --dataset_dir=" + self.args.data_location + \ - " --dataset_split_name=validation" + \ - " --clone_on_cpu=True" + \ - " --model_name=" + str(self.args.model_name) + \ - " --inter_op_parallelism_threads=" + \ - str(self.args.num_inter_threads) + \ - " --intra_op_parallelism_threads=" + \ - str(self.args.num_intra_threads) + \ - " --batch_size=" + str(self.args.batch_size) + cmd_args = " --input-graph=" + self.args.input_graph + \ + " --inter-op-parallelism-threads=" + \ + str(self.args.num_inter_threads) + \ + " --intra-op-parallelism-threads=" + \ + str(self.args.num_intra_threads) + \ + " --batch-size=" + str(self.args.batch_size) elif self.args.accuracy_only: run_script = os.path.join(self.args.intelai_models, "eval_image_classifier_accuracy.py") diff --git a/models/image_recognition/tensorflow/inception_resnet_v2/eval_image_classifier.py b/models/image_recognition/tensorflow/inception_resnet_v2/eval_image_classifier.py deleted file mode 100644 index 361836891..000000000 --- a/models/image_recognition/tensorflow/inception_resnet_v2/eval_image_classifier.py +++ /dev/null @@ -1,277 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -# Copyright (c) 2019 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: EPL-2.0 -# - - -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-# ============================================================================== -"""Generic evaluation script that evaluates a model using a given dataset.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import math -import tensorflow as tf -import os -import time -from datetime import datetime - -import dataset_factory -import nets_factory -import preprocessing_factory - -slim = tf.contrib.slim - -tf.app.flags.DEFINE_integer( - 'batch_size', 100, 'The number of samples in each batch.') - -tf.app.flags.DEFINE_integer( - 'max_num_batches', 1, - 'Max number of batches to evaluate by default use all.') - -tf.app.flags.DEFINE_string( - 'master', '', 'The address of the TensorFlow master to use.') - -tf.app.flags.DEFINE_string( - 'checkpoint_path', '/tmp/tfmodel/', - 'The directory where the model was written to or an absolute path to a ' - 'checkpoint file.') - -tf.app.flags.DEFINE_string( - 'eval_dir', '/tmp/tfmodel/', 'Directory where the results are saved to.') - -tf.app.flags.DEFINE_integer( - 'num_preprocessing_threads', 4, - 'The number of threads used to create the batches.') - -tf.app.flags.DEFINE_string( - 'dataset_name', 'imagenet', 'The name of the dataset to load.') - -tf.app.flags.DEFINE_string( - 'dataset_split_name', 'test', 'The name of the train/test split.') - -tf.app.flags.DEFINE_string( - 'dataset_dir', None, 'The directory where the dataset files are stored.') - -tf.app.flags.DEFINE_integer( - 'labels_offset', 0, - 'An offset for the labels in the dataset. This flag is primarily used to ' - 'evaluate the VGG and ResNet architectures which do not use a background ' - 'class for the ImageNet dataset.') - -tf.app.flags.DEFINE_string( - 'model_name', 'inception_resnet_v2', - 'The name of the architecture to evaluate.') - -tf.app.flags.DEFINE_string( - 'preprocessing_name', None, - 'The name of the preprocessing to use. If left ' - 'as `None`, then the model_name flag is used.') - -tf.app.flags.DEFINE_float( - 'moving_average_decay', None, - 'The decay to use for the moving average.' 
- 'If left as None, then moving averages are not used.') - -tf.app.flags.DEFINE_integer( - 'eval_image_size', None, 'Eval image size') - -tf.app.flags.DEFINE_integer( - 'eval_log_frequency', 10, - 'Number of eval steps to run between displaying ' - 'eval metrics.') - -tf.app.flags.DEFINE_integer( - 'inter_op_parallelism_threads', 1, 'The number of inter-thread.') - -tf.app.flags.DEFINE_integer( - 'intra_op_parallelism_threads', 28, 'The number of intra-thread.') - - -FLAGS = tf.app.flags.FLAGS - -class _LoggerHook(tf.train.SessionRunHook): - """ Logs loss and runtime.""" - - def begin(self): - self._step = -1 - self._displayed_steps = 0 - self._total_images_per_sec = 0 - - def before_run(self, run_context): - self._step += 1 - self._start_time = time.time() - - def after_run(self, run_context, run_values): - duration = time.time() - self._start_time - if (self._step + 1) % FLAGS.eval_log_frequency == 0: - images_per_sec = FLAGS.batch_size / duration - self._displayed_steps += 1 - self._total_images_per_sec += images_per_sec - - format_str = ('%s: step %d, %.1f images/sec') - print ( - format_str % (datetime.now(), (self._step+1), images_per_sec)) - - def end(self, run_context): - print( - 'self._total_images_per_sec = %.1f' % self._total_images_per_sec) - print('self._displayed_steps = %d' % self._displayed_steps) - images_per_sec = self._total_images_per_sec / self._displayed_steps - print('Total images/sec = %.1f' %(images_per_sec)) - if FLAGS.batch_size == 1: - latency = 1000 / images_per_sec - print('Latency ms/step = %.1f' % (latency)) - -def main(_): - if not FLAGS.dataset_dir: - raise ValueError( - 'You must supply the dataset directory with --dataset_dir') - - tf.logging.set_verbosity(tf.logging.INFO) - #os.environ["OMP_NUM_THREADS"] = "54" - with tf.Graph().as_default(): - tf_global_step = slim.get_or_create_global_step() - - ###################### - # Select the dataset # - ###################### - dataset = dataset_factory.get_dataset( - FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir) - - #################### - # Select the model # - #################### - network_fn = nets_factory.get_network_fn( - FLAGS.model_name, - num_classes=(dataset.num_classes - FLAGS.labels_offset), - is_training=False) - - ############################################################## - # Create a dataset provider that loads data from the dataset # - ############################################################## - provider = slim.dataset_data_provider.DatasetDataProvider( - dataset, - shuffle=False, - common_queue_capacity=2 * FLAGS.batch_size, - common_queue_min=FLAGS.batch_size) - [image, label] = provider.get(['image', 'label']) - label -= FLAGS.labels_offset - - ##################################### - # Select the preprocessing function # - ##################################### - preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name - image_preprocessing_fn = preprocessing_factory.get_preprocessing( - preprocessing_name, - is_training=False) - - eval_image_size = \ - FLAGS.eval_image_size or network_fn.default_image_size - - image = image_preprocessing_fn( - image, eval_image_size, eval_image_size) - - images, labels = tf.train.batch( - [image, label], - batch_size=FLAGS.batch_size, - num_threads=FLAGS.num_preprocessing_threads, - capacity=5 * FLAGS.batch_size) - - #################### - # Define the model # - #################### - logits, _ = network_fn(images) - - if FLAGS.moving_average_decay: - variable_averages = tf.train.ExponentialMovingAverage( - 
FLAGS.moving_average_decay, tf_global_step) - variables_to_restore = variable_averages.variables_to_restore( - slim.get_model_variables()) - variables_to_restore[tf_global_step.op.name] = tf_global_step - else: - variables_to_restore = slim.get_variables_to_restore() - - predictions = tf.argmax(logits, 1) - #labels = tf.squeeze(labels) - - # Define the metrics: - names_to_values, names_to_updates = \ - slim.metrics.aggregate_metric_map({ - 'Accuracy': slim.metrics.streaming_accuracy( - predictions, labels), - 'Recall_5': slim.metrics.streaming_recall_at_k( - logits, labels, 5), - }) - - # Print the summaries to screen. - for name, value in names_to_values.items(): - summary_name = 'eval/%s' % name - op = tf.summary.scalar(summary_name, value, collections=[]) - op = tf.Print(op, [value], summary_name) - tf.add_to_collection(tf.GraphKeys.SUMMARIES, op) - - # TODO(sguada) use num_epochs=1 - if FLAGS.max_num_batches: - num_batches = FLAGS.max_num_batches - else: - # This ensures that we make a single pass over all of the data. - num_batches = math.ceil( - dataset.num_samples / float(FLAGS.batch_size)) - - num_batches = 100 - - config = tf.ConfigProto( - inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads, - intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads) - - if tf.gfile.IsDirectory(FLAGS.checkpoint_path): - checkpoint_path = tf.train.latest_checkpoint( - FLAGS.checkpoint_path) - else: - checkpoint_path = FLAGS.checkpoint_path - - tf.logging.info('Evaluating %s' % checkpoint_path) - - slim.evaluation.evaluate_once( - master=FLAGS.master, - checkpoint_path=checkpoint_path, - logdir=FLAGS.eval_dir, - num_evals=num_batches, - eval_op=list(names_to_updates.values()), - variables_to_restore=variables_to_restore, - hooks=[_LoggerHook()], - session_config=config) - - -if __name__ == '__main__': - tf.app.run() diff --git a/tests/unit/common/tensorflow/tf_model_args.txt b/tests/unit/common/tensorflow/tf_model_args.txt index be46aea9a..376f8b602 100644 --- a/tests/unit/common/tensorflow/tf_model_args.txt +++ b/tests/unit/common/tensorflow/tf_model_args.txt @@ -13,8 +13,8 @@ run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precis run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/benchmark.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/benchmark.py --batch_size=1 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --model-source-dir=/workspace/models 
diff --git a/tests/unit/common/tensorflow/tf_model_args.txt b/tests/unit/common/tensorflow/tf_model_args.txt
index be46aea9a..376f8b602 100644
--- a/tests/unit/common/tensorflow/tf_model_args.txt
+++ b/tests/unit/common/tensorflow/tf_model_args.txt
@@ -13,8 +13,8 @@ run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precis
 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/benchmark.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28
 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/benchmark.py --batch_size=1 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28
 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --data-location=/dataset,python /workspace/intelai_models/eval_image_classifier_accuracy.py --input_graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --data_location=/dataset --input_height=299 --input_width=299 --num_inter_threads=2 --num_intra_threads=56 --output_layer=InceptionResnetV2/Logits/Predictions --batch_size=100
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier.py --dataset_name=imagenet --checkpoint_path=/checkpoints --eval_dir=/checkpoints --dataset_dir=/dataset --dataset_split_name=validation --clone_on_cpu=True --model_name=inception_resnet_v2 --inter_op_parallelism_threads=2 --intra_op_parallelism_threads=28 --batch_size=1
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier.py --dataset_name=imagenet --checkpoint_path=/checkpoints --eval_dir=/checkpoints --dataset_dir=/dataset --dataset_split_name=validation --clone_on_cpu=True --model_name=inception_resnet_v2 --inter_op_parallelism_threads=2 --intra_op_parallelism_threads=28 --batch_size=128
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --inter-op-parallelism-threads=2 --intra-op-parallelism-threads=28 --batch-size=1
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --inter-op-parallelism-threads=2 --intra-op-parallelism-threads=28 --batch-size=128
 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset,python /workspace/intelai_models/eval_image_classifier_accuracy.py --input_graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data_location=/dataset --input_height=299 --input_width=299 --num_inter_threads=2 --num_intra_threads=56 --output_layer=InceptionResnetV2/Logits/Predictions --batch_size=100
 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --inter-op-parallelism-threads=1 --intra-op-parallelism-threads=28 --batch-size=1
 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --benchmark-only --verbose --in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --inter-op-parallelism-threads=1 --intra-op-parallelism-threads=28 --batch-size=128

From c62be36960391710709a2e03811a9425d331e503 Mon Sep 17 00:00:00 2001
From: mjkyung
Date: Fri, 12 Apr 2019 09:47:46 -0700
Subject: [PATCH 11/62] Fix input_height/width arg setup for MobileNet V1 Int8
 inference (#280)

* Fix arg parse

* fix small typo
---
 benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md | 2 +-
 .../tensorflow/mobilenet_v1/inference/int8/accuracy.py  | 2 ++
 .../tensorflow/mobilenet_v1/inference/int8/benchmark.py | 2 ++
 3 files changed, 5 insertions(+), 1 deletion(-)

diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md
index ddbd8858a..e4570abbe 100644
--- a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md
+++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md
@@ -104,7 +104,7 @@ later.
    ```
 
    Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands
-   to get additional debug output or change the default output location..
+   to get additional debug output or change the default output location.
 
 4. The log file is saved to the `models/benchmarks/common/tensorflow/logs`
    directory, or the directory specified by the `--output-dir` arg. Below are examples of
diff --git a/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/accuracy.py b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/accuracy.py
index 69c3c003f..347c39989 100644
--- a/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/accuracy.py
+++ b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/accuracy.py
@@ -77,6 +77,8 @@ def load_graph(model_file):
     model_file = args.input_graph
   else:
     sys.exit("Please provide a graph file.")
+  input_height = args.input_height
+  input_width = args.input_width
  batch_size = args.batch_size
  input_layer = args.input_layer
  output_layer = args.output_layer
diff --git a/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/benchmark.py b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/benchmark.py
index 5ba410415..7cccb9f23 100644
--- a/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/benchmark.py
+++ b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/benchmark.py
@@ -94,6 +94,8 @@ def load_graph(model_file):
     model_file = args.input_graph
   else:
     sys.exit("Please provide a graph file.")
+  input_height = args.input_height
+  input_width = args.input_width
  batch_size = args.batch_size
  input_layer = args.input_layer
  output_layer = args.output_layer

From 12c35fa7128711448ffad6ef671440c499f5cb1e Mon Sep 17 00:00:00 2001
From: Dina Suehiro Jones
Date: Fri, 12 Apr 2019 13:47:03 -0700
Subject: [PATCH 12/62] Add support for custom volumes (#279)

* Add support for custom volumes

* Launch script documentation update
---
 benchmarks/common/utils/validators.py      | 20 +++++++++
 benchmarks/launch_benchmark.py             | 17 +++++++-
 docs/general/tensorflow/LaunchBenchmark.md | 51 +++++++++++++++++++++-
 tests/unit/common/utils/test_validators.py | 27 +++++++++++-
 tests/unit/test_launch_benchmark.py        | 23 +++++++++-
 5 files changed, 133 insertions(+), 5 deletions(-)

diff --git a/benchmarks/common/utils/validators.py b/benchmarks/common/utils/validators.py
index 54f280dfd..16ec18aba 100644
--- a/benchmarks/common/utils/validators.py
+++ b/benchmarks/common/utils/validators.py
@@ -88,3 +88,23 @@ def check_valid_file_or_dir(value):
         raise ArgumentTypeError("{} does not exist.".format(value))
     check_for_link(value)
     return value
+
+
+def check_volume_mount(value):
+    """
+    Verifies that the value is a valid docker volume mount, where there should be
+    at least two fields separated by a ':' (the local directory to mount and the
+    path where the directory will be mounted in the container). The third
+    optional field is for extra options, like read-only.
+    """
+    if value:
+        # Check that we have at least 2 fields and at most 3 fields
+        if not 3 > value.count(":") > 0:
+            raise ArgumentTypeError(
+                "{} is not a valid volume mount string where ':' is used to separate the fields. "
+                "See https://docs.docker.com/storage/volumes for information on formatting the volume "
+                "mount string".format(value))
+
+        # Check that the local directory specified is a valid folder and not a link
+        check_valid_folder(value.split(':')[0])
+    return value
diff --git a/benchmarks/launch_benchmark.py b/benchmarks/launch_benchmark.py
index 993dc8785..5dd7fbca1 100644
--- a/benchmarks/launch_benchmark.py
+++ b/benchmarks/launch_benchmark.py
@@ -29,7 +29,7 @@ import sys
 from argparse import ArgumentParser

 from common import base_benchmark_util
-from common.utils.validators import check_no_spaces
+from common.utils.validators import check_no_spaces, check_volume_mount


 class LaunchBenchmark(base_benchmark_util.BaseBenchmarkUtil):
@@ -67,6 +67,13 @@ def parse_args(self):
             "If no docker image is specified, then no docker container will be used.",
             dest="docker_image", default=None, type=check_no_spaces)

+        arg_parser.add_argument(
+            "--volume",
+            help="Specify a custom volume to mount in the container, which follows the same format as the "
+                 "docker --volume flag (https://docs.docker.com/storage/volumes/). "
+                 "This argument can only be used in conjunction with a --docker-image.",
+            action="append", dest="custom_volumes", type=check_volume_mount)
+
         arg_parser.add_argument(
             "--debug", help="Launches debug mode which doesn't execute "
             "start.sh when running in a docker container.", action="store_true")
@@ -86,6 +93,10 @@ def validate_args(self):
         if not self.args.benchmark_only and not self.args.accuracy_only:
             self.args.benchmark_only = True

+        if self.args.custom_volumes and not self.args.docker_image:
+            raise ValueError("Volume mounts can only be used when running in a docker container "
+                             "(a --docker-image must be specified when using --volume).")
+
         if self.args.mode == "inference" and self.args.checkpoint:
             print("Warning: The --checkpoint argument is being deprecated in favor of using frozen graphs.")
@@ -310,6 +321,10 @@ def run_docker_container(self, benchmark_scripts, intelai_models, env_var_dict):
             volume_mounts.extend([
                 "--volume", "{}:{}".format(in_graph_dir, "/in_graph")])

+        if args.custom_volumes:
+            for custom_volume in args.custom_volumes:
+                volume_mounts.extend(["--volume", custom_volume])
+
         docker_run_cmd = ["docker", "run"]

         # only use -it when debugging, otherwise we might get TTY error
diff --git a/docs/general/tensorflow/LaunchBenchmark.md b/docs/general/tensorflow/LaunchBenchmark.md
index e52482ade..8544c7320 100644
--- a/docs/general/tensorflow/LaunchBenchmark.md
+++ b/docs/general/tensorflow/LaunchBenchmark.md
@@ -103,14 +103,61 @@ optional arguments:
                         Folder to dump output into.
   -g INPUT_GRAPH, --in-graph INPUT_GRAPH
                         Full path to the input graph
+  --volume CUSTOM_VOLUMES
+                        Specify a custom volume to mount in the container,
+                        which follows the same format as the docker --volume
+                        flag (https://docs.docker.com/storage/volumes/). This
+                        argument can only be used in conjunction with a
+                        --docker-image.
   --debug               Launches debug mode which doesn't execute start.sh
 ```

+## Volume mounts
+
+When running the launch script using a docker image, volumes will
+automatically get mounted in the container for the following
+directories:
+
+| Directory | Mount location in the container |
+|-----------|---------------------------------|
+| Model zoo `/benchmarks` code | `/workspace/benchmarks` |
+| Model zoo `/models` code | `/workspace/intelai_models` |
+| `--model-source-dir` code | `/workspace/models` |
+| `--checkpoints` directory | `/checkpoints` |
+| `--in-graph` file | `/in_graph` |
+| `--dataset-location` | `/dataset` |
+
+If you would like additional directories mounted in the docker
+container, you can specify them by using the `--volume` flag using the
+same `:` separated field format [as docker](https://docs.docker.com/storage/volumes/).
+For example, the following command will mount `/home//custom_folder_1`
+in the container at `/custom_folder_1` and `/home//custom_folder_2`
+in the container at `/custom_folder_2`:
+
+```
+$ python launch_benchmark.py \
+    --in-graph /home//resnet50_fp32_pretrained_model.pb \
+    --model-name resnet50 \
+    --framework tensorflow \
+    --precision fp32 \
+    --mode inference \
+    --batch-size 1 \
+    --socket-id 0 \
+    --data-location /home//Imagenet_Validation \
+    --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \
+    --volume /home//custom_folder_1:/custom_folder_1 \
+    --volume /home//custom_folder_2:/custom_folder_2
+```
+
+Note that volume mounting only applies when running in a docker
+container. When running on [bare metal](#alpha-feature-running-on-bare-metal),
+files are accessed in their original location.
+
 ## Debugging

 The `--debug` flag in the `launch_benchmarks.py` script gives you a
-shell into the docker container with the volumes mounted for any
-dataset, pretrained model, model source code, etc that has been
+shell into the docker container with the [volumes mounted](#volume-mounts)
+for any dataset, pretrained model, model source code, etc that has been
 provided by the other flags. It does not execute the `start.sh`
 script, and is intended as a way to setup an environment for quicker
 iteration when debugging and doing development.
 From the shell, you can manually
diff --git a/tests/unit/common/utils/test_validators.py b/tests/unit/common/utils/test_validators.py
index 369ddfd76..2f590a23e 100644
--- a/tests/unit/common/utils/test_validators.py
+++ b/tests/unit/common/utils/test_validators.py
@@ -26,7 +26,7 @@
 from common.utils.validators import (check_for_link, check_no_spaces, check_positive_number,
                                      check_positive_number_or_equal_to_negative_one,
                                      check_valid_filename,
-                                     check_valid_folder, check_valid_file_or_dir)
+                                     check_valid_folder, check_valid_file_or_dir, check_volume_mount)


 @pytest.fixture()
@@ -152,3 +152,28 @@ def test_check_valid_file_or_dir(mock_link, mock_exists):
 def test_check_valid_file_or_dir_bad():
     with pytest.raises(ArgumentTypeError):
         check_valid_file_or_dir('3245jlnsdfnsfd234ofds')
+
+
+@pytest.mark.parametrize("volume_mount_str",
+                         ["foo",
+                          "foo:foo:foo:foo",
+                          "foo,foo"])
+def test_bad_volume_mount_strings(volume_mount_str):
+    with pytest.raises(ArgumentTypeError):
+        check_volume_mount(volume_mount_str)
+
+
+def test_valid_volume_mount():
+    # create temp directory
+    temp_dir = tempfile.mkdtemp()
+
+    try:
+        # test string that mounts local directory with mount path
+        volume_mount = temp_dir + ":/mount_path"
+        check_volume_mount(volume_mount)
+
+        # test string that mounts local directory with mount path and specifies read only
+        volume_mount = temp_dir + ":/mount_path:ro"
+        check_volume_mount(volume_mount)
+    finally:
+        os.rmdir(temp_dir)
diff --git a/tests/unit/test_launch_benchmark.py b/tests/unit/test_launch_benchmark.py
index 608adc464..35bb4b70f 100644
--- a/tests/unit/test_launch_benchmark.py
+++ b/tests/unit/test_launch_benchmark.py
@@ -167,7 +167,13 @@ def test_launch_benchmark_parse_unknown_args(launch_benchmark):
                              "--accuracy-only",
                              "--output-results"],
      "--output-results can only be used when running "
-     "inference with a dataset"]
+     "inference with a dataset"],
+    ['catch_error', SystemExit, ["--model-name", test_model_name,
+                                 "--framework", test_framework,
+                                 "--mode", test_mode,
+                                 "--precision", test_precision,
+                                 "--volume", "~:test"],
+     "Volume mounts can only be used when running in a docker container"],
 ], indirect=True)
 def test_launch_benchmark_parse_bad_args(launch_benchmark):
     """
@@ -216,3 +222,18 @@ def test_bare_metal(launch_benchmark, mock_popen):
     # ensure env vars are set
     assert os.environ["TEST_ENV_VAR_1"] == test_env_vars["TEST_ENV_VAR_1"]
     assert os.environ["TEST_ENV_VAR_2"] == test_env_vars["TEST_ENV_VAR_2"]
+
+
+def test_launch_benchmark_custom_volume(launch_benchmark, mock_popen):
+    """
+    Verifies the docker run command includes custom volumes
+    """
+    custom_volumes = ["~:/foo1", "~:/foo2"]
+    launch_benchmark.args.custom_volumes = custom_volumes
+    launch_benchmark.main()
+    assert mock_popen.called
+    args, _ = mock_popen.call_args
+    # convert the run command args to a string and then check for the custom volume mounts
+    docker_run_cmd = " ".join(args[0])
+    for custom_volume in custom_volumes:
+        assert "--volume {}".format(custom_volume) in docker_run_cmd
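For reference, the container-side effect of the new flag reduces to appending `--volume` pairs to the `docker run` invocation. A minimal standalone sketch of that expansion, with hypothetical paths and image name (the real logic is the `run_docker_container` hunk above):

```python
# Hypothetical inputs; launch_benchmark.py collects these from repeated --volume flags.
custom_volumes = ["/tmp/folder_1:/custom_folder_1", "/tmp/folder_2:/custom_folder_2"]

docker_run_cmd = ["docker", "run"]
for custom_volume in custom_volumes:
    # Each user-supplied mount becomes its own "--volume local:container" pair.
    docker_run_cmd.extend(["--volume", custom_volume])
docker_run_cmd.append("intelaipg/intel-optimized-tensorflow:latest-devel-mkl")

print(" ".join(docker_run_cmd))
```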
From 66e48623615eed98c73c7240a6e9c841aa44a66c Mon Sep 17 00:00:00 2001
From: Dina Suehiro Jones
Date: Fri, 12 Apr 2019 15:55:08 -0700
Subject: [PATCH 13/62] Fix launch_benchmark.py --help output so that it
 doesn't require other args (#282)

* Fix help so that it doesn't require args

* Add test for --help

* Fixing file name in comment
---
 benchmarks/common/base_benchmark_util.py | 12 ++++++++----
 tests/unit/test_launch_benchmark.py      | 21 +++++++++++++++++++++
 2 files changed, 29 insertions(+), 4 deletions(-)

diff --git a/benchmarks/common/base_benchmark_util.py b/benchmarks/common/base_benchmark_util.py
index 3fbc4dc94..89df56cde 100644
--- a/benchmarks/common/base_benchmark_util.py
+++ b/benchmarks/common/base_benchmark_util.py
@@ -23,6 +23,7 @@ from __future__ import print_function

 import os
+import sys
 from argparse import ArgumentParser

 from common import platform_util
@@ -47,6 +48,9 @@ def _define_args(self):
         """define args for the benchmark interface shared by FP32 and int8
         models"""

+        # only require the arg, if we aren't just printing out --help
+        required_arg = "--help" not in sys.argv
+
         self._common_arg_parser = ArgumentParser(
             add_help=False, description="Parse args for base benchmark "
                                         "interface")
@@ -54,7 +58,7 @@ def _define_args(self):
         self._common_arg_parser.add_argument(
             "-f", "--framework",
             help="Specify the name of the deep learning framework to use.",
-            dest="framework", default=None, required=True)
+            dest="framework", default=None, required=required_arg)

         self._common_arg_parser.add_argument(
             "-r", "--model-source-dir",
@@ -64,15 +68,15 @@ def _define_args(self):
         self._common_arg_parser.add_argument(
             "-p", "--precision",
             help="Specify the model precision to use: fp32, int8, or bfloat16",
-            required=True, choices=["fp32", "int8", "bfloat16"],
+            required=required_arg, choices=["fp32", "int8", "bfloat16"],
             dest="precision")

         self._common_arg_parser.add_argument(
             "-mo", "--mode", help="Specify the type training or inference ",
-            required=True, choices=["training", "inference"], dest="mode")
+            required=required_arg, choices=["training", "inference"], dest="mode")

         self._common_arg_parser.add_argument(
-            "-m", "--model-name", required=True,
+            "-m", "--model-name", required=required_arg,
             help="model name to run benchmarks for", dest="model_name")

         self._common_arg_parser.add_argument(
diff --git a/tests/unit/test_launch_benchmark.py b/tests/unit/test_launch_benchmark.py
index 35bb4b70f..03b96f697 100644
--- a/tests/unit/test_launch_benchmark.py
+++ b/tests/unit/test_launch_benchmark.py
@@ -224,6 +224,27 @@ def test_bare_metal(launch_benchmark, mock_popen):
     assert os.environ["TEST_ENV_VAR_2"] == test_env_vars["TEST_ENV_VAR_2"]


+def test_help(mock_platform_util, capsys):
+    """ Tests `launch_benchmark.py --help` output and ensures there is no error """
+    with mock_patch.object(sys, 'argv', ["launch_benchmark.py", "--help"]):
+        with pytest.raises(SystemExit) as e:
+            LaunchBenchmark(mock_platform_util)
+            assert e.value.code == 0
+
+    # get the stdout and check the output
+    captured = capsys.readouterr()
+    assert "usage: launch_benchmark.py [-h] " in captured.out
+
+    # check for an arg that is only in launch_benchmark.py
+    assert "--docker-image DOCKER_IMAGE" in captured.out
+
+    # check for an arg that's in base_benchmark_util.py
+    assert "-f FRAMEWORK, --framework FRAMEWORK" in captured.out
+
+    # make sure there were no errors printed
+    assert "error" not in captured.out.lower()
+
+
 def test_launch_benchmark_custom_volume(launch_benchmark, mock_popen):
     """
     Verifies the docker run command includes custom volumes
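The trick in this patch, relaxing required arguments only while argparse is printing help, is easy to lift out. A minimal standalone sketch (the parser and flag below are placeholders, not the zoo's real interface):

```python
import sys
from argparse import ArgumentParser

# Required flags stay required for real runs, but are relaxed when the user
# only asked for --help, so argparse can print usage without complaining.
required_arg = "--help" not in sys.argv

parser = ArgumentParser(description="placeholder parser")
parser.add_argument("-f", "--framework", required=required_arg)

args = parser.parse_args()
```

Running this script with `--help` prints the usage text and exits cleanly even though `-f` was never supplied; running it without any arguments still fails with the usual "required" error.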
From 76fdf16bdaac69dd45851768e8c1aecd99bf8840 Mon Sep 17 00:00:00 2001
From: Dina Suehiro Jones
Date: Fri, 12 Apr 2019 16:27:11 -0700
Subject: [PATCH 14/62] Clean up log snippets in docs (#283)

* Clean up log snippets in docs

* Indentation
---
 .../adversarial_networks/tensorflow/dcgan/README.md   |  2 --
 .../content_creation/tensorflow/draw/README.md        |  4 ----
 .../tensorflow/facenet/README.md                      |  6 ------
 .../tensorflow/inceptionv3/README.md                  | 12 ------------
 .../tensorflow/inceptionv4/README.md                  |  6 ------
 .../tensorflow/mobilenet_v1/README.md                 |  6 ------
 .../image_recognition/tensorflow/resnet101/README.md  | 10 ----------
 .../image_recognition/tensorflow/resnet50/README.md   | 10 ----------
 .../tensorflow/squeezenet/README.md                   |  4 ----
 .../image_segmentation/tensorflow/maskrcnn/README.md  |  2 --
 .../image_segmentation/tensorflow/unet/README.md      |  2 --
 .../language_translation/tensorflow/gnmt/README.md    |  4 ----
 .../tensorflow/transformer_language/README.md         |  4 ----
 .../tensorflow/faster_rcnn/README.md                  |  8 --------
 .../object_detection/tensorflow/rfcn/README.md        | 11 ++---------
 .../tensorflow/ssd-mobilenet/README.md                |  8 --------
 .../tensorflow/ssd-resnet34/README.md                 |  2 --
 .../recommendation/tensorflow/wide_deep/README.md     |  2 --
 .../text_to_speech/tensorflow/wavenet/README.md       |  2 --
 docs/image_recognition/tensorflow/Tutorial.md         |  8 +-------
 docs/recommendation/tensorflow/Tutorial.md            |  6 +-----
 21 files changed, 4 insertions(+), 115 deletions(-)

diff --git a/benchmarks/adversarial_networks/tensorflow/dcgan/README.md b/benchmarks/adversarial_networks/tensorflow/dcgan/README.md
index 7852bcae3..d552ac46d 100644
--- a/benchmarks/adversarial_networks/tensorflow/dcgan/README.md
+++ b/benchmarks/adversarial_networks/tensorflow/dcgan/README.md
@@ -72,8 +72,6 @@ Batch size: 100
 Batches number: 500
 Time spent per BATCH: 35.8268 ms
 Total samples/sec: 2791.2030 samples/s
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 100
 Log location outside container: {--output-dir value}/benchmark_dcgan_inference_fp32_20190117_220342.log
 ```
\ No newline at end of file
diff --git a/benchmarks/content_creation/tensorflow/draw/README.md b/benchmarks/content_creation/tensorflow/draw/README.md
index c56c08712..159f8de7b 100644
--- a/benchmarks/content_creation/tensorflow/draw/README.md
+++ b/benchmarks/content_creation/tensorflow/draw/README.md
@@ -82,8 +82,6 @@ modes/precisions:
    Time spent per BATCH: 6.6667 ms
    Total samples/sec: 149.9996 samples/s
    Outputs saved in file: /home//mnist/draw_data.npy
-   lscpu_path_cmd = command -v lscpu
-   lscpu located here: b'/usr/bin/lscpu'
    Ran inference with batch size 1
    Log location outside container: {--output-dir value}/benchmark_draw_inference_fp32_20190123_012947.log
    ```
@@ -97,8 +95,6 @@ modes/precisions:
    Time spent per BATCH: 28.1952 ms
    Total samples/sec: 3546.7006 samples/s
    Outputs saved in file: /home//mnist/draw_data.npy
-   lscpu_path_cmd = command -v lscpu
-   lscpu located here: b'/usr/bin/lscpu'
    Ran inference with batch size 100
    Log location outside container: {--output-dir value}/benchmark_draw_inference_fp32_20190123_013432.log
    ```
\ No newline at end of file
diff --git a/benchmarks/face_detection_and_alignment/tensorflow/facenet/README.md b/benchmarks/face_detection_and_alignment/tensorflow/facenet/README.md
index 7d30e25f2..0e7e0d307 100644
--- a/benchmarks/face_detection_and_alignment/tensorflow/facenet/README.md
+++ b/benchmarks/face_detection_and_alignment/tensorflow/facenet/README.md
@@ -79,8 +79,6 @@ Total samples/sec: 33.1608 samples/s
 2019-03-28 21:00:02.725722: W tensorflow/core/kernels/queue_base.cc:277] _1_batch_join/fifo_queue: Skipping cancelled enqueue attempt with queue not closed
 2019-03-28 21:00:02.725746: W tensorflow/core/kernels/queue_base.cc:277] _1_batch_join/fifo_queue: Skipping cancelled enqueue attempt with queue not closed
 2019-03-28 21:00:02.725776: W tensorflow/core/kernels/queue_base.cc:277] _1_batch_join/fifo_queue: Skipping cancelled enqueue attempt with queue not closed
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 1
 Log location outside container: {--output-dir value}/benchmark_facenet_inference_fp32_20190328_205911.log
 ```
@@ -112,8 +110,6 @@ Accuracy: 0.98833+-0.00489
 Validation rate: 0.96200+-0.01968 @ FAR=0.00100
 Area Under Curve (AUC): 0.999
 Equal Error Rate (EER): 0.011
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 100
 Log location outside container: {--output-dir value}/benchmark_facenet_inference_fp32_20190329_002623.log
 ```
@@ -146,8 +142,6 @@ Accuracy: 0.98833+-0.00489
 Validation rate: 0.96200+-0.01968 @ FAR=0.00100
 Area Under Curve (AUC): 0.999
 Equal Error Rate (EER): 0.011
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 100
 Log location outside container: {--output-dir value}/benchmark_facenet_inference_fp32_20190328_214145.log
 ```
diff --git a/benchmarks/image_recognition/tensorflow/inceptionv3/README.md b/benchmarks/image_recognition/tensorflow/inceptionv3/README.md
index 512d4fd1e..7eb091edc 100644
--- a/benchmarks/image_recognition/tensorflow/inceptionv3/README.md
+++ b/benchmarks/image_recognition/tensorflow/inceptionv3/README.md
@@ -189,8 +189,6 @@ Example log tail when running for accuracy:
 ```
 Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7666, 0.9333)
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Executing command: python /workspace/intelai_models/int8/accuracy.py --input_height=299 --input_width=299 --num_intra_threads=56 --num_inter_threads=2 --batch_size=100 --input_graph=/in_graph/inceptionv3_int8_pretrained_model.pb --data_location=/dataset
 Ran inference with batch size 100
 Log location outside container: {--output-dir value}/benchmark_inceptionv3_inference_int8_20190104_013246.log
@@ -203,8 +201,6 @@ steps = 470, 53.7256017113 images/sec
 steps = 480, 52.5430812016 images/sec
 steps = 490, 52.9076139058 images/sec
 steps = 500, 53.5021876395 images/sec
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 1
 Log location outside container: {--output-dir value}/benchmark_inceptionv3_inference_int8_20190223_194002.log
 ```
@@ -216,8 +212,6 @@ steps = 470, 370.435654276 images/sec
 steps = 480, 369.710160177 images/sec
 steps = 490, 369.083388904 images/sec
 steps = 500, 370.287978128 images/sec
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 128
 Log location outside container: {--output-dir value}/benchmark_inceptionv3_inference_int8_20190223_194314.log
 ```
@@ -283,8 +277,6 @@ Average time: 0.014 sec
 Batch size = 1
 Latency: 14.442 ms
 Throughput: 69.243 images/sec
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 1
 Log location outside container: {--output-dir value}/benchmark_inceptionv3_inference_fp32_20190104_025220.log
 ```
@@ -315,8 +307,6 @@ Iteration 40: 0.757 sec
 Average time: 0.760 sec
 Batch size = 128
 Throughput: 168.431 images/sec
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 128
 Log location outside container: {--output-dir value}/benchmark_inceptionv3_inference_fp32_20190104_024842.log
 ```
@@ -341,8 +331,6 @@ Example log tail when benchmarking for accuracy:
 Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7673, 0.9341)
 Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7674, 0.9341)
 Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7675, 0.9342)
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 100
 Log location outside container: {--output-dir value}/benchmark_inceptionv3_inference_fp32_20190104_023816.log
 ```
diff --git a/benchmarks/image_recognition/tensorflow/inceptionv4/README.md b/benchmarks/image_recognition/tensorflow/inceptionv4/README.md
index 14f1bed98..1f472509b 100644
--- a/benchmarks/image_recognition/tensorflow/inceptionv4/README.md
+++ b/benchmarks/image_recognition/tensorflow/inceptionv4/README.md
@@ -106,8 +106,6 @@ other precisions are coming later.
    Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7983, 0.9504)
    Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7984, 0.9504)
    Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7984, 0.9504)
-   lscpu_path_cmd = command -v lscpu
-   lscpu located here: /usr/bin/lscpu
    Ran inference with batch size 100
    Log location outside container: /benchmark_inceptionv4_inference_int8_20190306_221608.log
    ```
@@ -122,8 +120,6 @@ other precisions are coming later.
    steps = 30, 184.620504126 images/sec
    steps = 40, 183.900309054 images/sec
    steps = 50, 184.110358713 images/sec
-   lscpu_path_cmd = command -v lscpu
-   lscpu located here: /usr/bin/lscpu
    Ran inference with batch size 240
    Log location outside container: /benchmark_inceptionv4_inference_int8_20190306_215858.log
    ```
@@ -139,8 +135,6 @@ other precisions are coming later.
    steps = 40, 31.9682931663 images/sec
    steps = 50, 31.6665962009 images/sec
    Latency: 31.936 ms
-   lscpu_path_cmd = command -v lscpu
-   lscpu located here: /usr/bin/lscpu
    Ran inference with batch size 1
    Log location outside container: /benchmark_inceptionv4_inference_int8_20190306_215702.log
    ```
diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md
index e4570abbe..d4e10910b 100644
--- a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md
+++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md
@@ -261,8 +261,6 @@ later.
    self._displayed_steps = 10
    Total images/sec = 81.0
    Latency ms/step = 12.4
-   lscpu_path_cmd = command -v lscpu
-   lscpu located here: /usr/bin/lscpu
    Ran inference with batch size 1
    Log location outside container: {--output-dir value}/benchmark_mobilenet_v1_inference_fp32_20190104_200218.log
    ```
@@ -278,8 +276,6 @@ later.
    self._total_images_per_sec = 1810.2
    self._displayed_steps = 10
    Total images/sec = 181.0
-   lscpu_path_cmd = command -v lscpu
-   lscpu located here: /usr/bin/lscpu
    Ran inference with batch size 100
    Log location outside container: {--output-dir value}/benchmark_mobilenet_v1_inference_fp32_20190104_200512.log
    ```
@@ -288,8 +284,6 @@ later.
    Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7104, 0.8999)
    Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7103, 0.8999)
    Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7102, 0.8999)
-   lscpu_path_cmd = command -v lscpu
-   lscpu located here: /usr/bin/lscpu
    Ran inference with batch size 100
    Log location outside container: {--output-dir value}/benchmark_mobilenet_v1_inference_fp32_20190110_211648.log
    ```
\ No newline at end of file
diff --git a/benchmarks/image_recognition/tensorflow/resnet101/README.md b/benchmarks/image_recognition/tensorflow/resnet101/README.md
index 4e25a41f1..7343f472a 100644
--- a/benchmarks/image_recognition/tensorflow/resnet101/README.md
+++ b/benchmarks/image_recognition/tensorflow/resnet101/README.md
@@ -176,8 +176,6 @@ Example log tail when running for accuracy:
 Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7690, 0.9304)
 Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7691, 0.9305)
 Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7691, 0.9305)
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 100
 Log location outside container: {--output-dir value}/benchmark_resnet101_inference_int8_20190104_205838.log
 ```
@@ -189,8 +187,6 @@ steps = 470, 48.3195530058 images/sec
 steps = 480, 47.2792312364 images/sec
 steps = 490, 46.3175214744 images/sec
 steps = 500, 45.4044245083 images/sec
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 1
 Log location outside container: {--output-dir value}/benchmark_resnet101_inference_int8_20190223_191406.log
 ```
@@ -202,8 +198,6 @@ steps = 470, 328.906266308 images/sec
 steps = 480, 322.0451309 images/sec
 steps = 490, 315.455582114 images/sec
 steps = 500, 309.142758646 images/sec
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 128
 Log location outside container: {--output-dir value}/benchmark_resnet101_inference_int8_20190223_192438.log
 ```
@@ -272,8 +266,6 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/resnet10
    steps = 80, 169.258177508 images/sec
    steps = 90, 150.457869027 images/sec
    steps = 100, 135.433960175 images/sec
-   lscpu_path_cmd = command -v lscpu
-   lscpu located here: /usr/bin/lscpu
    Ran inference with batch size 128
    Log location outside container: {--output-dir value}/benchmark_resnet101_inference_fp32_20190104_204615.log
    ```
@@ -304,8 +296,6 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/resnet10
    Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7639, 0.9289)
    Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7641, 0.9289)
    Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7640, 0.9289)
-   lscpu_path_cmd = command -v lscpu
-   lscpu located here: /usr/bin/lscpu
    Ran inference with batch size 100
    Log location outside container: {--output-dir value}/benchmark_resnet101_inference_fp32_20190104_201506.log
    ```
diff --git a/benchmarks/image_recognition/tensorflow/resnet50/README.md b/benchmarks/image_recognition/tensorflow/resnet50/README.md
index 8389c041f..0b73a4e56 100644
--- a/benchmarks/image_recognition/tensorflow/resnet50/README.md
+++ b/benchmarks/image_recognition/tensorflow/resnet50/README.md
@@ -71,8 +71,6 @@ Processed 49700 images. (Top1 accuracy, Top5 accuracy) = (0.7361, 0.9155)
 Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7360, 0.9154)
 Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7361, 0.9155)
 Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7360, 0.9154)
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 100
 Log location outside container: {--output-dir value}/benchmark_resnet50_inference_int8_20190104_212224.log
 ```
@@ -108,8 +106,6 @@ steps = 470, 460.113806562 images/sec
 steps = 480, 460.073982602 images/sec
 steps = 490, 463.289831148 images/sec
 steps = 500, 463.521427264 images/sec
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 128
 Log location outside container: {--output-dir value}/benchmark_resnet50_inference_int8_20190223_180546.log
 ```
@@ -176,8 +172,6 @@ Average time: 0.011 sec
 Batch size = 1
 Latency: 10.924 ms
 Throughput: 91.541 images/sec
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 1
 Log location outside container: {--output-dir value}/benchmark_resnet50_inference_fp32_20190104_215326.log
 ```
@@ -213,8 +207,6 @@ Iteration 40: 0.652 sec
 Average time: 0.653 sec
 Batch size = 128
 Throughput: 196.065 images/sec
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 128
 Log location outside container: {--output-dir value}/benchmark_resnet50_inference_fp32_20190104_215655.log
 ```
@@ -243,8 +235,6 @@ something like this:
 ```
 ...
 Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7430, 0.9188)
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 100
 Log location outside container: {--output-dir value}/benchmark_resnet50_inference_fp32_20190104_213452.log
 ```
diff --git a/benchmarks/image_recognition/tensorflow/squeezenet/README.md b/benchmarks/image_recognition/tensorflow/squeezenet/README.md
index 21bcf3fb0..355efca72 100644
--- a/benchmarks/image_recognition/tensorflow/squeezenet/README.md
+++ b/benchmarks/image_recognition/tensorflow/squeezenet/README.md
@@ -114,8 +114,6 @@ SqueezeNet Inference Summary:
     throughput[med] = 837.1 image/sec
     latency[median] = 1.195 ms
 
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 64
 Log location outside container: {--output-dir value}/benchmark_squeezenet_inference_fp32_20190104_220051.log
 ```
@@ -129,8 +127,6 @@ SqueezeNet Inference Summary:
     throughput[med] = 115.3 image/sec
     latency[median] = 8.67 ms
 
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 1
 Log location outside container: {--output-dir value}/benchmark_squeezenet_inference_fp32_20190104_220712.log
 ```
diff --git a/benchmarks/image_segmentation/tensorflow/maskrcnn/README.md b/benchmarks/image_segmentation/tensorflow/maskrcnn/README.md
index 3c377ea30..c862032f7 100644
--- a/benchmarks/image_segmentation/tensorflow/maskrcnn/README.md
+++ b/benchmarks/image_segmentation/tensorflow/maskrcnn/README.md
@@ -91,7 +91,5 @@ Batch size: 1
 Time spent per BATCH: 609.6943 ms
 Total samples/sec: 1.6402 samples/s
 Total time: 35.407243490219116
-lscpu_path_cmd = command -v lscpu
-lscpu located here: b'/usr/bin/lscpu'
 Log location outside container: {--output-dir value}/benchmark_maskrcnn_inference_fp32_20190111_205935.log
 ```
\ No newline at end of file
diff --git a/benchmarks/image_segmentation/tensorflow/unet/README.md b/benchmarks/image_segmentation/tensorflow/unet/README.md
index 7660771f9..6f6671e66 100644
--- a/benchmarks/image_segmentation/tensorflow/unet/README.md
+++ b/benchmarks/image_segmentation/tensorflow/unet/README.md
@@ -73,8 +73,6 @@ modes/precisions:
    Time spent per BATCH: 1.1043 ms
    Total samples/sec: 905.5344 samples/s
-   lscpu_path_cmd = command -v lscpu
-   lscpu located here: /usr/bin/lscpu
    Ran inference with batch size 1
    Log location outside container: {--output-dir value}/benchmark_unet_inference_fp32_20190201_205601.log
    ```
\ No newline at end of file
diff --git a/benchmarks/language_translation/tensorflow/gnmt/README.md b/benchmarks/language_translation/tensorflow/gnmt/README.md
index 285df8ee5..f52bcdfc6 100644
--- a/benchmarks/language_translation/tensorflow/gnmt/README.md
+++ b/benchmarks/language_translation/tensorflow/gnmt/README.md
@@ -118,8 +118,6 @@ Example log tail when benchmarking for latency:
 done, num sentences 2169, num translations per input 1, time 1108s, Wed Feb 6 01:36:13 2019.
 The latency of the model is 511.2466 ms/sentences
 bleu: 29.2
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 1
 Log location outside container: {--output-dir value}/benchmark_gnmt_inference_fp32_20190206_011740.log
 ```
@@ -134,8 +132,6 @@ Example log tail when benchmarking for throughput:
 done, num sentences 2169, num translations per input 1, time 302s, Wed Feb 6 01:48:30 2019.
 The throughput of the model is 7.1780 sentences/s
 bleu: 29.2
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 32
 Log location outside container: {--output-dir value}/benchmark_gnmt_inference_fp32_20190206_014324.log
 ```
diff --git a/benchmarks/language_translation/tensorflow/transformer_language/README.md b/benchmarks/language_translation/tensorflow/transformer_language/README.md
index d548bb0aa..abc931d51 100644
--- a/benchmarks/language_translation/tensorflow/transformer_language/README.md
+++ b/benchmarks/language_translation/tensorflow/transformer_language/README.md
@@ -125,8 +125,6 @@ INFO:tensorflow:Writing decodes into /workspace/models/out_dir/output_infer
 Inference time 6094.9205, Latency = 2810.0141 ms/setences
 BLEU_uncased = 22.63
 BLEU_cased = 22.20
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 1
 Log location outside container: {--output-dir value}/benchmark_transformer_language_inference_fp32_20190210_050451.log
 ```
@@ -141,8 +139,6 @@ INFO:tensorflow:Writing decodes into /workspace/models/out_dir/output_infer
 Inference time 1174.0522, Throughput = 1.8474 sentences/second
 BLEU_uncased = 22.63
 BLEU_cased = 22.20
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 32
 Log location outside container: {--output-dir value}/benchmark_transformer_language_inference_fp32_20190210_072635.log
 ```
\ No newline at end of file
diff --git a/benchmarks/object_detection/tensorflow/faster_rcnn/README.md b/benchmarks/object_detection/tensorflow/faster_rcnn/README.md
index 89dc463f9..162acdf07 100644
--- a/benchmarks/object_detection/tensorflow/faster_rcnn/README.md
+++ b/benchmarks/object_detection/tensorflow/faster_rcnn/README.md
@@ -179,8 +179,6 @@ and latency:
 ```
 Time spent : 167.353 seconds.
 Time spent per BATCH: 0.167 seconds.
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Received these standard args: Namespace(accuracy_only=False, batch_size=1, benchmark_only=False, checkpoint='/checkpoints', data_location='/dataset', framework='tensorflow', input_graph=None, intelai_models='/workspace/intelai_models', mode='inference', model_args=[], model_name='faster_rcnn', model_source_dir='/workspace/models', num_cores=-1, num_inter_threads=2, num_intra_threads=56, precision='fp32', socket_id=0, use_case='object_detection', verbose=True)
 Received these custom args: ['--config_file=pipeline.config']
 Run model here.
@@ -208,8 +206,6 @@ DONE (t=1.35s).
 Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.383
 Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = -1.000
 Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = -1.000
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 1
 Log location outside container: {--output-dir value}/benchmark_faster_rcnn_inference_fp32_20190114_205714.log
 ```
@@ -295,8 +291,6 @@ Step 4970: 0.070191860199 seconds
 Step 4980: 0.0755469799042 seconds
 Step 4990: 0.0742928981781 seconds
 Avg. Duration per Step:0.0760930150986
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size -1
 Log location outside container: {--output-dir value}/benchmark_faster_rcnn_inference_int8_20190117_232539.log
 ```
@@ -317,8 +311,6 @@ DONE (t=1.34s).
 Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.375
 Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = -1.000
 Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = -1.000
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size -1
 Log location outside container: {--output-dir value}/benchmark_faster_rcnn_inference_int8_20190117_231937.log
 ```
diff --git a/benchmarks/object_detection/tensorflow/rfcn/README.md b/benchmarks/object_detection/tensorflow/rfcn/README.md
index 39e6ac3be..3b3a64b9d 100644
--- a/benchmarks/object_detection/tensorflow/rfcn/README.md
+++ b/benchmarks/object_detection/tensorflow/rfcn/README.md
@@ -182,8 +182,6 @@ Step 470: 0.202737092972 seconds
 Step 480: 0.117042064667 seconds
 Step 490: 0.103501081467 seconds
 Avg. Duration per Step:0.169812122345
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size -1
 Log location outside container: {--output-dir}/benchmark_rfcn_inference_int8_20190227_191959.log
 ```
@@ -205,8 +203,6 @@ DONE (t=1.03s).
 Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.150
 Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = -1.000
 Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = -1.000
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size -1
 Log location outside container: {--output-dir}/benchmark_rfcn_inference_int8_20190227_194752.log
 ```
@@ -364,8 +360,6 @@ and latency:
 ```
 Average time per step: 0.262 sec
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Received these standard args: Namespace(accuracy_only=False, batch_size=1, benchmark_only=False, checkpoint='/checkpoints', data_location='/dataset', framework='tensorflow', input_graph=None, intelai_models='/workspace/intelai_models', mode='inference', model_args=[], model_name='rfcn', model_source_dir='/workspace/models', num_cores=-1, num_inter_threads=2, num_intra_threads=56, precision='fp32, socket_id=0, use_case='object_detection', verbose=True)
 Received these custom args: ['--config_file=rfcn_pipeline.config']
 Run model here.
@@ -392,8 +386,7 @@ DONE (t=1.19s).
 Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.400
 Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.400
 Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = -1.000
- Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = -1.000lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
-Ran inference with batch size 1
+ Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = -1.000
+ Ran inference with batch size 1
 Log location outside container: {--output-dir value}/benchmark_rfcn_inference_fp32_20181221_211905.log
 ```
diff --git a/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md b/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md
index d2c96dd9a..cee1a3848 100644
--- a/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md
+++ b/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md
@@ -159,8 +159,6 @@ Step 4980: 0.0429329872131 seconds
 Step 4990: 0.0358219146729 seconds
 Avg. Duration per Step:0.0364457404137
 Avg. Duration per Step:0.0365921088491
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 1
 Log location outside container: /benchmark_ssd-mobilenet_inference_int8_20181203_232524.log
 ```
@@ -185,8 +183,6 @@ DONE (t=1.10s).
 Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.212
 Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = -1.000
 Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = -1.000
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size 1
 Log location outside container: /benchmark_ssd-mobilenet_inference_int8_20181204_185432.log
 ```
@@ -382,8 +378,6 @@ Below is a sample log file tail when running benchmarking:
 INFO:tensorflow:Processed 5001 images... moving average latency 37 ms
 INFO:tensorflow:Finished processing records
 Latency: min = 33.8, max = 6635.9, mean= 38.4, median = 37.2
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size -1
 Log location outside container: {--output-dir value}/benchmark_ssd-mobilenet_inference_fp32_20190130_225108.log
 ```
@@ -403,8 +397,6 @@ Below is a sample log file tail when testing accuracy:
 Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.264
 Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = -1.000
 Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = -1.000
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size -1
 Log location outside container: {--output-dir value}/benchmark_ssd-mobilenet_inference_fp32_20190123_225145.log
 ```
diff --git a/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md b/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md
index 7cf4b2339..0a6915bac 100644
--- a/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md
+++ b/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md
@@ -180,8 +180,6 @@ Below is a sample log file tail when testing accuracy:
 Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.334
 Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.494
 Current AP: 0.21082
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size -1
 Log location outside container: {--output-dir value}/benchmark_ssd-mobilenet_inference_fp32_20190123_225145.log
 ```
diff --git a/benchmarks/recommendation/tensorflow/wide_deep/README.md b/benchmarks/recommendation/tensorflow/wide_deep/README.md
index 2f5229907..95d23d68c 100644
--- a/benchmarks/recommendation/tensorflow/wide_deep/README.md
+++ b/benchmarks/recommendation/tensorflow/wide_deep/README.md
@@ -94,8 +94,6 @@ use in the next step.
    recall: 0.0
    End-to-End duration is %s 36.5971579552
    Latency is: %s 0.00224784460139
-   lscpu_path_cmd = command -v lscpu
-   lscpu located here: /usr/bin/lscpu
    current path: /workspace/benchmarks
    search path: /workspace/benchmarks/*/tensorflow/wide_deep/inference/fp32/model_init.py
    Using model init: /workspace/benchmarks/classification/tensorflow/wide_deep/inference/fp32/model_init.py
diff --git a/benchmarks/text_to_speech/tensorflow/wavenet/README.md b/benchmarks/text_to_speech/tensorflow/wavenet/README.md
index 49c1a47fb..340736a6e 100644
--- a/benchmarks/text_to_speech/tensorflow/wavenet/README.md
+++ b/benchmarks/text_to_speech/tensorflow/wavenet/README.md
@@ -99,8 +99,6 @@ Sample: 8500
 Average Throughput of whole run: Samples / sec: 289.351783
 Average Latency of whole run: msec / sample: 3.456001
 Finished generating. The result can be viewed in TensorBoard.
-lscpu_path_cmd = command -v lscpu
-lscpu located here: /usr/bin/lscpu
 Ran inference with batch size -1
 Log location outside container: {--output-dir value}/benchmark_wavenet_inference_fp32_20190105_015022.log
 ```
diff --git a/docs/image_recognition/tensorflow/Tutorial.md b/docs/image_recognition/tensorflow/Tutorial.md
index 5088a2ac2..f31a49ff6 100644
--- a/docs/image_recognition/tensorflow/Tutorial.md
+++ b/docs/image_recognition/tensorflow/Tutorial.md
@@ -359,8 +359,6 @@ Note: As per the recommended settings `socket-id` is set to 0 for InceptionV3. T
    steps = 30, ... images/sec
    steps = 40, ... images/sec
    steps = 50, ... images/sec
-   lscpu_path_cmd = command -v lscpu
-   lscpu located here: /usr/bin/lscpu
    Ran inference with batch size 128
    Log location outside container: {--output-dir value}/benchmark_resnet50
@@ -384,9 +382,7 @@ you can implement the same strategy on different use cases demoed in Step 3.
        --debug
 
    Example Output
-
-       lscpu_path_cmd = command -v lscpu
-       lscpu located here: b'/usr/bin/lscpu'
+       root@a78677f56d69:/workspace/benchmarks/common/tensorflow#
 
 To rerun the bechmarking script, execute the ```start.sh``` bash script from your existing directory with additional or modified flags. For e.g to rerun with the best max throughput (batch size=128) settings run with ```BATCH_SIZE```
@@ -429,8 +425,6 @@ All other flags will be defaulted to values passed in the first ```launch_benchm
        .
        Batch size = 128
        Throughput: ... images/sec
-       lscpu_path_cmd = command -v lscpu
-       lscpu located here: /usr/bin/lscpu
        Ran inference with batch size 128
 
    Log location outside container: {--output-dir value}/benchmark_resnet50_inference_fp32_20190205_201632.log
diff --git a/docs/recommendation/tensorflow/Tutorial.md b/docs/recommendation/tensorflow/Tutorial.md
index f814daac1..544db76d2 100644
--- a/docs/recommendation/tensorflow/Tutorial.md
+++ b/docs/recommendation/tensorflow/Tutorial.md
@@ -215,8 +215,6 @@ Set this parameter to a socket id to run the workload on a single socket.
    Average Latency (ms/batch) : ...
    Throughput is (records/sec) : ...
    --------------------------------------------------
-   lscpu_path_cmd = command -v lscpu
-   lscpu located here: /usr/bin/lscpu
    num_inter_threads: 28
    num_intra_threads: 1
    Received these standard args: Namespace(accuracy_only=False, batch_size=512, benchmark_dir='/workspace/benchmarks', benchmark_only=True, checkpoint=None, data_location='/dataset', data_num_inter_threads=None, data_num_intra_threads=None, framework='tensorflow', input_graph='/in_graph/wide_deep_fp32_pretrained_model.pb', intelai_models='/workspace/intelai_models', mode='inference', model_args=[], model_name='wide_deep_large_ds', model_source_dir='/workspace/models', num_cores=-1, num_inter_threads=28, num_intra_threads=1, num_parallel_batches=28, output_dir='/workspace/benchmarks/common/tensorflow/logs', output_results=False, precision='fp32', socket_id=-1, use_case='recommendation', verbose=True)
@@ -276,9 +274,7 @@ perform necessary installs, run the ```launch_benchmark.py``` script, and does n
        --debug
 
    Example Output:
-
-       lscpu_path_cmd = command -v lscpu
-       lscpu located here: b'/usr/bin/lscpu'
+       root@a78677f56d69:/workspace/benchmarks/common/tensorflow#
 
 To rerun the benchmarking script, execute the ```start.sh``` bash script from your existing directory with additional or modified flags. For example, to rerun with the best max throughput (batch size=512) settings, run with ```BATCH_SIZE```
From 059dc96ba657ed9beef57246671170e82e9fffc2 Mon Sep 17 00:00:00 2001
From: mjkyung
Date: Fri, 12 Apr 2019 16:31:52 -0700
Subject: [PATCH 15/62] MobileNet V1 INT8 Inference README.md frozen graph info
 update (#284)
---
 .../image_recognition/tensorflow/mobilenet_v1/README.md | 6 +-----
 1 file changed, 1 insertion(+), 5 deletions(-)

diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md
index d4e10910b..61a21c3ef 100644
--- a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md
+++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md
@@ -35,11 +35,7 @@ later.
    -rw-r--r--. 1 user 52508270 Jun 20 15:09 validation-00126-of-00128
    -rw-r--r--. 1 user 55292089 Jun 20 15:09 validation-00127-of-00128
    ```
-2. Download the pretrained model:
-
-   ```
-   $ wget https://storage.cloud.google.com/intel-optimized-tensorflow/models/mobilenetv1_int8_pretrained_model.pb
-   ```
+2. A link to download the pre-trained model is coming soon.
 
 3. Clone the [intelai/models](https://github.com/intelai/models) repo
    and then run the benchmarking scripts for either benchmarking throughput,

From 759608dfc5691ca19234d3c92db71944ae689441 Mon Sep 17 00:00:00 2001
From: Karthik Vadla
Date: Mon, 15 Apr 2019 09:44:45 -0700
Subject: [PATCH 16/62] Add default config as json file (#272)
---
 Contribute.md                                          | 24 +++++--
 add_model_init.png                                     | Bin 14105 -> 0 bytes
 add_model_init_and_config.png                          | Bin 0 -> 15583 bytes
 add_readme.png                                         | Bin 15909 -> 17730 bytes
 .../dcgan/inference/fp32/config.json                   |  8 +++
 .../dcgan/inference/fp32/model_init.py                 |  4 +-
 benchmarks/common/base_model_init.py                   | 25 +++++--
 .../draw/inference/fp32/config.json                    |  8 +++
 .../draw/inference/fp32/model_init.py                  |  5 +-
 .../facenet/inference/fp32/config.json                 |  7 ++
 .../facenet/inference/fp32/model_init.py               |  3 +-
 .../mtcc/inference/fp32/config.json                    |  7 ++
 .../mtcc/inference/fp32/model_init.py                  |  3 +-
 .../inference/fp32/config.json                         |  7 ++
 .../inference/fp32/model_init.py                       |  3 +-
 .../inference/int8/config.json                         |  7 ++
 .../inference/int8/model_init.py                       |  6 +-
 .../inceptionv3/inference/fp32/config.json             |  7 ++
 .../inceptionv3/inference/fp32/model_init.py           |  6 +-
 .../inceptionv3/inference/int8/config.json             |  7 ++
 .../inceptionv3/inference/int8/model_init.py           |  5 +-
 .../inceptionv4/inference/config.json                  |  7 ++
 .../inference/inceptionv4_model_init.py                |  6 +-
 .../mobilenet_v1/inference/fp32/config.json            |  6 ++
 .../mobilenet_v1/inference/fp32/model_init.py          |  5 +-
 .../mobilenet_v1/inference/int8/config.json            |  7 ++
 .../mobilenet_v1/inference/int8/model_init.py          |  3 +-
 .../resnet101/inference/fp32/config.json               |  7 ++
 .../resnet101/inference/fp32/model_init.py             |  6 +-
 .../resnet101/inference/int8/config.json               |  7 ++
 .../resnet101/inference/int8/model_init.py             |  5 +-
 .../resnet50/inference/fp32/config.json                |  7 ++
 .../resnet50/inference/fp32/model_init.py              |  6 +-
 .../resnet50/inference/int8/config.json                |  7 ++
 .../resnet50/inference/int8/model_init.py              |  5 +-
 .../maskrcnn/inference/fp32/config.json                |  8 +++
 .../maskrcnn/inference/fp32/model_init.py              |  4 +-
 .../unet/inference/fp32/config.json                    |  7 ++
 .../unet/inference/fp32/model_init.py                  |  3 +-
 .../lm-1b/inference/fp32/config.json                   |  7 ++
 .../lm-1b/inference/fp32/model_init.py                 |  5 +-
 .../gnmt/inference/fp32/config.json                    |  7 ++
 .../gnmt/inference/fp32/model_init.py                  |  5 +-
 .../inference/fp32/config.json                         |  7 ++
 .../inference/fp32/model_init.py                       |  5 +-
 .../inference/fp32/config.json                         |  7 ++
 .../inference/fp32/model_init.py                       |  5 +-
 .../faster_rcnn/inference/fp32/config.json             |  7 ++
 .../faster_rcnn/inference/fp32/model_init.py           |  3 +-
 .../faster_rcnn/inference/int8/config.json             |  7 ++
 .../faster_rcnn/inference/int8/model_init.py           |  5 +-
 .../rfcn/inference/fp32/config.json                    |  6 ++
 .../rfcn/inference/fp32/model_init.py                  |  5 +-
 .../rfcn/inference/int8/config.json                    |  7 ++
 .../rfcn/inference/int8/model_init.py                  |  5 +-
 .../ssd-mobilenet/inference/fp32/config.json           |  7 ++
 .../inference/fp32/model_init.py                       |  5 +-
 .../ssd-mobilenet/inference/int8/config.json           |  7 ++
 .../inference/int8/model_init.py                       |  5 +-
 .../ssd-resnet34/inference/fp32/config.json            |  7 ++
 .../ssd-resnet34/inference/fp32/model_init.py          |  6 +-
 .../tensorflow/ncf/inference/fp32/config.json          |  7 ++
 .../ncf/inference/fp32/model_init.py                   |  3 +-
 .../inference/fp32/config.json                         |  7 ++
 .../inference/fp32/model_init.py                       |  7 +-
 .../inference/int8/config.json                         |  7 ++
 .../inference/int8/model_init.py                       |  7 +-
 .../wavenet/inference/fp32/config.json                 |  6 ++
 .../wavenet/inference/fp32/model_init.py               |  5 +-
 tests/unit/common/test_base_model_init.py              | 63 +++++++++++++++++-
 70 files changed, 423 insertions(+), 67 deletions(-)
 delete mode 100644 add_model_init.png
 create mode 100644 add_model_init_and_config.png
 create mode 100644 benchmarks/adversarial_networks/tensorflow/dcgan/inference/fp32/config.json
 create mode 100644 benchmarks/content_creation/tensorflow/draw/inference/fp32/config.json
 create mode 100644 benchmarks/face_detection_and_alignment/tensorflow/facenet/inference/fp32/config.json
 create mode 100644 benchmarks/face_detection_and_alignment/tensorflow/mtcc/inference/fp32/config.json
 create mode 100644 benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/fp32/config.json
 create mode 100644 benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/int8/config.json
 create mode 100644 benchmarks/image_recognition/tensorflow/inceptionv3/inference/fp32/config.json
 create mode 100644 benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/config.json
 create mode 100644 benchmarks/image_recognition/tensorflow/inceptionv4/inference/config.json
 create mode 100644 benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/fp32/config.json
 create mode 100644 benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/config.json
 create mode 100644 benchmarks/image_recognition/tensorflow/resnet101/inference/fp32/config.json
 create mode 100644 benchmarks/image_recognition/tensorflow/resnet101/inference/int8/config.json
 create mode 100644 benchmarks/image_recognition/tensorflow/resnet50/inference/fp32/config.json
 create mode 100644 benchmarks/image_recognition/tensorflow/resnet50/inference/int8/config.json
 create mode 100644 benchmarks/image_segmentation/tensorflow/maskrcnn/inference/fp32/config.json
 create mode 100644 benchmarks/image_segmentation/tensorflow/unet/inference/fp32/config.json
 create mode 100644 benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/config.json
 create mode 100644 benchmarks/language_translation/tensorflow/gnmt/inference/fp32/config.json
 create mode 100644 benchmarks/language_translation/tensorflow/transformer_language/inference/fp32/config.json
 create mode 100644 benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/config.json
 create mode 100644 benchmarks/object_detection/tensorflow/faster_rcnn/inference/fp32/config.json
 create mode 100644 benchmarks/object_detection/tensorflow/faster_rcnn/inference/int8/config.json
 create mode 100644 benchmarks/object_detection/tensorflow/rfcn/inference/fp32/config.json
 create mode 100644 benchmarks/object_detection/tensorflow/rfcn/inference/int8/config.json
 create mode 100644 benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/config.json
 create mode 100644 benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/config.json
 create mode 100644 benchmarks/object_detection/tensorflow/ssd-resnet34/inference/fp32/config.json
 create mode 100644 benchmarks/recommendation/tensorflow/ncf/inference/fp32/config.json
 create mode 100644 benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/fp32/config.json
 create mode 100644 benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/int8/config.json
 create mode 100644 benchmarks/text_to_speech/tensorflow/wavenet/inference/fp32/config.json
diff --git a/Contribute.md b/Contribute.md
index 84e1b2f64..55d2b7e44 100644
--- a/Contribute.md
+++ b/Contribute.md
@@ -17,11 +17,25 @@ required:
    ![Benchmarks Directory Structure](benchmarks_directory_structure.png)
 
 2. Next, in the leaf folder that was created in the previous step, you
-   will need to create a `model_init.py` file:
+   will need to create `config.json` and `model_init.py` files:
 
-   ![Add model init](add_model_init.png)
+   ![Add model init](add_model_init_and_config.png)
 
-   This file is used to initialize the best known configuration for the
+   The `config.json` file contains the best known KMP environment variable
+   settings for getting optimal performance from the model. The default settings
+   below are recommended for most of the models in the Model Zoo.
+
+   ```
+   {
+     "optimization_parameters": {
+       "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0",
+       "KMP_BLOCKTIME": 1,
+       "KMP_SETTINGS": 1
+     }
+   }
+   ```
+
+   The `model_init.py` file is used to initialize the best known configuration for the
    model, and then start executing inference or training. When the
   [launch script](/docs/general/tensorflow/LaunchBenchmark.md) is run,
   it will look for the appropriate `model_init.py` file to use
@@ -33,7 +47,7 @@ required:
   [base model init class](/benchmarks/common/base_model_init.py) that
   includes functions for doing common tasks such as setting up the best
   known environment variables (like `KMP_BLOCKTIME`, `KMP_SETTINGS`,
-   `KMP_AFFINITY`, and `OMP_NUM_THREADS`), num intra threads, and num
+   `KMP_AFFINITY`, loaded from **config.json**, and `OMP_NUM_THREADS`), num intra threads, and num
   inter threads. The `model_init.py` file also sets up the string that
   will ultimately be used to run inference or model training, which
   normally includes the use of `numactl` and sending all of the
@@ -93,7 +107,7 @@ Optional step:
   the original repository, then it can be added to the [models](/models)
   directory in the zoo repo. As with the first step in the previous
   section, the directory structure should be set up like:
-   `/models/<use case>/<framework>/<model name>/<mode>/<precision>`:
+   `/models/<use case>/<framework>/<model name>/<mode>/<precision>`.
 
    ![Models Directory Structure](models_directory_structure.png)
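For illustration, a minimal `model_init.py` following the convention described in the Contribute.md changes above could look like the sketch below. This is a sketch under assumptions, not a file from this patch: the `run()` hook and the command building are placeholders, while `set_kmp_vars`, `set_num_inter_intra_threads`, and `set_env_var` come from the shared base class in `benchmarks/common/base_model_init.py`.

```
# Hypothetical minimal model_init.py for a new model (sketch only).
import os

from common.base_model_init import BaseModelInitializer
from common.base_model_init import set_env_var


class ModelInitializer(BaseModelInitializer):
    """Initializes the best known configuration and runs the benchmark."""

    def __init__(self, args, custom_args=[], platform_util=None):
        super(ModelInitializer, self).__init__(args, custom_args, platform_util)

        # Load the KMP_* defaults from the config.json that sits next to this file
        config_file_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "config.json")
        self.set_kmp_vars(config_file_path)

        # Pick inter/intra op thread counts, then pin OMP threads to match
        self.set_num_inter_intra_threads()
        set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads)

    def run(self):
        # Build and execute the inference or training command here
        # (model-specific; typically prefixed with numactl by the base class)
        pass
```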
diff --git a/add_model_init.png b/add_model_init.png
deleted file mode 100644
index 6bacd1bb67252070894e20ef9dd53c7e9432d1df..0000000000000000000000000000000000000000
GIT binary patch
[binary PNG patch data omitted]

diff --git a/add_model_init_and_config.png b/add_model_init_and_config.png
new file mode 100644
index 0000000000000000000000000000000000000000..ef9b882905e62cd543114eafb21f9c70f261dddb
GIT binary patch
[binary PNG patch data omitted]

diff --git a/add_readme.png b/add_readme.png
index 4899a9fa3a7bc648de4b73c504fb7db5174c60c0..f28783bad6b21892459047001dbec03f5c1469a6 100644
GIT binary patch
[binary PNG patch data omitted]

diff --git a/benchmarks/adversarial_networks/tensorflow/dcgan/inference/fp32/config.json b/benchmarks/adversarial_networks/tensorflow/dcgan/inference/fp32/config.json
new file mode 100644
index 000000000..dfac18793
--- /dev/null
+++ b/benchmarks/adversarial_networks/tensorflow/dcgan/inference/fp32/config.json
@@ -0,0 +1,8 @@
+{
+  "optimization_parameters": {
+    "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0",
+    "KMP_BLOCKTIME": 1,
+    "KMP_SETTINGS": 1,
+    "KMP_HW_SUBSET": "1T"
+  }
+}
diff --git a/benchmarks/adversarial_networks/tensorflow/dcgan/inference/fp32/model_init.py b/benchmarks/adversarial_networks/tensorflow/dcgan/inference/fp32/model_init.py
index aed323e94..8f4602c2c 100644
--- a/benchmarks/adversarial_networks/tensorflow/dcgan/inference/fp32/model_init.py
+++ b/benchmarks/adversarial_networks/tensorflow/dcgan/inference/fp32/model_init.py
@@ -37,8 +37,8 @@ def __init__(self, args, custom_args=[], platform_util=None):
         self.set_num_inter_intra_threads()
 
         # Set KMP env vars, if they haven't already been set
-        self.set_kmp_vars()
-        set_env_var("KMP_HW_SUBSET", "1T")
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)
 
         benchmark_script = os.path.join(
             self.args.intelai_models, args.mode, args.precision,
""" + if os.path.exists(config_file_path): + with open(config_file_path, 'r') as config: + config_object = json.load(config) + + # First sets default from config file + for param in config_object.keys(): + for env in config_object[param].keys(): + set_env_var(env, config_object[param][env]) + + else: + print("Warning: File {} does not exist and \ + cannot be used to set KMP environment variables".format(config_file_path)) + + # Override user provided envs if kmp_settings: - set_env_var("KMP_SETTINGS", kmp_settings) + set_env_var("KMP_SETTINGS", kmp_settings, overwrite_existing=True) if kmp_blocktime: - set_env_var("KMP_BLOCKTIME", kmp_blocktime) + set_env_var("KMP_BLOCKTIME", kmp_blocktime, overwrite_existing=True) if kmp_affinity: - set_env_var("KMP_AFFINITY", kmp_affinity) + set_env_var("KMP_AFFINITY", kmp_affinity, overwrite_existing=True) diff --git a/benchmarks/content_creation/tensorflow/draw/inference/fp32/config.json b/benchmarks/content_creation/tensorflow/draw/inference/fp32/config.json new file mode 100644 index 000000000..dfac18793 --- /dev/null +++ b/benchmarks/content_creation/tensorflow/draw/inference/fp32/config.json @@ -0,0 +1,8 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1, + "KMP_HW_SUBSET": "1T" + } +} diff --git a/benchmarks/content_creation/tensorflow/draw/inference/fp32/model_init.py b/benchmarks/content_creation/tensorflow/draw/inference/fp32/model_init.py index 390bcae82..08c145bca 100644 --- a/benchmarks/content_creation/tensorflow/draw/inference/fp32/model_init.py +++ b/benchmarks/content_creation/tensorflow/draw/inference/fp32/model_init.py @@ -22,7 +22,6 @@ import os import sys from common.base_model_init import BaseModelInitializer -from common.base_model_init import set_env_var class ModelInitializer(BaseModelInitializer): @@ -32,8 +31,8 @@ def __init__(self, args, custom_args=[], platform_util=None): super(ModelInitializer, self).__init__(args, custom_args, platform_util) # Set KMP env vars, if they haven't already been set - self.set_kmp_vars() - set_env_var("KMP_HW_SUBSET", "1T") + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) if self.args.accuracy_only: print("Accuracy testing for DRAW inference is not supported yet.") diff --git a/benchmarks/face_detection_and_alignment/tensorflow/facenet/inference/fp32/config.json b/benchmarks/face_detection_and_alignment/tensorflow/facenet/inference/fp32/config.json new file mode 100644 index 000000000..273b45b40 --- /dev/null +++ b/benchmarks/face_detection_and_alignment/tensorflow/facenet/inference/fp32/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/face_detection_and_alignment/tensorflow/facenet/inference/fp32/model_init.py b/benchmarks/face_detection_and_alignment/tensorflow/facenet/inference/fp32/model_init.py index 9bd9c6243..bf4b8132c 100644 --- a/benchmarks/face_detection_and_alignment/tensorflow/facenet/inference/fp32/model_init.py +++ b/benchmarks/face_detection_and_alignment/tensorflow/facenet/inference/fp32/model_init.py @@ -34,7 +34,8 @@ def __init__(self, args, custom_args=[], platform_util=None): self.python_exe + " " # Set KMP env vars, if they haven't already been set - self.set_kmp_vars() + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + 
+        self.set_kmp_vars(config_file_path)

         pairs_file = os.path.join(self.args.model_source_dir, "data/pairs.txt")
diff --git a/benchmarks/face_detection_and_alignment/tensorflow/mtcc/inference/fp32/config.json b/benchmarks/face_detection_and_alignment/tensorflow/mtcc/inference/fp32/config.json
new file mode 100644
index 000000000..273b45b40
--- /dev/null
+++ b/benchmarks/face_detection_and_alignment/tensorflow/mtcc/inference/fp32/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0",
+        "KMP_BLOCKTIME": 1,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/face_detection_and_alignment/tensorflow/mtcc/inference/fp32/model_init.py b/benchmarks/face_detection_and_alignment/tensorflow/mtcc/inference/fp32/model_init.py
index 34409b702..4ef889b36 100644
--- a/benchmarks/face_detection_and_alignment/tensorflow/mtcc/inference/fp32/model_init.py
+++ b/benchmarks/face_detection_and_alignment/tensorflow/mtcc/inference/fp32/model_init.py
@@ -33,7 +33,8 @@ def __init__(self, args, custom_args, platform_util=None):
         self.set_num_inter_intra_threads()

         # Set KMP env vars, if they haven't already been set
-        self.set_kmp_vars()
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)

         set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads)
diff --git a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/fp32/config.json b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/fp32/config.json
new file mode 100644
index 000000000..273b45b40
--- /dev/null
+++ b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/fp32/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0",
+        "KMP_BLOCKTIME": 1,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/fp32/model_init.py b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/fp32/model_init.py
index 064bf7848..641821520 100644
--- a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/fp32/model_init.py
+++ b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/fp32/model_init.py
@@ -32,7 +32,8 @@ def __init__(self, args, custom_args=[], platform_util=None):
         self.cmd = self.get_numactl_command(self.args.socket_id) + self.python_exe + " "

         # Set KMP env vars, if they haven't already been set
-        self.set_kmp_vars()
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)

         # use default batch size if -1
         if self.args.batch_size == -1:
diff --git a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/int8/config.json b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/int8/config.json
new file mode 100644
index 000000000..273b45b40
--- /dev/null
+++ b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/int8/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0",
+        "KMP_BLOCKTIME": 1,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/int8/model_init.py b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/int8/model_init.py
index f2e2e1469..0d7dda4db 100644
--- a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/int8/model_init.py
+++ b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/int8/model_init.py
@@ -31,7 +31,11 @@ class ModelInitializer(BaseModelInitializer):
     def __init__(self, args, custom_args=[], platform_util=None):
         super(ModelInitializer, self).__init__(args, custom_args, platform_util)
-        self.set_kmp_vars()
+
+        # Set KMP env vars, if they haven't already been set
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)
+
         self.cmd = self.get_numactl_command(self.args.socket_id) + "{} ".format(self.python_exe)

         # use default batch size if -1
diff --git a/benchmarks/image_recognition/tensorflow/inceptionv3/inference/fp32/config.json b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/fp32/config.json
new file mode 100644
index 000000000..273b45b40
--- /dev/null
+++ b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/fp32/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0",
+        "KMP_BLOCKTIME": 1,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/image_recognition/tensorflow/inceptionv3/inference/fp32/model_init.py b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/fp32/model_init.py
index dd504259e..53c2643bd 100644
--- a/benchmarks/image_recognition/tensorflow/inceptionv3/inference/fp32/model_init.py
+++ b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/fp32/model_init.py
@@ -60,8 +60,10 @@ def __init__(self, args, custom_args=[], platform_util=None):
         self.args = arg_parser.parse_args(self.custom_args,
                                           namespace=self.args)

-        # Use default KMP variable values, but override the default KMP_BLOCKTIME value
-        self.set_kmp_vars(kmp_blocktime=str(self.args.kmp_blocktime))
+        # Set KMP env vars, if they haven't already been set, but override the default KMP_BLOCKTIME value
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path, kmp_blocktime=str(self.args.kmp_blocktime))
+
         set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads)

         benchmark_script = os.path.join(
diff --git a/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/config.json b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/config.json
new file mode 100644
index 000000000..273b45b40
--- /dev/null
+++ b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0",
+        "KMP_BLOCKTIME": 1,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/model_init.py b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/model_init.py
index 6d586ea80..bd4794638 100644
--- a/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/model_init.py
+++ b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/model_init.py
@@ -60,8 +60,9 @@ def parse_args(self):
         self.args = parser.parse_args(self.custom_args,
                                       namespace=self.args)

-        # Use default KMP variable values, but override the default KMP_BLOCKTIME value
-        self.set_kmp_vars(kmp_blocktime=str(self.args.kmp_blocktime))
+        # Set KMP env vars, if they haven't already been set, but override the default KMP_BLOCKTIME value
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path, kmp_blocktime=str(self.args.kmp_blocktime))
     def run_benchmark(self):
         benchmark_script = os.path.join(self.args.intelai_models,
diff --git a/benchmarks/image_recognition/tensorflow/inceptionv4/inference/config.json b/benchmarks/image_recognition/tensorflow/inceptionv4/inference/config.json
new file mode 100644
index 000000000..6f1228ba7
--- /dev/null
+++ b/benchmarks/image_recognition/tensorflow/inceptionv4/inference/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0",
+        "KMP_BLOCKTIME": 0,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/image_recognition/tensorflow/inceptionv4/inference/inceptionv4_model_init.py b/benchmarks/image_recognition/tensorflow/inceptionv4/inference/inceptionv4_model_init.py
index c7d546477..d4294a179 100644
--- a/benchmarks/image_recognition/tensorflow/inceptionv4/inference/inceptionv4_model_init.py
+++ b/benchmarks/image_recognition/tensorflow/inceptionv4/inference/inceptionv4_model_init.py
@@ -38,7 +38,11 @@ def __init__(self, args, custom_args=[], platform_util=None):
         # Environment variables
         set_env_var("OMP_NUM_THREADS", platform_util.num_cores_per_socket
                     if self.args.num_cores == -1 else self.args.num_cores)
-        self.set_kmp_vars(kmp_blocktime="0")
+
+        # Set KMP env vars, if they haven't already been set
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)
+
         self.set_num_inter_intra_threads(num_inter_threads=platform_util.num_threads_per_core,
                                          num_intra_threads=platform_util.num_cores_per_socket)
diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/fp32/config.json b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/fp32/config.json
new file mode 100644
index 000000000..f0b327528
--- /dev/null
+++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/fp32/config.json
@@ -0,0 +1,6 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0",
+        "KMP_BLOCKTIME": 1
+    }
+}
diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/fp32/model_init.py b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/fp32/model_init.py
index e75c72194..d4e3ca5d7 100644
--- a/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/fp32/model_init.py
+++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/fp32/model_init.py
@@ -33,8 +33,9 @@ def __init__(self, args, custom_args=[], platform_util=None):
         if self.args.batch_size == -1:
             self.args.batch_size = 128

-        # Set KMP env vars (except KMP_SETTINGS is not set)
-        self.set_kmp_vars(kmp_settings=None)
+        # Set KMP env vars, if they haven't already been set
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)

         # set num_inter_threads and num_intra_threads (override inter threads to 2)
         self.set_num_inter_intra_threads(num_inter_threads=2)
diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/config.json b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/config.json
new file mode 100644
index 000000000..273b45b40
--- /dev/null
+++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0",
+        "KMP_BLOCKTIME": 1,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/model_init.py b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/model_init.py
index 0823604c0..6f22fd12a 100644
--- a/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/model_init.py
+++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/model_init.py
@@ -37,7 +37,8 @@ def __init__(self, args, custom_args=[], platform_util=None):
         self.cmd = self.get_numactl_command(self.args.socket_id) + "python "

         # Set KMP env vars, if they haven't already been set
-        self.set_kmp_vars()
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)

         # Set the num_inter_threads and num_intra_threads
         self.set_num_inter_intra_threads()
diff --git a/benchmarks/image_recognition/tensorflow/resnet101/inference/fp32/config.json b/benchmarks/image_recognition/tensorflow/resnet101/inference/fp32/config.json
new file mode 100644
index 000000000..273b45b40
--- /dev/null
+++ b/benchmarks/image_recognition/tensorflow/resnet101/inference/fp32/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0",
+        "KMP_BLOCKTIME": 1,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/image_recognition/tensorflow/resnet101/inference/fp32/model_init.py b/benchmarks/image_recognition/tensorflow/resnet101/inference/fp32/model_init.py
index 5e35e462b..43f862159 100644
--- a/benchmarks/image_recognition/tensorflow/resnet101/inference/fp32/model_init.py
+++ b/benchmarks/image_recognition/tensorflow/resnet101/inference/fp32/model_init.py
@@ -60,8 +60,10 @@ def __init__(self, args, custom_args=[], platform_util=None):
         self.args = arg_parser.parse_args(self.custom_args,
                                           namespace=self.args)

-        # Use default KMP variable values, but override the default KMP_BLOCKTIME value
-        self.set_kmp_vars(kmp_blocktime=str(self.args.kmp_blocktime))
+        # Set KMP env vars, if they haven't already been set, but override the default KMP_BLOCKTIME value
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path, kmp_blocktime=str(self.args.kmp_blocktime))
+
         set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads)

         benchmark_script = os.path.join(
diff --git a/benchmarks/image_recognition/tensorflow/resnet101/inference/int8/config.json b/benchmarks/image_recognition/tensorflow/resnet101/inference/int8/config.json
new file mode 100644
index 000000000..6f1228ba7
--- /dev/null
+++ b/benchmarks/image_recognition/tensorflow/resnet101/inference/int8/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0",
+        "KMP_BLOCKTIME": 0,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/image_recognition/tensorflow/resnet101/inference/int8/model_init.py b/benchmarks/image_recognition/tensorflow/resnet101/inference/int8/model_init.py
index 5e32d3e92..4bd21a12e 100644
--- a/benchmarks/image_recognition/tensorflow/resnet101/inference/int8/model_init.py
+++ b/benchmarks/image_recognition/tensorflow/resnet101/inference/int8/model_init.py
@@ -41,8 +41,9 @@ def __init__(self, args, custom_args=[], platform_util=None):
         set_env_var("OMP_NUM_THREADS", platform_util.num_cores_per_socket
                     if args.num_cores == -1 else args.num_cores)

-        # Set KMP env vars, but override default KMP_BLOCKTIME value
-        self.set_kmp_vars(kmp_blocktime="0")
+        # Set KMP env vars, if they haven't already been set
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)

     def parse_args(self):
         parser = argparse.ArgumentParser()
diff --git a/benchmarks/image_recognition/tensorflow/resnet50/inference/fp32/config.json b/benchmarks/image_recognition/tensorflow/resnet50/inference/fp32/config.json
new file mode 100644
index 000000000..273b45b40
--- /dev/null
+++ b/benchmarks/image_recognition/tensorflow/resnet50/inference/fp32/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0",
+        "KMP_BLOCKTIME": 1,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/image_recognition/tensorflow/resnet50/inference/fp32/model_init.py b/benchmarks/image_recognition/tensorflow/resnet50/inference/fp32/model_init.py
index a2e6be8a3..4c3dfbd1d 100644
--- a/benchmarks/image_recognition/tensorflow/resnet50/inference/fp32/model_init.py
+++ b/benchmarks/image_recognition/tensorflow/resnet50/inference/fp32/model_init.py
@@ -61,8 +61,10 @@ def __init__(self, args, custom_args=[], platform_util=None):
         self.args = arg_parser.parse_args(self.custom_args,
                                           namespace=self.args)

-        # Use default KMP variable values, but override the default KMP_BLOCKTIME value
-        self.set_kmp_vars(kmp_blocktime=str(self.args.kmp_blocktime))
+        # Set KMP env vars, if they haven't already been set, but override the default KMP_BLOCKTIME value
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path, kmp_blocktime=str(self.args.kmp_blocktime))
+
         set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads)

         benchmark_script = os.path.join(
diff --git a/benchmarks/image_recognition/tensorflow/resnet50/inference/int8/config.json b/benchmarks/image_recognition/tensorflow/resnet50/inference/int8/config.json
new file mode 100644
index 000000000..273b45b40
--- /dev/null
+++ b/benchmarks/image_recognition/tensorflow/resnet50/inference/int8/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0",
+        "KMP_BLOCKTIME": 1,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/image_recognition/tensorflow/resnet50/inference/int8/model_init.py b/benchmarks/image_recognition/tensorflow/resnet50/inference/int8/model_init.py
index 07dfa5d2f..75e9db07c 100644
--- a/benchmarks/image_recognition/tensorflow/resnet50/inference/int8/model_init.py
+++ b/benchmarks/image_recognition/tensorflow/resnet50/inference/int8/model_init.py
@@ -65,8 +65,9 @@ def parse_args(self):
         self.args = parser.parse_args(self.custom_args,
                                       namespace=self.args)

-        # Use default KMP variable values, but override the default KMP_BLOCKTIME value
-        self.set_kmp_vars(kmp_blocktime=str(self.args.kmp_blocktime))
+        # Set KMP env vars, if they haven't already been set, but override the default KMP_BLOCKTIME value
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path, kmp_blocktime=str(self.args.kmp_blocktime))

     def run_benchmark_or_accuracy(self):
         cmd = os.path.join(
diff --git a/benchmarks/image_segmentation/tensorflow/maskrcnn/inference/fp32/config.json b/benchmarks/image_segmentation/tensorflow/maskrcnn/inference/fp32/config.json
new file mode 100644
index 000000000..23d5de76e
--- /dev/null
+++ b/benchmarks/image_segmentation/tensorflow/maskrcnn/inference/fp32/config.json
@@ -0,0 +1,8 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,compact,1,0",
+        "KMP_BLOCKTIME": 1,
+        "KMP_SETTINGS": 1,
+        "KMP_HW_SUBSET": "1T"
+    }
+}
diff --git a/benchmarks/image_segmentation/tensorflow/maskrcnn/inference/fp32/model_init.py b/benchmarks/image_segmentation/tensorflow/maskrcnn/inference/fp32/model_init.py
index 1fe96fe2b..43f9cdacc 100644
--- a/benchmarks/image_segmentation/tensorflow/maskrcnn/inference/fp32/model_init.py
+++ b/benchmarks/image_segmentation/tensorflow/maskrcnn/inference/fp32/model_init.py
@@ -37,8 +37,8 @@ def __init__(self, args, custom_args=[], platform_util=None):
         self.set_num_inter_intra_threads()

         # Set KMP env vars, if they haven't already been set
-        self.set_kmp_vars(kmp_affinity="granularity=fine, compact, 1, 0")
-        set_env_var("KMP_HW_SUBSET", "1T")
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)

         benchmark_script = os.path.join(
             self.args.intelai_models, "coco.py")
diff --git a/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/config.json b/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/config.json
new file mode 100644
index 000000000..ca15cfe6d
--- /dev/null
+++ b/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine, compact",
+        "KMP_BLOCKTIME": 1,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/model_init.py b/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/model_init.py
index cd4f5837d..d4998afae 100644
--- a/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/model_init.py
+++ b/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/model_init.py
@@ -41,7 +41,8 @@ def __init__(self, args, custom_args=[], platform_util=None):
         self.set_num_inter_intra_threads()

         # Set KMP env vars, if they haven't already been set
-        self.set_kmp_vars(kmp_affinity="granularity=fine, compact")
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)

         # Get path to the inference script
         script_path = os.path.join(
diff --git a/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/config.json b/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/config.json
new file mode 100644
index 000000000..8ae78e72a
--- /dev/null
+++ b/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,compact,1,0",
+        "KMP_BLOCKTIME": 0,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/model_init.py b/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/model_init.py
index 77d903020..6a2b7244f 100644
--- a/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/model_init.py
+++ b/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/model_init.py
@@ -35,8 +35,9 @@ def __init__(self, args, custom_args, platform_util=None):
         self.set_num_inter_intra_threads()

-        # Set the KMP env vars
-        self.set_kmp_vars(kmp_blocktime="0", kmp_affinity="granularity=fine,compact,1,0")
+        # Set KMP env vars, if they haven't already been set
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)

         set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads)
diff --git a/benchmarks/language_translation/tensorflow/gnmt/inference/fp32/config.json b/benchmarks/language_translation/tensorflow/gnmt/inference/fp32/config.json
new file mode 100644
index 000000000..4d0e2acf5
--- /dev/null
+++ b/benchmarks/language_translation/tensorflow/gnmt/inference/fp32/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,compact,1,0",
+        "KMP_BLOCKTIME": 1,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/language_translation/tensorflow/gnmt/inference/fp32/model_init.py b/benchmarks/language_translation/tensorflow/gnmt/inference/fp32/model_init.py
index 61ef1bda6..a23403eb0 100644
--- a/benchmarks/language_translation/tensorflow/gnmt/inference/fp32/model_init.py
+++ b/benchmarks/language_translation/tensorflow/gnmt/inference/fp32/model_init.py
@@ -37,8 +37,9 @@ def __init__(self, args, custom_args=[], platform_util=None):
                 (str(self.args.num_cores - 1)) + " "
         self.cmd += "{} ".format(self.python_exe)

-        # Set the KMP env vars
-        self.set_kmp_vars(kmp_affinity="granularity=fine,compact,1,0")
+        # Set KMP env vars, if they haven't already been set
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)

         # use default batch size if -1
         if self.args.batch_size == -1:
diff --git a/benchmarks/language_translation/tensorflow/transformer_language/inference/fp32/config.json b/benchmarks/language_translation/tensorflow/transformer_language/inference/fp32/config.json
new file mode 100644
index 000000000..8ae78e72a
--- /dev/null
+++ b/benchmarks/language_translation/tensorflow/transformer_language/inference/fp32/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,compact,1,0",
+        "KMP_BLOCKTIME": 0,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/language_translation/tensorflow/transformer_language/inference/fp32/model_init.py b/benchmarks/language_translation/tensorflow/transformer_language/inference/fp32/model_init.py
index 20790b541..4e1519e03 100644
--- a/benchmarks/language_translation/tensorflow/transformer_language/inference/fp32/model_init.py
+++ b/benchmarks/language_translation/tensorflow/transformer_language/inference/fp32/model_init.py
@@ -37,8 +37,9 @@ def __init__(self, args, custom_args, platform_util=None):
         self.set_num_inter_intra_threads()

-        # Set the KMP env vars
-        self.set_kmp_vars(kmp_blocktime="0", kmp_affinity="granularity=fine,compact,1,0")
+        # Set KMP env vars, if they haven't already been set
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)

         TEMP_DIR = str(self.args.model_source_dir) + "/out_dir"
         if os.path.exists(TEMP_DIR):
diff --git a/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/config.json b/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/config.json
new file mode 100644
index 000000000..8ae78e72a
--- /dev/null
+++ b/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,compact,1,0",
+        "KMP_BLOCKTIME": 0,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/model_init.py b/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/model_init.py
index b598191f0..00f8b9f3f 100644
--- a/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/model_init.py
+++ b/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/model_init.py
@@ -36,8 +36,9 @@ def __init__(self, args, custom_args, platform_util=None):
         self.set_num_inter_intra_threads()

-        # Set the KMP env vars
-        self.set_kmp_vars(kmp_blocktime="0", kmp_affinity="granularity=fine,compact,1,0")
+        # Set KMP env vars, if they haven't already been set
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)

         MODEL_EXEC_DIR = str(self.args.model_source_dir) + "/official/transformer/"
diff --git a/benchmarks/object_detection/tensorflow/faster_rcnn/inference/fp32/config.json b/benchmarks/object_detection/tensorflow/faster_rcnn/inference/fp32/config.json
new file mode 100644
index 000000000..273b45b40
--- /dev/null
+++ b/benchmarks/object_detection/tensorflow/faster_rcnn/inference/fp32/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0",
+        "KMP_BLOCKTIME": 1,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/object_detection/tensorflow/faster_rcnn/inference/fp32/model_init.py b/benchmarks/object_detection/tensorflow/faster_rcnn/inference/fp32/model_init.py
index 3e0167f75..a605cc8e3 100644
--- a/benchmarks/object_detection/tensorflow/faster_rcnn/inference/fp32/model_init.py
+++ b/benchmarks/object_detection/tensorflow/faster_rcnn/inference/fp32/model_init.py
@@ -43,7 +43,8 @@ def __init__(self, args, custom_args, platform_util=None):
         self.set_num_inter_intra_threads()

         # Set KMP env vars, if they haven't already been set
-        self.set_kmp_vars()
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)

         set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads)
diff --git a/benchmarks/object_detection/tensorflow/faster_rcnn/inference/int8/config.json b/benchmarks/object_detection/tensorflow/faster_rcnn/inference/int8/config.json
new file mode 100644
index 000000000..6f1228ba7
--- /dev/null
+++ b/benchmarks/object_detection/tensorflow/faster_rcnn/inference/int8/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0",
+        "KMP_BLOCKTIME": 0,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/object_detection/tensorflow/faster_rcnn/inference/int8/model_init.py b/benchmarks/object_detection/tensorflow/faster_rcnn/inference/int8/model_init.py
index 749026f3c..705ef72c1 100644
--- a/benchmarks/object_detection/tensorflow/faster_rcnn/inference/int8/model_init.py
+++ b/benchmarks/object_detection/tensorflow/faster_rcnn/inference/int8/model_init.py
@@ -41,8 +41,9 @@ def __init__(self, args, custom_args=[], platform_util=None):
             self.args.intelai_models, self.args.mode, self.args.precision,
             self.RFCN_ACCURACY_SCRIPT)

-        # Set KMP env vars, except override the default KMP_BLOCKTIME value
-        self.set_kmp_vars(kmp_blocktime="0")
+        # Set KMP env vars, if they haven't already been set
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)

         self.validate_args()
diff --git a/benchmarks/object_detection/tensorflow/rfcn/inference/fp32/config.json b/benchmarks/object_detection/tensorflow/rfcn/inference/fp32/config.json
new file mode 100644
index 000000000..d7f51a4c2
--- /dev/null
+++ b/benchmarks/object_detection/tensorflow/rfcn/inference/fp32/config.json
@@ -0,0 +1,6 @@
+{
+    "optimization_parameters": {
+        "KMP_BLOCKTIME": 0,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/object_detection/tensorflow/rfcn/inference/fp32/model_init.py b/benchmarks/object_detection/tensorflow/rfcn/inference/fp32/model_init.py
index 712da5777..a4ab51dfa 100644
--- a/benchmarks/object_detection/tensorflow/rfcn/inference/fp32/model_init.py
+++ b/benchmarks/object_detection/tensorflow/rfcn/inference/fp32/model_init.py
@@ -45,8 +45,9 @@ def __init__(self, args, custom_args, platform_util):
             self.args.intelai_models, self.args.mode, self.args.precision,
             "eval.py")

-        # Set KMP env vars, except override the default KMP_BLOCKTIME and KMP_AFFINITY values
-        self.set_kmp_vars(kmp_blocktime="0", kmp_affinity=None)
+        # Set KMP env vars, if they haven't already been set
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)

         self.run_inference_sanity_checks(self.args, self.custom_args)
         self.parse_custom_args()
diff --git a/benchmarks/object_detection/tensorflow/rfcn/inference/int8/config.json b/benchmarks/object_detection/tensorflow/rfcn/inference/int8/config.json
new file mode 100644
index 000000000..6f1228ba7
--- /dev/null
+++ b/benchmarks/object_detection/tensorflow/rfcn/inference/int8/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0",
+        "KMP_BLOCKTIME": 0,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/object_detection/tensorflow/rfcn/inference/int8/model_init.py b/benchmarks/object_detection/tensorflow/rfcn/inference/int8/model_init.py
index eec69455d..d6cb2cc97 100755
--- a/benchmarks/object_detection/tensorflow/rfcn/inference/int8/model_init.py
+++ b/benchmarks/object_detection/tensorflow/rfcn/inference/int8/model_init.py
@@ -54,8 +54,9 @@ def __init__(self, args, custom_args=[], platform_util=None):
         self.parse_args()

-        # Set KMP env vars with defaults, except for KMP_BLOCKTIME
-        self.set_kmp_vars(kmp_blocktime=0)
+        # Set KMP env vars, if they haven't already been set
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)

         # Set num_inter_threads and num_intra_threads
         self.set_num_inter_intra_threads()
diff --git a/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/config.json b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/config.json
new file mode 100644
index 000000000..6f1228ba7
--- /dev/null
+++ b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0",
+        "KMP_BLOCKTIME": 0,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/model_init.py b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/model_init.py
index 379e47c67..585d3ed0e 100644
--- a/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/model_init.py
+++ b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/model_init.py
@@ -44,8 +44,9 @@ def __init__(self, args, custom_args, platform_util):
         self.run_inference_sanity_checks(self.args, self.custom_args)
         self.research_dir = os.path.join(args.model_source_dir, "research")

-        # Set KMP env vars, except override the default KMP_BLOCKTIME value
-        self.set_kmp_vars(kmp_blocktime="0")
+        # Set KMP env vars, if they haven't already been set
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)

         # set num_inter_threads and num_intra_threads (override inter threads to 2)
         self.set_num_inter_intra_threads(num_inter_threads=2)
diff --git a/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/config.json b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/config.json
new file mode 100644
index 000000000..6f1228ba7
--- /dev/null
+++ b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0",
+        "KMP_BLOCKTIME": 0,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/model_init.py b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/model_init.py
index 5959abaf2..57114447a 100644
--- a/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/model_init.py
+++ b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/model_init.py
@@ -31,7 +31,10 @@ class ModelInitializer(BaseModelInitializer):
     def __init__(self, args, custom_args=[], platform_util=None):
         super(ModelInitializer, self).__init__(args, custom_args, platform_util)
-        self.set_kmp_vars(kmp_blocktime="0")
+
+        # Set KMP env vars, if they haven't already been set
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)

         # set num_inter_threads and num_intra_threads (override inter threads to 2)
         self.set_num_inter_intra_threads(num_inter_threads=2)
diff --git a/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/fp32/config.json b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/fp32/config.json
new file mode 100644
index 000000000..273b45b40
--- /dev/null
+++ b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/fp32/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0",
+        "KMP_BLOCKTIME": 1,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/fp32/model_init.py b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/fp32/model_init.py
index 0e6657a11..1ad534ed9 100644
--- a/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/fp32/model_init.py
+++ b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/fp32/model_init.py
@@ -42,7 +42,11 @@ def __init__(self, args, custom_args, platform_util):
         super(ModelInitializer, self).__init__(args, custom_args, platform_util)

         self.run_inference_sanity_checks(self.args, self.custom_args)
-        self.set_kmp_vars()
+
+        # Set KMP env vars, if they haven't already been set
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)
+
         self.set_num_inter_intra_threads()

         set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads)
diff --git a/benchmarks/recommendation/tensorflow/ncf/inference/fp32/config.json b/benchmarks/recommendation/tensorflow/ncf/inference/fp32/config.json
new file mode 100644
index 000000000..273b45b40
--- /dev/null
+++ b/benchmarks/recommendation/tensorflow/ncf/inference/fp32/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0",
+        "KMP_BLOCKTIME": 1,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/recommendation/tensorflow/ncf/inference/fp32/model_init.py b/benchmarks/recommendation/tensorflow/ncf/inference/fp32/model_init.py
index 1b6eb1eda..1704839cb 100644
--- a/benchmarks/recommendation/tensorflow/ncf/inference/fp32/model_init.py
+++ b/benchmarks/recommendation/tensorflow/ncf/inference/fp32/model_init.py
@@ -40,7 +40,8 @@ def __init__(self, args, custom_args=[], platform_util=None):
             self.args.batch_size = 256

         # Set KMP env vars, if they haven't already been set
-        self.set_kmp_vars()
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)

         # set num_inter_threads and num_intra_threads
         self.set_num_inter_intra_threads()
diff --git a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/fp32/config.json b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/fp32/config.json
new file mode 100644
index 000000000..4efe60b15
--- /dev/null
+++ b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/fp32/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "noverbose,warnings,respect,granularity=core,none",
+        "KMP_BLOCKTIME": 0,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/fp32/model_init.py b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/fp32/model_init.py
index 8f3e15359..6655dce85 100755
--- a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/fp32/model_init.py
+++ b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/fp32/model_init.py
@@ -36,9 +36,10 @@ def __init__(self, args, custom_args=[], platform_util=None):
         # Set the num_inter_threads and num_intra_threads
         self.set_num_inter_intra_threads(num_inter_threads=platform_util.num_cores_per_socket,
                                          num_intra_threads=1)
-        # Use default KMP AFFINITY values, override KMP_BLOCKTIME & enable KMP SETTINGS
-        self.set_kmp_vars(kmp_settings="1", kmp_blocktime="0",
-                          kmp_affinity="noverbose,warnings,respect,granularity=core,none")
+
+        # Set KMP env vars, if they haven't already been set
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)

         # Set env vars, if they haven't already been set
         set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads)
diff --git a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/int8/config.json b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/int8/config.json
new file mode 100644
index 000000000..4efe60b15
--- /dev/null
+++ b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/int8/config.json
@@ -0,0 +1,7 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "noverbose,warnings,respect,granularity=core,none",
+        "KMP_BLOCKTIME": 0,
+        "KMP_SETTINGS": 1
+    }
+}
diff --git a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/int8/model_init.py b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/int8/model_init.py
index 2bd55b5a5..9fdef4537 100755
--- a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/int8/model_init.py
+++ b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/int8/model_init.py
@@ -36,9 +36,10 @@ def __init__(self, args, custom_args=[], platform_util=None):
         # Set the num_inter_threads and num_intra_threads
         self.set_num_inter_intra_threads(num_inter_threads=platform_util.num_cores_per_socket,
                                          num_intra_threads=1)
-        # Use default KMP AFFINITY values, override KMP_BLOCKTIME & enable KMP SETTINGS
-        self.set_kmp_vars(kmp_settings="1", kmp_blocktime="0",
-                          kmp_affinity="noverbose,warnings,respect,granularity=core,none")
+
+        # Set KMP env vars, if they haven't already been set
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)

         # Set env vars, if they haven't already been set
         set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads)
diff --git a/benchmarks/text_to_speech/tensorflow/wavenet/inference/fp32/config.json b/benchmarks/text_to_speech/tensorflow/wavenet/inference/fp32/config.json
new file mode 100644
index 000000000..f0b327528
--- /dev/null
+++ b/benchmarks/text_to_speech/tensorflow/wavenet/inference/fp32/config.json
@@ -0,0 +1,6 @@
+{
+    "optimization_parameters": {
+        "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0",
+        "KMP_BLOCKTIME": 1
+    }
+}
diff --git a/benchmarks/text_to_speech/tensorflow/wavenet/inference/fp32/model_init.py b/benchmarks/text_to_speech/tensorflow/wavenet/inference/fp32/model_init.py
index 91ebe227c..1756e33ae 100644
--- a/benchmarks/text_to_speech/tensorflow/wavenet/inference/fp32/model_init.py
+++ b/benchmarks/text_to_speech/tensorflow/wavenet/inference/fp32/model_init.py
@@ -32,8 +32,9 @@ def __init__(self, args, custom_args, platform_util):
         self.command = ""
         command_prefix = "{} generate.py".format(self.python_exe)

-        # Set default KMP env vars, except for KMP_SETTINGS
-        self.set_kmp_vars(kmp_settings=None)
+        # Set KMP env vars, if they haven't already been set
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)

         self.parse_custom_args()
         # Set the num_inter_threads and num_intra_threads (override inter threads to 1)
diff --git a/tests/unit/common/test_base_model_init.py b/tests/unit/common/test_base_model_init.py
index 43f3076f1..979a6ac4c 100644
--- a/tests/unit/common/test_base_model_init.py
+++ b/tests/unit/common/test_base_model_init.py
@@ -17,8 +17,22 @@
 #
 # SPDX-License-Identifier: EPL-2.0
 #
-
+from contextlib import contextmanager
 import os
+import pytest
+import sys
+import tempfile
+
+try:
+    # python 2
+    from cStringIO import StringIO
+except ImportError:
+    # python 3
+    # only supports unicode so can't be used in python 2 for sys.stdout
+    # because (from `print` documentation)
+    # "All non-keyword arguments are converted to strings like str() does"
+    from io import StringIO
+
 from mock import MagicMock, patch

@@ -26,6 +40,22 @@
 from benchmarks.common.base_model_init import set_env_var


+@contextmanager
+def catch_stdout():
+    _stdout = sys.stdout
+    sys.stdout = caught_output = StringIO()
+    try:
+        yield caught_output
+    finally:
+        sys.stdout = _stdout
+        caught_output.close()
+
+
+@pytest.fixture
+def mock_json(patch):
+    return patch('json')
+
+
 # Example args and output strings for testing mocks
 test_model_name = "resnet50"
 test_framework = "tensorflow"
@@ -109,3 +139,34 @@ def test_env_var_not_already_set():
     finally:
         if os.environ.get(env_var):
             del os.environ[env_var]
+
+
+def test_set_kmp_vars_config_json_does_not_exists():
+    """Test config.json does not exist"""
+    # Setup base model init with test settings
+    platform_util = MagicMock()
+    args = MagicMock(verbose=True, model_name=test_model_name)
+    os.environ["PYTHON_EXE"] = "python"
+    base_model_init = BaseModelInitializer(args, [], platform_util)
+
+    config_file_path = '/test/foo/config.json'
+
+    with catch_stdout() as caught_output:
+        base_model_init.set_kmp_vars(config_file_path)
+        output = caught_output.getvalue()
+
+    assert "Warning: File {} does not exist and \
+        cannot be used to set KMP environment variables".format(config_file_path) == output.strip()
+
+
+def test_set_kmp_vars_config_json_exists(mock_json):
+    """Test set_kmp_vars when config.json exists"""
+    # Setup base model init with test settings
+    platform_util = MagicMock()
+    args = MagicMock(verbose=True, model_name=test_model_name)
+    os.environ["PYTHON_EXE"] = "python"
+    base_model_init = BaseModelInitializer(args, [], platform_util)
+
+    file_descriptor, config_file_path = tempfile.mkstemp(suffix=".json")
+
+    base_model_init.set_kmp_vars(config_file_path)

From c0d1fed01542dd59efb9521333dc8ce80ec94dbc Mon Sep 17 00:00:00 2001
From: Dina Suehiro Jones
Date: Mon, 15 Apr 2019 09:52:16 -0700
Subject: [PATCH 17/62] Add support for dummy data with MobileNet V1 FP32
 (#275)

---
 .../tensorflow/mobilenet_v1/README.md          | 14 +++-
 .../mobilenet_v1/inference/fp32/model_init.py  |  7 +-
 .../inference/fp32/eval_image_classifier.py    | 78 +++++++++++--------
 .../unit/common/tensorflow/tf_model_args.txt   |  1 +
 4 files changed, 60 insertions(+), 40 deletions(-)

diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md
index 61a21c3ef..a32138d86 100644
--- a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md
+++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md
@@ -151,7 +151,11 @@ later.

 ## FP32 Inference Instructions

-1. Download the ImageNet dataset and convert it to the TF records format
+1. The ImageNet dataset is required for testing accuracy and can also be
+   used when running benchmarking. If no dataset is provided when running
+   benchmarking, synthetic data will be used.
+
+   Download the ImageNet dataset and convert it to the TF records format
    using the instructions
    [here](https://github.com/tensorflow/models/tree/master/research/slim#an-automated-script-for-processing-imagenet-data).

@@ -198,7 +202,9 @@ later.
    [tensorflow/models](https://github.com/tensorflow/models) repo that was cloned in step 3.

-   * Run benchmarking for latency (with `--batch-size 1` and `--checkpoint` with a path to the checkpoint file directory):
+   * Run benchmarking for latency (with `--batch-size 1`, `--checkpoint`
+     with a path to the checkpoint file directory, and the `--data-location`
+     is optional):
     ```
    python launch_benchmark.py \
        --precision fp32 \
        --model-name mobilenet_v1 \
        --mode inference \
        --framework tensorflow \
        --socket-id 0 \
        --model-source-dir /home//tensorflow/models \
        --batch-size 1 \
        --data-location /dataset/Imagenet_Validation \
        --checkpoint /home//mobilenet_v1_fp32_pretrained_model
    ```
-   * Run benchmarking for throughput (with `--batch-size 100` and `--checkpoint` with a path to the checkpoint file directory):
+   * Run benchmarking for throughput (with `--batch-size 100`,
+     `--checkpoint` with a path to the checkpoint file directory, and
+     the `--data-location` is optional):
    ```
    python launch_benchmark.py \
        --precision fp32 \
diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/fp32/model_init.py b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/fp32/model_init.py
index d4e3ca5d7..8fa7391ae 100644
--- a/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/fp32/model_init.py
+++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/fp32/model_init.py
@@ -57,7 +57,6 @@ def __init__(self, args, custom_args=[], platform_util=None):
             self.command_prefix = ("{prefix} "
                                    "--dataset_name imagenet "
                                    "--checkpoint_path {checkpoint} "
-                                   "--dataset_dir {dataset} "
                                    "--dataset_split_name=validation "
                                    "--clone_on_cpu=True "
                                    "--model_name {model} "
@@ -65,9 +64,11 @@ def __init__(self, args, custom_args=[], platform_util=None):
                                    "--intra_op_parallelism_threads {intra} "
                                    "--batch_size {bz}").format(
                 prefix=self.command_prefix, checkpoint=self.args.checkpoint,
-                dataset=self.args.data_location, model=self.args.model_name,
-                inter=self.args.num_inter_threads,
+                model=self.args.model_name, inter=self.args.num_inter_threads,
                 intra=self.args.num_intra_threads, bz=self.args.batch_size)
+
+            if self.args.data_location:
+                self.command_prefix += " --dataset_dir {}".format(self.args.data_location)
         else:
             # add args for the accuracy script
             script_args_list = [
diff --git a/models/image_recognition/tensorflow/mobilenet_v1/inference/fp32/eval_image_classifier.py b/models/image_recognition/tensorflow/mobilenet_v1/inference/fp32/eval_image_classifier.py
index fd3165387..974913258 100644
--- a/models/image_recognition/tensorflow/mobilenet_v1/inference/fp32/eval_image_classifier.py
+++ b/models/image_recognition/tensorflow/mobilenet_v1/inference/fp32/eval_image_classifier.py
@@ -146,56 +146,66 @@ def end(self, run_context):
     print('Latency ms/step = %.1f' % (latency))

 def main(_):
-  if not FLAGS.dataset_dir:
-    raise ValueError('You must supply the dataset directory with --dataset_dir')
-
   tf.logging.set_verbosity(tf.logging.INFO)
-  #os.environ["OMP_NUM_THREADS"] = "54"
+
   with tf.Graph().as_default():
     tf_global_step = slim.get_or_create_global_step()

     ######################
     # Select the dataset #
     ######################
-    dataset = dataset_factory.get_dataset(
-        FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
+    if FLAGS.dataset_dir:
+      print("Inference using real data")
+      dataset = dataset_factory.get_dataset(
+          FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)
+      num_classes = dataset.num_classes - FLAGS.labels_offset
+    else:
+      print("Inference using synthetic data")
+      num_classes = 1000

     ####################
     # Select the model #
     ####################
     network_fn = nets_factory.get_network_fn(
         FLAGS.model_name,
-        num_classes=(dataset.num_classes - FLAGS.labels_offset),
-        is_training=False)
-
-    ##############################################################
-    # Create a dataset provider that loads data from the dataset #
-    ##############################################################
-    provider = slim.dataset_data_provider.DatasetDataProvider(
-        dataset,
-        shuffle=False,
-        common_queue_capacity=2 * FLAGS.batch_size,
-        common_queue_min=FLAGS.batch_size)
-    [image, label] = provider.get(['image', 'label'])
-    label -= FLAGS.labels_offset
-
-    #####################################
-    # Select the preprocessing function #
-    #####################################
-    preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
-    image_preprocessing_fn = preprocessing_factory.get_preprocessing(
-        preprocessing_name,
+        num_classes=num_classes,
         is_training=False)

     eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size

-    image = image_preprocessing_fn(image, eval_image_size, eval_image_size)
-
-    images, labels = tf.train.batch(
-        [image, label],
-        batch_size=FLAGS.batch_size,
-        num_threads=FLAGS.num_preprocessing_threads,
-        capacity=5 * FLAGS.batch_size)
+    if FLAGS.dataset_dir:
+      ##############################################################
+      # Create a dataset provider that loads data from the dataset #
+      ##############################################################
+      provider = slim.dataset_data_provider.DatasetDataProvider(
+          dataset,
+          shuffle=False,
+          common_queue_capacity=2 * FLAGS.batch_size,
+          common_queue_min=FLAGS.batch_size)
+      [image, label] = provider.get(['image', 'label'])
+      label -= FLAGS.labels_offset
+
+      #####################################
+      # Select the preprocessing function #
+      #####################################
+      preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name
+      image_preprocessing_fn = preprocessing_factory.get_preprocessing(
+          preprocessing_name,
+          is_training=False)
+
+      image = image_preprocessing_fn(image, eval_image_size, eval_image_size)
+
+      images, labels = tf.train.batch(
+          [image, label],
+          batch_size=FLAGS.batch_size,
+          num_threads=FLAGS.num_preprocessing_threads,
+          capacity=5 * FLAGS.batch_size)
+    else:
+      # Generate random images and labels with constant 0 when no dataset is used
+      input_shape = [FLAGS.batch_size, eval_image_size, eval_image_size, 3]
+      label_shape = [FLAGS.batch_size]
+      images = tf.random.uniform(input_shape, 0.0, 255.0, dtype=tf.float32, name='synthetic_images')
+      labels = tf.constant(0, shape=label_shape, dtype=tf.int64)

     ####################
     # Define the model #
@@ -258,4 +268,4 @@ def main(_):


 if __name__ == '__main__':
-  tf.app.run()
\ No newline at end of file
+  tf.app.run()
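The synthetic-data branch added above can be exercised in isolation. The following is a minimal sketch, assuming TensorFlow 1.x and an arbitrarily chosen batch size and image size; accuracy is meaningless on random inputs, so this path is only useful for throughput and latency measurements.

import tensorflow as tf

batch_size = 100
eval_image_size = 224

# Synthetic stand-ins for the ImageNet pipeline: uniform noise in the raw
# pixel range, plus all-zero labels.
images = tf.random.uniform(
    [batch_size, eval_image_size, eval_image_size, 3],
    0.0, 255.0, dtype=tf.float32, name='synthetic_images')
labels = tf.constant(0, shape=[batch_size], dtype=tf.int64)

with tf.Session() as sess:
    batch = sess.run(images)
    print(batch.shape)  # (100, 224, 224, 3)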
diff --git a/tests/unit/common/tensorflow/tf_model_args.txt b/tests/unit/common/tensorflow/tf_model_args.txt
index 376f8b602..50fb40e1f 100644
--- a/tests/unit/common/tensorflow/tf_model_args.txt
+++ b/tests/unit/common/tensorflow/tf_model_args.txt
@@ -58,6 +58,7 @@ run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-
 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 -l python /workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_dir /dataset --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 1
 python common/tensorflow/run_tf_benchmark.py --framework=tensorflow --use-case=language_modeling --model-name=lm-1b --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models,numactl --cpunodebind=0 --membind=0 python /workspace/models/benchmark.py -b=1 -I=100 --inter=1 --intra=28
 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 -l python /workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_dir /dataset --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 100
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints,numactl --cpunodebind=0 -l python /workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 100
 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --checkpoint=/checkpoints --in-graph=/in_graph/mobilenet_v1_1.0_224_frozen.pb --data-location=/dataset,python /workspace/intelai_models/inference/fp32/accuracy.py --batch_size=100 --num_inter_threads=2 --input_graph=/in_graph/mobilenet_v1_1.0_224_frozen.pb --num_intra_threads=56 --data_location=/dataset
 run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=256 --socket-id 0 --accuracy-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=256 --inference_only --accuracy_only
 run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=256 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=256 --inference_only --benchmark_only

From fdee53ebe72af653889e1b83b86f3ea78df66c07 Mon Sep 17 00:00:00 2001
From: Abolfazl Shahbazi <12436063+ashahba@users.noreply.github.com>
Date: Mon, 15 Apr 2019 11:36:07 -0700 Subject: [PATCH 18/62] Use --no-cache-dir option during pip and virtualenv install (#285) --- Jenkinsfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Jenkinsfile b/Jenkinsfile index eac6e7fc9..0eb363206 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -16,8 +16,8 @@ node('skx') { sudo apt-get install -y python3-dev || sudo yum install -y python36-devel.x86_64 # virtualenv 16.3.0 is broken do not use it - python2 -m pip install --force-reinstall --user --upgrade pip virtualenv!=16.3.0 tox - python3 -m pip install --force-reinstall --user --upgrade pip virtualenv!=16.3.0 tox + python2 -m pip install --no-cache-dir --user --upgrade pip==19.0.3 virtualenv!=16.3.0 tox + python3 -m pip install --no-cache-dir --user --upgrade pip==19.0.3 virtualenv!=16.3.0 tox """ } stage('Style tests') { From c827585ea8087ad8f5428e513d00014dceef911c Mon Sep 17 00:00:00 2001 From: mjkyung Date: Wed, 17 Apr 2019 10:42:35 -0700 Subject: [PATCH 19/62] Add DenseNet 169 FP32 inference benchmarking scripts (#281) * Initial structure of Densenet169 * Densenet169 * add custom args * Updated custom args default values * add argparse import * Unify the default value for output layer name in both accuracy and benchmark * Add Intel License header * flake8 style fix * Update README.md and add unit test commands * Remove unused files and add minor fix per code review * Update unit test command * Update KMP setting * import fix * Change default batch_size to 100 * Fix jason file * Fix typos --- benchmarks/README.md | 1 + benchmarks/common/tensorflow/start.sh | 15 + .../tensorflow/densenet169/README.md | 137 ++++++ .../tensorflow/densenet169/__init__.py | 19 + .../densenet169/inference/__init__.py | 19 + .../densenet169/inference/fp32/__init__.py | 19 + .../densenet169/inference/fp32/config.json | 7 + .../densenet169/inference/fp32/model_init.py | 107 +++++ .../densenet169/inference/fp32/accuracy.py | 134 ++++++ .../densenet169/inference/fp32/benchmark.py | 161 +++++++ .../densenet169/inference/fp32/cnn_util.py | 50 +++ .../densenet169/inference/fp32/dataset.py | 103 +++++ .../inference/fp32/densenet_preprocessing.py | 391 ++++++++++++++++ .../inference/fp32/image_preprocessing.py | 420 ++++++++++++++++++ .../unit/common/tensorflow/tf_model_args.txt | 3 + 15 files changed, 1586 insertions(+) create mode 100644 benchmarks/image_recognition/tensorflow/densenet169/README.md create mode 100644 benchmarks/image_recognition/tensorflow/densenet169/__init__.py create mode 100644 benchmarks/image_recognition/tensorflow/densenet169/inference/__init__.py create mode 100644 benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/__init__.py create mode 100644 benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/config.json create mode 100644 benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/model_init.py create mode 100644 models/image_recognition/tensorflow/densenet169/inference/fp32/accuracy.py create mode 100644 models/image_recognition/tensorflow/densenet169/inference/fp32/benchmark.py create mode 100644 models/image_recognition/tensorflow/densenet169/inference/fp32/cnn_util.py create mode 100644 models/image_recognition/tensorflow/densenet169/inference/fp32/dataset.py create mode 100644 models/image_recognition/tensorflow/densenet169/inference/fp32/densenet_preprocessing.py create mode 100644 models/image_recognition/tensorflow/densenet169/inference/fp32/image_preprocessing.py diff --git a/benchmarks/README.md b/benchmarks/README.md 
index ad37797fc..4f5a83172 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -19,6 +19,7 @@ dependencies to be installed: | Content Creation | TensorFlow | [DRAW](https://arxiv.org/pdf/1502.04623.pdf) | Inference | [FP32](content_creation/tensorflow/draw/README.md#fp32-inference-instructions) | | Face Detection and Alignment | Tensorflow | [FaceNet](https://arxiv.org/pdf/1503.03832.pdf) | Inference | [FP32](face_detection_and_alignment/tensorflow/facenet/README.md#fp32-inference-instructions) | | Face Detection and Alignment | TensorFlow | [MTCC](https://arxiv.org/pdf/1604.02878.pdf) | Inference | [FP32](face_detection_and_alignment/tensorflow/mtcc/README.md#fp32-inference-instructions) | +| Image Recognition | TensorFlow | [DenseNet169](https://arxiv.org/pdf/1608.06993.pdf) | Inference | [FP32](image_recognition/tensorflow/densenet169/README.md#fp32-inference-instructions) | | Image Recognition | TensorFlow | [Inception ResNet V2](https://arxiv.org/pdf/1602.07261.pdf) | Inference | [Int8](image_recognition/tensorflow/inception_resnet_v2/README.md#int8-inference-instructions) [FP32](image_recognition/tensorflow/inception_resnet_v2/README.md#fp32-inference-instructions) | | Image Recognition | TensorFlow | [Inception V3](https://arxiv.org/pdf/1512.00567.pdf) | Inference | [Int8](image_recognition/tensorflow/inceptionv3/README.md#int8-inference-instructions) [FP32](image_recognition/tensorflow/inceptionv3/README.md#fp32-inference-instructions) | | Image Recognition | TensorFlow | [Inception V4](https://arxiv.org/pdf/1602.07261.pdf) | Inference | [Int8](image_recognition/tensorflow/inceptionv4/README.md#int8-inference-instructions) [FP32](image_recognition/tensorflow/inceptionv4/README.md#fp32-inference-instructions) | diff --git a/benchmarks/common/tensorflow/start.sh b/benchmarks/common/tensorflow/start.sh index 88492f8c5..d93183325 100755 --- a/benchmarks/common/tensorflow/start.sh +++ b/benchmarks/common/tensorflow/start.sh @@ -278,6 +278,19 @@ function dcgan() { fi } +# DenseNet 169 model +function densenet169() { + if [ ${PRECISION} == "fp32" ]; then + CMD="${CMD} $(add_arg "--input_height" ${input_height}) $(add_arg "--input_width" ${input_width}) \ + $(add_arg "--warmup_steps" ${warmup_steps}) $(add_arg "--steps" ${steps}) $(add_arg "--input_layer" ${input_layer}) \ + $(add_arg "--output_layer" ${output_layer})" + PYTHONPATH=${PYTHONPATH} CMD=${CMD} run_model + else + echo "PRECISION=${PRECISION} is not supported for ${MODEL_NAME}" + exit 1 + fi +} + # DRAW model function draw() { if [ ${PRECISION} == "fp32" ]; then @@ -806,6 +819,8 @@ echo "Log output location: ${LOGFILE}" MODEL_NAME=$(echo ${MODEL_NAME} | tr 'A-Z' 'a-z') if [ ${MODEL_NAME} == "dcgan" ]; then dcgan +elif [ ${MODEL_NAME} == "densenet169" ]; then + densenet169 elif [ ${MODEL_NAME} == "draw" ]; then draw elif [ ${MODEL_NAME} == "facenet" ]; then diff --git a/benchmarks/image_recognition/tensorflow/densenet169/README.md b/benchmarks/image_recognition/tensorflow/densenet169/README.md new file mode 100644 index 000000000..bf6b1f84f --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/densenet169/README.md @@ -0,0 +1,137 @@ +# DenseNet 169 + +This document has instructions for how to run DenseNet 169 for the +following modes/precisions: +* [FP32 inference](#fp32-inference-instructions) + +## FP32 Inference Instructions + +1. Download ImageNet dataset. + + This step is required only for running accuracy, for running benchmark we do not need to provide dataset. 
+
+   Register and download the ImageNet dataset. Once you have the raw ImageNet dataset downloaded, it needs to be
+   converted to the TFRecord format. The TensorFlow models repo provides
+   [scripts and instructions](https://github.com/tensorflow/models/tree/master/research/slim#an-automated-script-for-processing-imagenet-data)
+   to download, process, and convert the ImageNet dataset to the TFRecord format. After converting the data, you
+   should have a directory with the sharded dataset similar to the listing below. Only the `validation-*` files are
+   needed; the `train-*` files can be discarded:
+   ```
+   $ ll /home/myuser/datasets/ImageNet_TFRecords
+   -rw-r--r--. 1 user 143009929 Jun 20 14:53 train-00000-of-01024
+   -rw-r--r--. 1 user 144699468 Jun 20 14:53 train-00001-of-01024
+   -rw-r--r--. 1 user 138428833 Jun 20 14:53 train-00002-of-01024
+   ...
+   -rw-r--r--. 1 user 143137777 Jun 20 15:08 train-01022-of-01024
+   -rw-r--r--. 1 user 143315487 Jun 20 15:08 train-01023-of-01024
+   -rw-r--r--. 1 user 52223858 Jun 20 15:08 validation-00000-of-00128
+   -rw-r--r--. 1 user 51019711 Jun 20 15:08 validation-00001-of-00128
+   -rw-r--r--. 1 user 51520046 Jun 20 15:08 validation-00002-of-00128
+   ...
+   -rw-r--r--. 1 user 52508270 Jun 20 15:09 validation-00126-of-00128
+   -rw-r--r--. 1 user 55292089 Jun 20 15:09 validation-00127-of-00128
+   ```
+
+2. A link to download the pre-trained model is coming soon.
+
+3. Clone the [intelai/models](https://github.com/intelai/models) repo
+   and then run the benchmarking scripts to benchmark throughput, latency, or accuracy.
+   For `--data-location` in the accuracy run, use the ImageNet validation data path from step 1.
+   Each benchmark run has user-configurable arguments, separated from the regular arguments by `--` at the end
+   of the command. Unless configured, these arguments run with their default values.
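+   The arguments after `--` reach the model's `model_init.py` as a plain Python list and are parsed there with
+   `argparse` (the DenseNet 169 `parse_args` later in this patch follows this pattern). A minimal standalone
+   sketch of the mechanism, with hypothetical argument names and defaults, looks like this:
+   ```
+   import argparse
+
+   def parse_custom_args(custom_args, base_namespace=None):
+       # custom_args is the list that follows "--", e.g. ["--input_height=224", "--steps=50"]
+       parser = argparse.ArgumentParser()
+       parser.add_argument("--input_height", type=int, default=224)
+       parser.add_argument("--steps", type=int, default=100)
+       # Arguments not supplied keep their defaults, matching the behavior described above.
+       return parser.parse_args(custom_args, namespace=base_namespace)
+
+   args = parse_custom_args(["--input_height=224", "--steps=50"])
+   print(args.input_height, args.steps)  # prints: 224 50
+   ```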
+   Below are example commands for each benchmark case:
+
+   ```
+   $ git clone https://github.com/IntelAI/models.git
+
+   $ cd benchmarks
+   ```
+
+   For throughput (using `--benchmark-only`, `--socket-id 0` and `--batch-size 100`):
+   ```
+   python launch_benchmark.py \
+       --model-name densenet169 \
+       --precision fp32 \
+       --mode inference \
+       --framework tensorflow \
+       --benchmark-only \
+       --batch-size 100 \
+       --socket-id 0 \
+       --in-graph /home//densenet169_fp32_pretrained_model.pb \
+       --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \
+       -- input_height=224 input_width=224 warmup_steps=20 steps=100 \
+       input_layer="input" output_layer="densenet169/predictions/Reshape_1"
+   ```
+
+   For latency (using `--benchmark-only`, `--socket-id 0` and `--batch-size 1`):
+   ```
+   python launch_benchmark.py \
+       --model-name densenet169 \
+       --precision fp32 \
+       --mode inference \
+       --framework tensorflow \
+       --benchmark-only \
+       --batch-size 1 \
+       --socket-id 0 \
+       --in-graph /home//densenet169_fp32_pretrained_model.pb \
+       --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \
+       -- input_height=224 input_width=224 warmup_steps=20 steps=100 \
+       input_layer="input" output_layer="densenet169/predictions/Reshape_1"
+   ```
+
+   For accuracy (using your `--data-location`, `--socket-id 0`, `--accuracy-only` and
+   `--batch-size 100`):
+   ```
+   python launch_benchmark.py \
+       --model-name densenet169 \
+       --precision fp32 \
+       --mode inference \
+       --framework tensorflow \
+       --accuracy-only \
+       --batch-size 100 \
+       --socket-id 0 \
+       --in-graph /home//densenet169_fp32_pretrained_model.pb \
+       --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \
+       --data-location /home//imagenet_validation_dataset \
+       -- input_height=224 input_width=224 \
+       input_layer="input" output_layer="densenet169/predictions/Reshape_1"
+   ```
+
+   Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands
+   to get additional debug output or change the default output location.
+
+4. The log file is saved to the `models/benchmarks/common/tensorflow/logs` directory,
+   or the directory specified by the `--output-dir` arg. Below are examples of
+   what the tail of your log file should look like for the different configs.
+ + Example log tail when benchmarking for throughput: + ``` + steps = 80, 159.83471377 images/sec + Latency: 625.646317005 ms + steps = 90, 159.852789241 images/sec + Latency: 625.57557159 ms + steps = 100, 159.853966416 images/sec + Latency: 625.570964813 ms + Ran inference with batch size 100 + Log location outside container: {--output-dir value}/benchmark_densenet169_inference_fp32_20190412_023940.log + ``` + + Example log tail when benchmarking for latency: + ``` + steps = 80, 34.9948442873 images/sec + Latency: 28.5756379366 ms + steps = 90, 34.9644341907 images/sec + Latency: 28.6004914178 ms + steps = 100, 34.9655988121 images/sec + Latency: 28.5995388031 ms + Ran inference with batch size 1 + Log location outside container: {--output-dir value}/benchmark_densenet169_inference_fp32_20190412_024505.log + ``` + + Example log tail when running for accuracy: + ``` + 0.757505030181 + 0.757489959839 + 0.75749498998 + 0.75748 + Ran inference with batch size 100 + Log location outside container: {--output-dir value}/benchmark_densenet169_inference_fp32_20190412_021545.log + ``` diff --git a/benchmarks/image_recognition/tensorflow/densenet169/__init__.py b/benchmarks/image_recognition/tensorflow/densenet169/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/densenet169/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/image_recognition/tensorflow/densenet169/inference/__init__.py b/benchmarks/image_recognition/tensorflow/densenet169/inference/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/densenet169/inference/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/__init__.py b/benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/config.json b/benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/config.json new file mode 100644 index 000000000..812311847 --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters":{ + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/model_init.py b/benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/model_init.py new file mode 100644 index 000000000..19569b555 --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/model_init.py @@ -0,0 +1,107 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: EPL-2.0 +# + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import os + +from common.base_model_init import BaseModelInitializer +from common.base_model_init import set_env_var + + +class ModelInitializer(BaseModelInitializer): + """Model initializer for Densenet169 FP32 inference""" + + def __init__(self, args, custom_args=[], platform_util=None): + super(ModelInitializer, self).__init__(args, custom_args, platform_util) + self.cmd = self.get_numactl_command(self.args.socket_id) + "python " + + # Set KMP env vars, if they haven't already been set + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) + + if self.args.batch_size == -1: + self.args.batch_size = 100 + + # set num_inter_threads and num_intra_threads + self.set_num_inter_intra_threads() + + set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) + + self.parse_args() + + if self.args.benchmark_only: + run_script = os.path.join(self.args.intelai_models, + self.args.mode, self.args.precision, + "benchmark.py") + + script_args_list = [ + "input_graph", "input_height", "input_width", "batch_size", + "input_layer", "output_layer", "num_inter_threads", + "num_intra_threads", "warmup_steps", "steps"] + + elif self.args.accuracy_only: + run_script = os.path.join(self.args.intelai_models, + self.args.mode, self.args.precision, + "accuracy.py") + + script_args_list = [ + "input_graph", "data_location", "input_height", "input_width", + "batch_size", "input_layer", "output_layer", + "num_inter_threads", "num_intra_threads"] + + self.cmd = self.add_args_to_command(self.cmd + run_script, + script_args_list) + + def parse_args(self): + if self.custom_args: + parser = argparse.ArgumentParser() + parser.add_argument( + "--input_height", default=224, + dest='input_height', type=int, help="input height") + parser.add_argument( + "--input_width", default=224, + dest='input_width', type=int, help="input width") + parser.add_argument( + '--warmup_steps', dest='warmup_steps', + help='number of warmup steps', + type=int, default=20) + parser.add_argument( + '--steps', dest='steps', + help='number of steps', + type=int, default=100) + parser.add_argument( + '--input_layer', dest='input_layer', + help='name of input layer', + type=str, default="input") + parser.add_argument( + '--output_layer', dest='output_layer', + help='name of output layer', + type=str, default="densenet169/predictions/Reshape_1") + + self.args = parser.parse_args(self.custom_args, + namespace=self.args) + + def run(self): + if self.cmd: + self.run_command(self.cmd) diff --git a/models/image_recognition/tensorflow/densenet169/inference/fp32/accuracy.py b/models/image_recognition/tensorflow/densenet169/inference/fp32/accuracy.py new file mode 100644 index 000000000..35d598a48 --- /dev/null +++ b/models/image_recognition/tensorflow/densenet169/inference/fp32/accuracy.py @@ -0,0 +1,134 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import sys +import os +import time +import numpy as np +from tensorflow.core.protobuf import rewriter_config_pb2 +from google.protobuf import text_format +import tensorflow as tf +import image_preprocessing +import dataset + +NUM_TEST_IMAGES = 50000 + +def load_graph(model_file): + graph = tf.Graph() + graph_def = tf.GraphDef() + + import os + file_ext = os.path.splitext(model_file)[1] + + with open(model_file, "rb") as f: + if file_ext == '.pbtxt': + text_format.Merge(f.read(), graph_def) + else: + graph_def.ParseFromString(f.read()) + with graph.as_default(): + tf.import_graph_def(graph_def, name='') + return graph + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--input_graph", default=None, + help="graph/model to be executed") + parser.add_argument("--data_location", default=None, + help="full path to the validation data") + parser.add_argument("--input_height", default=224, + type=int, help="input height") + parser.add_argument("--input_width", default=224, + type=int, help="input width") + parser.add_argument("--batch_size", default=32, + type=int, help="batch size") + parser.add_argument("--input_layer", default="input", + help="name of input layer") + parser.add_argument("--output_layer", default="densenet169/predictions/Reshape_1", + help="name of output layer") + parser.add_argument( + '--num_inter_threads', + help='number threads across operators', + type=int, default=1) + parser.add_argument( + '--num_intra_threads', + help='number threads for an operator', + type=int, default=1) + args = parser.parse_args() + + if args.input_graph: + model_file = args.input_graph + else: + sys.exit("Please provide a graph file.") + input_height = args.input_height + input_width = args.input_width + batch_size = args.batch_size + input_layer = args.input_layer + output_layer = args.output_layer + num_inter_threads = args.num_inter_threads + num_intra_threads = args.num_intra_threads + data_location = args.data_location + dataset = dataset.ImagenetData(data_location) + preprocessor = image_preprocessing.ImagePreprocessor( + input_height, input_width, batch_size, + 1, # device count + tf.float32, # data_type for input fed to the graph + train=False, # doing inference + resize_method='crop') + images, labels = preprocessor.minibatch(dataset, subset='validation') + graph = load_graph(model_file) + input_tensor = graph.get_tensor_by_name(input_layer + ":0") + output_tensor = graph.get_tensor_by_name(output_layer + ":0") + + rewrite_options = rewriter_config_pb2.RewriterConfig( + layout_optimizer=rewriter_config_pb2.RewriterConfig.ON) + config = tf.ConfigProto() + config.inter_op_parallelism_threads = num_inter_threads + config.intra_op_parallelism_threads = num_intra_threads + config.graph_options.rewrite_options.remapping = ( + rewriter_config_pb2.RewriterConfig.OFF) + + total_accuracy1, total_accuracy5 = (0.0, 0.0) + num_processed_images = 0 + num_remaining_images = dataset.num_examples_per_epoch(subset='validation') \ + - num_processed_images + top1 = 0 + with tf.Session(config=config) as sess: + sess_graph = tf.Session(graph=graph, config=config) + + while num_remaining_images >= batch_size: + # Reads and preprocess data + #import pdb + 
#pdb.set_trace() + np_images, np_labels = sess.run([images[0], labels[0]]) + np_labels -= 1 + #print(np_labels.shape) + num_processed_images += batch_size + num_remaining_images -= batch_size + # Compute inference on the preprocessed data + predictions1 = sess_graph.run(output_tensor, + {input_tensor: np_images}) + if(batch_size !=1): + predictions1 = sess.run(tf.squeeze(predictions1)) + else : + predictions1 = sess.run(tf.reshape(predictions1,[1,1000])) + predictions2 = tf.argmax(predictions1, axis=1) + predictions = sess.run(predictions2) + top1 += batch_size - (np.count_nonzero(predictions - np_labels)) + print(top1/num_processed_images) diff --git a/models/image_recognition/tensorflow/densenet169/inference/fp32/benchmark.py b/models/image_recognition/tensorflow/densenet169/inference/fp32/benchmark.py new file mode 100644 index 000000000..4091b4137 --- /dev/null +++ b/models/image_recognition/tensorflow/densenet169/inference/fp32/benchmark.py @@ -0,0 +1,161 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. # You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import sys +import os +import time +import numpy as np +from tensorflow.core.protobuf import rewriter_config_pb2 +from google.protobuf import text_format +import tensorflow as tf + +def load_graph(model_file): + graph = tf.Graph() + graph_def = tf.GraphDef() + + import os + file_ext = os.path.splitext(model_file)[1] + + with open(model_file, "rb") as f: + if file_ext == '.pbtxt': + text_format.Merge(f.read(), graph_def) + else: + graph_def.ParseFromString(f.read()) + with graph.as_default(): + tf.import_graph_def(graph_def, name='') + + return graph + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--input_graph", default=None, + help="graph/model to be executed") + parser.add_argument("--input_height", default=224, + type=int, help="input height") + parser.add_argument("--input_width", default=224, + type=int, help="input width") + parser.add_argument("--batch_size", default=32, + type=int, help="batch size") + parser.add_argument("--input_layer", default="input", + help="name of input layer") + parser.add_argument("--output_layer", default="densenet169/predictions/Reshape_1", + help="name of output layer") + parser.add_argument( + '--num_inter_threads', + help='number threads across operators', + type=int, default=1) + parser.add_argument( + '--num_intra_threads', + help='number threads for an operator', + type=int, default=1) + parser.add_argument("-gpu", "--gpu", + default = -1, + type=int, help="Run on gpu, other wise cpu", + required=False) + + parser.add_argument("--warmup_steps", type=int, default=40, + help="number of warmup steps") + parser.add_argument("--steps", type=int, default=100, help="number of steps") + args = parser.parse_args() + + if args.input_graph: + model_file = args.input_graph + else: + sys.exit("Please provide a graph file.") + input_height = args.input_height + input_width = args.input_width + batch_size = args.batch_size + input_layer = args.input_layer + output_layer = args.output_layer + warmup_steps = args.warmup_steps + steps = args.steps + print(steps) + assert steps > 10, "Benchmark steps should be at least 10." 
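+  # (throughput/latency figures are printed every 10 steps, so runs shorter
+  # than 10 steps would produce no benchmark output)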
+ num_inter_threads = args.num_inter_threads + num_intra_threads = args.num_intra_threads + + input_shape = [batch_size, input_height, input_width, 3] + images = tf.truncated_normal( + input_shape, + dtype=tf.float32, + stddev=10, + name='synthetic_images') + + image_data = None + graph = load_graph(model_file) + + input_tensor = graph.get_tensor_by_name(input_layer + ":0"); + output_tensor = graph.get_tensor_by_name(output_layer + ":0"); + + rewrite_options = rewriter_config_pb2.RewriterConfig( + layout_optimizer=rewriter_config_pb2.RewriterConfig.ON) + config = tf.ConfigProto() + if (args.gpu < 0): + config.inter_op_parallelism_threads = num_inter_threads + config.intra_op_parallelism_threads = num_intra_threads + config.graph_options.rewrite_options.remapping = ( + rewriter_config_pb2.RewriterConfig.OFF) + #os.environ["OMP_NUM_THREADS"] = "14" + with tf.Session(config=config) as sess: + image_data = sess.run(images) + + with tf.Session(graph=graph, config=config) as sess: + sys.stdout.flush() + print("[Running warmup steps...]") + for t in range(warmup_steps): + start_time = time.time() + sess.run(output_tensor, {input_tensor: image_data}) + elapsed_time = time.time() - start_time + if((t+1) % 10 == 0): + print("steps = {0}, {1} images/sec" + "".format(t+1, batch_size/elapsed_time)) + avg = 0 + print("[Running benchmark steps...]") + total_time = 0; + total_images = 0; + for t in range(steps): + start_time = time.time() + results = sess.run(output_tensor, {input_tensor: image_data}) + elapsed_time = time.time() - start_time + avg += elapsed_time + if((t+1) % 10 == 0): + print("steps = {0}, {1} images/sec" + "".format(t+1, batch_size*(t+1)/avg)); + print(" Latency: {0} ms" + "".format(avg*1000. /(t+1))) diff --git a/models/image_recognition/tensorflow/densenet169/inference/fp32/cnn_util.py b/models/image_recognition/tensorflow/densenet169/inference/fp32/cnn_util.py new file mode 100644 index 000000000..32902d149 --- /dev/null +++ b/models/image_recognition/tensorflow/densenet169/inference/fp32/cnn_util.py @@ -0,0 +1,50 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Utilities for CNN benchmarks.""" + +import tensorflow as tf + + +def tensorflow_version_tuple(): + v = tf.__version__ + major, minor, patch = v.split('.') + return (int(major), int(minor), patch) + + +def tensorflow_version(): + vt = tensorflow_version_tuple() + return vt[0] * 1000 + vt[1] + diff --git a/models/image_recognition/tensorflow/densenet169/inference/fp32/dataset.py b/models/image_recognition/tensorflow/densenet169/inference/fp32/dataset.py new file mode 100644 index 000000000..88fdebce6 --- /dev/null +++ b/models/image_recognition/tensorflow/densenet169/inference/fp32/dataset.py @@ -0,0 +1,103 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Benchmark dataset utilities. 
+""" + +from abc import abstractmethod +import os + +import tensorflow as tf + + +class Dataset(object): + """Abstract class for cnn benchmarks dataset.""" + + def __init__(self, name, data_dir=None): + self.name = name + if data_dir is None: + raise ValueError('Data directory not specified') + self.data_dir = data_dir + + def tf_record_pattern(self, subset): + return os.path.join(self.data_dir, '%s-*-of-*' % subset) + + def reader(self): + return tf.TFRecordReader() + + @abstractmethod + def num_classes(self): + pass + + @abstractmethod + def num_examples_per_epoch(self, subset): + pass + + def __str__(self): + return self.name + + +class FlowersData(Dataset): + + def __init__(self, data_dir=None): + super(FlowersData, self).__init__('Flowers', data_dir) + + def num_classes(self): + return 5 + + def num_examples_per_epoch(self, subset): + if subset == 'train': + return 3170 + elif subset == 'validation': + return 500 + else: + raise ValueError('Invalid data subset "%s"' % subset) + + +class ImagenetData(Dataset): + + def __init__(self, data_dir=None): + super(ImagenetData, self).__init__('ImageNet', data_dir) + + def num_classes(self): + return 1000 + + def num_examples_per_epoch(self, subset='train'): + if subset == 'train': + return 1281167 + elif subset == 'validation': + return 50000 + else: + raise ValueError('Invalid data subset "%s"' % subset) diff --git a/models/image_recognition/tensorflow/densenet169/inference/fp32/densenet_preprocessing.py b/models/image_recognition/tensorflow/densenet169/inference/fp32/densenet_preprocessing.py new file mode 100644 index 000000000..298694af0 --- /dev/null +++ b/models/image_recognition/tensorflow/densenet169/inference/fp32/densenet_preprocessing.py @@ -0,0 +1,391 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Provides utilities to preprocess images. 
+ +The preprocessing steps for VGG were introduced in the following technical +report: + + Very Deep Convolutional Networks For Large-Scale Image Recognition + Karen Simonyan and Andrew Zisserman + arXiv technical report, 2015 + PDF: http://arxiv.org/pdf/1409.1556.pdf + ILSVRC 2014 Slides: http://www.robots.ox.ac.uk/~karen/pdf/ILSVRC_2014.pdf + CC-BY-4.0 + +More information can be obtained from the VGG website: +www.robots.ox.ac.uk/~vgg/research/very_deep/ +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +slim = tf.contrib.slim + +_R_MEAN = 123.68 +_G_MEAN = 116.78 +_B_MEAN = 103.94 + +_SCALE_FACTOR = 0.017 + +_RESIZE_SIDE_MIN = 256 +_RESIZE_SIDE_MAX = 512 + + +def _crop(image, offset_height, offset_width, crop_height, crop_width): + """Crops the given image using the provided offsets and sizes. + + Note that the method doesn't assume we know the input image size but it does + assume we know the input image rank. + + Args: + image: an image of shape [height, width, channels]. + offset_height: a scalar tensor indicating the height offset. + offset_width: a scalar tensor indicating the width offset. + crop_height: the height of the cropped image. + crop_width: the width of the cropped image. + + Returns: + the cropped (and resized) image. + + Raises: + InvalidArgumentError: if the rank is not 3 or if the image dimensions are + less than the crop size. + """ + original_shape = tf.shape(image) + + rank_assertion = tf.Assert( + tf.equal(tf.rank(image), 3), + ['Rank of image must be equal to 3.']) + with tf.control_dependencies([rank_assertion]): + cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]]) + + size_assertion = tf.Assert( + tf.logical_and( + tf.greater_equal(original_shape[0], crop_height), + tf.greater_equal(original_shape[1], crop_width)), + ['Crop size greater than the image size.']) + + offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0])) + + # Use tf.slice instead of crop_to_bounding box as it accepts tensors to + # define the crop size. + with tf.control_dependencies([size_assertion]): + image = tf.slice(image, offsets, cropped_shape) + return tf.reshape(image, cropped_shape) + + +def _random_crop(image_list, crop_height, crop_width): + """Crops the given list of images. + + The function applies the same crop to each image in the list. This can be + effectively applied when there are multiple image inputs of the same + dimension such as: + + image, depths, normals = _random_crop([image, depths, normals], 120, 150) + + Args: + image_list: a list of image tensors of the same dimension but possibly + varying channel. + crop_height: the new height. + crop_width: the new width. + + Returns: + the image_list with cropped images. + + Raises: + ValueError: if there are multiple image inputs provided with different size + or the images are smaller than the crop dimensions. + """ + if not image_list: + raise ValueError('Empty image_list.') + + # Compute the rank assertions. 
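+  # (each tensor in image_list must be rank 3, i.e. [height, width, channels])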
+ rank_assertions = [] + for i in range(len(image_list)): + image_rank = tf.rank(image_list[i]) + rank_assert = tf.Assert( + tf.equal(image_rank, 3), + ['Wrong rank for tensor %s [expected] [actual]', + image_list[i].name, 3, image_rank]) + rank_assertions.append(rank_assert) + + with tf.control_dependencies([rank_assertions[0]]): + image_shape = tf.shape(image_list[0]) + image_height = image_shape[0] + image_width = image_shape[1] + crop_size_assert = tf.Assert( + tf.logical_and( + tf.greater_equal(image_height, crop_height), + tf.greater_equal(image_width, crop_width)), + ['Crop size greater than the image size.']) + + asserts = [rank_assertions[0], crop_size_assert] + + for i in range(1, len(image_list)): + image = image_list[i] + asserts.append(rank_assertions[i]) + with tf.control_dependencies([rank_assertions[i]]): + shape = tf.shape(image) + height = shape[0] + width = shape[1] + + height_assert = tf.Assert( + tf.equal(height, image_height), + ['Wrong height for tensor %s [expected][actual]', + image.name, height, image_height]) + width_assert = tf.Assert( + tf.equal(width, image_width), + ['Wrong width for tensor %s [expected][actual]', + image.name, width, image_width]) + asserts.extend([height_assert, width_assert]) + + # Create a random bounding box. + # + # Use tf.random_uniform and not numpy.random.rand as doing the former would + # generate random numbers at graph eval time, unlike the latter which + # generates random numbers at graph definition time. + with tf.control_dependencies(asserts): + max_offset_height = tf.reshape(image_height - crop_height + 1, []) + with tf.control_dependencies(asserts): + max_offset_width = tf.reshape(image_width - crop_width + 1, []) + offset_height = tf.random_uniform( + [], maxval=max_offset_height, dtype=tf.int32) + offset_width = tf.random_uniform( + [], maxval=max_offset_width, dtype=tf.int32) + + return [_crop(image, offset_height, offset_width, + crop_height, crop_width) for image in image_list] + + +def _central_crop(image_list, crop_height, crop_width): + """Performs central crops of the given image list. + + Args: + image_list: a list of image tensors of the same dimension but possibly + varying channel. + crop_height: the height of the image following the crop. + crop_width: the width of the image following the crop. + + Returns: + the list of cropped images. + """ + outputs = [] + for image in image_list: + image_height = tf.shape(image)[0] + image_width = tf.shape(image)[1] + + offset_height = (image_height - crop_height) / 2 + offset_width = (image_width - crop_width) / 2 + + outputs.append(_crop(image, offset_height, offset_width, + crop_height, crop_width)) + return outputs + + +def _mean_image_subtraction(image, means): + """Subtracts the given means from each image channel. + + For example: + means = [123.68, 116.779, 103.939] + image = _mean_image_subtraction(image, means) + + Note that the rank of `image` must be known. + + Args: + image: a tensor of size [height, width, C]. + means: a C-vector of values to subtract from each channel. + + Returns: + the centered image. + + Raises: + ValueError: If the rank of `image` is unknown, if `image` has a rank other + than three or if the number of channels in `image` doesn't match the + number of values in `means`. 
+ """ + if image.get_shape().ndims != 3: + raise ValueError('Input must be of size [height, width, C>0]') + num_channels = image.get_shape().as_list()[-1] + if len(means) != num_channels: + raise ValueError('len(means) must match the number of channels') + + channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image) + for i in range(num_channels): + channels[i] -= means[i] + return tf.concat(axis=2, values=channels) + + +def _smallest_size_at_least(height, width, smallest_side): + """Computes new shape with the smallest side equal to `smallest_side`. + + Computes new shape with the smallest side equal to `smallest_side` while + preserving the original aspect ratio. + + Args: + height: an int32 scalar tensor indicating the current height. + width: an int32 scalar tensor indicating the current width. + smallest_side: A python integer or scalar `Tensor` indicating the size of + the smallest side after resize. + + Returns: + new_height: an int32 scalar tensor indicating the new height. + new_width: and int32 scalar tensor indicating the new width. + """ + smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32) + + height = tf.to_float(height) + width = tf.to_float(width) + smallest_side = tf.to_float(smallest_side) + + scale = tf.cond(tf.greater(height, width), + lambda: smallest_side / width, + lambda: smallest_side / height) + new_height = tf.to_int32(height * scale) + new_width = tf.to_int32(width * scale) + return new_height, new_width + + +def _aspect_preserving_resize(image, smallest_side): + """Resize images preserving the original aspect ratio. + + Args: + image: A 3-D image `Tensor`. + smallest_side: A python integer or scalar `Tensor` indicating the size of + the smallest side after resize. + + Returns: + resized_image: A 3-D tensor containing the resized image. + """ + smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32) + + shape = tf.shape(image) + height = shape[0] + width = shape[1] + new_height, new_width = _smallest_size_at_least(height, width, smallest_side) + image = tf.expand_dims(image, 0) + resized_image = tf.image.resize_bilinear(image, [new_height, new_width], + align_corners=False) + resized_image = tf.squeeze(resized_image) + resized_image.set_shape([None, None, 3]) + return resized_image + + +def preprocess_for_train(image, + output_height, + output_width, + resize_side_min=_RESIZE_SIDE_MIN, + resize_side_max=_RESIZE_SIDE_MAX): + """Preprocesses the given image for training. + + Note that the actual resizing scale is sampled from + [`resize_size_min`, `resize_size_max`]. + + Args: + image: A `Tensor` representing an image of arbitrary size. + output_height: The height of the image after preprocessing. + output_width: The width of the image after preprocessing. + resize_side_min: The lower bound for the smallest side of the image for + aspect-preserving resizing. + resize_side_max: The upper bound for the smallest side of the image for + aspect-preserving resizing. + + Returns: + A preprocessed image. 
+ """ + resize_side = tf.random_uniform( + [], minval=resize_side_min, maxval=resize_side_max+1, dtype=tf.int32) + + image = _aspect_preserving_resize(image, resize_side) + image = _random_crop([image], output_height, output_width)[0] + image.set_shape([output_height, output_width, 3]) + image = tf.to_float(image) + image = tf.image.random_flip_left_right(image) + + image = _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN]) + return image * _SCALE_FACTOR + + +def preprocess_for_eval(image, output_height, output_width, resize_side): + """Preprocesses the given image for evaluation. + + Args: + image: A `Tensor` representing an image of arbitrary size. + output_height: The height of the image after preprocessing. + output_width: The width of the image after preprocessing. + resize_side: The smallest side of the image for aspect-preserving resizing. + + Returns: + A preprocessed image. + """ + image = _aspect_preserving_resize(image, resize_side) + image = _central_crop([image], output_height, output_width)[0] + image.set_shape([output_height, output_width, 3]) + image = tf.to_float(image) + + image = _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN]) + return image * _SCALE_FACTOR + + +def preprocess_image(image, output_height, output_width, is_training=False, + resize_side_min=_RESIZE_SIDE_MIN, + resize_side_max=_RESIZE_SIDE_MAX): + """Preprocesses the given image. + + Args: + image: A `Tensor` representing an image of arbitrary size. + output_height: The height of the image after preprocessing. + output_width: The width of the image after preprocessing. + is_training: `True` if we're preprocessing the image for training and + `False` otherwise. + resize_side_min: The lower bound for the smallest side of the image for + aspect-preserving resizing. If `is_training` is `False`, then this value + is used for rescaling. + resize_side_max: The upper bound for the smallest side of the image for + aspect-preserving resizing. If `is_training` is `False`, this value is + ignored. Otherwise, the resize side is sampled from + [resize_size_min, resize_size_max]. + + Returns: + A preprocessed image. + """ + if is_training: + return preprocess_for_train(image, output_height, output_width, + resize_side_min, resize_side_max) + else: + return preprocess_for_eval(image, output_height, output_width, + resize_side_min) diff --git a/models/image_recognition/tensorflow/densenet169/inference/fp32/image_preprocessing.py b/models/image_recognition/tensorflow/densenet169/inference/fp32/image_preprocessing.py new file mode 100644 index 000000000..fe5d0eee0 --- /dev/null +++ b/models/image_recognition/tensorflow/densenet169/inference/fp32/image_preprocessing.py @@ -0,0 +1,420 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Image pre-processing utilities. +""" +from six.moves import xrange # pylint: disable=redefined-builtin +import tensorflow as tf +from random import randint +import densenet_preprocessing +from tensorflow.python.ops import data_flow_ops +import cnn_util + +def parse_example_proto(example_serialized): + """Parses an Example proto containing a training example of an image. + + The output of the build_image_data.py image preprocessing script is a dataset + containing serialized Example protocol buffers. Each Example proto contains + the following fields: + + image/height: 462 + image/width: 581 + image/colorspace: 'RGB' + image/channels: 3 + image/class/label: 615 + image/class/synset: 'n03623198' + image/class/text: 'knee pad' + image/object/bbox/xmin: 0.1 + image/object/bbox/xmax: 0.9 + image/object/bbox/ymin: 0.2 + image/object/bbox/ymax: 0.6 + image/object/bbox/label: 615 + image/format: 'JPEG' + image/filename: 'ILSVRC2012_val_00041207.JPEG' + image/encoded: + + Args: + example_serialized: scalar Tensor tf.string containing a serialized + Example protocol buffer. + + Returns: + image_buffer: Tensor tf.string containing the contents of a JPEG file. + label: Tensor tf.int32 containing the label. + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged as + [ymin, xmin, ymax, xmax]. + text: Tensor tf.string containing the human-readable label. + """ + # Dense features in Example proto. + feature_map = { + 'image/encoded': tf.FixedLenFeature([], dtype=tf.string, + default_value=''), + 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, + default_value=-1), + 'image/class/text': tf.FixedLenFeature([], dtype=tf.string, + default_value=''), + } + sparse_float32 = tf.VarLenFeature(dtype=tf.float32) + # Sparse features in Example proto. + feature_map.update( + {k: sparse_float32 for k in ['image/object/bbox/xmin', + 'image/object/bbox/ymin', + 'image/object/bbox/xmax', + 'image/object/bbox/ymax']}) + + features = tf.parse_single_example(example_serialized, feature_map) + label = tf.cast(features['image/class/label'], dtype=tf.int32) + + xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0) + ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0) + xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0) + ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0) + + # Note that we impose an ordering of (y, x) just to make life difficult. + bbox = tf.concat([ymin, xmin, ymax, xmax], 0) + + # Force the variable number of bounding boxes into the shape + # [1, num_boxes, coords]. 
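+  # bbox is [4, num_boxes] at this point; add a batch dimension and move the
+  # coordinates to the last axis to get the [1, num_boxes, 4] shape noted above.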
+ bbox = tf.expand_dims(bbox, 0) + bbox = tf.transpose(bbox, [0, 2, 1]) + + return features['image/encoded'], label, bbox, features['image/class/text'] + + +def decode_jpeg(image_buffer, scope=None): # , dtype=tf.float32): + """Decode a JPEG string into one 3-D float image Tensor. + + Args: + image_buffer: scalar string Tensor. + scope: Optional scope for op_scope. + Returns: + 3-D float Tensor with values ranging from [0, 1). + """ + # with tf.op_scope([image_buffer], scope, 'decode_jpeg'): + # with tf.name_scope(scope, 'decode_jpeg', [image_buffer]): + with tf.name_scope(scope or 'decode_jpeg'): + # Decode the string as an RGB JPEG. + # Note that the resulting image contains an unknown height and width + # that is set dynamically by decode_jpeg. In other words, the height + # and width of image is unknown at compile-time. + image = tf.image.decode_jpeg(image_buffer, channels=3, + fancy_upscaling=False, + dct_method='INTEGER_FAST') + + # image = tf.Print(image, [tf.shape(image)], 'Image shape: ') + + return image + + +def eval_image(image, height, width, bbox, thread_id, resize): + """Get the image for model evaluation.""" + with tf.name_scope('eval_image'): + if not thread_id: + tf.summary.image( + 'original_image', tf.expand_dims(image, 0)) + + if resize == 'crop': + # Note: This is much slower than crop_to_bounding_box + # It seems that the redundant pad step has huge overhead + # distorted_image = tf.image.resize_image_with_crop_or_pad(image, + # height, width) + shape = tf.shape(image) + image = tf.cond(tf.less(shape[0], shape[1]), + lambda: tf.image.resize_images(image, tf.convert_to_tensor([256, 256*shape[1]/shape[0]], dtype=tf.int32)), + lambda: tf.image.resize_images(image, tf.convert_to_tensor([256*shape[0]/shape[1], 256], dtype=tf.int32))) + shape = tf.shape(image) + + y0 = (shape[0] - height) // 2 + x0 = (shape[1] - width) // 2 + #y0=tf.random_uniform([],minval=0,maxval=(shape[0] - height + 1), dtype=tf.int32) + #x0=tf.random_uniform([],minval=0,maxval=(shape[1] - width + 1), dtype=tf.int32) + ## distorted_image = tf.slice(image, [y0,x0,0], [height,width,3]) + distorted_image = tf.image.crop_to_bounding_box(image, y0, x0, height, + width) + else: + sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( + tf.shape(image), + bounding_boxes=bbox, + min_object_covered=0.5, + aspect_ratio_range=[0.90, 1.10], + area_range=[0.10, 1.0], + max_attempts=100, + use_image_if_no_bounding_boxes=True) + bbox_begin, bbox_size, _ = sample_distorted_bounding_box + # Crop the image to the specified bounding box. + distorted_image = tf.slice(image, bbox_begin, bbox_size) + resize_method = { + 'nearest': tf.image.ResizeMethod.NEAREST_NEIGHBOR, + 'bilinear': tf.image.ResizeMethod.BILINEAR, + 'bicubic': tf.image.ResizeMethod.BICUBIC, + 'area': tf.image.ResizeMethod.AREA + }[resize] + # This resizing operation may distort the images because the aspect + # ratio is not respected. + if cnn_util.tensorflow_version() >= 11: + distorted_image = tf.image.resize_images( + distorted_image, [height, width], + resize_method, + align_corners=False) + else: + distorted_image = tf.image.resize_images( + distorted_image, height, width, resize_method, align_corners=False) + distorted_image.set_shape([height, width, 3]) + if not thread_id: + tf.summary.image( + 'cropped_resized_image', tf.expand_dims(distorted_image, 0)) + image = distorted_image + return image + + +def distort_image(image, height, width, bbox, thread_id=0, scope=None): + """Distort one image for training a network. 
+ + Distorting images provides a useful technique for augmenting the data + set during training in order to make the network invariant to aspects + of the image that do not effect the label. + + Args: + image: 3-D float Tensor of image + height: integer + width: integer + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged + as [ymin, xmin, ymax, xmax]. + thread_id: integer indicating the preprocessing thread. + scope: Optional scope for op_scope. + Returns: + 3-D float Tensor of distorted image used for training. + """ + # with tf.op_scope([image, height, width, bbox], scope, 'distort_image'): + # with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]): + with tf.name_scope(scope or 'distort_image'): + # Each bounding box has shape [1, num_boxes, box coords] and + # the coordinates are ordered [ymin, xmin, ymax, xmax]. + + # After this point, all image pixels reside in [0,1) + # until the very end, when they're rescaled to (-1, 1). The various + # adjust_* ops all require this range for dtype float. + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + + # Display the bounding box in the first thread only. + if not thread_id: + image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), + bbox) + tf.summary.image( + 'image_with_bounding_boxes', image_with_box) + + # A large fraction of image datasets contain a human-annotated bounding + # box delineating the region of the image containing the object of interest. + # We choose to create a new bounding box for the object which is a randomly + # distorted version of the human-annotated bounding box that obeys an allowed + # range of aspect ratios, sizes and overlap with the human-annotated + # bounding box. If no box is supplied, then we assume the bounding box is + # the entire image. + sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( + tf.shape(image), + bounding_boxes=bbox, + min_object_covered=0.1, + aspect_ratio_range=[0.99, 1.01], + area_range=[0.05, 1.0], + max_attempts=100, + use_image_if_no_bounding_boxes=True) + bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box + if not thread_id: + image_with_distorted_box = tf.image.draw_bounding_boxes( + tf.expand_dims(image, 0), distort_bbox) + tf.summary.image( + 'images_with_distorted_bounding_box', + image_with_distorted_box) + + # Crop the image to the specified bounding box. + distorted_image = tf.slice(image, bbox_begin, bbox_size) + + # This resizing operation may distort the images because the aspect + # ratio is not respected. We select a resize method in a round robin + # fashion based on the thread number. + # Note that ResizeMethod contains 4 enumerated resizing methods. + resize_method = thread_id % 4 + if cnn_util.tensorflow_version() >= 11: + distorted_image = tf.image.resize_images( + distorted_image, [height, width], resize_method, align_corners=False) + else: + distorted_image = tf.image.resize_images( + distorted_image, height, width, resize_method, align_corners=False) + # Restore the shape since the dynamic slice based upon the bbox_size loses + # the third dimension. + distorted_image.set_shape([height, width, 3]) + if not thread_id: + tf.summary.image( + 'cropped_resized_image', + tf.expand_dims(distorted_image, 0)) + + # Randomly flip the image horizontally. + distorted_image = tf.image.random_flip_left_right(distorted_image) + + # Randomly distort the colors. 
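+    # distort_color applies brightness/saturation/hue/contrast adjustments in
+    # one of two fixed orderings, selected by thread_id (see distort_color below).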
+ distorted_image = distort_color(distorted_image, thread_id)
+
+ # Note: This ensures the scaling matches the output of eval_image
+ distorted_image *= 256
+
+ if not thread_id:
+ tf.summary.image(
+ 'final_distorted_image',
+ tf.expand_dims(distorted_image, 0))
+ return distorted_image
+
+
+def distort_color(image, thread_id=0, scope=None):
+ """Distort the color of the image.
+
+ Each color distortion is non-commutative and thus the ordering of the color
+ ops matters. Ideally we would randomly permute the ordering of the color ops.
+ Rather than adding that level of complication, we select a distinct ordering
+ of color ops for each preprocessing thread.
+
+ Args:
+ image: Tensor containing single image.
+ thread_id: preprocessing thread ID.
+ scope: Optional scope for op_scope.
+ Returns:
+ color-distorted image
+ """
+ # with tf.op_scope([image], scope, 'distort_color'):
+ # with tf.name_scope(scope, 'distort_color', [image]):
+ with tf.name_scope(scope or 'distort_color'):
+ color_ordering = thread_id % 2
+
+ if color_ordering == 0:
+ image = tf.image.random_brightness(image, max_delta=32. / 255.)
+ image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
+ image = tf.image.random_hue(image, max_delta=0.2)
+ image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
+ elif color_ordering == 1:
+ image = tf.image.random_brightness(image, max_delta=32. / 255.)
+ image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
+ image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
+ image = tf.image.random_hue(image, max_delta=0.2)
+
+ # The random_* ops do not necessarily clamp.
+ image = tf.clip_by_value(image, 0.0, 1.0)
+ return image
+
+
+class ImagePreprocessor(object):
+ """Preprocessor for input images."""
+
+ def __init__(self,
+ height,
+ width,
+ batch_size,
+ device_count,
+ dtype=tf.float32,
+ train=True,
+ distortions=None,
+ resize_method=None):
+ self.height = height
+ self.width = width
+ self.batch_size = batch_size
+ self.device_count = device_count
+ self.dtype = dtype
+ self.train = train
+ self.resize_method = resize_method
+ if distortions is None:
+ distortions = False
+ self.distortions = distortions
+ if self.batch_size % self.device_count != 0:
+ raise ValueError(
+ ('batch_size must be a multiple of device_count: '
+ 'batch_size %d, device_count: %d') %
+ (self.batch_size, self.device_count))
+ self.batch_size_per_device = self.batch_size // self.device_count
+
+ def preprocess(self, image_buffer, bbox, thread_id):
+ """Preprocessing image_buffer using thread_id."""
+ # Note: Width and height of the image are known only at runtime. 
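+ # Decode the JPEG once, then branch: training with distortions enabled
+ # goes through distort_image() above, while all other cases fall through
+ # to the DenseNet preprocessing helper; its final positional argument
+ # (False) presumably disables training-time augmentation.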
+ image = tf.image.decode_jpeg(image_buffer, channels=3, + dct_method='INTEGER_FAST') + if self.train and self.distortions: + image = distort_image(image, self.height, self.width, bbox, thread_id) + else: + #image = eval_image(image, self.height, self.width, bbox, thread_id, + # self.resize_method) + image = densenet_preprocessing.preprocess_image(image,224,224,False) + # Note: image is now float32 [height,width,3] with range [0, 255] + + # image = tf.cast(image, tf.uint8) # HACK TESTING + + return image + + def minibatch(self, dataset, subset): + with tf.name_scope('batch_processing'): + images = [[] for i in range(self.device_count)] + labels = [[] for i in range(self.device_count)] + record_input = data_flow_ops.RecordInput( + file_pattern=dataset.tf_record_pattern(subset), + seed=randint(0, 9000), + parallelism=64, + buffer_size=10000, + batch_size=self.batch_size, + name='record_input') + records = record_input.get_yield_op() + records = tf.split(records, self.batch_size, 0) + records = [tf.reshape(record, []) for record in records] + for i in xrange(self.batch_size): + value = records[i] + image_buffer, label_index, bbox, _ = parse_example_proto(value) + image = self.preprocess(image_buffer, bbox, i % 4) + + device_index = i % self.device_count + images[device_index].append(image) + labels[device_index].append(label_index) + label_index_batch = [None] * self.device_count + for device_index in xrange(self.device_count): + images[device_index] = tf.parallel_stack(images[device_index]) + label_index_batch[device_index] = tf.concat(labels[device_index], 0) + + # dynamic_pad=True) # HACK TESTING dynamic_pad=True + images[device_index] = tf.cast(images[device_index], self.dtype) + depth = 3 + images[device_index] = tf.reshape( + images[device_index], + shape=[self.batch_size_per_device, self.height, self.width, depth]) + label_index_batch[device_index] = tf.reshape( + label_index_batch[device_index], [self.batch_size_per_device]) + # Display the training images in the visualizer. + # tf.summary.image('images', images) + + return images, label_index_batch diff --git a/tests/unit/common/tensorflow/tf_model_args.txt b/tests/unit/common/tensorflow/tf_model_args.txt index 50fb40e1f..21f680eda 100644 --- a/tests/unit/common/tensorflow/tf_model_args.txt +++ b/tests/unit/common/tensorflow/tf_model_args.txt @@ -1,4 +1,7 @@ run_tf_benchmark.py --framework tensorflow --use-case recommendation --precision fp32 --mode inference --model-name wide_deep --batch-size 1024 --data-location /dataset --checkpoint /checkpoints --intelai-models . 
--verbose, OMP_NUM_THREADS=1 numactl --cpunodebind=0 --membind=0 python inference/fp32/wide_deep_inference.py --data_dir=/dataset --model_dir=/checkpoints --batch_size=1024 +run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb, numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/benchmark.py --num_intra_threads=28 --num_inter_threads=1 --batch_size=100 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb +run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb, numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/benchmark.py --num_intra_threads=28 --num_inter_threads=1 --batch_size=1 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb +run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb --data-location=/dataset, numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/accuracy.py --num_intra_threads=28 --num_inter_threads=1 --batch_size=100 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb --data_location=/dataset run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 100 --in-graph /final_int8_inceptionv3.pb --intelai-models . --accuracy-only --verbose,python ./int8/accuracy.py --num_intra_threads=56 --num_inter_threads=2 --batch_size=100 --input_graph=/final_int8_inceptionv3.pb run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 1 --in-graph /final_int8_inceptionv3.pb --intelai-models . --benchmark-only --socket-id 0 --verbose,numactl --cpunodebind=0 --membind=0 python ./int8/benchmark.py --warmup_steps=10 --num_intra_threads=28 --num_inter_threads=1 --batch_size=1 --input_graph=/final_int8_inceptionv3.pb --steps=50 --num_cores=28 run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /final_int8_inceptionv3.pb --intelai-models . 
--benchmark-only --socket-id 0 --verbose,numactl --cpunodebind=0 --membind=0 python ./int8/benchmark.py --warmup_steps=10 --num_intra_threads=28 --num_inter_threads=1 --batch_size=128 --input_graph=/final_int8_inceptionv3.pb --steps=50 --num_cores=28 From 7db6647e7490b42cd0e96b6fd69d980769c5f02d Mon Sep 17 00:00:00 2001 From: Dina Suehiro Jones Date: Thu, 18 Apr 2019 15:13:03 -0700 Subject: [PATCH 20/62] Add support for TCMalloc (#287) --- .../dcgan/inference/fp32/model_init.py | 2 +- benchmarks/common/base_benchmark_util.py | 16 ++ benchmarks/common/base_model_init.py | 32 +++- benchmarks/common/tensorflow/start.sh | 11 ++ .../draw/inference/fp32/model_init.py | 2 +- .../facenet/inference/fp32/model_init.py | 2 +- .../mtcc/inference/fp32/model_init.py | 2 +- .../densenet169/inference/fp32/model_init.py | 2 +- .../tensorflow/inception_resnet_v2/README.md | 30 ++-- .../inference/fp32/model_init.py | 2 +- .../inference/int8/model_init.py | 2 +- .../tensorflow/inceptionv3/README.md | 22 +-- .../inceptionv3/inference/fp32/model_init.py | 2 +- .../inceptionv3/inference/int8/model_init.py | 6 +- .../tensorflow/inceptionv4/README.md | 42 ++--- .../inference/inceptionv4_model_init.py | 2 +- .../mobilenet_v1/inference/int8/model_init.py | 2 +- .../resnet101/inference/fp32/model_init.py | 2 +- .../resnet101/inference/int8/model_init.py | 4 +- .../tensorflow/resnet50/README.md | 13 +- .../resnet50/inference/fp32/model_init.py | 2 +- .../resnet50/inference/int8/model_init.py | 4 +- .../maskrcnn/inference/fp32/model_init.py | 2 +- .../unet/inference/fp32/model_init.py | 2 +- .../lm-1b/inference/fp32/model_init.py | 2 +- .../gnmt/inference/fp32/model_init.py | 2 +- .../inference/fp32/model_init.py | 2 +- .../inference/fp32/model_init.py | 2 +- benchmarks/launch_benchmark.py | 2 + .../faster_rcnn/inference/fp32/model_init.py | 2 +- .../faster_rcnn/inference/int8/model_init.py | 2 +- .../tensorflow/rfcn/README.md | 16 +- .../rfcn/inference/fp32/model_init.py | 2 +- .../rfcn/inference/int8/model_init.py | 6 +- .../tensorflow/ssd-mobilenet/README.md | 12 +- .../inference/fp32/model_init.py | 2 +- .../ssd-mobilenet/inference/int8/config.json | 2 +- .../inference/int8/model_init.py | 2 +- .../ssd-resnet34/inference/fp32/model_init.py | 2 +- .../ncf/inference/fp32/model_init.py | 2 +- .../tensorflow/wide_deep_large_ds/README.md | 6 +- .../inference/fp32/model_init.py | 2 +- .../inference/int8/model_init.py | 2 +- docs/general/tensorflow/LaunchBenchmark.md | 10 ++ .../eval_image_classifier_inference.py | 4 +- .../tensorflow/test_run_tf_benchmarks.py | 9 +- .../unit/common/tensorflow/tf_model_args.txt | 158 +++++++++--------- tests/unit/common/test_base_model_init.py | 38 +++++ 48 files changed, 303 insertions(+), 194 deletions(-) diff --git a/benchmarks/adversarial_networks/tensorflow/dcgan/inference/fp32/model_init.py b/benchmarks/adversarial_networks/tensorflow/dcgan/inference/fp32/model_init.py index 8f4602c2c..2e2f88104 100644 --- a/benchmarks/adversarial_networks/tensorflow/dcgan/inference/fp32/model_init.py +++ b/benchmarks/adversarial_networks/tensorflow/dcgan/inference/fp32/model_init.py @@ -43,7 +43,7 @@ def __init__(self, args, custom_args=[], platform_util=None): benchmark_script = os.path.join( self.args.intelai_models, args.mode, args.precision, "inference_bench.py") - self.benchmark_command = self.get_numactl_command(args.socket_id) + \ + self.benchmark_command = self.get_command_prefix(args.socket_id) + \ self.python_exe + " " + benchmark_script set_env_var("OMP_NUM_THREADS", 
self.args.num_intra_threads)
diff --git a/benchmarks/common/base_benchmark_util.py b/benchmarks/common/base_benchmark_util.py
index 89df56cde..0768e8871 100644
--- a/benchmarks/common/base_benchmark_util.py
+++ b/benchmarks/common/base_benchmark_util.py
@@ -161,6 +161,22 @@ def _define_args(self):
 "with --accuracy-only and --mode=inference.",
 dest="output_results", action="store_true")
+ self._common_arg_parser.add_argument(
+ "--disable-tcmalloc",
+ help="Disables the use of TCMalloc for int8 benchmarking. TCMalloc is "
+ "currently not used for FP32 benchmarking, so using this flag with "
+ "FP32 models will have no effect.",
+ dest="disable_tcmalloc", action="store_true"
+ )
+
+ self._common_arg_parser.add_argument(
+ "--tcmalloc-large-alloc-report-threshold",
+ help="Sets the TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD environment variable to "
+ "the specified value. The environment variable sets the threshold (in bytes) "
+ "for when large memory allocation messages will be displayed.",
+ dest="tcmalloc_large_alloc_report_threshold", default=2147483648, type=int
+ )
+
 self._common_arg_parser.add_argument(
 "-v", "--verbose", help="Print verbose information.",
 dest="verbose", action="store_true")
diff --git a/benchmarks/common/base_model_init.py b/benchmarks/common/base_model_init.py
index 9a25ca92a..8e8d1abb2 100644
--- a/benchmarks/common/base_model_init.py
+++ b/benchmarks/common/base_model_init.py
@@ -18,6 +18,7 @@
 # SPDX-License-Identifier: EPL-2.0
 #
+import glob
 import json
 import os
@@ -62,15 +63,32 @@ def run_command(self, cmd):
 os.system(cmd)
- def get_numactl_command(self, socket_id):
+ def get_command_prefix(self, socket_id, numactl=True):
 """
- Returns the numactl command with --cpunodebind and --membind set to the
- specified socket_id. If socket_id is set to -1 (undefined) then an
- empty string is returned.
+ Returns the command prefix with:
+ - LD_PRELOAD for int8 models (if tcmalloc is not disabled)
+ - The numactl command with --cpunodebind and --membind set to the specified socket_id (if numactl=True)
 """
- return "" if socket_id == -1 else \
- "numactl --cpunodebind={0} --membind={0} ".format(
- str(socket_id))
+ command = ""
+
+ if not self.args.disable_tcmalloc:
+ # Try to find the TCMalloc library file
+ matches = glob.glob("/usr/lib/libtcmalloc.so*")
+
+ if len(matches) == 0:
+ matches = glob.glob("/usr/lib64/libtcmalloc.so*")
+
+ if len(matches) > 0:
+ command += "LD_PRELOAD={} ".format(matches[0])
+ else:
+ # Unable to find the TCMalloc library file
+ print("Warning: Unable to find the TCMalloc library file (libtcmalloc.so) in /usr/lib or /usr/lib64, "
+ "so the LD_PRELOAD environment variable will not be set.")
+
+ if socket_id != -1 and numactl:
+ command += "numactl --cpunodebind={0} --membind={0} ".format(str(socket_id))
+
+ return command
 def add_args_to_command(self, command, arg_list):
 """
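The new prefix composes as LD_PRELOAD, then numactl, then the model command. A minimal standalone sketch of that logic (the `sketch_command_prefix` helper and its arguments are illustrative, not part of this patch; it assumes libtcmalloc is installed under /usr/lib or /usr/lib64):
```python
import glob


def sketch_command_prefix(socket_id, disable_tcmalloc=False, numactl=True):
    """Illustrative rendition of get_command_prefix() above."""
    prefix = ""
    if not disable_tcmalloc:
        # Look for the TCMalloc shared library in the usual install locations
        matches = glob.glob("/usr/lib/libtcmalloc.so*") or \
            glob.glob("/usr/lib64/libtcmalloc.so*")
        if matches:
            prefix += "LD_PRELOAD={} ".format(matches[0])
    if socket_id != -1 and numactl:
        # Pin both CPU and memory allocation to the requested socket
        prefix += "numactl --cpunodebind={0} --membind={0} ".format(socket_id)
    return prefix


# With libtcmalloc.so.4 present and socket_id=0, a benchmark command would
# start like:
#   LD_PRELOAD=/usr/lib/libtcmalloc.so.4 numactl --cpunodebind=0 --membind=0 python ...
print(sketch_command_prefix(0))
```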
diff --git a/benchmarks/common/tensorflow/start.sh b/benchmarks/common/tensorflow/start.sh
index d93183325..d1ec29216 100755
--- a/benchmarks/common/tensorflow/start.sh
+++ b/benchmarks/common/tensorflow/start.sh
@@ -45,6 +45,8 @@ echo " NUM_CORES: ${NUM_CORES}"
 echo " BENCHMARK_ONLY: ${BENCHMARK_ONLY}"
 echo " ACCURACY_ONLY: ${ACCURACY_ONLY}"
 echo " OUTPUT_RESULTS: ${OUTPUT_RESULTS}"
+echo " DISABLE_TCMALLOC: ${DISABLE_TCMALLOC}"
+echo " TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD: ${TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD}"
 echo " NOINSTALL: ${NOINSTALL}"
 echo " OUTPUT_DIR: ${OUTPUT_DIR}"
@@ -62,6 +64,11 @@ if [[ ${NOINSTALL} != "True" ]]; then
 apt install -y libsm6 libxext6
 pip install --upgrade pip
 pip install requests
+
+ # install google-perftools for tcmalloc
+ if [[ ${DISABLE_TCMALLOC} != "True" ]]; then
+ apt-get install google-perftools -y
+ fi
 fi
 verbose_arg=""
@@ -170,6 +177,10 @@ if [ ${DATA_NUM_INTRA_THREADS} != "None" ]; then
 CMD="${CMD} --data-num-intra-threads=${DATA_NUM_INTRA_THREADS}"
 fi
+if [ ${DISABLE_TCMALLOC} == "True" ]; then
+ CMD="${CMD} --disable-tcmalloc"
+fi
+
 function install_protoc() {
 pushd "${MOUNT_EXTERNAL_MODELS_SOURCE}/research"
diff --git a/benchmarks/content_creation/tensorflow/draw/inference/fp32/model_init.py b/benchmarks/content_creation/tensorflow/draw/inference/fp32/model_init.py
index 08c145bca..e306ecd55 100644
--- a/benchmarks/content_creation/tensorflow/draw/inference/fp32/model_init.py
+++ b/benchmarks/content_creation/tensorflow/draw/inference/fp32/model_init.py
@@ -44,7 +44,7 @@ def __init__(self, args, custom_args=[], platform_util=None):
 # Create the command prefix with numactl and executing the script
 script_path = os.path.join(self.args.intelai_models, self.args.mode,
 self.args.precision, "draw_inf.py")
- self.command_prefix = self.get_numactl_command(args.socket_id) + \
+ self.command_prefix = self.get_command_prefix(args.socket_id) + \
 " {} {} ".format(self.python_exe, script_path)
 # Add additional args to the command
diff --git a/benchmarks/face_detection_and_alignment/tensorflow/facenet/inference/fp32/model_init.py b/benchmarks/face_detection_and_alignment/tensorflow/facenet/inference/fp32/model_init.py
index bf4b8132c..e00bf70f7 100644
--- a/benchmarks/face_detection_and_alignment/tensorflow/facenet/inference/fp32/model_init.py
+++ b/benchmarks/face_detection_and_alignment/tensorflow/facenet/inference/fp32/model_init.py
@@ -30,7 +30,7 @@ class ModelInitializer(BaseModelInitializer):
 def __init__(self, args, custom_args=[], 
platform_util=None): super(ModelInitializer, self).__init__(args, custom_args, platform_util) - self.cmd = self.get_numactl_command(self.args.socket_id) + \ + self.cmd = self.get_command_prefix(self.args.socket_id) + \ self.python_exe + " " # Set KMP env vars, if they haven't already been set diff --git a/benchmarks/face_detection_and_alignment/tensorflow/mtcc/inference/fp32/model_init.py b/benchmarks/face_detection_and_alignment/tensorflow/mtcc/inference/fp32/model_init.py index 4ef889b36..5d1983139 100644 --- a/benchmarks/face_detection_and_alignment/tensorflow/mtcc/inference/fp32/model_init.py +++ b/benchmarks/face_detection_and_alignment/tensorflow/mtcc/inference/fp32/model_init.py @@ -42,7 +42,7 @@ def __init__(self, args, custom_args, platform_util=None): self.args.intelai_models, self.args.mode, self.args.precision, "one_image_test.py") self.command_prefix = \ - self.get_numactl_command(self.args.socket_id) + \ + self.get_command_prefix(self.args.socket_id) + \ "{} ".format(self.python_exe) + benchmark_script self.run_cmd = \ diff --git a/benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/model_init.py b/benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/model_init.py index 19569b555..3e4a376af 100644 --- a/benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/model_init.py +++ b/benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/model_init.py @@ -34,7 +34,7 @@ class ModelInitializer(BaseModelInitializer): def __init__(self, args, custom_args=[], platform_util=None): super(ModelInitializer, self).__init__(args, custom_args, platform_util) - self.cmd = self.get_numactl_command(self.args.socket_id) + "python " + self.cmd = self.get_command_prefix(self.args.socket_id) + "{} ".format(self.python_exe) # Set KMP env vars, if they haven't already been set config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") diff --git a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md index e547377ca..7c9c246fc 100644 --- a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md +++ b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md @@ -139,30 +139,30 @@ Log location outside container: /benchmark_inception_resnet_v2 Example log tail when benchmarking for latency: ``` ... -Iteration 37: 0.046 sec -Iteration 38: 0.046 sec -Iteration 39: 0.046 sec -Iteration 40: 0.046 sec -Average time: 0.045 sec +Iteration 37: 0.043 sec +Iteration 38: 0.042 sec +Iteration 39: 0.043 sec +Iteration 40: 0.043 sec +Average time: 0.043 sec Batch size = 1 -Latency: 45.441 ms -Throughput: 22.007 images/sec +Latency: 42.793 ms +Throughput: 23.368 images/sec Ran inference with batch size 1 -Log location outside container: /benchmark_inception_resnet_v2_inference_int8_20190330_012557.log +Log location outside container: /benchmark_inception_resnet_v2_inference_int8_20190415_231020.log ``` Example log tail when benchmarking for throughput: ``` ... 
-Iteration 37: 0.975 sec -Iteration 38: 0.975 sec -Iteration 39: 0.987 sec -Iteration 40: 0.974 sec -Average time: 0.976 sec +Iteration 37: 0.932 sec +Iteration 38: 0.928 sec +Iteration 39: 0.927 sec +Iteration 40: 0.928 sec +Average time: 0.928 sec Batch size = 128 -Throughput: 131.178 images/sec +Throughput: 137.978 images/sec Ran inference with batch size 128 -Log location outside container: /benchmark_inception_resnet_v2_inference_int8_20190330_012719.log +Log location outside container: /benchmark_inception_resnet_v2_inference_int8_20190415_225215.log ``` diff --git a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/fp32/model_init.py b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/fp32/model_init.py index 641821520..13fd8a79f 100644 --- a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/fp32/model_init.py +++ b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/fp32/model_init.py @@ -29,7 +29,7 @@ class ModelInitializer(BaseModelInitializer): def __init__(self, args, custom_args=[], platform_util=None): super(ModelInitializer, self).__init__(args, custom_args, platform_util) - self.cmd = self.get_numactl_command(self.args.socket_id) + self.python_exe + " " + self.cmd = self.get_command_prefix(self.args.socket_id) + self.python_exe + " " # Set KMP env vars, if they haven't already been set config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") diff --git a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/int8/model_init.py b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/int8/model_init.py index 0d7dda4db..90ce7bcb2 100644 --- a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/int8/model_init.py +++ b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/int8/model_init.py @@ -36,7 +36,7 @@ def __init__(self, args, custom_args=[], platform_util=None): config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") self.set_kmp_vars(config_file_path) - self.cmd = self.get_numactl_command(self.args.socket_id) + "{} ".format(self.python_exe) + self.cmd = self.get_command_prefix(self.args.socket_id) + "{} ".format(self.python_exe) # use default batch size if -1 if self.args.batch_size == -1: diff --git a/benchmarks/image_recognition/tensorflow/inceptionv3/README.md b/benchmarks/image_recognition/tensorflow/inceptionv3/README.md index 7eb091edc..1da257669 100644 --- a/benchmarks/image_recognition/tensorflow/inceptionv3/README.md +++ b/benchmarks/image_recognition/tensorflow/inceptionv3/README.md @@ -197,23 +197,25 @@ Log location outside container: {--output-dir value}/benchmark_inceptionv3_infer Example log tail when benchmarking for latency: ``` ... -steps = 470, 53.7256017113 images/sec -steps = 480, 52.5430812016 images/sec -steps = 490, 52.9076139058 images/sec -steps = 500, 53.5021876395 images/sec +steps = 470, 134.912798739 images/sec +steps = 480, 132.379245045 images/sec +steps = 490, 133.977640069 images/sec +steps = 500, 132.083262478 images/sec +Average throughput for batch size 1: 133.440858806 images/sec Ran inference with batch size 1 -Log location outside container: {--output-dir value}/benchmark_inceptionv3_inference_int8_20190223_194002.log +Log location outside container: {--output-dir value}/benchmark_inceptionv3_inference_int8_20190415_220455.log ``` Example log tail when benchmarking for throughput: ``` ... 
-steps = 470, 370.435654276 images/sec -steps = 480, 369.710160177 images/sec -steps = 490, 369.083388904 images/sec -steps = 500, 370.287978128 images/sec +steps = 470, 369.151656047 images/sec +steps = 480, 373.174541014 images/sec +steps = 490, 372.402638382 images/sec +steps = 500, 371.836748659 images/sec +Average throughput for batch size 128: 371.269087408 images/sec Ran inference with batch size 128 -Log location outside container: {--output-dir value}/benchmark_inceptionv3_inference_int8_20190223_194314.log +Log location outside container: {--output-dir value}/benchmark_inceptionv3_inference_int8_20190416_162155.log ``` ## FP32 Inference Instructions diff --git a/benchmarks/image_recognition/tensorflow/inceptionv3/inference/fp32/model_init.py b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/fp32/model_init.py index 53c2643bd..f550765f4 100644 --- a/benchmarks/image_recognition/tensorflow/inceptionv3/inference/fp32/model_init.py +++ b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/fp32/model_init.py @@ -70,7 +70,7 @@ def __init__(self, args, custom_args=[], platform_util=None): self.args.intelai_models, self.args.precision, "eval_image_classifier_inference.py") - self.benchmark_command = self.get_numactl_command(args.socket_id) + \ + self.benchmark_command = self.get_command_prefix(args.socket_id) + \ self.python_exe + " " + benchmark_script num_cores = self.platform_util.num_cores_per_socket if self.args.num_cores == -1 \ diff --git a/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/model_init.py b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/model_init.py index bd4794638..645f2f92e 100644 --- a/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/model_init.py +++ b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/model_init.py @@ -74,7 +74,7 @@ def run_benchmark(self): "data_num_inter_threads", "data_num_intra_threads", "warmup_steps", "steps"] - cmd_prefix = self.get_numactl_command(self.args.socket_id) + \ + cmd_prefix = self.get_command_prefix(self.args.socket_id) + \ self.python_exe + " " + benchmark_script cmd = self.add_args_to_command(cmd_prefix, script_args_list) # add num_cores @@ -94,7 +94,7 @@ def run_accuracy(self): "batch_size", "num_inter_threads", "num_intra_threads"] - cmd_prefix = self.get_numactl_command(self.args.socket_id) + \ + cmd_prefix = self.get_command_prefix(self.args.socket_id) + \ self.python_exe + " " + accuracy_script cmd = self.add_args_to_command(cmd_prefix, script_args_list) self.run_command(cmd) @@ -106,7 +106,7 @@ def run_calibration(self): "input_graph", "data_location", "batch_size", "num_inter_threads", "num_intra_threads"] - cmd_prefix = self.get_numactl_command(self.args.socket_id) + \ + cmd_prefix = self.get_command_prefix(self.args.socket_id) + \ self.python_exe + " " + calibration_script cmd = self.add_args_to_command(cmd_prefix, script_args_list) self.run_command(cmd) diff --git a/benchmarks/image_recognition/tensorflow/inceptionv4/README.md b/benchmarks/image_recognition/tensorflow/inceptionv4/README.md index 1f472509b..13fb7c060 100644 --- a/benchmarks/image_recognition/tensorflow/inceptionv4/README.md +++ b/benchmarks/image_recognition/tensorflow/inceptionv4/README.md @@ -112,31 +112,31 @@ other precisions are coming later. Example log tail when benchmarking for throughput: ``` - [Running warmup steps...] - steps = 10, 185.108768528 images/sec - [Running benchmark steps...] 
- steps = 10, 184.482999017 images/sec - steps = 20, 184.561572444 images/sec - steps = 30, 184.620504126 images/sec - steps = 40, 183.900309054 images/sec - steps = 50, 184.110358713 images/sec - Ran inference with batch size 240 - Log location outside container: /benchmark_inceptionv4_inference_int8_20190306_215858.log + [Running warmup steps...] + steps = 10, 184.497605972 images/sec + [Running benchmark steps...] + steps = 10, 184.664702184 images/sec + steps = 20, 184.938455688 images/sec + steps = 30, 184.454197634 images/sec + steps = 40, 184.491891402 images/sec + steps = 50, 184.390001575 images/sec + Ran inference with batch size 240 + Log location outside container: /benchmark_inceptionv4_inference_int8_20190415_233517.log ``` Example log tail when benchmarking for latency: ``` - [Running warmup steps...] - steps = 10, 30.8738415788 images/sec - [Running benchmark steps...] - steps = 10, 31.8633787623 images/sec - steps = 20, 31.1129375635 images/sec - steps = 30, 31.2716048462 images/sec - steps = 40, 31.9682931663 images/sec - steps = 50, 31.6665962009 images/sec - Latency: 31.936 ms - Ran inference with batch size 1 - Log location outside container: /benchmark_inceptionv4_inference_int8_20190306_215702.log + [Running warmup steps...] + steps = 10, 32.6095380262 images/sec + [Running benchmark steps...] + steps = 10, 32.9024373024 images/sec + steps = 20, 32.5328989723 images/sec + steps = 30, 32.5988932413 images/sec + steps = 40, 31.3991914957 images/sec + steps = 50, 32.7053998207 images/sec + Latency: 30.598 ms + Ran inference with batch size 1 + Log location outside container: /benchmark_inceptionv4_inference_int8_20190415_232441.log ``` ## FP32 Inference Instructions diff --git a/benchmarks/image_recognition/tensorflow/inceptionv4/inference/inceptionv4_model_init.py b/benchmarks/image_recognition/tensorflow/inceptionv4/inference/inceptionv4_model_init.py index d4294a179..74da197fd 100644 --- a/benchmarks/image_recognition/tensorflow/inceptionv4/inference/inceptionv4_model_init.py +++ b/benchmarks/image_recognition/tensorflow/inceptionv4/inference/inceptionv4_model_init.py @@ -73,7 +73,7 @@ def parse_args(self): def add_command_prefix(self, script_path): """ Uses the specified script path and adds on the command prefix """ - return self.get_numactl_command(self.args.socket_id) + self.python_exe + " " + \ + return self.get_command_prefix(self.args.socket_id) + self.python_exe + " " + \ script_path def run_benchmark(self): diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/model_init.py b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/model_init.py index 6f22fd12a..c693b055c 100644 --- a/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/model_init.py +++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/model_init.py @@ -34,7 +34,7 @@ class ModelInitializer(BaseModelInitializer): def __init__(self, args, custom_args=[], platform_util=None): super(ModelInitializer, self).__init__(args, custom_args, platform_util) - self.cmd = self.get_numactl_command(self.args.socket_id) + "python " + self.cmd = self.get_command_prefix(self.args.socket_id) + "python " # Set KMP env vars, if they haven't already been set config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") diff --git a/benchmarks/image_recognition/tensorflow/resnet101/inference/fp32/model_init.py b/benchmarks/image_recognition/tensorflow/resnet101/inference/fp32/model_init.py index 
43f862159..98962a670 100644 --- a/benchmarks/image_recognition/tensorflow/resnet101/inference/fp32/model_init.py +++ b/benchmarks/image_recognition/tensorflow/resnet101/inference/fp32/model_init.py @@ -70,7 +70,7 @@ def __init__(self, args, custom_args=[], platform_util=None): self.args.intelai_models, self.args.mode, "eval_image_classifier_inference.py") - self.benchmark_command = self.get_numactl_command(args.socket_id) + \ + self.benchmark_command = self.get_command_prefix(args.socket_id) + \ self.python_exe + " " + benchmark_script self.benchmark_command = \ diff --git a/benchmarks/image_recognition/tensorflow/resnet101/inference/int8/model_init.py b/benchmarks/image_recognition/tensorflow/resnet101/inference/int8/model_init.py index 4bd21a12e..a53cf6884 100644 --- a/benchmarks/image_recognition/tensorflow/resnet101/inference/int8/model_init.py +++ b/benchmarks/image_recognition/tensorflow/resnet101/inference/int8/model_init.py @@ -78,7 +78,7 @@ def run_benchmark_or_accuracy(self): self.args.intelai_models, self.args.mode, "eval_image_classifier_inference.py") - cmd = self.get_numactl_command(self.args.socket_id) + self.python_exe + " " + cmd + cmd = self.get_command_prefix(self.args.socket_id) + self.python_exe + " " + cmd cmd += " --input-graph=" + self.args.input_graph + \ " --num-inter-threads=" + str(self.args.num_inter_threads) + \ @@ -107,7 +107,7 @@ def run_calibration(self): "input_graph", "data_location", "batch_size", "num_inter_threads", "num_intra_threads"] - cmd_prefix = self.get_numactl_command(self.args.socket_id) + \ + cmd_prefix = self.get_command_prefix(self.args.socket_id) + \ self.python_exe + " " + calibration_script cmd = self.add_args_to_command(cmd_prefix, script_args_list) self.run_command(cmd) diff --git a/benchmarks/image_recognition/tensorflow/resnet50/README.md b/benchmarks/image_recognition/tensorflow/resnet50/README.md index 0b73a4e56..a34a52139 100644 --- a/benchmarks/image_recognition/tensorflow/resnet50/README.md +++ b/benchmarks/image_recognition/tensorflow/resnet50/README.md @@ -102,12 +102,15 @@ The tail of the log output when the benchmarking completes should look something like this: ``` ... 
-steps = 470, 460.113806562 images/sec -steps = 480, 460.073982602 images/sec -steps = 490, 463.289831148 images/sec -steps = 500, 463.521427264 images/sec +Iteration 497: 0.253495 sec +Iteration 498: 0.253033 sec +Iteration 499: 0.258083 sec +Iteration 500: 0.254541 sec +Average time: 0.254572 sec +Batch size = 128 +Throughput: 502.805 images/sec Ran inference with batch size 128 -Log location outside container: {--output-dir value}/benchmark_resnet50_inference_int8_20190223_180546.log +Log location outside container: {--output-dir value}/benchmark_resnet50_inference_int8_20190416_172735.log ``` Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands diff --git a/benchmarks/image_recognition/tensorflow/resnet50/inference/fp32/model_init.py b/benchmarks/image_recognition/tensorflow/resnet50/inference/fp32/model_init.py index 4c3dfbd1d..88520cbdd 100644 --- a/benchmarks/image_recognition/tensorflow/resnet50/inference/fp32/model_init.py +++ b/benchmarks/image_recognition/tensorflow/resnet50/inference/fp32/model_init.py @@ -71,7 +71,7 @@ def __init__(self, args, custom_args=[], platform_util=None): self.args.intelai_models, self.args.mode, "eval_image_classifier_inference.py") - self.benchmark_command = self.get_numactl_command(args.socket_id) + \ + self.benchmark_command = self.get_command_prefix(args.socket_id) + \ self.python_exe + " " + benchmark_script num_cores = self.platform_util.num_cores_per_socket if self.args.num_cores == -1 \ diff --git a/benchmarks/image_recognition/tensorflow/resnet50/inference/int8/model_init.py b/benchmarks/image_recognition/tensorflow/resnet50/inference/int8/model_init.py index 75e9db07c..41571564c 100644 --- a/benchmarks/image_recognition/tensorflow/resnet50/inference/int8/model_init.py +++ b/benchmarks/image_recognition/tensorflow/resnet50/inference/int8/model_init.py @@ -74,7 +74,7 @@ def run_benchmark_or_accuracy(self): self.args.intelai_models, self.args.mode, "eval_image_classifier_inference.py") - cmd = self.get_numactl_command(self.args.socket_id) + self.python_exe + " " + cmd + cmd = self.get_command_prefix(self.args.socket_id) + self.python_exe + " " + cmd cmd += " --input-graph=" + self.args.input_graph + \ " --num-inter-threads=" + str(self.args.num_inter_threads) + \ @@ -106,7 +106,7 @@ def run_calibration(self): "input_graph", "data_location", "batch_size", "num_inter_threads", "num_intra_threads"] - cmd_prefix = self.get_numactl_command(self.args.socket_id) + \ + cmd_prefix = self.get_command_prefix(self.args.socket_id) + \ self.python_exe + " " + calibration_script cmd = self.add_args_to_command(cmd_prefix, script_args_list) self.run_command(cmd) diff --git a/benchmarks/image_segmentation/tensorflow/maskrcnn/inference/fp32/model_init.py b/benchmarks/image_segmentation/tensorflow/maskrcnn/inference/fp32/model_init.py index 43f9cdacc..35412be2f 100644 --- a/benchmarks/image_segmentation/tensorflow/maskrcnn/inference/fp32/model_init.py +++ b/benchmarks/image_segmentation/tensorflow/maskrcnn/inference/fp32/model_init.py @@ -42,7 +42,7 @@ def __init__(self, args, custom_args=[], platform_util=None): benchmark_script = os.path.join( self.args.intelai_models, "coco.py") - self.benchmark_command = self.get_numactl_command(args.socket_id) + \ + self.benchmark_command = self.get_command_prefix(args.socket_id) + \ self.python_exe + " " + benchmark_script + " evaluate " set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) diff --git a/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/model_init.py 
b/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/model_init.py index d4998afae..3cdcf1701 100644 --- a/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/model_init.py +++ b/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/model_init.py @@ -51,7 +51,7 @@ def __init__(self, args, custom_args=[], platform_util=None): "unet_infer.py") # Create the command prefix using numactl - self.command_prefix = self.get_numactl_command(self.args.socket_id) +\ + self.command_prefix = self.get_command_prefix(self.args.socket_id) +\ "{} {}".format(self.python_exe, script_path) # Add batch size arg diff --git a/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/model_init.py b/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/model_init.py index 6a2b7244f..535f42416 100644 --- a/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/model_init.py +++ b/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/model_init.py @@ -31,7 +31,7 @@ class ModelInitializer(BaseModelInitializer): def __init__(self, args, custom_args, platform_util=None): super(ModelInitializer, self).__init__(args, custom_args, platform_util) - self.cmd = self.get_numactl_command(self.args.socket_id) + self.cmd = self.get_command_prefix(self.args.socket_id) self.set_num_inter_intra_threads() diff --git a/benchmarks/language_translation/tensorflow/gnmt/inference/fp32/model_init.py b/benchmarks/language_translation/tensorflow/gnmt/inference/fp32/model_init.py index a23403eb0..6f46f2c80 100644 --- a/benchmarks/language_translation/tensorflow/gnmt/inference/fp32/model_init.py +++ b/benchmarks/language_translation/tensorflow/gnmt/inference/fp32/model_init.py @@ -30,7 +30,7 @@ class ModelInitializer(BaseModelInitializer): def __init__(self, args, custom_args=[], platform_util=None): super(ModelInitializer, self).__init__(args, custom_args, platform_util) - self.cmd = self.get_numactl_command(self.args.socket_id) + self.cmd = self.get_command_prefix(self.args.socket_id) if self.args.socket_id != -1 and self.args.num_cores != -1: self.cmd += "--physcpubind=0-" + \ diff --git a/benchmarks/language_translation/tensorflow/transformer_language/inference/fp32/model_init.py b/benchmarks/language_translation/tensorflow/transformer_language/inference/fp32/model_init.py index 4e1519e03..b4fd1bc30 100644 --- a/benchmarks/language_translation/tensorflow/transformer_language/inference/fp32/model_init.py +++ b/benchmarks/language_translation/tensorflow/transformer_language/inference/fp32/model_init.py @@ -32,7 +32,7 @@ class ModelInitializer(BaseModelInitializer): def __init__(self, args, custom_args, platform_util=None): super(ModelInitializer, self).__init__(args, custom_args, platform_util) - self.cmd = self.get_numactl_command(self.args.socket_id) + self.cmd = self.get_command_prefix(self.args.socket_id) self.bleu_params = "" self.set_num_inter_intra_threads() diff --git a/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/model_init.py b/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/model_init.py index 00f8b9f3f..85dae1e68 100644 --- a/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/model_init.py +++ b/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/model_init.py @@ -31,7 +31,7 @@ class ModelInitializer(BaseModelInitializer): def __init__(self, args, custom_args, platform_util=None): super(ModelInitializer, self).__init__(args, custom_args, 
platform_util) - self.cmd = self.get_numactl_command(self.args.socket_id) + self.cmd = self.get_command_prefix(self.args.socket_id) self.bleu_params = "" self.set_num_inter_intra_threads() diff --git a/benchmarks/launch_benchmark.py b/benchmarks/launch_benchmark.py index 5dd7fbca1..7515936eb 100644 --- a/benchmarks/launch_benchmark.py +++ b/benchmarks/launch_benchmark.py @@ -175,6 +175,8 @@ def get_env_vars(self, benchmark_scripts, use_case, intelai_models): "BENCHMARK_ONLY": args.benchmark_only, "ACCURACY_ONLY": args.accuracy_only, "OUTPUT_RESULTS": args.output_results, + "DISABLE_TCMALLOC": args.disable_tcmalloc, + "TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD": args.tcmalloc_large_alloc_report_threshold, "DOCKER": str(args.docker_image is not None), "PYTHON_EXE": sys.executable if not args.docker_image else "python" } diff --git a/benchmarks/object_detection/tensorflow/faster_rcnn/inference/fp32/model_init.py b/benchmarks/object_detection/tensorflow/faster_rcnn/inference/fp32/model_init.py index a605cc8e3..c30f39ada 100644 --- a/benchmarks/object_detection/tensorflow/faster_rcnn/inference/fp32/model_init.py +++ b/benchmarks/object_detection/tensorflow/faster_rcnn/inference/fp32/model_init.py @@ -65,7 +65,7 @@ def __init__(self, args, custom_args, platform_util=None): self.args.intelai_models, self.args.mode, self.args.precision, "eval.py") self.command_prefix = \ - self.get_numactl_command(self.args.socket_id) + self.python_exe + " " + \ + self.get_command_prefix(self.args.socket_id) + self.python_exe + " " + \ benchmark_script config_file_path = os.path.join(self.args.checkpoint, diff --git a/benchmarks/object_detection/tensorflow/faster_rcnn/inference/int8/model_init.py b/benchmarks/object_detection/tensorflow/faster_rcnn/inference/int8/model_init.py index 705ef72c1..37eaf2722 100644 --- a/benchmarks/object_detection/tensorflow/faster_rcnn/inference/int8/model_init.py +++ b/benchmarks/object_detection/tensorflow/faster_rcnn/inference/int8/model_init.py @@ -83,7 +83,7 @@ def parse_args(self): def run_perf_command(self): set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) self.parse_args() - command = self.get_numactl_command(self.args.socket_id) + command = self.get_command_prefix(self.args.socket_id) command += " {} ".format(self.python_exe) + self.perf_script_path command += " -g " + self.args.input_graph if self.custom_args: diff --git a/benchmarks/object_detection/tensorflow/rfcn/README.md b/benchmarks/object_detection/tensorflow/rfcn/README.md index 3b3a64b9d..10a0342ce 100644 --- a/benchmarks/object_detection/tensorflow/rfcn/README.md +++ b/benchmarks/object_detection/tensorflow/rfcn/README.md @@ -174,16 +174,16 @@ to get additional debug output or change the default output location. Below is a sample log file tail when running benchmarking for throughput and latency: ``` -Step 0: 10.6923000813 seconds -Step 10: 0.168856859207 seconds +Step 0: 11.4450089931 seconds +Step 10: 0.25656080246 seconds ... -Step 460: 0.181148052216 seconds -Step 470: 0.202737092972 seconds -Step 480: 0.117042064667 seconds -Step 490: 0.103501081467 seconds -Avg. Duration per Step:0.169812122345 +Step 460: 0.256786823273 seconds +Step 470: 0.267828941345 seconds +Step 480: 0.141321897507 seconds +Step 490: 0.127830982208 seconds +Avg. 
Duration per Step:0.195356227875
Ran inference with batch size -1
-Log location outside container: {--output-dir}/benchmark_rfcn_inference_int8_20190227_191959.log
+Log location outside container: {--output-dir}/benchmark_rfcn_inference_int8_20190416_182445.log
```

And here is a sample log file tail when running for accuracy:
diff --git a/benchmarks/object_detection/tensorflow/rfcn/inference/fp32/model_init.py b/benchmarks/object_detection/tensorflow/rfcn/inference/fp32/model_init.py
index a4ab51dfa..031c0f2ca 100644
--- a/benchmarks/object_detection/tensorflow/rfcn/inference/fp32/model_init.py
+++ b/benchmarks/object_detection/tensorflow/rfcn/inference/fp32/model_init.py
@@ -55,7 +55,7 @@ def __init__(self, args, custom_args, platform_util):
 "research")
 def run_benchmark(self):
- command_prefix = self.get_numactl_command(self.args.socket_id) + \
+ command_prefix = self.get_command_prefix(self.args.socket_id) + \
 self.python_exe + " " + self.benchmark_script
 # set num_inter_threads and num_intra_threads
diff --git a/benchmarks/object_detection/tensorflow/rfcn/inference/int8/model_init.py b/benchmarks/object_detection/tensorflow/rfcn/inference/int8/model_init.py
index d6cb2cc97..4f2a29ab4 100755
--- a/benchmarks/object_detection/tensorflow/rfcn/inference/int8/model_init.py
+++ b/benchmarks/object_detection/tensorflow/rfcn/inference/int8/model_init.py
@@ -54,6 +54,9 @@ def __init__(self, args, custom_args=[], platform_util=None):
 self.parse_args()
+ # Get the command prefix, but numactl is added later in run_perf_command()
+ self.command.append(self.get_command_prefix(self.args.socket_id, numactl=False))
+
 # Set KMP env vars, if they haven't already been set
 config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
 self.set_kmp_vars(config_file_path)
@@ -158,7 +161,8 @@ def run_perf_command(self):
 def run_accuracy_command(self):
 # already validated by parent
- self.command = "FROZEN_GRAPH=" + self.args.input_graph
+ self.command = self.get_command_prefix(self.args.socket_id, numactl=False)
+ self.command += "FROZEN_GRAPH=" + self.args.input_graph
 if self.args.data_location and os.path.exists(
 self.args.data_location):
diff --git a/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md b/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md
index cee1a3848..2d129384f 100644
--- a/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md
+++ b/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md
@@ -154,13 +154,13 @@ Below is a sample log file tail when running benchmarking for
 throughput and latency:
 ```
-Step 4970: 0.0340421199799 seconds
-Step 4980: 0.0429329872131 seconds
-Step 4990: 0.0358219146729 seconds
-Avg. Duration per Step:0.0364457404137
-Avg. Duration per Step:0.0365921088491
+Step 4970: 0.0305020809174 seconds
+Step 4980: 0.0294089317322 seconds
+Step 4990: 0.0301029682159 seconds
+Avg. Duration per Step:0.0300041775227
+Avg. 
Duration per Step:0.0301246762276 Ran inference with batch size 1 -Log location outside container: /benchmark_ssd-mobilenet_inference_int8_20181203_232524.log +Log location outside container: /benchmark_ssd-mobilenet_inference_int8_20190417_175418.log ``` And here is a sample log file tail when running for accuracy: diff --git a/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/model_init.py b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/model_init.py index 585d3ed0e..927f73048 100644 --- a/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/model_init.py +++ b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/model_init.py @@ -68,7 +68,7 @@ def __init__(self, args, custom_args, platform_util): self.args.precision, "infer_detections.py") # get command with numactl - self.run_cmd = self.get_numactl_command( + self.run_cmd = self.get_command_prefix( self.args.socket_id) + "{} {}".format(self.python_exe, benchmark_script) output_tf_record_path = os.path.join(os.path.dirname( diff --git a/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/config.json b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/config.json index 6f1228ba7..273b45b40 100644 --- a/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/config.json +++ b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/config.json @@ -1,7 +1,7 @@ { "optimization_parameters": { "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", - "KMP_BLOCKTIME": 0, + "KMP_BLOCKTIME": 1, "KMP_SETTINGS": 1 } } diff --git a/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/model_init.py b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/model_init.py index 57114447a..28522ada4 100644 --- a/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/model_init.py +++ b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/model_init.py @@ -52,7 +52,7 @@ def __init__(self, args, custom_args=[], platform_util=None): benchmark_script = os.path.join( self.args.intelai_models, self.args.mode, self.args.precision, "run_frozen_graph_ssdmob.py") - self.command_prefix = self.get_numactl_command(self.args.socket_id) + \ + self.command_prefix = self.get_command_prefix(self.args.socket_id) + \ "{} {}".format(self.python_exe, benchmark_script) set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) diff --git a/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/fp32/model_init.py b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/fp32/model_init.py index 1ad534ed9..20bfcccf5 100644 --- a/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/fp32/model_init.py +++ b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/fp32/model_init.py @@ -57,7 +57,7 @@ def __init__(self, args, custom_args, platform_util): benchmark_script = os.path.join(self.model_dir, "infer_detections.py") # get command with numactl - self.run_cmd = self.get_numactl_command(self.args.socket_id) + self.run_cmd = self.get_command_prefix(self.args.socket_id) self.run_cmd += "{0} {1}".format(self.python_exe, benchmark_script) self.run_cmd += " --input-graph {0}".format(self.args.input_graph) self.run_cmd += " --batch-size {0}".format(args.batch_size) diff --git a/benchmarks/recommendation/tensorflow/ncf/inference/fp32/model_init.py b/benchmarks/recommendation/tensorflow/ncf/inference/fp32/model_init.py index 1704839cb..960c2523a 100644 --- 
a/benchmarks/recommendation/tensorflow/ncf/inference/fp32/model_init.py +++ b/benchmarks/recommendation/tensorflow/ncf/inference/fp32/model_init.py @@ -50,7 +50,7 @@ def __init__(self, args, custom_args=[], platform_util=None): self.args.intelai_models, self.args.mode, self.args.precision, "ncf_main.py") - self.benchmark_command = self.get_numactl_command(args.socket_id) + \ + self.benchmark_command = self.get_command_prefix(args.socket_id) + \ self.python_exe + " " + benchmark_script set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) diff --git a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/README.md b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/README.md index 89fb2b244..ede163b61 100755 --- a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/README.md +++ b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/README.md @@ -72,7 +72,7 @@ Benchmarking instructions and scripts for model training coming later. ``` cd /home//models/benchmarks - python launch_benchmark.py + python launch_benchmark.py \ --model-name wide_deep_large_ds \ --precision int8 \ --mode inference \ @@ -92,7 +92,7 @@ Benchmarking instructions and scripts for model training coming later. ``` cd /home//models/benchmarks - python launch_benchmark.py + python launch_benchmark.py \ --model-name wide_deep_large_ds \ --precision int8 \ --mode inference \ @@ -109,7 +109,7 @@ Benchmarking instructions and scripts for model training coming later. ``` cd /home//models/benchmarks - python launch_benchmark.py + python launch_benchmark.py \ --model-name wide_deep_large_ds \ --precision int8 \ --mode inference \ diff --git a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/fp32/model_init.py b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/fp32/model_init.py index 6655dce85..6293b3d0c 100755 --- a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/fp32/model_init.py +++ b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/fp32/model_init.py @@ -62,7 +62,7 @@ def run_benchmark(self): script_args_list = ["input_graph", "num_parallel_batches", "batch_size", "num_inter_threads", "num_intra_threads", "accuracy_only", "data_location"] - cmd_prefix = self.get_numactl_command(self.args.socket_id) + \ + cmd_prefix = self.get_command_prefix(self.args.socket_id) + \ self.python_exe + " " + benchmark_script cmd = self.add_args_to_command(cmd_prefix, script_args_list) self.run_command(cmd) diff --git a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/int8/model_init.py b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/int8/model_init.py index 9fdef4537..c6a3b25fd 100755 --- a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/int8/model_init.py +++ b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/int8/model_init.py @@ -62,7 +62,7 @@ def run_benchmark(self): script_args_list = ["input_graph", "num_parallel_batches", "batch_size", "num_inter_threads", "num_intra_threads", "accuracy_only", "data_location"] - cmd_prefix = self.get_numactl_command(self.args.socket_id) + \ + cmd_prefix = self.get_command_prefix(self.args.socket_id) + \ self.python_exe + " " + benchmark_script cmd = self.add_args_to_command(cmd_prefix, script_args_list) self.run_command(cmd) diff --git a/docs/general/tensorflow/LaunchBenchmark.md b/docs/general/tensorflow/LaunchBenchmark.md index 8544c7320..ccab76c9c 100644 --- a/docs/general/tensorflow/LaunchBenchmark.md +++ 
b/docs/general/tensorflow/LaunchBenchmark.md @@ -101,6 +101,16 @@ optional arguments: conjunction with --accuracy-only and --mode=inference. --output-dir OUTPUT_DIR Folder to dump output into. + --disable-tcmalloc Disables the use of TCMalloc for int8 benchmarking. + TCMalloc is currently not used for FP32 benchmarking, + so using this flag with FP32 models will have no + effect. + --tcmalloc-large-alloc-report-threshold TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD + Sets the TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD + environment variable to the specified value. The + environment variable sets the threshold (in bytes) for + when large memory allocation messages will be + displayed. -g INPUT_GRAPH, --in-graph INPUT_GRAPH Full path to the input graph --volume CUSTOM_VOLUMES diff --git a/models/image_recognition/tensorflow/resnet50/inference/eval_image_classifier_inference.py b/models/image_recognition/tensorflow/resnet50/inference/eval_image_classifier_inference.py index 198509a23..791c1b761 100644 --- a/models/image_recognition/tensorflow/resnet50/inference/eval_image_classifier_inference.py +++ b/models/image_recognition/tensorflow/resnet50/inference/eval_image_classifier_inference.py @@ -165,12 +165,12 @@ def run(self): input_tensor = infer_graph.get_tensor_by_name('input:0') output_tensor = infer_graph.get_tensor_by_name('predict:0') - data_sess = tf.Session(graph=data_graph, config=data_config) + data_sess = tf.Session(graph=data_graph, config=data_config) infer_sess = tf.Session(graph=infer_graph, config=infer_config) num_processed_images = 0 num_remaining_images = dataset.num_examples_per_epoch(subset=subset) - num_processed_images \ - if self.args.data_location else datasets.IMAGENET_NUM_VAL_IMAGES + if self.args.data_location else (self.args.batch_size * self.args.steps) if (not self.args.accuracy_only): iteration = 0 diff --git a/tests/unit/common/tensorflow/test_run_tf_benchmarks.py b/tests/unit/common/tensorflow/test_run_tf_benchmarks.py index b5407f00e..dba3e6ca6 100644 --- a/tests/unit/common/tensorflow/test_run_tf_benchmarks.py +++ b/tests/unit/common/tensorflow/test_run_tf_benchmarks.py @@ -73,13 +73,15 @@ def clear_kmp_env_vars(): @patch("os.stat") @patch("os.chdir") @patch("os.remove") +@patch("glob.glob") @patch("common.platform_util.os") @patch("common.platform_util.system_platform") @patch("common.platform_util.subprocess") @patch("common.base_model_init.BaseModelInitializer.run_command") -def test_run_benchmark(mock_run_command, mock_subprocess, mock_platform, - mock_os, mock_remove, mock_chdir, mock_stat, mock_path_exists, mock_is_file, mock_is_dir, - mock_listdir, mock_rmtree, mock_mkdir, test_args, expected_cmd): +def test_run_benchmark(mock_run_command, mock_subprocess, mock_platform, mock_os, + mock_glob, mock_remove, mock_chdir, mock_stat, mock_path_exists, + mock_is_file, mock_is_dir, mock_listdir, mock_rmtree, mock_mkdir, + test_args, expected_cmd): """ Runs through executing the specified run_tf_benchmarks.py command from the test_args and verifying that the model_init file calls run_command with @@ -92,6 +94,7 @@ def test_run_benchmark(mock_run_command, mock_subprocess, mock_platform, mock_stat.return_value = MagicMock(st_nlink=0) parse_model_args_file() mock_listdir.return_value = True + mock_glob.return_value = ["/usr/lib/libtcmalloc.so.4.2.6"] clear_kmp_env_vars() platform_config.set_mock_system_type(mock_platform) platform_config.set_mock_os_access(mock_os) diff --git a/tests/unit/common/tensorflow/tf_model_args.txt b/tests/unit/common/tensorflow/tf_model_args.txt 
diff --git a/tests/unit/common/tensorflow/tf_model_args.txt b/tests/unit/common/tensorflow/tf_model_args.txt
index 21f680eda..55137c9e3 100644
--- a/tests/unit/common/tensorflow/tf_model_args.txt
+++ b/tests/unit/common/tensorflow/tf_model_args.txt
@@ -1,90 +1,92 @@
-run_tf_benchmark.py --framework tensorflow --use-case recommendation --precision fp32 --mode inference --model-name wide_deep --batch-size 1024 --data-location /dataset --checkpoint /checkpoints --intelai-models . --verbose, OMP_NUM_THREADS=1 numactl --cpunodebind=0 --membind=0 python inference/fp32/wide_deep_inference.py --data_dir=/dataset --model_dir=/checkpoints --batch_size=1024
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb, numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/benchmark.py --num_intra_threads=28 --num_inter_threads=1 --batch_size=100 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb, numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/benchmark.py --num_intra_threads=28 --num_inter_threads=1 --batch_size=1 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb --data-location=/dataset, numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/accuracy.py --num_intra_threads=28 --num_inter_threads=1 --batch_size=100 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb --data_location=/dataset
-run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 100 --in-graph /final_int8_inceptionv3.pb --intelai-models . --accuracy-only --verbose,python ./int8/accuracy.py --num_intra_threads=56 --num_inter_threads=2 --batch_size=100 --input_graph=/final_int8_inceptionv3.pb
-run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 1 --in-graph /final_int8_inceptionv3.pb --intelai-models . --benchmark-only --socket-id 0 --verbose,numactl --cpunodebind=0 --membind=0 python ./int8/benchmark.py --warmup_steps=10 --num_intra_threads=28 --num_inter_threads=1 --batch_size=1 --input_graph=/final_int8_inceptionv3.pb --steps=50 --num_cores=28
-run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /final_int8_inceptionv3.pb --intelai-models . --benchmark-only --socket-id 0 --verbose,numactl --cpunodebind=0 --membind=0 python ./int8/benchmark.py --warmup_steps=10 --num_intra_threads=28 --num_inter_threads=1 --batch_size=128 --input_graph=/final_int8_inceptionv3.pb --steps=50 --num_cores=28
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inceptionv3 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/inception_frozen_max_min.pb --steps=200 --warmup-steps=20,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/int8/benchmark.py --warmup_steps=20 --num_intra_threads=28 --num_inter_threads=1 --batch_size=128 --input_graph=/in_graph/inception_frozen_max_min.pb --steps=200 --num_cores=28
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inceptionv3 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/inception_frozen_max_min.pb --steps=200 --warmup-steps=20,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/int8/benchmark.py --warmup_steps=20 --num_intra_threads=28 --num_inter_threads=1 --batch_size=1 --input_graph=/in_graph/inception_frozen_max_min.pb --steps=200 --num_cores=28
-run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 1 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --socket-id 0 --verbose,numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28
-run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --socket-id 0 --verbose,numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28
-run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --socket-id 0 --verbose --num-inter-threads 4 --num-intra-threads 16,numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=4 --num-intra-threads=16 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28
-run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 100 --accuracy-only --data-location /dataset --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --verbose,python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=2 --num-intra-threads=56 --num-cores=28 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/accuracy.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28 --data_location=/dataset
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=fp32 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/inceptionv4_fp32_pretrained_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/accuracy.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_fp32_pretrained_model.pb --num_intra_threads=28 --data_location=/dataset
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/benchmark.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/benchmark.py --batch_size=1 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --data-location=/dataset,python /workspace/intelai_models/eval_image_classifier_accuracy.py --input_graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --data_location=/dataset --input_height=299 --input_width=299 --num_inter_threads=2 --num_intra_threads=56 --output_layer=InceptionResnetV2/Logits/Predictions --batch_size=100
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --inter-op-parallelism-threads=2 --intra-op-parallelism-threads=28 --batch-size=1
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --inter-op-parallelism-threads=2 --intra-op-parallelism-threads=28 --batch-size=128
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset,python /workspace/intelai_models/eval_image_classifier_accuracy.py --input_graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data_location=/dataset --input_height=299 --input_width=299 --num_inter_threads=2 --num_intra_threads=56 --output_layer=InceptionResnetV2/Logits/Predictions --batch_size=100
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --inter-op-parallelism-threads=1 --intra-op-parallelism-threads=28 --batch-size=1
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --benchmark-only --verbose --in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --inter-op-parallelism-threads=1 --intra-op-parallelism-threads=28 --batch-size=128
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/resnet50_int8_pretrained_model.pb --data-location=/dataset --calibration-only,python /workspace/intelai_models/int8/generate_calibration_data.py --num_intra_threads=56 --num_inter_threads=2 --batch_size=100 --input_graph=/in_graph/resnet50_int8_pretrained_model.pb --data_location=/dataset
-run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name resnet50 --batch-size 100 --data-location /dataset --in-graph /final_int8_resnet50.pb --intelai-models . --accuracy-only --verbose,python ./inference/eval_image_classifier_inference.py --input-graph=/final_int8_resnet50.pb --num-inter-threads=2 --num-intra-threads=56 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only
-run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name resnet50 --batch-size 128 --in-graph /final_int8_resnet50.pb --intelai-models . --benchmark-only --verbose,python ./inference/eval_image_classifier_inference.py --input-graph=/final_int8_resnet50.pb --num-inter-threads=2 --num-intra-threads=56 --batch-size=128 --warmup-steps=10 --steps=50
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/resnet50_int8_pretrained_model.pb --steps=200 --warmup-steps=20,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/resnet50_int8_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=20 --steps=200
-run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 1 --in-graph /freezed_resnet50.pb --intelai-models . --socket-id 0 --verbose,numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28
-run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 128 --in-graph /freezed_resnet50.pb --intelai-models . --socket-id 0 --verbose,numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28
-run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 1 --in-graph /freezed_resnet50.pb --intelai-models . --socket-id 0 --verbose --num-inter-threads 4 --num-intra-threads 16,numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=4 --num-intra-threads=16 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --batch-size 100 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/freezed_resnet50.pb --accuracy-only --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=100 --data-location=/dataset --accuracy-only --num-cores=28 --warmup-steps=10 --steps=50
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --output-results --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/resnet50_fp32_pretrained_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/resnet50_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --num-cores=28 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --results-file-path /workspace/benchmarks/common/tensorflow/logs/resnet50_fp32_inference_results*.txt
+run_tf_benchmark.py --framework tensorflow --use-case recommendation --precision fp32 --mode inference --model-name wide_deep --batch-size 1024 --data-location /dataset --checkpoint /checkpoints --intelai-models . --verbose,OMP_NUM_THREADS=1 numactl --cpunodebind=0 --membind=0 python inference/fp32/wide_deep_inference.py --data_dir=/dataset --model_dir=/checkpoints --batch_size=1024
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb, LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/benchmark.py --num_intra_threads=28 --num_inter_threads=1 --batch_size=100 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb, LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/benchmark.py --num_intra_threads=28 --num_inter_threads=1 --batch_size=1 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb --data-location=/dataset, LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/accuracy.py --num_intra_threads=28 --num_inter_threads=1 --batch_size=100 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb --data_location=/dataset
+run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 100 --in-graph /final_int8_inceptionv3.pb --intelai-models . --accuracy-only --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python ./int8/accuracy.py --num_intra_threads=56 --num_inter_threads=2 --batch_size=100 --input_graph=/final_int8_inceptionv3.pb
+run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 1 --in-graph /final_int8_inceptionv3.pb --intelai-models . --benchmark-only --socket-id 0 --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./int8/benchmark.py --warmup_steps=10 --num_intra_threads=28 --num_inter_threads=1 --batch_size=1 --input_graph=/final_int8_inceptionv3.pb --steps=50 --num_cores=28
+run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /final_int8_inceptionv3.pb --intelai-models . --benchmark-only --socket-id 0 --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./int8/benchmark.py --warmup_steps=10 --num_intra_threads=28 --num_inter_threads=1 --batch_size=128 --input_graph=/final_int8_inceptionv3.pb --steps=50 --num_cores=28
+run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /final_int8_inceptionv3.pb --intelai-models . --benchmark-only --socket-id 0 --disable-tcmalloc,numactl --cpunodebind=0 --membind=0 python ./int8/benchmark.py --warmup_steps=10 --num_intra_threads=28 --num_inter_threads=1 --batch_size=128 --input_graph=/final_int8_inceptionv3.pb --steps=50 --num_cores=28
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inceptionv3 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/inception_frozen_max_min.pb --steps=200 --warmup-steps=20,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/int8/benchmark.py --warmup_steps=20 --num_intra_threads=28 --num_inter_threads=1 --batch_size=128 --input_graph=/in_graph/inception_frozen_max_min.pb --steps=200 --num_cores=28
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inceptionv3 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/inception_frozen_max_min.pb --steps=200 --warmup-steps=20,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/int8/benchmark.py --warmup_steps=20 --num_intra_threads=28 --num_inter_threads=1 --batch_size=1 --input_graph=/in_graph/inception_frozen_max_min.pb --steps=200 --num_cores=28
+run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 1 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --socket-id 0 --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28
+run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --socket-id 0 --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28
+run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --socket-id 0 --verbose --num-inter-threads 4 --num-intra-threads 16,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=4 --num-intra-threads=16 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28
+run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --socket-id 0 --disable-tcmalloc --verbose,numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28
+run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 100 --accuracy-only --data-location /dataset --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=2 --num-intra-threads=56 --num-cores=28 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/accuracy.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28 --data_location=/dataset
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=fp32 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/inceptionv4_fp32_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/accuracy.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_fp32_pretrained_model.pb --num_intra_threads=28 --data_location=/dataset
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/benchmark.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/benchmark.py --batch_size=1 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/eval_image_classifier_accuracy.py --input_graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --data_location=/dataset --input_height=299 --input_width=299 --num_inter_threads=2 --num_intra_threads=56 --output_layer=InceptionResnetV2/Logits/Predictions --batch_size=100
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --inter-op-parallelism-threads=2 --intra-op-parallelism-threads=28 --batch-size=1
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --inter-op-parallelism-threads=2 --intra-op-parallelism-threads=28 --batch-size=128
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/eval_image_classifier_accuracy.py --input_graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data_location=/dataset --input_height=299 --input_width=299 --num_inter_threads=2 --num_intra_threads=56 --output_layer=InceptionResnetV2/Logits/Predictions --batch_size=100
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --inter-op-parallelism-threads=1 --intra-op-parallelism-threads=28 --batch-size=1
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --benchmark-only --verbose --in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --inter-op-parallelism-threads=1 --intra-op-parallelism-threads=28 --batch-size=128
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/resnet50_int8_pretrained_model.pb --data-location=/dataset --calibration-only,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/int8/generate_calibration_data.py --num_intra_threads=56 --num_inter_threads=2 --batch_size=100 --input_graph=/in_graph/resnet50_int8_pretrained_model.pb --data_location=/dataset
+run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name resnet50 --batch-size 100 --data-location /dataset --in-graph /final_int8_resnet50.pb --intelai-models . --accuracy-only --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python ./inference/eval_image_classifier_inference.py --input-graph=/final_int8_resnet50.pb --num-inter-threads=2 --num-intra-threads=56 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only
+run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name resnet50 --batch-size 128 --in-graph /final_int8_resnet50.pb --intelai-models . --benchmark-only --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python ./inference/eval_image_classifier_inference.py --input-graph=/final_int8_resnet50.pb --num-inter-threads=2 --num-intra-threads=56 --batch-size=128 --warmup-steps=10 --steps=50
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/resnet50_int8_pretrained_model.pb --steps=200 --warmup-steps=20,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/resnet50_int8_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=20 --steps=200
+run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 1 --in-graph /freezed_resnet50.pb --intelai-models . --socket-id 0 --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28
+run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 128 --in-graph /freezed_resnet50.pb --intelai-models . --socket-id 0 --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28
+run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 1 --in-graph /freezed_resnet50.pb --intelai-models . --socket-id 0 --verbose --num-inter-threads 4 --num-intra-threads 16,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=4 --num-intra-threads=16 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --batch-size 100 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/freezed_resnet50.pb --accuracy-only --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=100 --data-location=/dataset --accuracy-only --num-cores=28 --warmup-steps=10 --steps=50
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --output-results --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/resnet50_fp32_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/resnet50_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --num-cores=28 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --results-file-path /workspace/benchmarks/common/tensorflow/logs/resnet50_fp32_inference_results*.txt
 run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name squeezenet --batch-size 64 --checkpoint /checkpoints --intelai-models . --socket-id 0 --verbose,taskset -c 0-27 python ./fp32/train_squeezenet.py --data_location None --batch_size 64 --num_inter_threads 1 --num_intra_threads 28 --model_dir /checkpoints --inference-only --verbose
 run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name squeezenet --batch-size 1 --checkpoint /checkpoints --intelai-models . --socket-id 0 --verbose,taskset -c 0-27 python ./fp32/train_squeezenet.py --data_location None --batch_size 1 --num_inter_threads 1 --num_intra_threads 28 --model_dir /checkpoints --inference-only --verbose
-run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name faster_rcnn --checkpoint /checkpoints --intelai-models . --model-source-dir . --socket-id 0 --verbose --config_file=pipeline.config,numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --num_inter_threads 1 --num_intra_threads 28 --pipeline_config_path /checkpoints/pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/log/eval
-run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name faster_rcnn --checkpoint /checkpoints --intelai-models . --model-source-dir . --socket-id 0 --verbose --config_file=pipeline.config --num-inter-threads 4 --num-intra-threads 16,numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --num_inter_threads 4 --num_intra_threads 16 --pipeline_config_path /checkpoints/pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/log/eval
+run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name faster_rcnn --checkpoint /checkpoints --intelai-models . --model-source-dir . --socket-id 0 --verbose --config_file=pipeline.config,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --num_inter_threads 1 --num_intra_threads 28 --pipeline_config_path /checkpoints/pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/log/eval
+run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name faster_rcnn --checkpoint /checkpoints --intelai-models . --model-source-dir . --socket-id 0 --verbose --config_file=pipeline.config --num-inter-threads 4 --num-intra-threads 16,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --num_inter_threads 4 --num_intra_threads 16 --pipeline_config_path /checkpoints/pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/log/eval
 run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=-1 --accuracy-only --verbose --checkpoint=/checkpoints --data-location=/dataset --in-graph=/in_graph/frozen_inference_graph.pb,sh /workspace/intelai_models/inference/fp32/coco_accuracy.sh /in_graph/frozen_inference_graph.pb /dataset/coco_val.record /workspace/models
 run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb --data-location=/dataset, sh /workspace/intelai_models/inference/int8/coco_int8.sh /in_graph/ssdmobilenet_int8_pretrained_model.pb /dataset
-run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --batch-size=1 --socket-id 0 --data-location=/dataset --verbose --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb --benchmark-only --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/run_frozen_graph_ssdmob.py -g /in_graph/ssdmobilenet_int8_pretrained_model.pb -n 5000 -d /dataset -x --num-inter-threads 2 --num-intra-threads 28
-run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 --benchmark-only --verbose --in-graph=/in_graph/frozen_inference_graph.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/infer_detections.py --input_tfrecord_paths=/dataset --output_tfrecord_path=/SSD-mobilenet-out.tfrecord --inference_graph=/in_graph/frozen_inference_graph.pb --discard_image_pixels=True --num_inter_threads=2 --num_intra_threads=28
+run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --batch-size=1 --socket-id 0 --data-location=/dataset --verbose --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb --benchmark-only --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/run_frozen_graph_ssdmob.py -g /in_graph/ssdmobilenet_int8_pretrained_model.pb -n 5000 -d /dataset -x --num-inter-threads 2 --num-intra-threads 28
+run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 --benchmark-only --verbose --in-graph=/in_graph/frozen_inference_graph.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/infer_detections.py --input_tfrecord_paths=/dataset --output_tfrecord_path=/SSD-mobilenet-out.tfrecord --inference_graph=/in_graph/frozen_inference_graph.pb --discard_image_pixels=True --num_inter_threads=2 --num_intra_threads=28
 run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 --accuracy-only --verbose --in-graph=/in_graph/frozen_inference_graph.pb --benchmark-dir=/workspace/benchmarks --data-location=/dataset,sh /workspace/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/ssdmobilenet_accuracy.sh /in_graph/frozen_inference_graph.pb /dataset
-run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-resnet34 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssd_resnet34_bs1.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/infer_detections.py --input-graph /in_graph/ssd_resnet34_bs1.pb --batch-size 1 --inter-op-parallelism-threads 1 --intra-op-parallelism-threads 28 --accuracy-only --data-location /dataset
-run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-resnet34 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssd_resnet34_bs1.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/infer_detections.py --input-graph /in_graph/ssd_resnet34_bs1.pb --batch-size 1 --inter-op-parallelism-threads 1 --intra-op-parallelism-threads 28
-run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name rfcn --checkpoint /checkpoints --intelai-models . --model-source-dir . --socket-id 0 --verbose --config_file=rfcn_pipeline.config,numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --inter_op 1 --intra_op 28 --omp 28 --pipeline_config_path /checkpoints/rfcn_pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/models/rfcn/eval --logtostderr --blocktime=0 --run_once=True
+run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-resnet34 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssd_resnet34_bs1.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/infer_detections.py --input-graph /in_graph/ssd_resnet34_bs1.pb --batch-size 1 --inter-op-parallelism-threads 1 --intra-op-parallelism-threads 28 --accuracy-only --data-location /dataset
+run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-resnet34 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssd_resnet34_bs1.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/infer_detections.py --input-graph /in_graph/ssd_resnet34_bs1.pb --batch-size 1 --inter-op-parallelism-threads 1 --intra-op-parallelism-threads 28
+run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name rfcn --checkpoint /checkpoints --intelai-models . --model-source-dir . --socket-id 0 --verbose --config_file=rfcn_pipeline.config,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --inter_op 1 --intra_op 28 --omp 28 --pipeline_config_path /checkpoints/rfcn_pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/models/rfcn/eval --logtostderr --blocktime=0 --run_once=True
 run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=rfcn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --verbose --in-graph=/in_graph/frozen_inference_graph.pb --data-location=/dataset --accuracy-only --split=accuracy_message,FROZEN_GRAPH=/in_graph/frozen_inference_graph.pb TF_RECORD_FILE=/dataset SPLIT=accuracy_message TF_MODELS_ROOT=/workspace/models /workspace/intelai_models/inference/fp32/coco_mAP.sh
-run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=rfcn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --verbose --in-graph=/in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb --data-location=/dataset --benchmark-only --number_of_steps=500,python /workspace/intelai_models/inference/int8/run_rfcn_inference.py -m /workspace/models -g /in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb -x 500 -d /dataset --num-inter-threads 2 --num-intra-threads 56
-run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=rfcn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --verbose --in-graph=/in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb --data-location=/dataset --accuracy-only --split=accuracy_message,FROZEN_GRAPH=/in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb TF_RECORD_FILE=/dataset SPLIT=accuracy_message TF_MODELS_ROOT=/workspace/models /workspace/intelai_models/inference/int8/coco_mAP.sh
+run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=rfcn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --verbose --in-graph=/in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb --data-location=/dataset --benchmark-only --number_of_steps=500,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/int8/run_rfcn_inference.py -m /workspace/models -g /in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb -x 500 -d /dataset --num-inter-threads 2 --num-intra-threads 56
+run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=rfcn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --verbose --in-graph=/in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb --data-location=/dataset --accuracy-only --split=accuracy_message,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 FROZEN_GRAPH=/in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb TF_RECORD_FILE=/dataset SPLIT=accuracy_message TF_MODELS_ROOT=/workspace/models /workspace/intelai_models/inference/int8/coco_mAP.sh
 run_tf_benchmark.py --framework tensorflow --use-case text_to_speech --precision fp32 --mode inference --model-name wavenet --num-cores 1 --checkpoint /checkpoints --intelai-models . --model-source-dir . --socket-id 0 --verbose --checkpoint_name=model.ckpt-99 --sample=8510,numactl --physcpubind=0-0 --membind=0 python generate.py /checkpoints/model.ckpt-99 --num_inter_threads=1 --num_intra_threads=1 --sample=8510
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=128 --num-inter-threads=1 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=28 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --accuracy-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb --data-location=/dataset,python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=128 --num-inter-threads=2 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=56 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=128 --num-inter-threads=1 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=28 --warmup-steps=10 --steps=50
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=1 --num-inter-threads=1 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=28 --warmup-steps=10 --steps=50
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/resnet101_int8_model.pb,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=1 --num-intra-threads=28 --num-inter-threads=1 --input-graph=/in_graph/resnet101_int8_model.pb --warmup-steps=40 --steps=100
-"run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_language --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --decode_from_file=/checkpoints/newstest2015.en --reference=/checkpoints/newstest2015.de","numactl --cpunodebind=0 --membind=0 python /workspace/models/tensor2tensor/bin/t2t_decoder.py --problem=translate_ende_wmt32k --model=transformer --hparams_set=transformer_base_single_gpu --decode_hparams=beam_size=4,alpha=0.6,batch_size=1 --data_dir=/dataset --output_dir=/checkpoints --decode_from_file=/checkpoints/newstest2015.en --decode_to_file=/workspace/models/out_dir/output_infer --reference=/checkpoints/newstest2015.de --inter_op_parallelism_threads=1 --intra_op_parallelism_threads=28"
-"run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_language --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=32 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --decode_from_file=/checkpoints/newstest2015.en --reference=/checkpoints/newstest2015.de","numactl --cpunodebind=0 --membind=0 python /workspace/models/tensor2tensor/bin/t2t_decoder.py --problem=translate_ende_wmt32k --model=transformer --hparams_set=transformer_base_single_gpu --decode_hparams=beam_size=4,alpha=0.6,batch_size=32 --data_dir=/dataset --output_dir=/checkpoints --decode_from_file=/checkpoints/newstest2015.en --decode_to_file=/workspace/models/out_dir/output_infer --reference=/checkpoints/newstest2015.de --inter_op_parallelism_threads=1 --intra_op_parallelism_threads=28"
-run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_lt_official --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --file=/dataset/newstest2014.en --reference=/dataset/newstest2014.de --vocab_file=/dataset/vocab.txt --in_graph=/in_graph/fp32_graphdef.pb,numactl --cpunodebind=0 --membind=0 python /workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=1 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt
-run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_lt_official --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=64 --socket-id=0 --benchmark-only --verbose --file=/dataset/newstest2014.en --reference=/dataset/newstest2014.de --vocab_file=/dataset/vocab.txt --in_graph=/in_graph/fp32_graphdef.pb,numactl --cpunodebind=0 --membind=0 python /workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=64 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --data-location=/dataset --input_height=224 --input_width=224, numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/accuracy.py --input_height=224 --input_width=224 --num_intra_threads=28 --output_layer=MobilenetV1/Predictions/Reshape_1 --num_inter_threads=1 --batch_size=100 --input_graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --data_location=/dataset --input_layer=input
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_height=224 --input_width=224 --warmup_steps=10 --steps=50, numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/benchmark.py --input_height=224 --input_width=224 --warmup_steps=10 --num_intra_threads=28 --output_layer=MobilenetV1/Predictions/Reshape_1 --num_inter_threads=1 --batch_size=1 --input_graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_layer=input --steps=50
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_height=224 --input_width=224 --warmup_steps=10 --steps=50, numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/benchmark.py --input_height=224 --input_width=224 --warmup_steps=10 --num_intra_threads=28 --output_layer=MobilenetV1/Predictions/Reshape_1 --num_inter_threads=1 --batch_size=240 --input_graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_layer=input --steps=50
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=128 --num-inter-threads=1 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=28 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --accuracy-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=128 --num-inter-threads=2 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=56 --warmup-steps=10 --steps=50 --data-location=/dataset
--accuracy-only +run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=128 --num-inter-threads=1 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=28 --warmup-steps=10 --steps=50 +run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=1 --num-inter-threads=1 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=28 --warmup-steps=10 --steps=50 +run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/resnet101_int8_model.pb,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=1 --num-intra-threads=28 --num-inter-threads=1 --input-graph=/in_graph/resnet101_int8_model.pb --warmup-steps=40 --steps=100 +"run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_language --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --decode_from_file=/checkpoints/newstest2015.en --reference=/checkpoints/newstest2015.de","LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/models/tensor2tensor/bin/t2t_decoder.py --problem=translate_ende_wmt32k --model=transformer --hparams_set=transformer_base_single_gpu --decode_hparams=beam_size=4,alpha=0.6,batch_size=1 --data_dir=/dataset --output_dir=/checkpoints --decode_from_file=/checkpoints/newstest2015.en --decode_to_file=/workspace/models/out_dir/output_infer --reference=/checkpoints/newstest2015.de --inter_op_parallelism_threads=1 --intra_op_parallelism_threads=28" +"run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_language --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=32 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --decode_from_file=/checkpoints/newstest2015.en --reference=/checkpoints/newstest2015.de","LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/models/tensor2tensor/bin/t2t_decoder.py --problem=translate_ende_wmt32k --model=transformer 
--hparams_set=transformer_base_single_gpu --decode_hparams=beam_size=4,alpha=0.6,batch_size=32 --data_dir=/dataset --output_dir=/checkpoints --decode_from_file=/checkpoints/newstest2015.en --decode_to_file=/workspace/models/out_dir/output_infer --reference=/checkpoints/newstest2015.de --inter_op_parallelism_threads=1 --intra_op_parallelism_threads=28" +run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_lt_official --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --file=/dataset/newstest2014.en --reference=/dataset/newstest2014.de --vocab_file=/dataset/vocab.txt --in_graph=/in_graph/fp32_graphdef.pb,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=1 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt +run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_lt_official --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=64 --socket-id=0 --benchmark-only --verbose --file=/dataset/newstest2014.en --reference=/dataset/newstest2014.de --vocab_file=/dataset/vocab.txt --in_graph=/in_graph/fp32_graphdef.pb,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=64 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt +run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --data-location=/dataset --input_height=224 --input_width=224,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/accuracy.py --input_height=224 --input_width=224 --num_intra_threads=28 --output_layer=MobilenetV1/Predictions/Reshape_1 --num_inter_threads=1 --batch_size=100 --input_graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --data_location=/dataset --input_layer=input +run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_height=224 --input_width=224 --warmup_steps=10 --steps=50,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/benchmark.py --input_height=224 
--input_width=224 --warmup_steps=10 --num_intra_threads=28 --output_layer=MobilenetV1/Predictions/Reshape_1 --num_inter_threads=1 --batch_size=1 --input_graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_layer=input --steps=50 +run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_height=224 --input_width=224 --warmup_steps=10 --steps=50,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/benchmark.py --input_height=224 --input_width=224 --warmup_steps=10 --num_intra_threads=28 --output_layer=MobilenetV1/Predictions/Reshape_1 --num_inter_threads=1 --batch_size=240 --input_graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_layer=input --steps=50 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 -l python /workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_dir /dataset --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 1 -python common/tensorflow/run_tf_benchmark.py --framework=tensorflow --use-case=language_modeling --model-name=lm-1b --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models,numactl --cpunodebind=0 --membind=0 python /workspace/models/benchmark.py -b=1 -I=100 --inter=1 --intra=28 +python common/tensorflow/run_tf_benchmark.py --framework=tensorflow --use-case=language_modeling --model-name=lm-1b --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/models/benchmark.py -b=1 -I=100 --inter=1 --intra=28 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 -l python /workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_dir /dataset --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 
100 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints,numactl --cpunodebind=0 -l python /workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 100 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --checkpoint=/checkpoints --in-graph=/in_graph/mobilenet_v1_1.0_224_frozen.pb --data-location=/dataset,python /workspace/intelai_models/inference/fp32/accuracy.py --batch_size=100 --num_inter_threads=2 --input_graph=/in_graph/mobilenet_v1_1.0_224_frozen.pb --num_intra_threads=56 --data_location=/dataset -run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=256 --socket-id 0 --accuracy-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=256 --inference_only --accuracy_only -run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=256 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=256 --inference_only --benchmark_only -run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=1 --inference_only --benchmark_only -run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=maskrcnn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/coco.py evaluate --dataset=/dataset --num_inter_threads 1 --num_intra_threads 28 --nw 5 --nb 50 --model=coco 
--infbs 1 -run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=maskrcnn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --data-location=/dataset --num-inter-threads 4 --num-intra-threads 16,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/coco.py evaluate --dataset=/dataset --num_inter_threads 4 --num_intra_threads 16 --nw 5 --nb 50 --model=coco --infbs 1 +run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=256 --socket-id 0 --accuracy-only --verbose --checkpoint=/checkpoints --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=256 --inference_only --accuracy_only +run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=256 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=256 --inference_only --benchmark_only +run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=1 --inference_only --benchmark_only +run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=maskrcnn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/coco.py evaluate --dataset=/dataset --num_inter_threads 1 --num_intra_threads 28 --nw 5 --nb 50 --model=coco --infbs 1 +run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=maskrcnn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --data-location=/dataset --num-inter-threads 4 --num-intra-threads 16,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/coco.py evaluate --dataset=/dataset --num_inter_threads 4 --num_intra_threads 16 --nw 5 --nb 50 --model=coco --infbs 1 run_tf_benchmark.py 
--framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=-1 --accuracy-only --verbose --in-graph=/in_graph/pretrained_int8_faster_rcnn_model.pb --data-location=/dataset,/workspace/intelai_models/inference/int8/coco_int8.sh /in_graph/pretrained_int8_faster_rcnn_model.pb /dataset /workspace/models -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=-1 --benchmark-only --verbose --in-graph=/in_graph/pretrained_int8_faster_rcnn_model.pb --data-location=/dataset, python /workspace/intelai_models/inference/int8/run_frozen_graph_rcnn.py -g /in_graph/pretrained_int8_faster_rcnn_model.pb -d /dataset --num-inter-threads 2 --num-intra-threads 56 -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=-1 --benchmark-only --verbose --in-graph=/in_graph/pretrained_int8_faster_rcnn_model.pb --data-location=/dataset --number-of-steps=500, python /workspace/intelai_models/inference/int8/run_frozen_graph_rcnn.py -g /in_graph/pretrained_int8_faster_rcnn_model.pb -n 500 -d /dataset --num-inter-threads 2 --num-intra-threads 56 -run_tf_benchmark.py --framework=tensorflow --use-case=adversarial_networks --model-name=dcgan --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/inference_bench.py -ckpt /checkpoints -dl /dataset --num_inter_threads 1 --num_intra_threads 28 -nw 100 -nb 500 --bs 100 --kmp_blocktime 1 --kmp_settings 1 -run_tf_benchmark.py --framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 1 --num_intra_threads 28 --bs 1 --dl /dataset --nw 100 --nb 200 -run_tf_benchmark.py --framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 1 --num_intra_threads 28 --bs 100 --dl /dataset --nw 100 --nb 200 -run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=gnmt --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 
--output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --infer_mode=beam_search,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/fp32/nmt.py --src=de --tgt=en --hparams_path=/workspace/intelai_models/fp32/standard_hparams/wmt16_gnmt_4_layer_internal.json --out_dir=/workspace/benchmarks/common/tensorflow/logs --vocab_prefix=/dataset/vocab.bpe.32000 --ckpt=/checkpoints/translate.ckpt --infer_batch_size=1 --inference_input_file=/dataset/newstest2015.tok.bpe.32000.de --inference_output_file=/workspace/benchmarks/common/tensorflow/logs/output_infer --inference_ref_file=/dataset/newstest2015.tok.bpe.32000.en --num_inter_threads=1 --num_intra_threads=28 --infer_mode=beam_search -run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=gnmt --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=32 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --infer_mode=beam_search,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/fp32/nmt.py --src=de --tgt=en --hparams_path=/workspace/intelai_models/fp32/standard_hparams/wmt16_gnmt_4_layer_internal.json --out_dir=/workspace/benchmarks/common/tensorflow/logs --vocab_prefix=/dataset/vocab.bpe.32000 --ckpt=/checkpoints/translate.ckpt --infer_batch_size=32 --inference_input_file=/dataset/newstest2015.tok.bpe.32000.de --inference_output_file=/workspace/benchmarks/common/tensorflow/logs/output_infer --inference_ref_file=/dataset/newstest2015.tok.bpe.32000.en --num_inter_threads=1 --num_intra_threads=28 --infer_mode=beam_search -run_tf_benchmark.py --framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --num-inter-threads 4 --num-intra-threads 16,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 4 --num_intra_threads 16 --bs 100 --dl /dataset --nw 100 --nb 200 -run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=unet --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --checkpoint_name=model.ckpt,numactl --cpunodebind=0 --membind=0 python /workspace/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/unet_infer.py -bs 1 -cp /checkpoints/model.ckpt --num_inter_threads 1 --num_intra_threads 28 -nw 80 -nb 400 -run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,numactl --cpunodebind=0 --membind=0 python 
/workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14 -run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=1 --input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14 -run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=512 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=512 --input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14 -run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_int8_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --input_graph=/in_graph/wide_deep_int8_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14 -run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_int8_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=1 --input_graph=/in_graph/wide_deep_int8_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14 -run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=512 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs 
--benchmark-only --verbose --in-graph=/in_graph/wide_deep_int8_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=512 --input_graph=/in_graph/wide_deep_int8_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14 -run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=mtcc --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/one_image_test.py --num_inter_threads 1 --num_intra_threads 28 -ckpt /checkpoints -dl /dataset -run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset, numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=1 --num_intra_threads=28 --lfw_batch_size=1 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=200 --max_steps=1000 -run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset, numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=2 --num_intra_threads=28 --lfw_batch_size=100 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=40 --max_steps=1000 -run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset, numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=2 --num_intra_threads=28 --lfw_batch_size=100 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=40 --max_steps=1000 +run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=int8 --mode=inference --model-source-dir=/workspace/models 
--intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=-1 --benchmark-only --verbose --in-graph=/in_graph/pretrained_int8_faster_rcnn_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/int8/run_frozen_graph_rcnn.py -g /in_graph/pretrained_int8_faster_rcnn_model.pb -d /dataset --num-inter-threads 2 --num-intra-threads 56 +run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=-1 --benchmark-only --verbose --in-graph=/in_graph/pretrained_int8_faster_rcnn_model.pb --data-location=/dataset --number-of-steps=500,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/int8/run_frozen_graph_rcnn.py -g /in_graph/pretrained_int8_faster_rcnn_model.pb -n 500 -d /dataset --num-inter-threads 2 --num-intra-threads 56 +run_tf_benchmark.py --framework=tensorflow --use-case=adversarial_networks --model-name=dcgan --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/inference_bench.py -ckpt /checkpoints -dl /dataset --num_inter_threads 1 --num_intra_threads 28 -nw 100 -nb 500 --bs 100 --kmp_blocktime 1 --kmp_settings 1 +run_tf_benchmark.py --framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 1 --num_intra_threads 28 --bs 1 --dl /dataset --nw 100 --nb 200 +run_tf_benchmark.py --framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 1 --num_intra_threads 28 --bs 100 --dl /dataset --nw 100 --nb 200 +run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=gnmt --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --infer_mode=beam_search,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/fp32/nmt.py --src=de --tgt=en --hparams_path=/workspace/intelai_models/fp32/standard_hparams/wmt16_gnmt_4_layer_internal.json --out_dir=/workspace/benchmarks/common/tensorflow/logs 
--vocab_prefix=/dataset/vocab.bpe.32000 --ckpt=/checkpoints/translate.ckpt --infer_batch_size=1 --inference_input_file=/dataset/newstest2015.tok.bpe.32000.de --inference_output_file=/workspace/benchmarks/common/tensorflow/logs/output_infer --inference_ref_file=/dataset/newstest2015.tok.bpe.32000.en --num_inter_threads=1 --num_intra_threads=28 --infer_mode=beam_search +run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=gnmt --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=32 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --infer_mode=beam_search,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/fp32/nmt.py --src=de --tgt=en --hparams_path=/workspace/intelai_models/fp32/standard_hparams/wmt16_gnmt_4_layer_internal.json --out_dir=/workspace/benchmarks/common/tensorflow/logs --vocab_prefix=/dataset/vocab.bpe.32000 --ckpt=/checkpoints/translate.ckpt --infer_batch_size=32 --inference_input_file=/dataset/newstest2015.tok.bpe.32000.de --inference_output_file=/workspace/benchmarks/common/tensorflow/logs/output_infer --inference_ref_file=/dataset/newstest2015.tok.bpe.32000.en --num_inter_threads=1 --num_intra_threads=28 --infer_mode=beam_search +run_tf_benchmark.py --framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --num-inter-threads 4 --num-intra-threads 16,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 4 --num_intra_threads 16 --bs 100 --dl /dataset --nw 100 --nb 200 +run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=unet --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --checkpoint_name=model.ckpt,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/unet_infer.py -bs 1 -cp /checkpoints/model.ckpt --num_inter_threads 1 --num_intra_threads 28 -nw 80 -nb 400 +run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14 +run_tf_benchmark.py --framework=tensorflow 
--use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=1 --input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14 +run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=512 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=512 --input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14 +run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_int8_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --input_graph=/in_graph/wide_deep_int8_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14 +run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_int8_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=1 --input_graph=/in_graph/wide_deep_int8_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14 +run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=512 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_int8_pretrained_model.pb 
--data-location=/dataset --num-parallel-batches=14,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=512 --input_graph=/in_graph/wide_deep_int8_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14 +run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=mtcc --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/one_image_test.py --num_inter_threads 1 --num_intra_threads 28 -ckpt /checkpoints -dl /dataset +run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset, LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=1 --num_intra_threads=28 --lfw_batch_size=1 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=200 --max_steps=1000 +run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset, LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=2 --num_intra_threads=28 --lfw_batch_size=100 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=40 --max_steps=1000 +run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset, LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=2 --num_intra_threads=28 --lfw_batch_size=100 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=40 --max_steps=1000 diff --git a/tests/unit/common/test_base_model_init.py 
b/tests/unit/common/test_base_model_init.py index 979a6ac4c..2e147ee62 100644 --- a/tests/unit/common/test_base_model_init.py +++ b/tests/unit/common/test_base_model_init.py @@ -56,6 +56,11 @@ def mock_json(patch): return patch('json') +@pytest.fixture +def mock_glob(patch): + return patch('glob.glob') + + # Example args and output strings for testing mocks test_model_name = "resnet50" test_framework = "tensorflow" @@ -170,3 +175,36 @@ def test_set_kmp_vars_config_json_exists(mock_json): file_descriptor, config_file_path = tempfile.mkstemp(suffix=".json") base_model_init.set_kmp_vars(config_file_path) + + +@pytest.mark.parametrize('precision', ['int8', 'fp32']) +def test_command_prefix_tcmalloc(precision, mock_glob): + """ Models should include LD_PRELOAD in the command prefix, as long as tcmalloc is not disabled""" + platform_util = MagicMock() + args = MagicMock(verbose=True, model_name=test_model_name) + test_tcmalloc_lib = "/usr/lib/libtcmalloc.so.4.2.6" + mock_glob.return_value = [test_tcmalloc_lib] + os.environ["PYTHON_EXE"] = "python" + args.socket_id = 0 + args.precision = precision + + # If tcmalloc is not disabled, we should have LD_PRELOAD in the prefix + args.disable_tcmalloc = False + base_model_init = BaseModelInitializer(args, [], platform_util) + command_prefix = base_model_init.get_command_prefix(args.socket_id) + assert "LD_PRELOAD={}".format(test_tcmalloc_lib) in command_prefix + assert "numactl --cpunodebind=0 --membind=0" in command_prefix + + # If tcmalloc is disabled, LD_PRELOAD should not be in the prefix + args.disable_tcmalloc = True + base_model_init = BaseModelInitializer(args, [], platform_util) + command_prefix = base_model_init.get_command_prefix(args.socket_id) + assert "LD_PRELOAD={}".format(test_tcmalloc_lib) not in command_prefix + assert "numactl --cpunodebind=0 --membind=0" in command_prefix + + # If numactl is set to false, we should not have numactl in the prefix + args.disable_tcmalloc = False + base_model_init = BaseModelInitializer(args, [], platform_util) + command_prefix = base_model_init.get_command_prefix(args.socket_id, numactl=False) + assert "LD_PRELOAD={}".format(test_tcmalloc_lib) in command_prefix + assert "numactl" not in command_prefix From 1659cdb4f3caa9615f55252684359ba004b5dc4a Mon Sep 17 00:00:00 2001 From: Wafaa Taie Date: Mon, 22 Apr 2019 18:56:10 -0700 Subject: [PATCH 21/62] Add SSD-VGG16 COCO int8/fp32 inference benchmarks (#286) * add ssd_vgg16 coco based benchmarks * configure the env vars, clean up and update start.sh and readme. * cleanup and update readme. * update readme and other files to highlight this model works only with py3. * add and use the anchor_manipulator.py customized file. * apply changes to install cocoapi, and update readme with SKX based benchmark log snippet. * use a cocoapi that works with py3, modify to get the expected performance, add unit tests. * add changes based on code review * fix the model name to use ssd_vgg16 instead of ssd-vgg16 to fix python import. * fix unit tests, update readme. * update log snippet in readme. * update unit tests after supporting memory alloc. * changes for Karthik's code review. * update the int8 log snippet based on CLX. * update the fp32 log snippet based on CLX. 
--- benchmarks/README.md | 1 + benchmarks/common/tensorflow/start.sh | 28 ++ .../object_detection/tensorflow/__init__.py | 2 +- .../tensorflow/ssd_vgg16/README.md | 367 ++++++++++++++++++ .../tensorflow/ssd_vgg16/__init__.py | 19 + .../ssd_vgg16/inference/__init__.py | 19 + .../ssd_vgg16/inference/config.json | 6 + .../ssd_vgg16/inference/fp32/__init__.py | 19 + .../ssd_vgg16/inference/fp32/model_init.py | 28 ++ .../ssd_vgg16/inference/int8/__init__.py | 19 + .../ssd_vgg16/inference/int8/model_init.py | 28 ++ .../inference/ssd_vgg16_model_init.py | 107 +++++ .../tensorflow/ssd_vgg16/__init__.py | 19 + .../ssd_vgg16/inference/__init__.py | 19 + .../ssd_vgg16/inference/anchor_manipulator.py | 353 +++++++++++++++++ .../ssd_vgg16/inference/eval_ssd.py | 316 +++++++++++++++ .../ssd_vgg16/inference/validate_ssd_vgg16.py | 111 ++++++ .../unit/common/tensorflow/tf_model_args.txt | 4 + 18 files changed, 1464 insertions(+), 1 deletion(-) create mode 100644 benchmarks/object_detection/tensorflow/ssd_vgg16/README.md create mode 100644 benchmarks/object_detection/tensorflow/ssd_vgg16/__init__.py create mode 100644 benchmarks/object_detection/tensorflow/ssd_vgg16/inference/__init__.py create mode 100644 benchmarks/object_detection/tensorflow/ssd_vgg16/inference/config.json create mode 100644 benchmarks/object_detection/tensorflow/ssd_vgg16/inference/fp32/__init__.py create mode 100644 benchmarks/object_detection/tensorflow/ssd_vgg16/inference/fp32/model_init.py create mode 100644 benchmarks/object_detection/tensorflow/ssd_vgg16/inference/int8/__init__.py create mode 100644 benchmarks/object_detection/tensorflow/ssd_vgg16/inference/int8/model_init.py create mode 100644 benchmarks/object_detection/tensorflow/ssd_vgg16/inference/ssd_vgg16_model_init.py create mode 100644 models/object_detection/tensorflow/ssd_vgg16/__init__.py create mode 100644 models/object_detection/tensorflow/ssd_vgg16/inference/__init__.py create mode 100644 models/object_detection/tensorflow/ssd_vgg16/inference/anchor_manipulator.py create mode 100644 models/object_detection/tensorflow/ssd_vgg16/inference/eval_ssd.py create mode 100644 models/object_detection/tensorflow/ssd_vgg16/inference/validate_ssd_vgg16.py diff --git a/benchmarks/README.md b/benchmarks/README.md index 4f5a83172..c8959af5a 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -36,6 +36,7 @@ dependencies to be installed: | Object Detection | TensorFlow | [R-FCN](https://arxiv.org/pdf/1605.06409.pdf) | Inference | [Int8](object_detection/tensorflow/rfcn/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/rfcn/README.md#fp32-inference-instructions) | | Object Detection | TensorFlow | [Faster R-CNN](https://arxiv.org/pdf/1506.01497.pdf) | Inference | [Int8](object_detection/tensorflow/faster_rcnn/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/faster_rcnn/README.md#fp32-inference-instructions) | | Object Detection | TensorFlow | [SSD-MobileNet](https://arxiv.org/pdf/1704.04861.pdf) | Inference | [Int8](object_detection/tensorflow/ssd-mobilenet/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/ssd-mobilenet/README.md#fp32-inference-instructions) | +| Object Detection | TensorFlow | SSD-VGG16 | Inference | [Int8](object_detection/tensorflow/ssd_vgg16/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/ssd_vgg16/README.md#fp32-inference-instructions) | | Object Detection | TensorFlow | [SSD-ResNet34](https://arxiv.org/pdf/1512.02325.pdf) | Inference | 
[FP32](object_detection/tensorflow/ssd-resnet34/README.md#fp32-inference-instructions) |
| Recommendation | TensorFlow | [NCF](https://arxiv.org/pdf/1708.05031.pdf) | Inference | [FP32](recommendation/tensorflow/ncf/README.md#fp32-inference-instructions) |
| Recommendation | TensorFlow | [Wide & Deep Large Dataset](https://arxiv.org/pdf/1606.07792.pdf) | Inference | [Int8](recommendation/tensorflow/wide_deep_large_ds/README.md#int8-inference-instructions) [FP32](recommendation/tensorflow/wide_deep_large_ds/README.md#fp32-inference-instructions) |
diff --git a/benchmarks/common/tensorflow/start.sh b/benchmarks/common/tensorflow/start.sh
index d1ec29216..ab17c9bc6 100755
--- a/benchmarks/common/tensorflow/start.sh
+++ b/benchmarks/common/tensorflow/start.sh
@@ -642,6 +642,32 @@ function ssd-resnet34() {
   fi
 }

+# SSD-VGG16 model
+function ssd_vgg16() {
+
+  if [ ${NOINSTALL} != "True" ]; then
+    pip install opencv-python Cython
+
+    if [ ${ACCURACY_ONLY} == "True" ]; then
+      # get the python cocoapi
+      get_cocoapi ${MOUNT_EXTERNAL_MODELS_SOURCE}/coco ${MOUNT_INTELAI_MODELS_SOURCE}/inference
+    fi
+  fi
+
+  cp ${MOUNT_INTELAI_MODELS_SOURCE}/__init__.py ${MOUNT_EXTERNAL_MODELS_SOURCE}/dataset
+  cp ${MOUNT_INTELAI_MODELS_SOURCE}/__init__.py ${MOUNT_EXTERNAL_MODELS_SOURCE}/preprocessing
+  cp ${MOUNT_INTELAI_MODELS_SOURCE}/__init__.py ${MOUNT_EXTERNAL_MODELS_SOURCE}/utility
+  export PYTHONPATH=${PYTHONPATH}:${MOUNT_EXTERNAL_MODELS_SOURCE}
+
+  if [ ${PRECISION} == "int8" ] || [ ${PRECISION} == "fp32" ]; then
+    CMD="${CMD} $(add_steps_args)"
+    PYTHONPATH=${PYTHONPATH} CMD=${CMD} run_model
+  else
+    echo "PRECISION=${PRECISION} is not supported for ${MODEL_NAME}"
+    exit 1
+  fi
+}
+
 # UNet model
 function unet() {
   if [ ${PRECISION} == "fp32" ]; then
@@ -868,6 +894,8 @@ elif [ ${MODEL_NAME} == "ssd-mobilenet" ]; then
   ssd_mobilenet
 elif [ ${MODEL_NAME} == "ssd-resnet34" ]; then
   ssd-resnet34
+elif [ ${MODEL_NAME} == "ssd_vgg16" ]; then
+  ssd_vgg16
 elif [ ${MODEL_NAME} == "unet" ]; then
   unet
 elif [ ${MODEL_NAME} == "transformer_language" ]; then
diff --git a/benchmarks/object_detection/tensorflow/__init__.py b/benchmarks/object_detection/tensorflow/__init__.py
index cf793ec6a..d9c4123de 100644
--- a/benchmarks/object_detection/tensorflow/__init__.py
+++ b/benchmarks/object_detection/tensorflow/__init__.py
@@ -1,7 +1,7 @@
 #
 # -*- coding: utf-8 -*-
 #
-# Copyright (c) 2018 Intel Corporation
+# Copyright (c) 2019 Intel Corporation
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md b/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md
new file mode 100644
index 000000000..47233e7e2
--- /dev/null
+++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md
@@ -0,0 +1,367 @@
+# SSD-VGG16
+
+This document has instructions for how to run SSD-VGG16 for the
+following modes/precisions:
+* [Int8 inference](#int8-inference-instructions)
+* [FP32 inference](#fp32-inference-instructions)
+
+Benchmarking instructions and scripts for model training and inference
+for other precisions are coming later.
+
+## Int8 Inference Instructions
+
+1. Clone the [original model](https://github.com/HiKapok/SSD.TensorFlow) repository:
+```
+$ git clone https://github.com/HiKapok/SSD.TensorFlow.git
+$ cd SSD.TensorFlow
+$ git checkout 2d8b0cb9b2e70281bf9dce438ff17ffa5e59075c
+```
+
+2.
Download the 2017 validation
+[COCO dataset](http://cocodataset.org/#home) and annotations:
+This is required if you would like to run the accuracy test,
+or the throughput and latency benchmark with real data.
+
+The [TensorFlow models](https://github.com/tensorflow/models) repo will be used for
+converting the coco dataset to the TF records format.
+```
+$ mkdir val
+$ cd val
+$ wget http://images.cocodataset.org/zips/val2017.zip
+$ unzip val2017.zip
+$ cd ..
+```
+
+Continue with the instructions below to generate the
+TF record file.
+```
+$ mkdir annotations
+$ cd annotations
+$ wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip
+$ unzip annotations_trainval2017.zip
+$ cd ..
+```
+
+Since we are only using the validation dataset in this example, we will
+create an empty directory and an empty annotations JSON file to pass as the
+train and test directories in the next step.
+```
+$ mkdir empty_dir
+
+$ cd annotations
+$ echo "{ \"images\": {}, \"categories\": {}}" > empty.json
+$ cd ..
+```
+
+3. Now that you have the raw COCO dataset, we need to convert it to the
+TF records format in order to use it with the inference script. We will
+do this by running the `create_coco_tf_record.py` file in the TensorFlow
+models repo.
+
+Follow the steps below to navigate to the proper directory and point the
+script to the raw COCO dataset files that you have downloaded in step 2.
+The `--output_dir` is the location where the TF record files will be
+located after the script has completed.
+
+```
+# Checkout this git commit in order to use an older version of the conversion script
+$ cd models
+$ git checkout 7a9934df2afdf95be9405b4e9f1f2480d748dc40
+
+$ cd research/object_detection/dataset_tools/
+$ python create_coco_tf_record.py --logtostderr \
+      --train_image_dir="/home//coco/empty_dir" \
+      --val_image_dir="/home//coco/val/val2017" \
+      --test_image_dir="/home//coco/empty_dir" \
+      --train_annotations_file="/home//coco/annotations/empty.json" \
+      --val_annotations_file="/home//coco/annotations/instances_val2017.json" \
+      --testdev_annotations_file="/home//coco/annotations/empty.json" \
+      --output_dir="/home//coco/output"
+
+$ ll /home/myuser/coco/output
+total 1598276
+-rw-rw-r--. 1 0 Nov 2 21:46 coco_testdev.record
+-rw-rw-r--. 1 0 Nov 2 21:46 coco_train.record
+-rw-rw-r--. 1 818336740 Nov 2 21:46 coco_val.record
+```
+
+4. Download the pretrained model:
+
+```
+$ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/ssdvgg16_int8_pretrained_model.pb
+```
+
+5. Clone the [intelai/models](https://github.com/intelai/models) repo
+and then run the benchmarking scripts for either throughput
+and latency or accuracy.
+```
+$ git clone git@github.com:IntelAI/models.git
+$ cd models/benchmarks
+```
+
+* Run benchmarking for throughput and latency, where the `--model-source-dir` is the model source directory from step 1
+and the `--in-graph` is the pretrained model graph from step 4.
+If you specify the `--data-location`, which is the path to the TF record file that you generated in step 3,
+the benchmark will run with real data; otherwise, dummy data will be used:
+```
+python launch_benchmark.py \
+    --model-name ssd_vgg16 \
+    --mode inference \
+    --precision int8 \
+    --framework tensorflow \
+    --docker-image intelaipg/intel-optimized-tensorflow:nightly-master-devel-mkl-py3 \
+    --model-source-dir /home//SSD.TensorFlow \
+    --data-location /home//coco/output \
+    --in-graph /home//ssdvgg16_int8_pretrained_model.pb \
+    --batch-size 1 \
+    --socket-id 0 \
+    --num-inter-threads 11 \
+    --num-intra-threads 21 \
+    --data-num-inter-threads 21 \
+    --data-num-intra-threads 28 \
+    -- warmup-steps=100 steps=500
+```
+
+* For the accuracy test:
+
+  * Clone the customized [cocoapi repo](https://github.com/waleedka/coco) into
+the model directory `SSD.TensorFlow` from step 1.
+  ```
+  $ git clone https://github.com/waleedka/coco.git
+
+  ```
+  * The `--data-location` is required, which is the path to the TF record file that you generated in step 3.
+  * Copy the annotation file `instances_val2017.json` (from step 3) to the dataset directory `/home//coco/output`.
+  * Use the `--accuracy-only` flag:
+```
+python launch_benchmark.py \
+    --model-name ssd_vgg16 \
+    --mode inference \
+    --precision int8 \
+    --framework tensorflow \
+    --docker-image intelaipg/intel-optimized-tensorflow:nightly-master-devel-mkl-py3 \
+    --model-source-dir /home//SSD.TensorFlow \
+    --data-location /home//coco/output \
+    --in-graph /home//ssdvgg16_int8_pretrained_model.pb \
+    --accuracy-only \
+    --batch-size 1
+```
+
+>Notes:
+>* For the throughput and latency benchmark, we recommend the provided values for the arguments: `--num-inter-threads=11`, `--num-intra-threads=21`, `--data-num-inter-threads=21`,
+ `--data-num-intra-threads=28` for optimized performance on a 28-core Cascade Lake (CLX) machine.
+
+>* The SSD-VGG16 model accuracy test works only with `Python3` based docker images.
+
+>* The `--verbose` or `--output-dir` flag can be added to any of the above commands
+to get additional debug output or change the default output location.
+
+6. The log file is saved to the value of `--output-dir`.
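For reference, the recommended thread flags above feed two separate TensorFlow session configs inside this patch's `eval_ssd.py`: one for the data pipeline and one for inference, so preprocessing threads can be tuned independently of the compute threads. A condensed sketch of that mapping (the values shown are this README's CLX recommendations, not universal defaults):

```python
import tensorflow as tf

# Session config for the input pipeline (dataset decode/preprocess graph)
data_config = tf.ConfigProto()
data_config.inter_op_parallelism_threads = 21   # --data-num-inter-threads
data_config.intra_op_parallelism_threads = 28   # --data-num-intra-threads
data_config.use_per_session_threads = 1

# Session config for the inference graph itself
infer_config = tf.ConfigProto()
infer_config.inter_op_parallelism_threads = 11  # --num-inter-threads
infer_config.intra_op_parallelism_threads = 21  # --num-intra-threads
infer_config.use_per_session_threads = 1
```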
+
+Below is a sample log file tail when running benchmarking for throughput
+and latency; the following results are based on a 28-core CLX system with hyper-threading enabled:
+
+```
+Batch size = 1
+Throughput: 30.382 images/sec
+Latency: 32.915 ms
+Ran inference with batch size 1
+Log location outside container: {--output-dir value}/benchmark_ssd_vgg16_inference_int8_20190417_231832.log
+```
+
+And here is a sample log file tail when running for accuracy:
+
+```
+ Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.231
+ Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.386
+ Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.243
+ Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.058
+ Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.265
+ Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.391
+ Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.224
+ Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.330
+ Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.355
+ Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.091
+ Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.420
+ Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.558
+```
+
+## FP32 Inference Instructions
+
+1. Clone the [original model](https://github.com/HiKapok/SSD.TensorFlow) repository:
+```
+$ git clone https://github.com/HiKapok/SSD.TensorFlow.git
+$ cd SSD.TensorFlow
+$ git checkout 2d8b0cb9b2e70281bf9dce438ff17ffa5e59075c
+```
+
+2. Download the 2017 validation
+[COCO dataset](http://cocodataset.org/#home) and annotations:
+
+This is required if you would like to run the accuracy test,
+or the throughput and latency benchmark with real data.
+
+The [TensorFlow models](https://github.com/tensorflow/models) repo will be used for
+converting the coco dataset to the TF records format.
+```
+$ mkdir val
+$ cd val
+$ wget http://images.cocodataset.org/zips/val2017.zip
+$ unzip val2017.zip
+$ cd ..
+```
+
+Continue with the instructions below to generate the
+TF record file.
+```
+$ mkdir annotations
+$ cd annotations
+$ wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip
+$ unzip annotations_trainval2017.zip
+$ cd ..
+```
+
+Since we are only using the validation dataset in this example, we will
+create an empty directory and an empty annotations JSON file to pass as the
+train and test directories in the next step.
+```
+$ mkdir empty_dir
+
+$ cd annotations
+$ echo "{ \"images\": {}, \"categories\": {}}" > empty.json
+$ cd ..
+```
+
+3. Now that you have the raw COCO dataset, we need to convert it to the
+TF records format in order to use it with the inference script. We will
+do this by running the `create_coco_tf_record.py` file in the TensorFlow
+models repo.
+
+Follow the steps below to navigate to the proper directory and point the
+script to the raw COCO dataset files that you have downloaded in step 2.
+The `--output_dir` is the location where the TF record files will be
+located after the script has completed.
+
+```
+# Checkout this git commit in order to use an older version of the conversion script
+$ cd models
+$ git checkout 7a9934df2afdf95be9405b4e9f1f2480d748dc40
+
+$ cd research/object_detection/dataset_tools/
+$ python create_coco_tf_record.py --logtostderr \
+      --train_image_dir="/home//coco/empty_dir" \
+      --val_image_dir="/home//coco/val/val2017" \
+      --test_image_dir="/home//coco/empty_dir" \
+      --train_annotations_file="/home//coco/annotations/empty.json" \
+      --val_annotations_file="/home//coco/annotations/instances_val2017.json" \
+      --testdev_annotations_file="/home//coco/annotations/empty.json" \
+      --output_dir="/home//coco/output"
+
+$ ll /home/myuser/coco/output
+total 1598276
+-rw-rw-r--. 1 0 Nov 2 21:46 coco_testdev.record
+-rw-rw-r--. 1 0 Nov 2 21:46 coco_train.record
+-rw-rw-r--. 1 818336740 Nov 2 21:46 coco_val.record
+```
+
+4. Download the pretrained model:
+```
+$ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/ssdvgg16_fp32_pretrained_model.pb
+```
+
+5. Clone the [intelai/models](https://github.com/intelai/models) repo
+and then run the benchmarking scripts for either throughput
+and latency or accuracy.
+```
+$ git clone git@github.com:IntelAI/models.git
+$ cd models/benchmarks
+```
+
+* Run benchmarking for throughput and latency, where the `--model-source-dir` is the model source directory from step 1
+and the `--in-graph` is the pretrained model graph from step 4.
+If you specify the `--data-location`, which is the path to the TF record file that you generated in step 3,
+the benchmark will run with real data; otherwise, dummy data will be used:
+```
+$ cd /home//models/benchmarks
+
+$ python launch_benchmark.py \
+    --data-location /home//coco/output \
+    --in-graph /home//ssdvgg16_fp32_pretrained_model.pb \
+    --model-source-dir /home//SSD.TensorFlow \
+    --model-name ssd_vgg16 \
+    --framework tensorflow \
+    --precision fp32 \
+    --mode inference \
+    --docker-image intelaipg/intel-optimized-tensorflow:nightly-master-devel-mkl-py3 \
+    --batch-size 1 \
+    --socket-id 0 \
+    --num-inter-threads 11 \
+    --num-intra-threads 21 \
+    --data-num-inter-threads 21 \
+    --data-num-intra-threads 28 \
+    -- warmup-steps=100 steps=500
+```
+
+* For the accuracy test:
+
+  * Clone the customized [cocoapi repo](https://github.com/waleedka/coco) into
+the model directory `SSD.TensorFlow` from step 1.
+  ```
+  $ git clone https://github.com/waleedka/coco.git
+
+  ```
+  * The `--data-location` is required, which is the path to the TF record file that you generated in step 3.
+  * Copy the annotation file `instances_val2017.json` (from step 3) to the dataset directory `/home//coco/output`.
+  * Use the `--accuracy-only` flag:
+```
+python launch_benchmark.py \
+    --model-name ssd_vgg16 \
+    --mode inference \
+    --precision fp32 \
+    --framework tensorflow \
+    --docker-image intelaipg/intel-optimized-tensorflow:nightly-master-devel-mkl-py3 \
+    --model-source-dir /home//SSD.TensorFlow \
+    --data-location /home//coco/output \
+    --in-graph /home//ssdvgg16_fp32_pretrained_model.pb \
+    --accuracy-only \
+    --batch-size 1
+```
+
+>Notes:
+>* For the throughput and latency benchmark, we recommend the provided values for the arguments: `--num-inter-threads=11`, `--num-intra-threads=21`, `--data-num-inter-threads=21`,
+ `--data-num-intra-threads=28` for optimized performance on a 28-core Cascade Lake (CLX) machine.
+
+>* The SSD-VGG16 model accuracy test works only with `Python3` based docker images.
+
+>* The `--verbose` or `--output-dir` flag can be added to any of the above commands
+to get additional debug output or change the default output location.
+
+6. The log file is saved to the value of `--output-dir`.
+
+Below is a sample log file tail when running throughput and latency benchmarking;
+the following results are based on a 28-core CLX system with hyper-threading enabled:
+
+```
+Batch size = 1
+Throughput: 15.662 images/sec
+Latency: 63.848 ms
+Ran inference with batch size 1
+Log location outside container: {--output-dir value}/benchmark_ssd_vgg16_inference_fp32_20190417_232130.log
+```
+
+Below is a sample log file tail when testing accuracy:
+
+```
+ Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.236
+ Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.391
+ Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.248
+ Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.058
+ Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.264
+ Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.399
+ Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.227
+ Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.334
+ Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.358
+ Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.091
+ Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.423
+ Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.564
+```
diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/__init__.py b/benchmarks/object_detection/tensorflow/ssd_vgg16/__init__.py
new file mode 100644
index 000000000..d9c4123de
--- /dev/null
+++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/__init__.py
@@ -0,0 +1,19 @@
+#
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: EPL-2.0
+#
diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/__init__.py b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/__init__.py
new file mode 100644
index 000000000..d9c4123de
--- /dev/null
+++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/__init__.py
@@ -0,0 +1,19 @@
+#
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/config.json b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/config.json new file mode 100644 index 000000000..14d129748 --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/config.json @@ -0,0 +1,6 @@ +{ + "optimization_parameters": { + "KMP_SETTINGS": 1, + "TF_ENABLE_WINOGRAD_NONFUSED": 1 + } +} diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/fp32/__init__.py b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/fp32/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/fp32/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/fp32/model_init.py b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/fp32/model_init.py new file mode 100644 index 000000000..5698700f4 --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/fp32/model_init.py @@ -0,0 +1,28 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +from object_detection.tensorflow.ssd_vgg16.inference.ssd_vgg16_model_init import SSDVGG16ModelInitializer + + +class ModelInitializer(SSDVGG16ModelInitializer): + """Model initializer for SSD-VGG16 FP32 inference""" + + def __init__(self, args, custom_args=[], platform_util=None): + super(ModelInitializer, self).__init__(args, custom_args, platform_util) diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/int8/__init__.py b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/int8/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/int8/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/int8/model_init.py b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/int8/model_init.py new file mode 100644 index 000000000..01d1822ba --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/int8/model_init.py @@ -0,0 +1,28 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +from object_detection.tensorflow.ssd_vgg16.inference.ssd_vgg16_model_init import SSDVGG16ModelInitializer + + +class ModelInitializer(SSDVGG16ModelInitializer): + """Model initializer for SSD-VGG16 Int8 inference""" + + def __init__(self, args, custom_args=[], platform_util=None): + super(ModelInitializer, self).__init__(args, custom_args, platform_util) diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/ssd_vgg16_model_init.py b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/ssd_vgg16_model_init.py new file mode 100644 index 000000000..c54994170 --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/ssd_vgg16_model_init.py @@ -0,0 +1,107 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+# SPDX-License-Identifier: EPL-2.0
+#
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import os
+import sys
+import argparse
+
+from common.base_model_init import BaseModelInitializer, set_env_var
+
+
+class SSDVGG16ModelInitializer(BaseModelInitializer):
+    """Common model initializer for SSD-VGG16 inference"""
+
+    def run_inference_sanity_checks(self, args, custom_args):
+        if not args.input_graph:
+            sys.exit("Please provide a path to the frozen graph file"
+                     " via the '--in-graph' flag.")
+        if not args.data_location and self.args.accuracy_only:
+            sys.exit("For the accuracy test, please provide a path to the data directory via the "
+                     "'--data-location' flag.")
+        if args.batch_size != -1 and args.batch_size != 1:
+            sys.exit("SSD-VGG16 inference supports 'batch-size=1' " +
+                     "only, please modify via the '--batch-size' flag.")
+
+    def __init__(self, args, custom_args, platform_util):
+        super(SSDVGG16ModelInitializer, self).__init__(args, custom_args, platform_util)
+
+        self.parse_custom_args()
+        self.run_inference_sanity_checks(self.args, self.custom_args)
+
+        # Set KMP env vars, if they haven't already been set
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path)
+
+        self.set_num_inter_intra_threads(num_inter_threads=self.args.num_inter_threads,
+                                         num_intra_threads=self.args.num_intra_threads)
+
+        omp_num_threads = str(int(platform_util.num_cores_per_socket / 2))\
+            if self.args.precision == "int8" else platform_util.num_cores_per_socket
+
+        set_env_var("OMP_NUM_THREADS", omp_num_threads
+                    if self.args.num_cores == -1 else self.args.num_cores)
+
+        script_path = os.path.join(
+            self.args.intelai_models, self.args.mode, "eval_ssd.py")
+
+        self.run_cmd = self.get_command_prefix(
+            self.args.socket_id) + "{} {}".format(self.python_exe, script_path)
+
+        self.run_cmd += " --input-graph={} " \
+                        " --num-inter-threads={} --num-intra-threads={} ". \
+            format(self.args.input_graph, self.args.num_inter_threads,
+                   self.args.num_intra_threads)
+
+        if self.args.data_num_inter_threads:
+            self.run_cmd += " --data-num-inter-threads={} ".format(
+                self.args.data_num_inter_threads)
+
+        if self.args.data_num_intra_threads:
+            self.run_cmd += " --data-num-intra-threads={} ".format(
+                self.args.data_num_intra_threads)
+
+        if self.args.benchmark_only:
+            self.run_cmd += " --warmup-steps={} --steps={} ".
\ + format(self.args.warmup_steps, self.args.steps) + + # if the data location directory is not empty, then include the arg + if self.args.data_location and os.listdir(self.args.data_location): + self.run_cmd += " --data-location={} ".format(self.args.data_location) + + if self.args.accuracy_only: + self.run_cmd += "--accuracy-only " + + def parse_custom_args(self): + if self.custom_args: + parser = argparse.ArgumentParser() + parser.add_argument("--warmup-steps", type=int, default=10, + help="number of warmup steps") + parser.add_argument("--steps", type=int, default=50, + help="number of steps") + + self.args = parser.parse_args(self.custom_args, + namespace=self.args) + + def run(self): + self.run_command(self.run_cmd) diff --git a/models/object_detection/tensorflow/ssd_vgg16/__init__.py b/models/object_detection/tensorflow/ssd_vgg16/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/models/object_detection/tensorflow/ssd_vgg16/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/models/object_detection/tensorflow/ssd_vgg16/inference/__init__.py b/models/object_detection/tensorflow/ssd_vgg16/inference/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/models/object_detection/tensorflow/ssd_vgg16/inference/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/models/object_detection/tensorflow/ssd_vgg16/inference/anchor_manipulator.py b/models/object_detection/tensorflow/ssd_vgg16/inference/anchor_manipulator.py new file mode 100644 index 000000000..f52acdc08 --- /dev/null +++ b/models/object_detection/tensorflow/ssd_vgg16/inference/anchor_manipulator.py @@ -0,0 +1,353 @@ +# Copyright 2018 Changan Wang + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================= + +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# +import math + +import tensorflow as tf +import numpy as np + +from tensorflow.contrib.image.python.ops import image_ops + +def areas(gt_bboxes): + with tf.name_scope('bboxes_areas', values=[gt_bboxes]): + ymin, xmin, ymax, xmax = tf.split(gt_bboxes, 4, axis=1) + return (xmax - xmin) * (ymax - ymin) + +def intersection(gt_bboxes, default_bboxes): + with tf.name_scope('bboxes_intersection', values=[gt_bboxes, default_bboxes]): + # num_anchors x 1 + ymin, xmin, ymax, xmax = tf.split(gt_bboxes, 4, axis=1) + # 1 x num_anchors + gt_ymin, gt_xmin, gt_ymax, gt_xmax = [tf.transpose(b, perm=[1, 0]) for b in tf.split(default_bboxes, 4, axis=1)] + # broadcast here to generate the full matrix + int_ymin = tf.maximum(ymin, gt_ymin) + int_xmin = tf.maximum(xmin, gt_xmin) + int_ymax = tf.minimum(ymax, gt_ymax) + int_xmax = tf.minimum(xmax, gt_xmax) + h = tf.maximum(int_ymax - int_ymin, 0.) + w = tf.maximum(int_xmax - int_xmin, 0.) + + return h * w +def iou_matrix(gt_bboxes, default_bboxes): + with tf.name_scope('iou_matrix', values = [gt_bboxes, default_bboxes]): + inter_vol = intersection(gt_bboxes, default_bboxes) + # broadcast + union_vol = areas(gt_bboxes) + tf.transpose(areas(default_bboxes), perm=[1, 0]) - inter_vol + + return tf.where(tf.equal(union_vol, 0.0), + tf.zeros_like(inter_vol), tf.truediv(inter_vol, union_vol)) + +def do_dual_max_match(overlap_matrix, low_thres, high_thres, ignore_between=True, gt_max_first=True): + ''' + overlap_matrix: num_gt * num_anchors + ''' + with tf.name_scope('dual_max_match', values=[overlap_matrix]): + # first match from anchors' side + anchors_to_gt = tf.argmax(overlap_matrix, axis=0) + # the matching degree + match_values = tf.reduce_max(overlap_matrix, axis=0) + + #positive_mask = tf.greater(match_values, high_thres) + less_mask = tf.less(match_values, low_thres) + between_mask = tf.logical_and(tf.less(match_values, high_thres), tf.greater_equal(match_values, low_thres)) + negative_mask = less_mask if ignore_between else between_mask + ignore_mask = between_mask if ignore_between else less_mask + # fill all negative positions with -1, all ignore positions is -2 + match_indices = tf.where(negative_mask, -1 * tf.ones_like(anchors_to_gt), anchors_to_gt) + match_indices = tf.where(ignore_mask, -2 * tf.ones_like(match_indices), match_indices) + + # negtive values has no effect in tf.one_hot, that means all zeros along that axis + # so all positive match positions in anchors_to_gt_mask is 1, all others are 0 + anchors_to_gt_mask = tf.one_hot(tf.clip_by_value(match_indices, -1, tf.cast(tf.shape(overlap_matrix)[0], tf.int64)), + tf.shape(overlap_matrix)[0], on_value=1, off_value=0, axis=0, dtype=tf.int32) + # match from ground truth's side + gt_to_anchors = tf.argmax(overlap_matrix, axis=1) + + if gt_max_first: + # the max match 
from ground truth's side has higher priority + left_gt_to_anchors_mask = tf.one_hot(gt_to_anchors, tf.shape(overlap_matrix)[1], on_value=1, off_value=0, axis=1, dtype=tf.int32) + else: + # the max match from anchors' side has higher priority + # use match result from ground truth's side only when the the matching degree from anchors' side is lower than position threshold + left_gt_to_anchors_mask = tf.cast(tf.logical_and(tf.reduce_max(anchors_to_gt_mask, axis=1, keep_dims=True) < 1, + tf.one_hot(gt_to_anchors, tf.shape(overlap_matrix)[1], + on_value=True, off_value=False, axis=1, dtype=tf.bool) + ), tf.int64) + # can not use left_gt_to_anchors_mask here, because there are many ground truthes match to one anchor, we should pick the highest one even when we are merging matching from ground truth side + left_gt_to_anchors_scores = overlap_matrix * tf.to_float(left_gt_to_anchors_mask) + # merge matching results from ground truth's side with the original matching results from anchors' side + # then select all the overlap score of those matching pairs + selected_scores = tf.gather_nd(overlap_matrix, tf.stack([tf.where(tf.reduce_max(left_gt_to_anchors_mask, axis=0) > 0, + tf.argmax(left_gt_to_anchors_scores, axis=0), + anchors_to_gt), + tf.range(tf.cast(tf.shape(overlap_matrix)[1], tf.int64))], axis=1)) + # return the matching results for both foreground anchors and background anchors, also with overlap scores + return tf.where(tf.reduce_max(left_gt_to_anchors_mask, axis=0) > 0, + tf.argmax(left_gt_to_anchors_scores, axis=0), + match_indices), selected_scores + +# def save_anchors(bboxes, labels, anchors_point): +# if not hasattr(save_image_with_bbox, "counter"): +# save_image_with_bbox.counter = 0 # it doesn't exist yet, so initialize it +# save_image_with_bbox.counter += 1 + +# np.save('./debug/bboxes_{}.npy'.format(save_image_with_bbox.counter), np.copy(bboxes)) +# np.save('./debug/labels_{}.npy'.format(save_image_with_bbox.counter), np.copy(labels)) +# np.save('./debug/anchors_{}.npy'.format(save_image_with_bbox.counter), np.copy(anchors_point)) +# return save_image_with_bbox.counter + +class AnchorEncoder(object): + def __init__(self, allowed_borders, positive_threshold, ignore_threshold, prior_scaling, clip=False): + super(AnchorEncoder, self).__init__() + self._all_anchors = None + self._allowed_borders = allowed_borders + self._positive_threshold = positive_threshold + self._ignore_threshold = ignore_threshold + self._prior_scaling = prior_scaling + self._clip = clip + + def center2point(self, center_y, center_x, height, width): + return center_y - height / 2., center_x - width / 2., center_y + height / 2., center_x + width / 2., + + def point2center(self, ymin, xmin, ymax, xmax): + height, width = (ymax - ymin), (xmax - xmin) + return ymin + height / 2., xmin + width / 2., height, width + + def encode_all_anchors(self, labels, bboxes, all_anchors, all_num_anchors_depth, all_num_anchors_spatial, debug=False): + # y, x, h, w are all in range [0, 1] relative to the original image size + # shape info: + # y_on_image, x_on_image: layers_shapes[0] * layers_shapes[1] + # h_on_image, w_on_image: num_anchors + assert (len(all_num_anchors_depth)==len(all_num_anchors_spatial)) and (len(all_num_anchors_depth)==len(all_anchors)), 'inconsist num layers for anchors.' 
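+        # Encoding overview: flatten each layer's anchor grid into corner
+        # coordinates, optionally clip to [0, 1], and mask out anchors that
+        # fall outside the allowed borders. iou_matrix() then scores every
+        # (ground truth, anchor) pair, do_dual_max_match() assigns each anchor
+        # a ground-truth index (-1 = negative, -2 = ignored), and the matched
+        # boxes are converted to center/size offsets scaled by prior_scaling
+        # to form the regression targets.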
+ with tf.name_scope('encode_all_anchors'): + num_layers = len(all_num_anchors_depth) + list_anchors_ymin = [] + list_anchors_xmin = [] + list_anchors_ymax = [] + list_anchors_xmax = [] + tiled_allowed_borders = [] + for ind, anchor in enumerate(all_anchors): + anchors_ymin_, anchors_xmin_, anchors_ymax_, anchors_xmax_ = self.center2point(anchor[0], anchor[1], anchor[2], anchor[3]) + + list_anchors_ymin.append(tf.reshape(anchors_ymin_, [-1])) + list_anchors_xmin.append(tf.reshape(anchors_xmin_, [-1])) + list_anchors_ymax.append(tf.reshape(anchors_ymax_, [-1])) + list_anchors_xmax.append(tf.reshape(anchors_xmax_, [-1])) + + tiled_allowed_borders.extend([self._allowed_borders[ind]] * all_num_anchors_depth[ind] * all_num_anchors_spatial[ind]) + + anchors_ymin = tf.concat(list_anchors_ymin, 0, name='concat_ymin') + anchors_xmin = tf.concat(list_anchors_xmin, 0, name='concat_xmin') + anchors_ymax = tf.concat(list_anchors_ymax, 0, name='concat_ymax') + anchors_xmax = tf.concat(list_anchors_xmax, 0, name='concat_xmax') + + if self._clip: + anchors_ymin = tf.clip_by_value(anchors_ymin, 0., 1.) + anchors_xmin = tf.clip_by_value(anchors_xmin, 0., 1.) + anchors_ymax = tf.clip_by_value(anchors_ymax, 0., 1.) + anchors_xmax = tf.clip_by_value(anchors_xmax, 0., 1.) + + anchor_allowed_borders = tf.stack(tiled_allowed_borders, 0, name='concat_allowed_borders') + + inside_mask = tf.logical_and(tf.logical_and(anchors_ymin > -anchor_allowed_borders * 1., + anchors_xmin > -anchor_allowed_borders * 1.), + tf.logical_and(anchors_ymax < (1. + anchor_allowed_borders * 1.), + anchors_xmax < (1. + anchor_allowed_borders * 1.))) + + anchors_point = tf.stack([anchors_ymin, anchors_xmin, anchors_ymax, anchors_xmax], axis=-1) + + # save_anchors_op = tf.py_func(save_anchors, + # [bboxes, + # labels, + # anchors_point], + # tf.int64, stateful=True) + + # with tf.control_dependencies([save_anchors_op]): + overlap_matrix = iou_matrix(bboxes, anchors_point) * tf.cast(tf.expand_dims(inside_mask, 0), tf.float32) + matched_gt, gt_scores = do_dual_max_match(overlap_matrix, self._ignore_threshold, self._positive_threshold) + # get all positive matching positions + matched_gt_mask = matched_gt > -1 + matched_indices = tf.clip_by_value(matched_gt, 0, tf.int64.max) + # the labels here maybe chaos at those non-positive positions + gt_labels = tf.gather(labels, matched_indices) + # filter the invalid labels + gt_labels = gt_labels * tf.cast(matched_gt_mask, tf.int64) + # set those ignored positions to -1 + gt_labels = gt_labels + (-1 * tf.cast(matched_gt < -1, tf.int64)) + + gt_ymin, gt_xmin, gt_ymax, gt_xmax = tf.unstack(tf.gather(bboxes, matched_indices), 4, axis=-1) + + # transform to center / size. + gt_cy, gt_cx, gt_h, gt_w = self.point2center(gt_ymin, gt_xmin, gt_ymax, gt_xmax) + anchor_cy, anchor_cx, anchor_h, anchor_w = self.point2center(anchors_ymin, anchors_xmin, anchors_ymax, anchors_xmax) + # encode features. 
+            # the prior_scaling factors (effectively 10 for the centers and 5 for the sizes)
+            # balance the regression loss between the center and width/height terms
+            gt_cy = (gt_cy - anchor_cy) / anchor_h / self._prior_scaling[0]
+            gt_cx = (gt_cx - anchor_cx) / anchor_w / self._prior_scaling[1]
+            gt_h = tf.log(gt_h / anchor_h) / self._prior_scaling[2]
+            gt_w = tf.log(gt_w / anchor_w) / self._prior_scaling[3]
+            # now gt_localizations is our regression target, but it may still be
+            # meaningless at the non-positive positions
+            if debug:
+                gt_targets = tf.stack([anchors_ymin, anchors_xmin, anchors_ymax, anchors_xmax], axis=-1)
+            else:
+                gt_targets = tf.stack([gt_cy, gt_cx, gt_h, gt_w], axis=-1)
+            # set all targets of non-positive positions to 0
+            gt_targets = tf.expand_dims(tf.cast(matched_gt_mask, tf.float32), -1) * gt_targets
+            self._all_anchors = (anchor_cy, anchor_cx, anchor_h, anchor_w)
+            return gt_targets, gt_labels, gt_scores
+
+    # return a list, of which each is:
+    #   shape: [feature_h, feature_w, num_anchors, 4]
+    #   order: ymin, xmin, ymax, xmax
+    def decode_all_anchors(self, pred_location, num_anchors_per_layer):
+        assert self._all_anchors is not None, 'no anchors to decode.'
+        with tf.name_scope('decode_all_anchors', values=[pred_location]):
+            anchor_cy, anchor_cx, anchor_h, anchor_w = self._all_anchors
+
+            pred_h = tf.exp(pred_location[:, -2] * self._prior_scaling[2]) * anchor_h
+            pred_w = tf.exp(pred_location[:, -1] * self._prior_scaling[3]) * anchor_w
+            pred_cy = pred_location[:, 0] * self._prior_scaling[0] * anchor_h + anchor_cy
+            pred_cx = pred_location[:, 1] * self._prior_scaling[1] * anchor_w + anchor_cx
+
+            return tf.split(tf.stack(self.center2point(pred_cy, pred_cx, pred_h, pred_w), axis=-1), num_anchors_per_layer, axis=0)
+
+    def ext_decode_all_anchors(self, pred_location, all_anchors, all_num_anchors_depth, all_num_anchors_spatial):
+        assert (len(all_num_anchors_depth)==len(all_num_anchors_spatial)) and (len(all_num_anchors_depth)==len(all_anchors)), 'inconsist num layers for anchors.'
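+        # Decoding overview: rebuild the flattened anchor grid from all_anchors
+        # (instead of relying on the state cached by encode_all_anchors), then
+        # invert the encoding above: box centers are the anchor centers shifted
+        # by pred_location[:, :2] * prior_scaling * anchor size, and box sizes
+        # are the anchor sizes scaled by exp(pred_location[:, -2:] * prior_scaling).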
+ with tf.name_scope('ext_decode_all_anchors', values=[pred_location]): + num_anchors_per_layer = [] + for ind in range(len(all_anchors)): + num_anchors_per_layer.append(all_num_anchors_depth[ind] * all_num_anchors_spatial[ind]) + + num_layers = len(all_num_anchors_depth) + list_anchors_ymin = [] + list_anchors_xmin = [] + list_anchors_ymax = [] + list_anchors_xmax = [] + tiled_allowed_borders = [] + for ind, anchor in enumerate(all_anchors): + anchors_ymin_, anchors_xmin_, anchors_ymax_, anchors_xmax_ = self.center2point(anchor[0], anchor[1], anchor[2], anchor[3]) + + list_anchors_ymin.append(tf.reshape(anchors_ymin_, [-1])) + list_anchors_xmin.append(tf.reshape(anchors_xmin_, [-1])) + list_anchors_ymax.append(tf.reshape(anchors_ymax_, [-1])) + list_anchors_xmax.append(tf.reshape(anchors_xmax_, [-1])) + + anchors_ymin = tf.concat(list_anchors_ymin, 0, name='concat_ymin') + anchors_xmin = tf.concat(list_anchors_xmin, 0, name='concat_xmin') + anchors_ymax = tf.concat(list_anchors_ymax, 0, name='concat_ymax') + anchors_xmax = tf.concat(list_anchors_xmax, 0, name='concat_xmax') + + anchor_cy, anchor_cx, anchor_h, anchor_w = self.point2center(anchors_ymin, anchors_xmin, anchors_ymax, anchors_xmax) + + pred_h = tf.exp(pred_location[:,-2] * self._prior_scaling[2]) * anchor_h + pred_w = tf.exp(pred_location[:, -1] * self._prior_scaling[3]) * anchor_w + pred_cy = pred_location[:, 0] * self._prior_scaling[0] * anchor_h + anchor_cy + pred_cx = pred_location[:, 1] * self._prior_scaling[1] * anchor_w + anchor_cx + + return tf.split(tf.stack(self.center2point(pred_cy, pred_cx, pred_h, pred_w), axis=-1), num_anchors_per_layer, axis=0) + +class AnchorCreator(object): + def __init__(self, img_shape, layers_shapes, anchor_scales, extra_anchor_scales, anchor_ratios, layer_steps): + super(AnchorCreator, self).__init__() + # img_shape -> (height, width) + self._img_shape = img_shape + self._layers_shapes = layers_shapes + self._anchor_scales = anchor_scales + self._extra_anchor_scales = extra_anchor_scales + self._anchor_ratios = anchor_ratios + self._layer_steps = layer_steps + self._anchor_offset = [0.5] * len(self._layers_shapes) + + def get_layer_anchors(self, layer_shape, anchor_scale, extra_anchor_scale, anchor_ratio, layer_step, offset = 0.5): + ''' assume layer_shape[0] = 6, layer_shape[1] = 5 + x_on_layer = [[0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4]] + y_on_layer = [[0, 0, 0, 0, 0], + [1, 1, 1, 1, 1], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, 3], + [4, 4, 4, 4, 4], + [5, 5, 5, 5, 5]] + ''' + with tf.name_scope('get_layer_anchors'): + x_on_layer, y_on_layer = tf.meshgrid(tf.range(layer_shape[1]), tf.range(layer_shape[0])) + + y_on_image = (tf.cast(y_on_layer, tf.float32) + offset) * layer_step / self._img_shape[0] + x_on_image = (tf.cast(x_on_layer, tf.float32) + offset) * layer_step / self._img_shape[1] + + num_anchors_along_depth = len(anchor_scale) * len(anchor_ratio) + len(extra_anchor_scale) + num_anchors_along_spatial = layer_shape[1] * layer_shape[0] + + list_h_on_image = [] + list_w_on_image = [] + + global_index = 0 + # for square anchors + for _, scale in enumerate(extra_anchor_scale): + list_h_on_image.append(scale) + list_w_on_image.append(scale) + global_index += 1 + # for other aspect ratio anchors + for scale_index, scale in enumerate(anchor_scale): + for ratio_index, ratio in enumerate(anchor_ratio): + list_h_on_image.append(scale / math.sqrt(ratio)) + list_w_on_image.append(scale * math.sqrt(ratio)) + global_index += 1 + # 
shape info: + # y_on_image, x_on_image: layers_shapes[0] * layers_shapes[1] + # h_on_image, w_on_image: num_anchors_along_depth + return tf.expand_dims(y_on_image, axis=-1), tf.expand_dims(x_on_image, axis=-1), \ + tf.constant(list_h_on_image, dtype=tf.float32), \ + tf.constant(list_w_on_image, dtype=tf.float32), num_anchors_along_depth, num_anchors_along_spatial + + def get_all_anchors(self): + all_anchors = [] + all_num_anchors_depth = [] + all_num_anchors_spatial = [] + for layer_index, layer_shape in enumerate(self._layers_shapes): + anchors_this_layer = self.get_layer_anchors(layer_shape, + self._anchor_scales[layer_index], + self._extra_anchor_scales[layer_index], + self._anchor_ratios[layer_index], + self._layer_steps[layer_index], + self._anchor_offset[layer_index]) + all_anchors.append(anchors_this_layer[:-2]) + all_num_anchors_depth.append(anchors_this_layer[-2]) + all_num_anchors_spatial.append(anchors_this_layer[-1]) + return all_anchors, all_num_anchors_depth, all_num_anchors_spatial + diff --git a/models/object_detection/tensorflow/ssd_vgg16/inference/eval_ssd.py b/models/object_detection/tensorflow/ssd_vgg16/inference/eval_ssd.py new file mode 100644 index 000000000..fdbb4a44d --- /dev/null +++ b/models/object_detection/tensorflow/ssd_vgg16/inference/eval_ssd.py @@ -0,0 +1,316 @@ +# Copyright 2018 Changan Wang + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: EPL-2.0 +# +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import numpy as np +import time +from argparse import ArgumentParser +import sys +from google.protobuf import text_format +import tensorflow as tf + +from dataset import dataset_common +from preprocessing import ssd_preprocessing +import anchor_manipulator + +SSD_VGG16_IMAGE_SIZE = 300 +NUM_CLASSES = 81 +NEGATIVE_RATIO = 1.0 +SELECT_THRESHOLD = 0.1 +MATCH_THRESHOLD = 0.5 +NEG_THRESHOLD = 0.5 +DATA_FORMAT = 'channels_last' +NUM_READERS = 10 +NUM_PREPROCESSING_THREADS = 28 + + +def input_fn(dataset_pattern='val-*', batch_size=1, data_location=None): + out_shape = [SSD_VGG16_IMAGE_SIZE] * 2 + anchor_creator = anchor_manipulator.AnchorCreator(out_shape, + layers_shapes=[(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), + (1, 1)], + anchor_scales=[(0.1,), (0.2,), (0.375,), (0.55,), (0.725,), + (0.9,)], + extra_anchor_scales=[(0.1414,), (0.2739,), (0.4541,), (0.6315,), + (0.8078,), (0.9836,)], + anchor_ratios=[(1., 2., .5), (1., 2., 3., .5, 0.3333), + (1., 2., 3., .5, 0.3333), (1., 2., 3., .5, 0.3333), + (1., 2., .5), (1., 2., .5)], + layer_steps=[8, 16, 32, 64, 100, 300]) + all_anchors, all_num_anchors_depth, all_num_anchors_spatial = anchor_creator.get_all_anchors() + + num_anchors_per_layer = [] + for ind in range(len(all_anchors)): + num_anchors_per_layer.append(all_num_anchors_depth[ind] * all_num_anchors_spatial[ind]) + + anchor_encoder_decoder = anchor_manipulator.AnchorEncoder(allowed_borders=[1.0] * 6, + positive_threshold=MATCH_THRESHOLD, + ignore_threshold=NEG_THRESHOLD, + prior_scaling=[0.1, 0.1, 0.2, 0.2]) + + image_preprocessing_fn = lambda image_, labels_, bboxes_: ssd_preprocessing.preprocess_image(image_, labels_, + bboxes_, out_shape, + is_training=False, + data_format=DATA_FORMAT, + output_rgb=False) + anchor_encoder_fn = lambda glabels_, gbboxes_: anchor_encoder_decoder.encode_all_anchors(glabels_, gbboxes_, + all_anchors, + all_num_anchors_depth, + all_num_anchors_spatial) + + image, filename, shape, loc_targets, cls_targets, match_scores = \ + dataset_common.slim_get_batch(NUM_CLASSES, + batch_size, + 'val', + os.path.join( + data_location, + dataset_pattern), + NUM_READERS, + NUM_PREPROCESSING_THREADS, + image_preprocessing_fn, + anchor_encoder_fn, + num_epochs=1, + is_training=False) + return image, filename, shape + + +class EvaluateSSDModel(): + def __init__(self): + + arg_parser = ArgumentParser(description='Parse args') + + arg_parser.add_argument('-b', "--batch-size", + help="Specify the batch size. 
If this " \ + "parameter is not specified or is -1, the " \ + "largest ideal batch size for the model will " \ + "be used.", + dest="batch_size", type=int, default=1) + + arg_parser.add_argument('-e', "--num-inter-threads", + help='The number of inter-thread.', + dest='num_inter_threads', type=int, default=0) + + arg_parser.add_argument('-a', "--num-intra-threads", + help='The number of intra-thread.', + dest='num_intra_threads', type=int, default=0) + + arg_parser.add_argument('--data-num-inter-threads', dest='data_num_inter_threads', + help='number threads across operators', + type=int, default=21) + + arg_parser.add_argument('--data-num-intra-threads', dest='data_num_intra_threads', + help='number threads for data layer operator', + type=int, default=28) + + arg_parser.add_argument('--kmp-blocktime', dest='kmp_blocktime', + help='number of kmp blocktime', + type=int, default=1) + + arg_parser.add_argument('-g', "--input-graph", + help='Specify the input graph for the transform tool', + dest='input_graph') + + arg_parser.add_argument('-d', "--data-location", + help='Specify the location of the data. ' + 'If this parameter is not specified, ' + 'the benchmark will use random/dummy data.', + dest="data_location", default=None) + + arg_parser.add_argument('-r', "--accuracy-only", + help='For accuracy measurement only.', + dest='accuracy_only', action='store_true') + + arg_parser.add_argument("--warmup-steps", type=int, default=10, + help="number of warmup steps") + + arg_parser.add_argument("--steps", type=int, default=50, + help="number of steps") + + self.args = arg_parser.parse_args() + + os.environ["KMP_BLOCKTIME"] = str(self.args.kmp_blocktime) + + def eval(self): + + data_config = tf.ConfigProto() + data_config.inter_op_parallelism_threads = self.args.data_num_inter_threads + data_config.intra_op_parallelism_threads = self.args.data_num_intra_threads + data_config.use_per_session_threads = 1 + + infer_config = tf.ConfigProto() + infer_config.inter_op_parallelism_threads = self.args.num_inter_threads # self.args.num_inter_threads + infer_config.intra_op_parallelism_threads = self.args.num_intra_threads # self.args.num_intra_threads + infer_config.use_per_session_threads = 1 + + data_graph = tf.Graph() + with data_graph.as_default(): + if self.args.data_location: # real data + image, filename, shape = \ + input_fn(dataset_pattern='val-*', batch_size=self.args.batch_size, data_location=self.args.data_location) + else: # dummy data + input_shape = [self.args.batch_size, SSD_VGG16_IMAGE_SIZE, SSD_VGG16_IMAGE_SIZE, 3] + image = tf.random.uniform(input_shape, -123.68, 151.06, dtype=tf.float32, name='synthetic_images') + + infer_graph = tf.Graph() + model_file = self.args.input_graph + with infer_graph.as_default(): + graph_def = tf.GraphDef() + file_ext = os.path.splitext(model_file)[1] + with open(model_file, "rb") as f: + if file_ext == '.pbtxt': + text_format.Merge(f.read(), graph_def) + else: + graph_def.ParseFromString(f.read()) + tf.import_graph_def(graph_def, name='') + + # Define input and output Tensors for inference graph + output_names = ["ExpandDims"] + for i in range(1, 160): + output_names.append("ExpandDims_" + str(i)) + + input_operation = infer_graph.get_operation_by_name("input") + output_operations = [] + for name in output_names: + output_operations.append(infer_graph.get_operation_by_name(name).outputs[0]) + + infer_sess = tf.Session(graph=infer_graph, config=infer_config) + + if not self.args.accuracy_only: # benchmark + step = 0 + total_steps = self.args.warmup_steps + 
self.args.steps + + total_images = 0 + total_duration = 0 + + if not self.args.data_location: # inference with dummy data + print("Inference with dummy data") + data_sess = tf.Session(graph=data_graph, config=data_config) + + while step < total_steps: + step += 1 + image_np = data_sess.run(image) + start_time = time.time() + + infer_sess.run(output_operations, {input_operation.outputs[0]: image_np}) + duration = time.time() - start_time + + if step > self.args.warmup_steps: + total_duration += duration + total_images += self.args.batch_size + print('Iteration %d: %.6f sec' % (step, duration)) + sys.stdout.flush() + + else: # benchmark with real data + print("Inference with real data") + with data_graph.as_default(): + with tf.train.MonitoredTrainingSession(config=data_config) as data_sess: + while not data_sess.should_stop() and step < total_steps: + step += 1 + start_time = time.time() + image_np, _, _ = data_sess.run([image, filename, shape]) + infer_sess.run(output_operations, {input_operation.outputs[0]: image_np}) + duration = time.time() - start_time + + if step > self.args.warmup_steps: + total_duration += duration + total_images += self.args.batch_size + print('Iteration %d: %.6f sec' % (step, duration)) + sys.stdout.flush() + + print('Batch size = %d' % self.args.batch_size) + print('Throughput: %.3f images/sec' % (total_images / total_duration)) + if (self.args.batch_size == 1): + latency = (total_duration / total_images) * 1000 + print('Latency: %.3f ms' % (latency)) + + else: # accuracy only + results = [] + filenames = [] + shapes = [] + total_processed_images = 0 + with data_graph.as_default(): + with tf.train.MonitoredTrainingSession(config=data_config) as data_sess: + while not data_sess.should_stop(): + image_np, filename_np, shape_np = data_sess.run([image, filename, shape]) + total_processed_images += self.args.batch_size + predict = infer_sess.run(output_operations, {input_operation.outputs[0]: image_np}) + if (total_processed_images % 30 == 0): + print("Predicting results for {} images...".format(total_processed_images)) + sys.stdout.flush() + results.append(predict) + filenames.append(filename_np[0]) + shapes.append(shape_np[0]) + + log_dir = os.path.join('./', 'logs') + # if it doesn't exist, create. + if not os.path.exists(log_dir): + os.makedirs(log_dir) + for class_ind in range(1, NUM_CLASSES): + with open(os.path.join(log_dir, 'results_{}.txt'.format(class_ind)), 'wt') as f: + for image_ind, pred in enumerate(results): + shape = shapes[image_ind] + filename = filenames[image_ind] + # parsing prediction results and calculate bbox + scores = pred[(class_ind * 2) - 2][0] + bboxes = pred[(class_ind * 2) - 1][0] + bboxes[:, 0] = (bboxes[:, 0] * shape[0]).astype(np.int32, copy=False) + 1 + bboxes[:, 1] = (bboxes[:, 1] * shape[1]).astype(np.int32, copy=False) + 1 + bboxes[:, 2] = (bboxes[:, 2] * shape[0]).astype(np.int32, copy=False) + 1 + bboxes[:, 3] = (bboxes[:, 3] * shape[1]).astype(np.int32, copy=False) + 1 + + valid_mask = np.logical_and((bboxes[:, 2] - bboxes[:, 0] > 0), + (bboxes[:, 3] - bboxes[:, 1] > 0)) + + for det_ind in range(valid_mask.shape[0]): + if not valid_mask[det_ind]: + continue + f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'. 
+
+            coco_eval = os.path.join(
+                os.path.dirname(os.path.realpath(__file__)), "validate_ssd_vgg16.py")
+            cmd = "python " + coco_eval
+            cmd += " --detections_path ./logs"
+            cmd += " --annotations_file {}/instances_val2017.json".format(self.args.data_location)
+            os.system(cmd)
+
+if __name__ == "__main__":
+    obj = EvaluateSSDModel()
+    obj.eval()
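The hand-off between the two scripts is a plain text format: each line in the per-class results files is `<image_id> <score> <xmin> <ymin> <xmax> <ymax>`, with the model's normalized `[ymin, xmin, ymax, xmax]` boxes scaled to pixel coordinates first, as in the loop above. A hedged, self-contained sketch of that scaling step (the `scale_boxes` helper name and the sample values are illustrative only):

```
import numpy as np

def scale_boxes(normalized_boxes, height, width):
    # Boxes arrive as normalized [ymin, xmin, ymax, xmax]; scale to
    # 1-based pixel coordinates, as the accuracy path above does.
    boxes = normalized_boxes.copy()
    boxes[:, 0] = (boxes[:, 0] * height).astype(np.int32) + 1
    boxes[:, 1] = (boxes[:, 1] * width).astype(np.int32) + 1
    boxes[:, 2] = (boxes[:, 2] * height).astype(np.int32) + 1
    boxes[:, 3] = (boxes[:, 3] * width).astype(np.int32) + 1
    return boxes

# A 0.5 x 0.5 box on a 600x800 image becomes [1, 1, 301, 401]
print(scale_boxes(np.array([[0.0, 0.0, 0.5, 0.5]]), 600, 800))
```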
diff --git a/models/object_detection/tensorflow/ssd_vgg16/inference/validate_ssd_vgg16.py b/models/object_detection/tensorflow/ssd_vgg16/inference/validate_ssd_vgg16.py
new file mode 100644
index 000000000..c580fc022
--- /dev/null
+++ b/models/object_detection/tensorflow/ssd_vgg16/inference/validate_ssd_vgg16.py
@@ -0,0 +1,111 @@
+#
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: EPL-2.0
+#
+import argparse
+import os
+import json
+from pycocotools.coco import COCO
+from pycocotools.cocoeval import COCOeval
+
+
+def convert_detection(label, detection):
+
+    ID_INDEX = 0
+    SCORE_INDEX = 1
+    XMIN_INDEX = 2
+    YMIN_INDEX = 3
+    XMAX_INDEX = 4
+    YMAX_INDEX = 5
+    LABEL_MAP = {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11,
+                 12: 13, 13: 14, 14: 15, 15: 16, 16: 17, 17: 18, 18: 19, 19: 20, 20: 21, 21: 22,
+                 22: 23, 23: 24, 24: 25, 25: 27, 26: 28, 27: 31, 28: 32, 29: 33, 30: 34, 31: 35,
+                 32: 36, 33: 37, 34: 38, 35: 39, 36: 40, 37: 41, 38: 42, 39: 43, 40: 44, 41: 46,
+                 42: 47, 43: 48, 44: 49, 45: 50, 46: 51, 47: 52, 48: 53, 49: 54, 50: 55, 51: 56,
+                 52: 57, 53: 58, 54: 59, 55: 60, 56: 61, 57: 62, 58: 63, 59: 64, 60: 65, 61: 67,
+                 62: 70, 63: 72, 64: 73, 65: 74, 66: 75, 67: 76, 68: 77, 69: 78, 70: 79, 71: 80,
+                 72: 81, 73: 82, 74: 84, 75: 85, 76: 86, 77: 87, 78: 88, 79: 89, 80: 90}
+
+    # Extract image ID and bounding box score from detection
+    image_id = int(detection[ID_INDEX])
+    score = float(detection[SCORE_INDEX])
+
+    # Convert bounding box coordinates [xmin, ymin, xmax, ymax] to [x, y, width, height]
+    x = float(detection[XMIN_INDEX])
+    y = float(detection[YMIN_INDEX])
+    width = float(detection[XMAX_INDEX]) - x
+    height = float(detection[YMAX_INDEX]) - y
+    bbox = [x, y, width, height]
+
+    return {'category_id': LABEL_MAP[label], 'image_id': image_id, 'score': score, 'bbox': bbox}
+
+
+def generate_results_file(detections_path, results_filename):
+
+    DETECTIONS_EXTENSION = '.txt'
+
+    # Retrieve detections filenames
+    filenames = [filename for filename in os.listdir(detections_path) if filename.endswith(DETECTIONS_EXTENSION)]
+
+    results = []
+    for filename in filenames:
+        # Read detections from current file
+        with open(os.path.join(detections_path, filename), 'r') as detections_file:
+            lines = detections_file.readlines()
+
+        # Convert detections from current file
+        label = int(os.path.splitext(filename)[0].split('_')[1])
+        for line in lines:
+            results.append(convert_detection(label, line.strip().split()))
+
+    # Write results to file
+    with open(os.path.join(detections_path, results_filename), 'w') as results_file:
+        json.dump(results, results_file)
+
+
+def main():
+
+    RESULTS_FILENAME = 'results.json'
+    ANNOTATION_TYPE = 'bbox'
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--detections_path', type=str, required=True, help='path to the input detected bounding box files')
+    parser.add_argument('--annotations_file', type=str, required=True, help='path to the input validation annotations file')
+
+    args = parser.parse_args()
+
+    # Generate COCO results file
+    print('Generating COCO results...')
+    generate_results_file(args.detections_path, RESULTS_FILENAME)
+
+    # Create COCO instance
+    cocoGt = COCO(args.annotations_file)
+
+    # Load COCO results
+    cocoDt = cocoGt.loadRes(os.path.join(args.detections_path, RESULTS_FILENAME))
+
+    # Evaluate results
+    cocoEval = COCOeval(cocoGt, cocoDt, ANNOTATION_TYPE)
+    cocoEval.evaluate()
+    cocoEval.accumulate()
+    cocoEval.summarize()
+
+
+if __name__ == '__main__':
+
+    main()
\ No newline at end of file
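With the per-class files in `./logs`, the validation script above can also be invoked standalone, mirroring the command that eval_ssd.py assembles (the paths here are illustrative):

```
python validate_ssd_vgg16.py --detections_path ./logs \
    --annotations_file /dataset/instances_val2017.json
```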
diff --git a/tests/unit/common/tensorflow/tf_model_args.txt b/tests/unit/common/tensorflow/tf_model_args.txt
index 55137c9e3..32d9b51e3 100644
--- a/tests/unit/common/tensorflow/tf_model_args.txt
+++ b/tests/unit/common/tensorflow/tf_model_args.txt
@@ -90,3 +90,7 @@ run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignme
 run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset, LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=1 --num_intra_threads=28 --lfw_batch_size=1 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=200 --max_steps=1000
 run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset, LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=2 --num_intra_threads=28 --lfw_batch_size=100 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=40 --max_steps=1000
 run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset, LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=2 --num_intra_threads=28 --lfw_batch_size=100 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=40 --max_steps=1000
+run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd_vgg16 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --data-location=/dataset --num-inter-threads=11 --num-intra-threads=21 --data-num-inter-threads=21 --data-num-intra-threads=28 --steps=500 --warmup-steps=100,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_ssd.py --input-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --num-inter-threads=11 --num-intra-threads=21 --data-num-inter-threads=21 --data-num-intra-threads=28 --warmup-steps=100 --steps=500 --data-location=/dataset
+run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd_vgg16 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=-1 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/eval_ssd.py --input-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --num-inter-threads=2 --num-intra-threads=56 --data-location=/dataset --accuracy-only
+run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd_vgg16 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=-1 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdvgg16_fp32_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/eval_ssd.py --input-graph=/in_graph/ssdvgg16_fp32_pretrained_model.pb --num-inter-threads=2 --num-intra-threads=56 --data-location=/dataset --accuracy-only
+run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd_vgg16 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --data-location=/dataset --num-inter-threads=11 --num-intra-threads=21 --data-num-inter-threads=21 --data-num-intra-threads=28 --steps=500 --warmup-steps=100,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_ssd.py --input-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --num-inter-threads=11 --num-intra-threads=21 --data-num-inter-threads=21 --data-num-intra-threads=28 --warmup-steps=100 --steps=500 --data-location=/dataset
\ No newline at end of file

From f4fd7a2e3d8abb5fa08e797f8e4b5056f94b8a64 Mon Sep 17 00:00:00 2001
From: Dina Suehiro Jones
Date: Thu, 25 Apr 2019 09:54:35 -0700
Subject: [PATCH 22/62] Make TCMalloc enabled for int8 by default, but
 disabled for other precisions (#290)

* Make TCMalloc enabled for int8 by default, but disabled for other precisions.

* Code cleanup

* update start script

* Updating doc to add more info on TCMalloc
---
 benchmarks/common/base_benchmark_util.py     |  16 ++-
 benchmarks/common/base_model_init.py         |   7 ++
 benchmarks/common/tensorflow/start.sh        |   4 +-
 benchmarks/launch_benchmark.py               |   4 +
 docs/general/tensorflow/LaunchBenchmark.md   |  15 ++-
 .../unit/common/tensorflow/tf_model_args.txt | 109 +++++++++---------
 tests/unit/common/test_base_model_init.py    |  40 ++++++-
 tests/unit/test_launch_benchmark.py          |  12 ++
 8 files changed, 139 insertions(+), 68 deletions(-)

diff --git a/benchmarks/common/base_benchmark_util.py b/benchmarks/common/base_benchmark_util.py
index 0768e8871..e4c92639d 100644
--- a/benchmarks/common/base_benchmark_util.py
+++ b/benchmarks/common/base_benchmark_util.py
@@ -161,12 +161,20 @@ def _define_args(self):
                  "with --accuracy-only and --mode=inference.",
             dest="output_results", action="store_true")
 
+        # Note this can't be a normal boolean flag, because we need to know when the user
+        # does not explicitly set the arg value so that we can apply the appropriate
+        # default value, depending on the precision.
         self._common_arg_parser.add_argument(
             "--disable-tcmalloc",
-            help="Disables the use of TCMalloc for int8 benchmarking. TCMalloc is "
-                 "currently not used for FP32 benchmarking, so using this flag with "
-                 "FP32 models will have no effect.",
-            dest="disable_tcmalloc", action="store_true"
+            help="When TCMalloc is enabled, the google-perftools package is installed (if running "
+                 "using docker) and the LD_PRELOAD environment variable is set to point to "
+                 "the TCMalloc library file. The TCMalloc memory allocator produces better "
+                 "performance results with smaller batch sizes. This flag disables the use of "
+                 "TCMalloc when set to True. For int8 benchmarking, TCMalloc is enabled by "
+                 "default (--disable-tcmalloc=False). For other precisions, the flag is "
+                 "--disable-tcmalloc=True by default.",
+            dest="disable_tcmalloc", choices=["True", "False"],
+            default=None
         )
 
         self._common_arg_parser.add_argument(
diff --git a/benchmarks/common/base_model_init.py b/benchmarks/common/base_model_init.py
index 8e8d1abb2..4a334ca65 100644
--- a/benchmarks/common/base_model_init.py
+++ b/benchmarks/common/base_model_init.py
@@ -44,6 +44,13 @@ def __init__(self, args, custom_args=[], platform_util=None):
         self.custom_args = custom_args
         self.platform_util = platform_util
 
+        # Set default values for TCMalloc and convert string value to a boolean
+        if self.args.disable_tcmalloc is None:
+            # Set to False for int8 and True for other precisions
+            self.args.disable_tcmalloc = self.args.precision != "int8"
+        elif isinstance(self.args.disable_tcmalloc, str):
+            self.args.disable_tcmalloc = self.args.disable_tcmalloc == "True"
+
         # Ensure that we are using the proper version of python to run the benchmarking script
         self.python_exe = os.environ["PYTHON_EXE"]
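Since this defaulting behavior spans three files, here is a compact, hedged restatement of the rule the hunk above implements (the `resolve_disable_tcmalloc` helper name is illustrative, not part of the patch):

```
def resolve_disable_tcmalloc(flag_value, precision):
    # Unset flag: TCMalloc stays enabled (disable=False) only for int8
    if flag_value is None:
        return precision != "int8"
    # argparse delivers the choice as the string "True" or "False"
    return flag_value == "True"

assert resolve_disable_tcmalloc(None, "int8") is False   # int8 keeps TCMalloc
assert resolve_disable_tcmalloc(None, "fp32") is True    # fp32 skips TCMalloc
assert resolve_disable_tcmalloc("False", "fp32") is False
```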
diff --git a/benchmarks/common/tensorflow/start.sh b/benchmarks/common/tensorflow/start.sh
index ab17c9bc6..26d25af86 100755
--- a/benchmarks/common/tensorflow/start.sh
+++ b/benchmarks/common/tensorflow/start.sh
@@ -177,8 +177,8 @@ if [ ${DATA_NUM_INTRA_THREADS} != "None" ]; then
   CMD="${CMD} --data-num-intra-threads=${DATA_NUM_INTRA_THREADS}"
 fi
 
-if [ ${DISABLE_TCMALLOC} == "True" ]; then
-  CMD="${CMD} --disable-tcmalloc"
+if [ ${DISABLE_TCMALLOC} != "None" ]; then
+  CMD="${CMD} --disable-tcmalloc=${DISABLE_TCMALLOC}"
 fi
 
 function install_protoc() {
diff --git a/benchmarks/launch_benchmark.py b/benchmarks/launch_benchmark.py
index 7515936eb..32c0f68ae 100644
--- a/benchmarks/launch_benchmark.py
+++ b/benchmarks/launch_benchmark.py
@@ -93,6 +93,10 @@ def validate_args(self):
         if not self.args.benchmark_only and not self.args.accuracy_only:
             self.args.benchmark_only = True
 
+        # default disable_tcmalloc=False for int8 and disable_tcmalloc=True for other precisions
+        if not self.args.disable_tcmalloc:
+            self.args.disable_tcmalloc = str(self.args.precision != "int8")
+
         if self.args.custom_volumes and not self.args.docker_image:
             raise ValueError("Volume mounts can only be used when running in a docker container "
                              "(a --docker-image must be specified when using --volume).")
diff --git a/docs/general/tensorflow/LaunchBenchmark.md b/docs/general/tensorflow/LaunchBenchmark.md
index ccab76c9c..017b303f1 100644
--- a/docs/general/tensorflow/LaunchBenchmark.md
+++ b/docs/general/tensorflow/LaunchBenchmark.md
@@ -101,10 +101,17 @@ optional arguments:
                         conjunction with --accuracy-only and --mode=inference.
   --output-dir OUTPUT_DIR
                         Folder to dump output into.
-  --disable-tcmalloc    Disables the use of TCMalloc for int8 benchmarking.
-                        TCMalloc is currently not used for FP32 benchmarking,
-                        so using this flag with FP32 models will have no
-                        effect.
+  --disable-tcmalloc {True,False}
+                        When TCMalloc is enabled, the google-perftools
+                        package is installed (if running using docker) and
+                        the LD_PRELOAD environment variable is set to point
+                        to the TCMalloc library file. The TCMalloc memory
+                        allocator produces better performance results with
+                        smaller batch sizes. This flag disables the use of
+                        TCMalloc when set to True. For int8 benchmarking,
+                        TCMalloc is enabled by default
+                        (--disable-tcmalloc=False). For other precisions,
+                        the flag is --disable-tcmalloc=True by default.
--tcmalloc-large-alloc-report-threshold TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD Sets the TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD environment variable to the specified value. The diff --git a/tests/unit/common/tensorflow/tf_model_args.txt b/tests/unit/common/tensorflow/tf_model_args.txt index 32d9b51e3..386d1185c 100644 --- a/tests/unit/common/tensorflow/tf_model_args.txt +++ b/tests/unit/common/tensorflow/tf_model_args.txt @@ -1,25 +1,24 @@ run_tf_benchmark.py --framework tensorflow --use-case recommendation --precision fp32 --mode inference --model-name wide_deep --batch-size 1024 --data-location /dataset --checkpoint /checkpoints --intelai-models . --verbose,OMP_NUM_THREADS=1 numactl --cpunodebind=0 --membind=0 python inference/fp32/wide_deep_inference.py --data_dir=/dataset --model_dir=/checkpoints --batch_size=1024 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb, LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/benchmark.py --num_intra_threads=28 --num_inter_threads=1 --batch_size=100 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb, LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/benchmark.py --num_intra_threads=28 --num_inter_threads=1 --batch_size=1 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb --data-location=/dataset, LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/accuracy.py --num_intra_threads=28 --num_inter_threads=1 --batch_size=100 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb --data_location=/dataset +run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb, numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/benchmark.py --num_intra_threads=28 
--num_inter_threads=1 --batch_size=100 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb +run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb, numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/benchmark.py --num_intra_threads=28 --num_inter_threads=1 --batch_size=1 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb +run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb --data-location=/dataset, numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/accuracy.py --num_intra_threads=28 --num_inter_threads=1 --batch_size=100 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb --data_location=/dataset run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 100 --in-graph /final_int8_inceptionv3.pb --intelai-models . --accuracy-only --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python ./int8/accuracy.py --num_intra_threads=56 --num_inter_threads=2 --batch_size=100 --input_graph=/final_int8_inceptionv3.pb run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 1 --in-graph /final_int8_inceptionv3.pb --intelai-models . --benchmark-only --socket-id 0 --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./int8/benchmark.py --warmup_steps=10 --num_intra_threads=28 --num_inter_threads=1 --batch_size=1 --input_graph=/final_int8_inceptionv3.pb --steps=50 --num_cores=28 run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /final_int8_inceptionv3.pb --intelai-models . --benchmark-only --socket-id 0 --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./int8/benchmark.py --warmup_steps=10 --num_intra_threads=28 --num_inter_threads=1 --batch_size=128 --input_graph=/final_int8_inceptionv3.pb --steps=50 --num_cores=28 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /final_int8_inceptionv3.pb --intelai-models . --benchmark-only --socket-id 0 --disable-tcmalloc,numactl --cpunodebind=0 --membind=0 python ./int8/benchmark.py --warmup_steps=10 --num_intra_threads=28 --num_inter_threads=1 --batch_size=128 --input_graph=/final_int8_inceptionv3.pb --steps=50 --num_cores=28 +run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /final_int8_inceptionv3.pb --intelai-models . 
--benchmark-only --socket-id 0 --disable-tcmalloc=True,numactl --cpunodebind=0 --membind=0 python ./int8/benchmark.py --warmup_steps=10 --num_intra_threads=28 --num_inter_threads=1 --batch_size=128 --input_graph=/final_int8_inceptionv3.pb --steps=50 --num_cores=28 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inceptionv3 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/inception_frozen_max_min.pb --steps=200 --warmup-steps=20,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/int8/benchmark.py --warmup_steps=20 --num_intra_threads=28 --num_inter_threads=1 --batch_size=128 --input_graph=/in_graph/inception_frozen_max_min.pb --steps=200 --num_cores=28 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inceptionv3 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/inception_frozen_max_min.pb --steps=200 --warmup-steps=20,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/int8/benchmark.py --warmup_steps=20 --num_intra_threads=28 --num_inter_threads=1 --batch_size=1 --input_graph=/in_graph/inception_frozen_max_min.pb --steps=200 --num_cores=28 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 1 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --socket-id 0 --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --socket-id 0 --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . 
--socket-id 0 --verbose --num-inter-threads 4 --num-intra-threads 16,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=4 --num-intra-threads=16 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --socket-id 0 --disable-tcmalloc --verbose,numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 100 --accuracy-only --data-location /dataset --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=2 --num-intra-threads=56 --num-cores=28 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only +run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 1 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --socket-id 0 --verbose,numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28 +run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --socket-id 0 --verbose,numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28 +run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --socket-id 0 --verbose --num-inter-threads 4 --num-intra-threads 16,numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=4 --num-intra-threads=16 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28 +run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 100 --accuracy-only --data-location /dataset --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . 
--verbose,python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=2 --num-intra-threads=56 --num-cores=28 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/accuracy.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28 --data_location=/dataset -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=fp32 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/inceptionv4_fp32_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/accuracy.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_fp32_pretrained_model.pb --num_intra_threads=28 --data_location=/dataset +run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=fp32 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/inceptionv4_fp32_pretrained_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/accuracy.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_fp32_pretrained_model.pb --num_intra_threads=28 --data_location=/dataset run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/benchmark.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/benchmark.py --batch_size=1 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference 
--model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/eval_image_classifier_accuracy.py --input_graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --data_location=/dataset --input_height=299 --input_width=299 --num_inter_threads=2 --num_intra_threads=56 --output_layer=InceptionResnetV2/Logits/Predictions --batch_size=100 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --inter-op-parallelism-threads=2 --intra-op-parallelism-threads=28 --batch-size=1 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --inter-op-parallelism-threads=2 --intra-op-parallelism-threads=28 --batch-size=128 +run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --data-location=/dataset,python /workspace/intelai_models/eval_image_classifier_accuracy.py --input_graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --data_location=/dataset --input_height=299 --input_width=299 --num_inter_threads=2 --num_intra_threads=56 --output_layer=InceptionResnetV2/Logits/Predictions --batch_size=100 +run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --inter-op-parallelism-threads=2 --intra-op-parallelism-threads=28 --batch-size=1 +run_tf_benchmark.py --framework=tensorflow 
--use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --inter-op-parallelism-threads=2 --intra-op-parallelism-threads=28 --batch-size=128 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/eval_image_classifier_accuracy.py --input_graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data_location=/dataset --input_height=299 --input_width=299 --num_inter_threads=2 --num_intra_threads=56 --output_layer=InceptionResnetV2/Logits/Predictions --batch_size=100 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --inter-op-parallelism-threads=1 --intra-op-parallelism-threads=28 --batch-size=1 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --benchmark-only --verbose --in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --inter-op-parallelism-threads=1 --intra-op-parallelism-threads=28 --batch-size=128 @@ -27,70 +26,70 @@ run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model- run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name resnet50 --batch-size 100 --data-location /dataset --in-graph /final_int8_resnet50.pb --intelai-models . 
--accuracy-only --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python ./inference/eval_image_classifier_inference.py --input-graph=/final_int8_resnet50.pb --num-inter-threads=2 --num-intra-threads=56 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name resnet50 --batch-size 128 --in-graph /final_int8_resnet50.pb --intelai-models . --benchmark-only --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python ./inference/eval_image_classifier_inference.py --input-graph=/final_int8_resnet50.pb --num-inter-threads=2 --num-intra-threads=56 --batch-size=128 --warmup-steps=10 --steps=50 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/resnet50_int8_pretrained_model.pb --steps=200 --warmup-steps=20,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/resnet50_int8_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=20 --steps=200 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 1 --in-graph /freezed_resnet50.pb --intelai-models . --socket-id 0 --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 128 --in-graph /freezed_resnet50.pb --intelai-models . --socket-id 0 --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 1 --in-graph /freezed_resnet50.pb --intelai-models . 
--socket-id 0 --verbose --num-inter-threads 4 --num-intra-threads 16,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=4 --num-intra-threads=16 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --batch-size 100 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/freezed_resnet50.pb --accuracy-only --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=100 --data-location=/dataset --accuracy-only --num-cores=28 --warmup-steps=10 --steps=50 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --output-results --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/resnet50_fp32_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/resnet50_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --num-cores=28 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --results-file-path /workspace/benchmarks/common/tensorflow/logs/resnet50_fp32_inference_results*.txt +run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 1 --in-graph /freezed_resnet50.pb --intelai-models . --socket-id 0 --verbose,numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28 +run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 128 --in-graph /freezed_resnet50.pb --intelai-models . --socket-id 0 --verbose,numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28 +run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 1 --in-graph /freezed_resnet50.pb --intelai-models . 
--socket-id 0 --verbose --num-inter-threads 4 --num-intra-threads 16,numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=4 --num-intra-threads=16 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28 +run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --batch-size 100 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/freezed_resnet50.pb --accuracy-only --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=100 --data-location=/dataset --accuracy-only --num-cores=28 --warmup-steps=10 --steps=50 +run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --output-results --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/resnet50_fp32_pretrained_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/resnet50_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --num-cores=28 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --results-file-path /workspace/benchmarks/common/tensorflow/logs/resnet50_fp32_inference_results*.txt run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name squeezenet --batch-size 64 --checkpoint /checkpoints --intelai-models . --socket-id 0 --verbose,taskset -c 0-27 python ./fp32/train_squeezenet.py --data_location None --batch_size 64 --num_inter_threads 1 --num_intra_threads 28 --model_dir /checkpoints --inference-only --verbose run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name squeezenet --batch-size 1 --checkpoint /checkpoints --intelai-models . --socket-id 0 --verbose,taskset -c 0-27 python ./fp32/train_squeezenet.py --data_location None --batch_size 1 --num_inter_threads 1 --num_intra_threads 28 --model_dir /checkpoints --inference-only --verbose -run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name faster_rcnn --checkpoint /checkpoints --intelai-models . --model-source-dir . --socket-id 0 --verbose --config_file=pipeline.config,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --num_inter_threads 1 --num_intra_threads 28 --pipeline_config_path /checkpoints/pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/log/eval -run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name faster_rcnn --checkpoint /checkpoints --intelai-models . --model-source-dir . 
--socket-id 0 --verbose --config_file=pipeline.config --num-inter-threads 4 --num-intra-threads 16,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --num_inter_threads 4 --num_intra_threads 16 --pipeline_config_path /checkpoints/pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/log/eval +run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name faster_rcnn --checkpoint /checkpoints --intelai-models . --model-source-dir . --socket-id 0 --verbose --config_file=pipeline.config,numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --num_inter_threads 1 --num_intra_threads 28 --pipeline_config_path /checkpoints/pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/log/eval +run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name faster_rcnn --checkpoint /checkpoints --intelai-models . --model-source-dir . --socket-id 0 --verbose --config_file=pipeline.config --num-inter-threads 4 --num-intra-threads 16,numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --num_inter_threads 4 --num_intra_threads 16 --pipeline_config_path /checkpoints/pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/log/eval run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=-1 --accuracy-only --verbose --checkpoint=/checkpoints --data-location=/dataset --in-graph=/in_graph/frozen_inference_graph.pb,sh /workspace/intelai_models/inference/fp32/coco_accuracy.sh /in_graph/frozen_inference_graph.pb /dataset/coco_val.record /workspace/models run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb --data-location=/dataset, sh /workspace/intelai_models/inference/int8/coco_int8.sh /in_graph/ssdmobilenet_int8_pretrained_model.pb /dataset run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --batch-size=1 --socket-id 0 --data-location=/dataset --verbose --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb --benchmark-only --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/run_frozen_graph_ssdmob.py -g /in_graph/ssdmobilenet_int8_pretrained_model.pb -n 5000 -d /dataset -x --num-inter-threads 2 --num-intra-threads 28 -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 --benchmark-only --verbose 
--in-graph=/in_graph/frozen_inference_graph.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/infer_detections.py --input_tfrecord_paths=/dataset --output_tfrecord_path=/SSD-mobilenet-out.tfrecord --inference_graph=/in_graph/frozen_inference_graph.pb --discard_image_pixels=True --num_inter_threads=2 --num_intra_threads=28
+run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 --benchmark-only --verbose --in-graph=/in_graph/frozen_inference_graph.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/infer_detections.py --input_tfrecord_paths=/dataset --output_tfrecord_path=/SSD-mobilenet-out.tfrecord --inference_graph=/in_graph/frozen_inference_graph.pb --discard_image_pixels=True --num_inter_threads=2 --num_intra_threads=28
 run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 --accuracy-only --verbose --in-graph=/in_graph/frozen_inference_graph.pb --benchmark-dir=/workspace/benchmarks --data-location=/dataset,sh /workspace/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/ssdmobilenet_accuracy.sh /in_graph/frozen_inference_graph.pb /dataset
-run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-resnet34 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssd_resnet34_bs1.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/infer_detections.py --input-graph /in_graph/ssd_resnet34_bs1.pb --batch-size 1 --inter-op-parallelism-threads 1 --intra-op-parallelism-threads 28 --accuracy-only --data-location /dataset
-run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-resnet34 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssd_resnet34_bs1.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/infer_detections.py --input-graph /in_graph/ssd_resnet34_bs1.pb --batch-size 1 --inter-op-parallelism-threads 1 --intra-op-parallelism-threads 28
-run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name rfcn --checkpoint /checkpoints --intelai-models . --model-source-dir . --socket-id 0 --verbose --config_file=rfcn_pipeline.config,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --inter_op 1 --intra_op 28 --omp 28 --pipeline_config_path /checkpoints/rfcn_pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/models/rfcn/eval --logtostderr --blocktime=0 --run_once=True
+run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-resnet34 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssd_resnet34_bs1.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/infer_detections.py --input-graph /in_graph/ssd_resnet34_bs1.pb --batch-size 1 --inter-op-parallelism-threads 1 --intra-op-parallelism-threads 28 --accuracy-only --data-location /dataset
+run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-resnet34 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssd_resnet34_bs1.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/infer_detections.py --input-graph /in_graph/ssd_resnet34_bs1.pb --batch-size 1 --inter-op-parallelism-threads 1 --intra-op-parallelism-threads 28
+run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name rfcn --checkpoint /checkpoints --intelai-models . --model-source-dir . --socket-id 0 --verbose --config_file=rfcn_pipeline.config,numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --inter_op 1 --intra_op 28 --omp 28 --pipeline_config_path /checkpoints/rfcn_pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/models/rfcn/eval --logtostderr --blocktime=0 --run_once=True
 run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=rfcn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --verbose --in-graph=/in_graph/frozen_inference_graph.pb --data-location=/dataset --accuracy-only --split=accuracy_message,FROZEN_GRAPH=/in_graph/frozen_inference_graph.pb TF_RECORD_FILE=/dataset SPLIT=accuracy_message TF_MODELS_ROOT=/workspace/models /workspace/intelai_models/inference/fp32/coco_mAP.sh
 run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=rfcn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --verbose --in-graph=/in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb --data-location=/dataset --benchmark-only --number_of_steps=500,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/int8/run_rfcn_inference.py -m /workspace/models -g /in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb -x 500 -d /dataset --num-inter-threads 2 --num-intra-threads 56
 run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=rfcn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --verbose --in-graph=/in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb --data-location=/dataset --accuracy-only --split=accuracy_message,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 FROZEN_GRAPH=/in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb TF_RECORD_FILE=/dataset SPLIT=accuracy_message TF_MODELS_ROOT=/workspace/models /workspace/intelai_models/inference/int8/coco_mAP.sh
 run_tf_benchmark.py --framework tensorflow --use-case text_to_speech --precision fp32 --mode inference --model-name wavenet --num-cores 1 --checkpoint /checkpoints --intelai-models . --model-source-dir . --socket-id 0 --verbose --checkpoint_name=model.ckpt-99 --sample=8510,numactl --physcpubind=0-0 --membind=0 python generate.py /checkpoints/model.ckpt-99 --num_inter_threads=1 --num_intra_threads=1 --sample=8510
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=128 --num-inter-threads=1 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=28 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --accuracy-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=128 --num-inter-threads=2 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=56 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=128 --num-inter-threads=1 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=28 --warmup-steps=10 --steps=50
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=1 --num-inter-threads=1 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=28 --warmup-steps=10 --steps=50
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=128 --num-inter-threads=1 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=28 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --accuracy-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb --data-location=/dataset,python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=128 --num-inter-threads=2 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=56 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=128 --num-inter-threads=1 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=28 --warmup-steps=10 --steps=50
+run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=1 --num-inter-threads=1 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=28 --warmup-steps=10 --steps=50
 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/resnet101_int8_model.pb,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=1 --num-intra-threads=28 --num-inter-threads=1 --input-graph=/in_graph/resnet101_int8_model.pb --warmup-steps=40 --steps=100
-"run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_language --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --decode_from_file=/checkpoints/newstest2015.en --reference=/checkpoints/newstest2015.de","LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/models/tensor2tensor/bin/t2t_decoder.py --problem=translate_ende_wmt32k --model=transformer --hparams_set=transformer_base_single_gpu --decode_hparams=beam_size=4,alpha=0.6,batch_size=1 --data_dir=/dataset --output_dir=/checkpoints --decode_from_file=/checkpoints/newstest2015.en --decode_to_file=/workspace/models/out_dir/output_infer --reference=/checkpoints/newstest2015.de --inter_op_parallelism_threads=1 --intra_op_parallelism_threads=28"
-"run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_language --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=32 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --decode_from_file=/checkpoints/newstest2015.en --reference=/checkpoints/newstest2015.de","LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/models/tensor2tensor/bin/t2t_decoder.py --problem=translate_ende_wmt32k --model=transformer --hparams_set=transformer_base_single_gpu --decode_hparams=beam_size=4,alpha=0.6,batch_size=32 --data_dir=/dataset --output_dir=/checkpoints --decode_from_file=/checkpoints/newstest2015.en --decode_to_file=/workspace/models/out_dir/output_infer --reference=/checkpoints/newstest2015.de --inter_op_parallelism_threads=1 --intra_op_parallelism_threads=28"
-run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_lt_official --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --file=/dataset/newstest2014.en --reference=/dataset/newstest2014.de --vocab_file=/dataset/vocab.txt --in_graph=/in_graph/fp32_graphdef.pb,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=1 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt
-run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_lt_official --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=64 --socket-id=0 --benchmark-only --verbose --file=/dataset/newstest2014.en --reference=/dataset/newstest2014.de --vocab_file=/dataset/vocab.txt --in_graph=/in_graph/fp32_graphdef.pb,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=64 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt
+"run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_language --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --decode_from_file=/checkpoints/newstest2015.en --reference=/checkpoints/newstest2015.de","numactl --cpunodebind=0 --membind=0 python /workspace/models/tensor2tensor/bin/t2t_decoder.py --problem=translate_ende_wmt32k --model=transformer --hparams_set=transformer_base_single_gpu --decode_hparams=beam_size=4,alpha=0.6,batch_size=1 --data_dir=/dataset --output_dir=/checkpoints --decode_from_file=/checkpoints/newstest2015.en --decode_to_file=/workspace/models/out_dir/output_infer --reference=/checkpoints/newstest2015.de --inter_op_parallelism_threads=1 --intra_op_parallelism_threads=28"
+"run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_language --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=32 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --decode_from_file=/checkpoints/newstest2015.en --reference=/checkpoints/newstest2015.de","numactl --cpunodebind=0 --membind=0 python /workspace/models/tensor2tensor/bin/t2t_decoder.py --problem=translate_ende_wmt32k --model=transformer --hparams_set=transformer_base_single_gpu --decode_hparams=beam_size=4,alpha=0.6,batch_size=32 --data_dir=/dataset --output_dir=/checkpoints --decode_from_file=/checkpoints/newstest2015.en --decode_to_file=/workspace/models/out_dir/output_infer --reference=/checkpoints/newstest2015.de --inter_op_parallelism_threads=1 --intra_op_parallelism_threads=28"
+run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_lt_official --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --file=/dataset/newstest2014.en --reference=/dataset/newstest2014.de --vocab_file=/dataset/vocab.txt --in_graph=/in_graph/fp32_graphdef.pb,numactl --cpunodebind=0 --membind=0 python /workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=1 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt
+run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_lt_official --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=64 --socket-id=0 --benchmark-only --verbose --file=/dataset/newstest2014.en --reference=/dataset/newstest2014.de --vocab_file=/dataset/vocab.txt --in_graph=/in_graph/fp32_graphdef.pb,numactl --cpunodebind=0 --membind=0 python /workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=64 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt
 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --data-location=/dataset --input_height=224 --input_width=224,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/accuracy.py --input_height=224 --input_width=224 --num_intra_threads=28 --output_layer=MobilenetV1/Predictions/Reshape_1 --num_inter_threads=1 --batch_size=100 --input_graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --data_location=/dataset --input_layer=input
 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_height=224 --input_width=224 --warmup_steps=10 --steps=50,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/benchmark.py --input_height=224 --input_width=224 --warmup_steps=10 --num_intra_threads=28 --output_layer=MobilenetV1/Predictions/Reshape_1 --num_inter_threads=1 --batch_size=1 --input_graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_layer=input --steps=50
 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_height=224 --input_width=224 --warmup_steps=10 --steps=50,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/benchmark.py --input_height=224 --input_width=224 --warmup_steps=10 --num_intra_threads=28 --output_layer=MobilenetV1/Predictions/Reshape_1 --num_inter_threads=1 --batch_size=240 --input_graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_layer=input --steps=50
 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 -l python /workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_dir /dataset --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 1
-python common/tensorflow/run_tf_benchmark.py --framework=tensorflow --use-case=language_modeling --model-name=lm-1b --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/models/benchmark.py -b=1 -I=100 --inter=1 --intra=28
+python common/tensorflow/run_tf_benchmark.py --framework=tensorflow --use-case=language_modeling --model-name=lm-1b --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models,numactl --cpunodebind=0 --membind=0 python /workspace/models/benchmark.py -b=1 -I=100 --inter=1 --intra=28
 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 -l python /workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_dir /dataset --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 100
 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints,numactl --cpunodebind=0 -l python /workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 100
 run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --checkpoint=/checkpoints --in-graph=/in_graph/mobilenet_v1_1.0_224_frozen.pb --data-location=/dataset,python /workspace/intelai_models/inference/fp32/accuracy.py --batch_size=100 --num_inter_threads=2 --input_graph=/in_graph/mobilenet_v1_1.0_224_frozen.pb --num_intra_threads=56 --data_location=/dataset
-run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=256 --socket-id 0 --accuracy-only --verbose --checkpoint=/checkpoints --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=256 --inference_only --accuracy_only
-run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=256 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=256 --inference_only --benchmark_only
-run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=1 --inference_only --benchmark_only
-run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=maskrcnn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/coco.py evaluate --dataset=/dataset --num_inter_threads 1 --num_intra_threads 28 --nw 5 --nb 50 --model=coco --infbs 1
-run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=maskrcnn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --data-location=/dataset --num-inter-threads 4 --num-intra-threads 16,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/coco.py evaluate --dataset=/dataset --num_inter_threads 4 --num_intra_threads 16 --nw 5 --nb 50 --model=coco --infbs 1
-run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=-1 --accuracy-only --verbose --in-graph=/in_graph/pretrained_int8_faster_rcnn_model.pb --data-location=/dataset,/workspace/intelai_models/inference/int8/coco_int8.sh /in_graph/pretrained_int8_faster_rcnn_model.pb /dataset /workspace/models
+run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=256 --socket-id 0 --accuracy-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=256 --inference_only --accuracy_only
+run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=256 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=256 --inference_only --benchmark_only
+run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=1 --inference_only --benchmark_only
+run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=maskrcnn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/coco.py evaluate --dataset=/dataset --num_inter_threads 1 --num_intra_threads 28 --nw 5 --nb 50 --model=coco --infbs 1
+run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=maskrcnn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --data-location=/dataset --num-inter-threads 4 --num-intra-threads 16,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/coco.py evaluate --dataset=/dataset --num_inter_threads 4 --num_intra_threads 16 --nw 5 --nb 50 --model=coco --infbs 1
+run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=-1 --accuracy-only --verbose --in-graph=/in_graph/pretrained_int8_faster_rcnn_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 /workspace/intelai_models/inference/int8/coco_int8.sh /in_graph/pretrained_int8_faster_rcnn_model.pb /dataset /workspace/models
 run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=-1 --benchmark-only --verbose --in-graph=/in_graph/pretrained_int8_faster_rcnn_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/int8/run_frozen_graph_rcnn.py -g /in_graph/pretrained_int8_faster_rcnn_model.pb -d /dataset --num-inter-threads 2 --num-intra-threads 56
 run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=-1 --benchmark-only --verbose --in-graph=/in_graph/pretrained_int8_faster_rcnn_model.pb --data-location=/dataset --number-of-steps=500,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/int8/run_frozen_graph_rcnn.py -g /in_graph/pretrained_int8_faster_rcnn_model.pb -n 500 -d /dataset --num-inter-threads 2 --num-intra-threads 56
-run_tf_benchmark.py --framework=tensorflow --use-case=adversarial_networks --model-name=dcgan --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/inference_bench.py -ckpt /checkpoints -dl /dataset --num_inter_threads 1 --num_intra_threads 28 -nw 100 -nb 500 --bs 100 --kmp_blocktime 1 --kmp_settings 1
-run_tf_benchmark.py --framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 1 --num_intra_threads 28 --bs 1 --dl /dataset --nw 100 --nb 200
-run_tf_benchmark.py --framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 1 --num_intra_threads 28 --bs 100 --dl /dataset --nw 100 --nb 200
-run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=gnmt --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --infer_mode=beam_search,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/fp32/nmt.py --src=de --tgt=en --hparams_path=/workspace/intelai_models/fp32/standard_hparams/wmt16_gnmt_4_layer_internal.json --out_dir=/workspace/benchmarks/common/tensorflow/logs --vocab_prefix=/dataset/vocab.bpe.32000 --ckpt=/checkpoints/translate.ckpt --infer_batch_size=1 --inference_input_file=/dataset/newstest2015.tok.bpe.32000.de --inference_output_file=/workspace/benchmarks/common/tensorflow/logs/output_infer --inference_ref_file=/dataset/newstest2015.tok.bpe.32000.en --num_inter_threads=1 --num_intra_threads=28 --infer_mode=beam_search
-run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=gnmt --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=32 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --infer_mode=beam_search,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/fp32/nmt.py --src=de --tgt=en --hparams_path=/workspace/intelai_models/fp32/standard_hparams/wmt16_gnmt_4_layer_internal.json --out_dir=/workspace/benchmarks/common/tensorflow/logs --vocab_prefix=/dataset/vocab.bpe.32000 --ckpt=/checkpoints/translate.ckpt --infer_batch_size=32 --inference_input_file=/dataset/newstest2015.tok.bpe.32000.de --inference_output_file=/workspace/benchmarks/common/tensorflow/logs/output_infer --inference_ref_file=/dataset/newstest2015.tok.bpe.32000.en --num_inter_threads=1 --num_intra_threads=28 --infer_mode=beam_search
-run_tf_benchmark.py --framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --num-inter-threads 4 --num-intra-threads 16,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 4 --num_intra_threads 16 --bs 100 --dl /dataset --nw 100 --nb 200
-run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=unet --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --checkpoint_name=model.ckpt,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/unet_infer.py -bs 1 -cp /checkpoints/model.ckpt --num_inter_threads 1 --num_intra_threads 28 -nw 80 -nb 400
-run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14
-run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=1 --input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14
-run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=512 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=512 --input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14
+run_tf_benchmark.py --framework=tensorflow --use-case=adversarial_networks --model-name=dcgan --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/inference_bench.py -ckpt /checkpoints -dl /dataset --num_inter_threads 1 --num_intra_threads 28 -nw 100 -nb 500 --bs 100 --kmp_blocktime 1 --kmp_settings 1
+run_tf_benchmark.py --framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 1 --num_intra_threads 28 --bs 1 --dl /dataset --nw 100 --nb 200
+run_tf_benchmark.py --framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 1 --num_intra_threads 28 --bs 100 --dl /dataset --nw 100 --nb 200
+run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=gnmt --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --infer_mode=beam_search,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/fp32/nmt.py --src=de --tgt=en --hparams_path=/workspace/intelai_models/fp32/standard_hparams/wmt16_gnmt_4_layer_internal.json --out_dir=/workspace/benchmarks/common/tensorflow/logs --vocab_prefix=/dataset/vocab.bpe.32000 --ckpt=/checkpoints/translate.ckpt --infer_batch_size=1 --inference_input_file=/dataset/newstest2015.tok.bpe.32000.de --inference_output_file=/workspace/benchmarks/common/tensorflow/logs/output_infer --inference_ref_file=/dataset/newstest2015.tok.bpe.32000.en --num_inter_threads=1 --num_intra_threads=28 --infer_mode=beam_search
+run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=gnmt --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=32 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --infer_mode=beam_search,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/fp32/nmt.py --src=de --tgt=en --hparams_path=/workspace/intelai_models/fp32/standard_hparams/wmt16_gnmt_4_layer_internal.json --out_dir=/workspace/benchmarks/common/tensorflow/logs --vocab_prefix=/dataset/vocab.bpe.32000 --ckpt=/checkpoints/translate.ckpt --infer_batch_size=32 --inference_input_file=/dataset/newstest2015.tok.bpe.32000.de --inference_output_file=/workspace/benchmarks/common/tensorflow/logs/output_infer --inference_ref_file=/dataset/newstest2015.tok.bpe.32000.en --num_inter_threads=1 --num_intra_threads=28 --infer_mode=beam_search
+run_tf_benchmark.py --framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --num-inter-threads 4 --num-intra-threads 16,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 4 --num_intra_threads 16 --bs 100 --dl /dataset --nw 100 --nb 200
+run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=unet --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --checkpoint_name=model.ckpt,numactl --cpunodebind=0 --membind=0 python /workspace/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/unet_infer.py -bs 1 -cp /checkpoints/model.ckpt --num_inter_threads 1 --num_intra_threads 28 -nw 80 -nb 400
+run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14
+run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=1 --input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14
+run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=512 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=512 --input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14
 run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_int8_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --input_graph=/in_graph/wide_deep_int8_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14
 run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_int8_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=1 --input_graph=/in_graph/wide_deep_int8_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14
 run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=512 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_int8_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=512 --input_graph=/in_graph/wide_deep_int8_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14
-run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=mtcc --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/one_image_test.py --num_inter_threads 1 --num_intra_threads 28 -ckpt /checkpoints -dl /dataset
-run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset, LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=1 --num_intra_threads=28 --lfw_batch_size=1 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=200 --max_steps=1000
-run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset, LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=2 --num_intra_threads=28 --lfw_batch_size=100 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=40 --max_steps=1000
-run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset, LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=2 --num_intra_threads=28 --lfw_batch_size=100 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=40 --max_steps=1000
+run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=mtcc --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/one_image_test.py --num_inter_threads 1 --num_intra_threads 28 -ckpt /checkpoints -dl /dataset
+run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset, numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=1 --num_intra_threads=28 --lfw_batch_size=1 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=200 --max_steps=1000
+run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset, numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=2 --num_intra_threads=28 --lfw_batch_size=100 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=40 --max_steps=1000
+run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset, numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=2 --num_intra_threads=28 --lfw_batch_size=100 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=40 --max_steps=1000
 run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd_vgg16 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --data-location=/dataset --num-inter-threads=11 --num-intra-threads=21 --data-num-inter-threads=21 --data-num-intra-threads=28 --steps=500 --warmup-steps=100,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_ssd.py --input-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --num-inter-threads=11 --num-intra-threads=21 --data-num-inter-threads=21 --data-num-intra-threads=28 --warmup-steps=100 --steps=500 --data-location=/dataset
 run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd_vgg16 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=-1 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/eval_ssd.py --input-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --num-inter-threads=2 --num-intra-threads=56 --data-location=/dataset --accuracy-only
-run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd_vgg16 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=-1 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdvgg16_fp32_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/eval_ssd.py --input-graph=/in_graph/ssdvgg16_fp32_pretrained_model.pb --num-inter-threads=2 --num-intra-threads=56 --data-location=/dataset --accuracy-only
-run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd_vgg16 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --data-location=/dataset --num-inter-threads=11 --num-intra-threads=21 --data-num-inter-threads=21 --data-num-intra-threads=28 --steps=500 --warmup-steps=100,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_ssd.py --input-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --num-inter-threads=11 --num-intra-threads=21 --data-num-inter-threads=21 --data-num-intra-threads=28 --warmup-steps=100 --steps=500 --data-location=/dataset
\ No newline at end of file
+run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd_vgg16 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=-1 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdvgg16_fp32_pretrained_model.pb --data-location=/dataset,python /workspace/intelai_models/inference/eval_ssd.py --input-graph=/in_graph/ssdvgg16_fp32_pretrained_model.pb --num-inter-threads=2 --num-intra-threads=56 --data-location=/dataset --accuracy-only
+run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd_vgg16 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --data-location=/dataset --num-inter-threads=11 --num-intra-threads=21 --data-num-inter-threads=21 --data-num-intra-threads=28 --steps=500 --warmup-steps=100,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_ssd.py --input-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --num-inter-threads=11 --num-intra-threads=21 --data-num-inter-threads=21 --data-num-intra-threads=28 --warmup-steps=100 --steps=500 --data-location=/dataset
\ No newline at end of file
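Editorial note on the fixture churn above: every removed/added pair in `tf_model_args.txt` differs only in whether the expected command is prefixed with `LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6` — FP32 fixtures lose the tcmalloc preload, int8 fixtures keep it, and the `numactl` socket pinning is unchanged. A minimal sketch of the prefix rule these fixtures encode is below; the function name and signature are hypothetical, not the repo's actual `BaseModelInitializer.get_command_prefix`:

```python
# Hypothetical sketch of the prefix rule the updated fixtures encode;
# the real BaseModelInitializer.get_command_prefix may differ in details.
def command_prefix(precision, socket_id, disable_tcmalloc=None, numactl=True,
                   tcmalloc_lib="/usr/lib/libtcmalloc.so.4.2.6"):
    # int8 defaults to tcmalloc on; fp32 defaults to tcmalloc off unless
    # disable_tcmalloc=False is passed explicitly
    use_tcmalloc = disable_tcmalloc is False or \
        (disable_tcmalloc is None and precision == "int8")
    prefix = "LD_PRELOAD={} ".format(tcmalloc_lib) if use_tcmalloc else ""
    if numactl and socket_id >= 0:
        # pin CPU and memory allocations to the requested socket
        prefix += "numactl --cpunodebind={0} --membind={0} ".format(socket_id)
    return prefix

# matches the fp32 fixtures above: numactl pinning only, no LD_PRELOAD
assert command_prefix("fp32", 0) == "numactl --cpunodebind=0 --membind=0 "
# matches the int8 fixtures: the tcmalloc preload is kept
assert command_prefix("int8", 0).startswith("LD_PRELOAD=")
```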
mock_glob): + """ FP32 models should have TCMalloc disabled by default, but models should + include LD_PRELOAD in the command prefix if disable_tcmalloc=False is explicitly set. """ + platform_util = MagicMock() + args = MagicMock(verbose=True, model_name=test_model_name) + test_tcmalloc_lib = "/usr/lib/libtcmalloc.so.4.2.6" + mock_glob.return_value = [test_tcmalloc_lib] + os.environ["PYTHON_EXE"] = "python" + args.socket_id = 0 + args.precision = precision + + # By default, TCMalloc should not be used + base_model_init = BaseModelInitializer(args, [], platform_util) + command_prefix = base_model_init.get_command_prefix(args.socket_id) + assert "LD_PRELOAD={}".format(test_tcmalloc_lib) not in command_prefix + assert "numactl --cpunodebind=0 --membind=0" in command_prefix + + # If tcmalloc is explicitly enabled (disable_tcmalloc=False), LD_PRELOAD should be in the prefix + args.disable_tcmalloc = False + base_model_init = BaseModelInitializer(args, [], platform_util) + command_prefix = base_model_init.get_command_prefix(args.socket_id) + assert "LD_PRELOAD={}".format(test_tcmalloc_lib) in command_prefix + assert "numactl --cpunodebind=0 --membind=0" in command_prefix + + # If numactl is set to false, we should not have numactl in the prefix + args.disable_tcmalloc = True + base_model_init = BaseModelInitializer(args, [], platform_util) + command_prefix = base_model_init.get_command_prefix(args.socket_id, numactl=False) + assert "LD_PRELOAD={}".format(test_tcmalloc_lib) not in command_prefix + assert "numactl" not in command_prefix diff --git a/tests/unit/test_launch_benchmark.py b/tests/unit/test_launch_benchmark.py index 03b96f697..32a9eaec1 100644 --- a/tests/unit/test_launch_benchmark.py +++ b/tests/unit/test_launch_benchmark.py @@ -258,3 +258,15 @@ def test_launch_benchmark_custom_volume(launch_benchmark, mock_popen): docker_run_cmd = " ".join(args[0]) for custom_volume in custom_volumes: assert "--volume {}".format(custom_volume) in docker_run_cmd + + +@pytest.mark.parametrize("precision,expected_disable_tcmalloc", [["int8", "False"], + ["fp32", "True"]]) +def test_disable_tcmalloc(launch_benchmark, mock_popen, precision, expected_disable_tcmalloc): + launch_benchmark.args.precision = precision + launch_benchmark.main() + assert mock_popen.called + args, _ = mock_popen.call_args + # convert the run command args to a string and then check for the DISABLE_TCMALLOC env var + docker_run_cmd = " ".join(args[0]) + assert "--env DISABLE_TCMALLOC={}".format(expected_disable_tcmalloc) in docker_run_cmd From c21b9ed54fa0b2af7c61ec1f95ae0adb65606781 Mon Sep 17 00:00:00 2001 From: Wafaa Taie Date: Fri, 26 Apr 2019 09:44:48 -0700 Subject: [PATCH 23/62] add the required dependencies for coco dataset conversion to tf records, and the instructions to install. (#292) --- benchmarks/object_detection/tensorflow/ssd_vgg16/README.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md b/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md index 47233e7e2..514abe6a7 100644 --- a/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md +++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md @@ -24,6 +24,7 @@ or the throughput and latency benchmark with real data. The [TensorFlow models](https://github.com/tensorflow/models) repo will be used for converting the coco dataset to the TF records format.
+Follow [instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md#dependencies) to install the required dependencies (`cocoapi` and `Protobuf 3.0.0`). ``` $ mkdir val $ cd val @@ -65,6 +66,7 @@ located after the script has completed. ``` # We are going to use an older version of the conversion script to checkout the git commit +$ git clone https://github.com/tensorflow/models.git $ cd models $ git checkout 7a9934df2afdf95be9405b4e9f1f2480d748dc40 @@ -203,6 +205,7 @@ or the throughput and latency benchmark with real data. The [TensorFlow models](https://github.com/tensorflow/models) repo will be used for converting the coco dataset to the TF records format. +Follow [instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md#dependencies) to install the required dependencies (`cocoapi` and `Protobuf 3.0.0`). ``` $ mkdir val $ cd val @@ -244,6 +247,7 @@ located after the script has completed. ``` # We are going to use an older version of the conversion script to checkout the git commit +$ git clone https://github.com/tensorflow/models.git $ cd models $ git checkout 7a9934df2afdf95be9405b4e9f1f2480d748dc40 From 9f6387db3609a731b2df071f0f206ce187de34b4 Mon Sep 17 00:00:00 2001 From: Wafaa Taie Date: Fri, 26 Apr 2019 10:19:22 -0700 Subject: [PATCH 24/62] update object detection models readme for dataset conversion. (#293) --- .../object_detection/tensorflow/faster_rcnn/README.md | 1 + benchmarks/object_detection/tensorflow/rfcn/README.md | 6 ++++-- .../object_detection/tensorflow/ssd-mobilenet/README.md | 2 ++ .../object_detection/tensorflow/ssd-resnet34/README.md | 1 + 4 files changed, 8 insertions(+), 2 deletions(-) diff --git a/benchmarks/object_detection/tensorflow/faster_rcnn/README.md b/benchmarks/object_detection/tensorflow/faster_rcnn/README.md index 162acdf07..cad38b7de 100644 --- a/benchmarks/object_detection/tensorflow/faster_rcnn/README.md +++ b/benchmarks/object_detection/tensorflow/faster_rcnn/README.md @@ -78,6 +78,7 @@ TF records format in order to use it with the inference script. We will do this by running the `create_coco_tf_record.py` file in the TensorFlow models repo. +Follow [instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md#dependencies) to install the required dependencies (`cocoapi` and `Protobuf 3.0.0`). Follow the steps below to navigate to the proper directory and point the script to the raw COCO dataset files that you have downloaded in step 2. The `--output_dir` is the location where the TF record files will be diff --git a/benchmarks/object_detection/tensorflow/rfcn/README.md b/benchmarks/object_detection/tensorflow/rfcn/README.md index 10a0342ce..ec9fad2e6 100644 --- a/benchmarks/object_detection/tensorflow/rfcn/README.md +++ b/benchmarks/object_detection/tensorflow/rfcn/README.md @@ -44,7 +44,7 @@ sed -i.bak 95s/input_config/input_config[0]/ offline_eval_map_corloc.py ``` -2. Download the 2017 validation +2. Download the 2017 validation [COCO dataset](http://cocodataset.org/#home) and annotations: ``` @@ -78,6 +78,7 @@ TF records format in order to use it with the inference script. We will do this by running the `create_coco_tf_record.py` file in the TensorFlow models repo. +Follow [instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md#dependencies) to install the required dependencies (`cocoapi` and `Protobuf 3.0.0`).
Follow the steps below to navigate to the proper directory and point the script to the raw COCO dataset files that you have downloaded in step 2. The `--output_dir` is the location where the TF record files will be @@ -222,7 +223,7 @@ $ git clone https://github.com/cocodataset/cocoapi.git The TensorFlow models repo will be used for running inference as well as converting the coco dataset to the TF records format. -2. Download the 2017 validation +2. Download the 2017 validation [COCO dataset](http://cocodataset.org/#home) and annotations: ``` @@ -256,6 +257,7 @@ TF records format in order to use it with the inference script. We will do this by running the `create_coco_tf_record.py` file in the TensorFlow models repo. +Follow [instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md#dependencies) to install the required dependencies (`cocoapi` and `Protobuf 3.0.0`). Follow the steps below to navigate to the proper directory and point the script to the raw COCO dataset files that you have downloaded in step 2. The `--output_dir` is the location where the TF record files will be diff --git a/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md b/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md index 2d129384f..6425640e5 100644 --- a/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md +++ b/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md @@ -61,6 +61,7 @@ TF records format in order to use it with the inference script. We will do this by running the `create_coco_tf_record.py` file in the TensorFlow models repo. +Follow [instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md#dependencies) to install the required dependencies (`cocoapi` and `Protobuf 3.0.0`). Follow the steps below to navigate to the proper directory and point the script to the raw COCO dataset files that you have downloaded in step 2. The `--output_dir` is the location where the TF record files will be @@ -241,6 +242,7 @@ TF records format in order to use it with the inference script. We will do this by running the `create_coco_tf_record.py` file in the TensorFlow models repo. +Follow [instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md#dependencies) to install the required dependencies (`cocoapi` and `Protobuf 3.0.0`). Follow the steps below to navigate to the proper directory and point the script to the raw COCO dataset files that you have downloaded in step 2. The `--output_dir` is the location where the TF record files will be diff --git a/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md b/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md index 0a6915bac..f4e419f79 100644 --- a/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md +++ b/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md @@ -61,6 +61,7 @@ TF records format in order to use it with the inference script. We will do this by running the `create_coco_tf_record.py` file in the TensorFlow models repo. +Follow [instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md#dependencies) to install the required dependencies (`cocoapi` and `Protobuf 3.0.0`). Follow the steps below to navigate to the proper directory and point the script to the raw COCO dataset files that you have downloaded in step 2. 
The `--output_dir` is the location where the TF record files will be From 3bffb2ae6c3882861eed080da013c453277d7ce1 Mon Sep 17 00:00:00 2001 From: Melanie Buehler Date: Mon, 29 Apr 2019 09:57:20 -0700 Subject: [PATCH 25/62] Update Int8 docs to reflect use of tcmalloc (#291) --- .../tensorflow/inception_resnet_v2/README.md | 7 ++++++- .../image_recognition/tensorflow/inceptionv3/README.md | 5 +++++ .../image_recognition/tensorflow/inceptionv4/README.md | 5 +++++ .../image_recognition/tensorflow/mobilenet_v1/README.md | 5 +++++ .../image_recognition/tensorflow/resnet101/README.md | 5 +++++ benchmarks/image_recognition/tensorflow/resnet50/README.md | 5 +++++ .../object_detection/tensorflow/faster_rcnn/README.md | 5 +++++ benchmarks/object_detection/tensorflow/rfcn/README.md | 5 +++++ .../object_detection/tensorflow/ssd-mobilenet/README.md | 5 +++++ benchmarks/object_detection/tensorflow/ssd_vgg16/README.md | 5 +++++ .../recommendation/tensorflow/wide_deep_large_ds/README.md | 5 +++++ 11 files changed, 56 insertions(+), 1 deletion(-) diff --git a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md index 7c9c246fc..4b0543e56 100644 --- a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md +++ b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md @@ -7,6 +7,11 @@ following modes/precisions: ## Int8 Inference Instructions +These instructions use the TCMalloc memory allocator, which produces +better performance results for Int8 precision models with smaller batch sizes. +If you want to disable the use of TCMalloc, set `--disable-tcmalloc=True` +when calling `launch_benchmark.py` and the script will run without TCMalloc. + 1. Clone this [intelai/models](https://github.com/IntelAI/models) repository: @@ -69,7 +74,7 @@ are required to run Inception ResNet V2 Int8. Inception ResNet V2 can be run for accuracy, latency benchmarking, or throughput benchmarking. Use one of the following examples below, depending on -your use case. +your use case. For accuracy (using your `--data-location`, `--accuracy-only` and `--batch-size 100`): diff --git a/benchmarks/image_recognition/tensorflow/inceptionv3/README.md b/benchmarks/image_recognition/tensorflow/inceptionv3/README.md index 1da257669..3e8cf2f0b 100644 --- a/benchmarks/image_recognition/tensorflow/inceptionv3/README.md +++ b/benchmarks/image_recognition/tensorflow/inceptionv3/README.md @@ -10,6 +10,11 @@ other precisions are coming later. ## Int8 Inference Instructions +These instructions use the TCMalloc memory allocator, which produces +better performance results for Int8 precision models with smaller batch sizes. +If you want to disable the use of TCMalloc, set `--disable-tcmalloc=True` +when calling `launch_benchmark.py` and the script will run without TCMalloc. + 1. Clone this [intelai/models](https://github.com/IntelAI/models) repository: diff --git a/benchmarks/image_recognition/tensorflow/inceptionv4/README.md b/benchmarks/image_recognition/tensorflow/inceptionv4/README.md index 13fb7c060..edb391d84 100644 --- a/benchmarks/image_recognition/tensorflow/inceptionv4/README.md +++ b/benchmarks/image_recognition/tensorflow/inceptionv4/README.md @@ -10,6 +10,11 @@ other precisions are coming later. ## Int8 Inference Instructions +These instructions use the TCMalloc memory allocator, which produces +better performance results for Int8 precision models with smaller batch sizes. 
+If you want to disable the use of TCMalloc, set `--disable-tcmalloc=True` +when calling `launch_benchmark.py` and the script will run without TCMalloc. + 1. Clone this [intelai/models](https://github.com/IntelAI/models) repository: ``` diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md index a32138d86..bc84ba6c2 100644 --- a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md +++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md @@ -11,6 +11,11 @@ later. ## Int8 Inference Instructions +These instructions use the TCMalloc memory allocator, which produces +better performance results for Int8 precision models with smaller batch sizes. +If you want to disable the use of TCMalloc, set `--disable-tcmalloc=True` +when calling `launch_benchmark.py` and the script will run without TCMalloc. + 1. Download ImageNet dataset. This step is required only for running accuracy, for running benchmark we do not need to provide dataset. diff --git a/benchmarks/image_recognition/tensorflow/resnet101/README.md b/benchmarks/image_recognition/tensorflow/resnet101/README.md index 7343f472a..4bb6a8ded 100644 --- a/benchmarks/image_recognition/tensorflow/resnet101/README.md +++ b/benchmarks/image_recognition/tensorflow/resnet101/README.md @@ -7,6 +7,11 @@ following modes/precisions: ## Int8 Inference Instructions +These instructions use the TCMalloc memory allocator, which produces +better performance results for Int8 precision models with smaller batch sizes. +If you want to disable the use of TCMalloc, set `--disable-tcmalloc=True` +when calling `launch_benchmark.py` and the script will run without TCMalloc. + 1. Clone this [intelai/models](https://github.com/IntelAI/models) repository: diff --git a/benchmarks/image_recognition/tensorflow/resnet50/README.md b/benchmarks/image_recognition/tensorflow/resnet50/README.md index a34a52139..5a666c6dd 100644 --- a/benchmarks/image_recognition/tensorflow/resnet50/README.md +++ b/benchmarks/image_recognition/tensorflow/resnet50/README.md @@ -10,6 +10,11 @@ precisions. ## Int8 Inference Instructions +These instructions use the TCMalloc memory allocator, which produces +better performance results for Int8 precision models with smaller batch sizes. +If you want to disable the use of TCMalloc, set `--disable-tcmalloc=True` +when calling `launch_benchmark.py` and the script will run without TCMalloc. + 1. Download the full ImageNet dataset and convert to the TF records format. * Clone the tensorflow/models repository: diff --git a/benchmarks/object_detection/tensorflow/faster_rcnn/README.md b/benchmarks/object_detection/tensorflow/faster_rcnn/README.md index cad38b7de..e69fba728 100644 --- a/benchmarks/object_detection/tensorflow/faster_rcnn/README.md +++ b/benchmarks/object_detection/tensorflow/faster_rcnn/README.md @@ -213,6 +213,11 @@ Log location outside container: {--output-dir value}/benchmark_faster_rcnn_infer ## Int8 Inference Instructions +These instructions use the TCMalloc memory allocator, which produces +better performance results for Int8 precision models with smaller batch sizes. +If you want to disable the use of TCMalloc, set `--disable-tcmalloc=True` +when calling `launch_benchmark.py` and the script will run without TCMalloc. + 1. Please follow step 1, 2 and 3 of Faster R-CNN FP32 instructions written above. 2. Download the pre-trained model. 
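The TCMalloc note repeated across these READMEs, together with the unit tests added earlier in this series, implies one precision-dependent rule for composing the launch command prefix: Int8 runs default to TCMalloc on, FP32 runs default to it off, and `--disable-tcmalloc` overrides either default. The sketch below is a minimal, hypothetical reconstruction of that rule for illustration; the function name, defaults, and glob pattern are assumptions, not the repository's actual `BaseModelInitializer` code.

```python
import glob


def get_command_prefix(precision, socket_id, disable_tcmalloc=None, numactl=True):
    """Hypothetical sketch of the precision-dependent prefix the tests assert."""
    prefix = ""
    # Int8 runs default to TCMalloc on; FP32 runs default to TCMalloc off,
    # unless the caller overrides the default with disable_tcmalloc.
    if disable_tcmalloc is None:
        disable_tcmalloc = (precision != "int8")
    if not disable_tcmalloc:
        tcmalloc_libs = glob.glob("/usr/lib/libtcmalloc*.so*")
        if tcmalloc_libs:
            prefix += "LD_PRELOAD={} ".format(tcmalloc_libs[0])
    # Pin to one socket with numactl unless pinning is disabled (socket_id=-1).
    if numactl and socket_id != -1:
        prefix += "numactl --cpunodebind={0} --membind={0} ".format(socket_id)
    return prefix


# Example: an Int8 run pinned to socket 0 gets both LD_PRELOAD and numactl.
print(get_command_prefix("int8", socket_id=0))
```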
diff --git a/benchmarks/object_detection/tensorflow/rfcn/README.md b/benchmarks/object_detection/tensorflow/rfcn/README.md index ec9fad2e6..f42ab9313 100644 --- a/benchmarks/object_detection/tensorflow/rfcn/README.md +++ b/benchmarks/object_detection/tensorflow/rfcn/README.md @@ -10,6 +10,11 @@ other precisions are coming later. ## Int8 Inference Instructions +These instructions use the TCMalloc memory allocator, which produces +better performance results for Int8 precision models with smaller batch sizes. +If you want to disable the use of TCMalloc, set `--disable-tcmalloc=True` +when calling `launch_benchmark.py` and the script will run without TCMalloc. + 1. Clone the [tensorflow/models](https://github.com/tensorflow/models) and [cocodataset/cocoapi](https://github.com/cocodataset/cocoapi) repositories: ``` diff --git a/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md b/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md index 6425640e5..33ac1d237 100644 --- a/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md +++ b/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md @@ -10,6 +10,11 @@ other precisions are coming later. ## Int8 Inference Instructions +These instructions use the TCMalloc memory allocator, which produces +better performance results for Int8 precision models with smaller batch sizes. +If you want to disable the use of TCMalloc, set `--disable-tcmalloc=True` +when calling `launch_benchmark.py` and the script will run without TCMalloc. + 1. Clone the [tensorflow/models](https://github.com/tensorflow/models) repository at the specified SHA and clone the [cocoapi repo](git clone https://github.com/cocodataset/cocoapi.git) in diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md b/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md index 514abe6a7..653d6a3ce 100644 --- a/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md +++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md @@ -10,6 +10,11 @@ other precisions are coming later. ## Int8 Inference Instructions +These instructions use the TCMalloc memory allocator, which produces +better performance results for Int8 precision models with smaller batch sizes. +If you want to disable the use of TCMalloc, set `--disable-tcmalloc=True` +when calling `launch_benchmark.py` and the script will run without TCMalloc. + 1. Clone the [original model](https://github.com/HiKapok/SSD.TensorFlow) repository: ``` $ git clone https://github.com/HiKapok/SSD.TensorFlow.git diff --git a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/README.md b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/README.md index ede163b61..41870f762 100755 --- a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/README.md +++ b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/README.md @@ -55,6 +55,11 @@ Benchmarking instructions and scripts for model training coming later. ## INT8 Inference Instructions +These instructions use the TCMalloc memory allocator, which produces +better performance results for Int8 precision models with smaller batch sizes. +If you want to disable the use of TCMalloc, set `--disable-tcmalloc=True` +when calling `launch_benchmark.py` and the script will run without TCMalloc. + 1. Download and extract the pre-trained model. 
``` wget https://storage.googleapis.com/intel-optimized-tensorflow/models/wide_deep_int8_pretrained_model.pb From b98fc4b14c13e8ed771249c25bee96660b1e2063 Mon Sep 17 00:00:00 2001 From: Wafaa Taie Date: Mon, 29 Apr 2019 17:00:11 -0700 Subject: [PATCH 26/62] add a reference publication for the ssd_vgg16 doc. (#295) --- benchmarks/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/README.md b/benchmarks/README.md index c8959af5a..d0f2a15b5 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -36,7 +36,7 @@ dependencies to be installed: | Object Detection | TensorFlow | [R-FCN](https://arxiv.org/pdf/1605.06409.pdf) | Inference | [Int8](object_detection/tensorflow/rfcn/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/rfcn/README.md#fp32-inference-instructions) | | Object Detection | TensorFlow | [Faster R-CNN](https://arxiv.org/pdf/1506.01497.pdf) | Inference | [Int8](object_detection/tensorflow/faster_rcnn/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/faster_rcnn/README.md#fp32-inference-instructions) | | Object Detection | TensorFlow | [SSD-MobileNet](https://arxiv.org/pdf/1704.04861.pdf) | Inference | [Int8](object_detection/tensorflow/ssd-mobilenet/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/ssd-mobilenet/README.md#fp32-inference-instructions) | -| Object Detection | TensorFlow | SSD-VGG16 | Inference | [Int8](object_detection/tensorflow/ssd_vgg16/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/ssd_vgg16/README.md#fp32-inference-instructions) | +| Object Detection | TensorFlow | [SSD-VGG16](https://arxiv.org/pdf/1512.02325.pdf) | Inference | [Int8](object_detection/tensorflow/ssd_vgg16/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/ssd_vgg16/README.md#fp32-inference-instructions) | | Object Detection | TensorFlow | [SSD-ResNet34](https://arxiv.org/pdf/1512.02325.pdf) | Inference | [FP32](object_detection/tensorflow/ssd-resnet34/README.md#fp32-inference-instructions) | | Recommendation | TensorFlow | [NCF](https://arxiv.org/pdf/1708.05031.pdf) | Inference | [FP32](recommendation/tensorflow/ncf/README.md#fp32-inference-instructions) | | Recommendation | TensorFlow | [Wide & Deep Large Dataset](https://arxiv.org/pdf/1606.07792.pdf) | Inference | [Int8](recommendation/tensorflow/wide_deep_large_ds/README.md#int8-inference-instructions) [FP32](recommendation/tensorflow/wide_deep_large_ds/README.md#fp32-inference-instructions) | From 6d068b7d09fd4df099f28c055a9ed5cb19faae03 Mon Sep 17 00:00:00 2001 From: Melanie Buehler Date: Tue, 30 Apr 2019 14:23:16 -0700 Subject: [PATCH 27/62] Fixes tutorial link and text (#296) --- docs/image_recognition/tensorflow/Tutorial.md | 2 +- docs/object_detection/tensorflow_serving/Tutorial.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/image_recognition/tensorflow/Tutorial.md b/docs/image_recognition/tensorflow/Tutorial.md index f31a49ff6..4fe43bb83 100644 --- a/docs/image_recognition/tensorflow/Tutorial.md +++ b/docs/image_recognition/tensorflow/Tutorial.md @@ -393,7 +393,7 @@ and to skip the run from reinstalling packages pass ```True``` to ```NOINSTALL`` NOINSTALL=True BATCH_SIZE=128 ./start.sh -All other flags will be defaulted to values passed in the first ```launch_benchmark.py``` that starts the container. [See here](google.com) to get the full list of flags. 
+All other flags will be defaulted to values passed in the first ```launch_benchmark.py``` that starts the container. [See here](/docs/general/tensorflow/LaunchBenchmark.md) to get the full list of flags. Example Output diff --git a/docs/object_detection/tensorflow_serving/Tutorial.md b/docs/object_detection/tensorflow_serving/Tutorial.md index 479a34aea..c464b2e3b 100644 --- a/docs/object_detection/tensorflow_serving/Tutorial.md +++ b/docs/object_detection/tensorflow_serving/Tutorial.md @@ -105,7 +105,7 @@ This tutorial assumes you have already: (rfcn_venv)$ cp rfcn_resnet101_fp32_coco/saved_model/saved_model.pb rfcn/1 ``` -4. **Discover the number of physical cores**: Compute *num_physical_cores* by executing the `lscpu` command and multiplying `Core(s) per socket` by `Socket(s)`. For example, for a machine with `Core(s) per socket: 28` and `Socket(s): 2`, `num_physical_cores = 28 * 2 = 56`. To compute *num_physical_cores* and *tf_session_parallelism* with bash commands: +4. **Discover the number of physical cores**: Compute *num_physical_cores* by executing the `lscpu` command and multiplying `Core(s) per socket` by `Socket(s)`. For example, for a machine with `Core(s) per socket: 28` and `Socket(s): 2`, `num_physical_cores = 28 * 2 = 56`. To compute *num_physical_cores* with bash commands: ``` (rfcn_venv)$ cores_per_socket=`lscpu | grep "Core(s) per socket" | cut -d':' -f2 | xargs` (rfcn_venv)$ num_sockets=`lscpu | grep "Socket(s)" | cut -d':' -f2 | xargs` From d2547e507d5963c4107a604485fe626a306b2679 Mon Sep 17 00:00:00 2001 From: Nathan Greeneltch Date: Wed, 1 May 2019 17:52:55 -0500 Subject: [PATCH 28/62] Adds TF Transformer-LT tutorial (#247) * submit for PR language translations tutorial * Removes .nfs files and adds model to main docs README * Deleted more .nfs files * Updated for legal, marketing, punctuation, and official model location * Improved code snippets and added transformer files to launch_benchmark guide * Fixed alignment in main doc README * Remove refs to RNN and LSTM and correct a sentence * Transformer_LT tutorial updates * Update Tutorial.md * Update Tutorial.md * Update Tutorial.md * Update Tutorial.md * Update Tutorial.md * Update Tutorial.md * Update Tutorial.md * Update Tutorial.md --- docs/README.md | 3 +- docs/general/tensorflow/LaunchBenchmark.md | 7 +- .../tensorflow/Tutorial.md | 266 ++++++++++++++++++ 3 files changed, 273 insertions(+), 3 deletions(-) create mode 100644 docs/language_translation/tensorflow/Tutorial.md diff --git a/docs/README.md b/docs/README.md index 7ade8475e..11e99bf97 100644 --- a/docs/README.md +++ b/docs/README.md @@ -12,7 +12,8 @@ ## Tutorials by Use Case * Inference with Intel® Optimization of Tensorflow: - * [Image Recognition](/docs/image_recognition/tensorflow/Tutorial.md) (ResNet50, ResNet101, and InceptionV3) + * [Image Recognition](/docs/image_recognition/tensorflow/Tutorial.md) (ResNet50, ResNet101, and InceptionV3) + * [Language Translation](/docs/language_translation/tensorflow/Tutorial.md) (Transformer-LT) * [Recommendation Systems](/docs/recommendation/tensorflow/Tutorial.md) (Wide and Deep) * Inference with Intel® Optimization of Tensorflow Serving: * [Image Recognition](/docs/image_recognition/tensorflow_serving/Tutorial.md) (ResNet50 and InceptionV3) diff --git a/docs/general/tensorflow/LaunchBenchmark.md b/docs/general/tensorflow/LaunchBenchmark.md index 017b303f1..59b9eb68d 100644 --- a/docs/general/tensorflow/LaunchBenchmark.md +++ b/docs/general/tensorflow/LaunchBenchmark.md @@ -29,8 +29,11 @@ Below the
general description is an [index of links](#model-scripts-for-tensorfl [inference](/models/image_recognition/tensorflow/resnet101/inference/eval_image_classifier_inference.py) | [preprocessing](/models/image_recognition/tensorflow/resnet101/inference/preprocessing.py) * InceptionV3: [init](/benchmarks/image_recognition/tensorflow/inceptionv3/inference/fp32/model_init.py) | - [inference](/models/image_recognition/tensorflow/inceptionv3/fp32/eval_image_classifier_inference.py) | - [preprocessing](/models/image_recognition/tensorflow/inceptionv3/fp32/preprocessing.py) + [inference](/models/image_recognition/tensorflow/inceptionv3/fp32/eval_image_classifier_inference.py) | + [preprocessing](/models/image_recognition/tensorflow/inceptionv3/fp32/preprocessing.py) +* Language Translation + * Transformer-LT: [init](/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/model_init.py) | + [inference](/models/language_translation/tensorflow/transformer_lt_official/inference/fp32/infer_ab.py) * Recommendation Systems * Wide and Deep: [init](/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/fp32/model_init.py) | [inference](/models/recommendation/tensorflow/wide_deep_large_ds/inference/inference.py) | diff --git a/docs/language_translation/tensorflow/Tutorial.md b/docs/language_translation/tensorflow/Tutorial.md new file mode 100644 index 000000000..13f827a50 --- /dev/null +++ b/docs/language_translation/tensorflow/Tutorial.md @@ -0,0 +1,266 @@ +# Language Translation with Transformer-LT + + +## Goal +This tutorial will introduce CPU performance considerations of the deep learning Transformer-LT model for language translation and how to use Intel® Optimizations for TensorFlow to improve inference time on CPUs. +This tutorial will also provide code examples to use Intel Model Zoo's pretrained English to German model that can be copy/pasted for quick off-the-ground implementation on real data. + +## Background +Language Translation with deep learning is a computationally expensive endeavor. This tutorial will show you how to reduce the inference runtime of your Transformer-LT network, a popular topology solution to translation. +It is based on an encoder-decoder architecture with an added attention mechanism. The encoder is used to encode the original sentence to a meaningful fixed-length vector, and the decoder is responsible for extracting the context data from the vector. +The encoder and decoder process the inputs and outputs, which are in the form of a time sequence. + +In a traditional encoder/decoder model, each element in the context vector is treated equally. This is typically not the ideal solution. +For instance, when you translate the phrase "I travel by train" from English into Chinese, the word "I" has a greater influence than other words when producing its counterpart in Chinese. +Thus, the attention mechanism was introduced to differentiate contributions of each element in the source sequence to their counterpart in the destination sequence, through the use of a hidden matrix. +This matrix contains weights of each element in the source sequence when producing elements in the destination sequence. + + +## Recommended Settings +In addition to TensorFlow optimizations that use the Intel® Math Kernel Library for Deep Neural Networks (Intel® MKL-DNN) to utilize instruction sets appropriately, the runtime settings also significantly contribute to improved performance.
+Tuning these options to optimize CPU workloads is vital to optimizing the performance of TensorFlow on Intel® processors. +Below is the set of run-time options tested empirically on Transformer-LT and recommended by Intel: + + +| Run-time options | Recommendations | +| ------------- | ------------- | +| Batch Size | 64. Regardless of the hardware | +| Hyperthreading | Enabled. Turn on in BIOS. Requires a restart. | +|intra_op_parallelism_threads |# physical cores | +|inter_op_parallelism_threads | 1 | +|NUMA Controls| --cpunodebind=0 --membind=0 | +|KMP_AFFINITY| KMP_AFFINITY=granularity=fine,verbose,compact,1,0| +|KMP_BLOCKTIME| 1 | +|OMP_NUM_THREADS |physical cores| + +Note 1: Refer to this [link](https://software.intel.com/en-us/articles/maximize-tensorflow-performance-on-cpu-considerations-and-recommendations-for-inference) to learn more about the run time options. + +Note 2: You can remove `verbose` from the `KMP_AFFINITY` setting to avoid verbose output at runtime. + +Run the following commands to get your processor information: + +a. #physical cores per socket: `lscpu | grep "Core(s) per socket" | cut -d':' -f2 | xargs` + +b. #all physical cores: `lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l` + +Below is a code snippet you can incorporate into your existing TensorFlow application to apply the recommended settings. +You can either set them in the CLI or in the Python script. Note that the inter and intra_op_parallelism_threads settings can only be set +in the Python script. + +```bash +export OMP_NUM_THREADS=<# physical cores> +export KMP_AFFINITY="granularity=fine,verbose,compact,1,0" +export KMP_BLOCKTIME=1 +export KMP_SETTINGS=1 +``` +(or) +``` +import os +import tensorflow as tf +os.environ["KMP_BLOCKTIME"] = "1" +os.environ["KMP_SETTINGS"] = "1" +os.environ["KMP_AFFINITY"]= "granularity=fine,verbose,compact,1,0" +os.environ["OMP_NUM_THREADS"]= <# physical cores> +config = tf.ConfigProto() +config.intra_op_parallelism_threads = <# physical cores> +config.inter_op_parallelism_threads = 1 +tf.Session(config=config) +``` + +## Hands-on Tutorial +This section shows how to measure inference performance on Intel's Model Zoo pretrained model (or your pretrained model) by setting the above-discussed run time flags. +### FP32 inference + +### Initial Setup + +1. The model source is based on a specific commit from the TensorFlow models repo. Follow the instructions below to clone an older commit into your home directory. + +``` +cd ~ +mkdir tensorflow-models +cd tensorflow-models +git clone https://github.com/tensorflow/models.git +cd models +git checkout 8367cf6dabe11adf7628541706b660821f397dce +``` + +2. Clone the IntelAI models repository into your home directory; skip this step if you already have it installed. + +```bash +cd ~ +git clone https://github.com/IntelAI/models.git +``` + +3. Skip to step 4 if you already have a pretrained model; otherwise, download the file `transformer_lt_official_fp32_pretrained_model.tar.gz` into your ~/transformer_LT_german location. +``` +mkdir ~/transformer_LT_german +cd ~/transformer_LT_german +wget https://storage.googleapis.com/intel-optimized-tensorflow/models/transformer_lt_official_fp32_pretrained_model.tar.gz +tar -xzvf transformer_lt_official_fp32_pretrained_model.tar.gz +``` + +4. After extraction, you should see the following folders and files in the `transformer_lt_official_fp32_pretrained_model` directory: +``` +$ ls -l transformer_lt_official_fp32_pretrained_model/* + +transformer_lt_official_fp32_pretrained_model/data: +total 1064 +-rw-r--r--.
1 359898 Feb 20 16:05 newstest2014.en +-rw-r--r--. 1 399406 Feb 20 16:05 newstest2014.de +-rw-r--r--. 1 324025 Mar 15 17:31 vocab.txt + +transformer_lt_official_fp32_pretrained_model/graph: +total 241540 +-rwx------. 1 247333269 Mar 15 17:29 fp32_graphdef.pb + +``` +`newstest2014.en`: Input file with English text
+`newstest2014.de`: German translation of the input file for measuring accuracy
+`vocab.txt`: A dictionary of vocabulary
+`fp32_graphdef.pb`: Pretrained model + +Or, if you have your own model/data, ensure that the folder structure follows the structure depicted below to run the pretrained model in the Intel Model Zoo. + +``` +├─ transformer_LT_german +│ ├── transformer_pretrained_model +│ ├── data +│ │ ├── newstest2014.en (Input file) +│ │ ├── newstest2014.de (Reference file, this is optional) +│ │ └── vocab.txt +│ └── graph +│ └── pretrained_model.pb +``` +5. Install [Docker](https://docs.docker.com/v17.09/engine/installation/) since the tutorial runs in a Docker container. + +### Run inference + +1. Pull the relevant Intel-optimized TensorFlow Docker image. + [Click here](https://software.intel.com/en-us/articles/intel-optimization-for-tensorflow-installation-guide) to find all the available Docker images. +```bash +docker pull docker.io/intelaipg/intel-optimized-tensorflow:latest +``` +2. cd to the inference script directory in the local IntelAI repo +```bash +cd ~/models/benchmarks +``` +3. Run the Python script ```launch_benchmark.py``` with the pretrained model. +The ```launch_benchmark.py``` script can be treated as an entry point to conveniently perform out-of-box high performance +inference on pretrained models of popular topologies. +The script will automatically set the recommended run-time options for supported topologies, +but if you choose to set your own options, refer to the full list of available flags and a detailed +explanation of the ```launch_benchmark.py``` script [here](/docs/general/tensorflow/LaunchBenchmark.md). + This step will automatically launch a new container on every run and terminate it when the run completes. Go to [Step 4](#step_4) to interactively run the script on the container. + +Substitute the `--model-source-dir` for the location where you cloned the +[tensorflow/models](https://github.com/tensorflow/models.git) repo + + +``` +~/tensorflow-models/models +``` +3.1. *Real Time inference* (using `--socket-id 0` and `--batch-size 1` for latency) + +If you wish to calculate the [BLEU](https://en.wikipedia.org/wiki/BLEU) metric to find out the machine-translation quality, pass the German reference file via the `reference` flag. +The `newstest2014.en` file must have only one sentence per line + + +console in: +```bash +python launch_benchmark.py \ + --model-name transformer_lt_official \ + --precision fp32 \ + --mode inference \ + --framework tensorflow \ + --batch-size 1 \ + --socket-id 0 \ + --docker-image intelaipg/intel-optimized-tensorflow:latest \ + --model-source-dir ~/tensorflow-models/models \ + --in-graph ~/transformer_LT_german/transformer_lt_official_fp32_pretrained_model/graph/fp32_graphdef.pb \ + --data-location ~/transformer_LT_german/transformer_lt_official_fp32_pretrained_model/data \ + -- file=newstest2014.en \ + vocab_file=vocab.txt \ + file_out=translate.txt \ + reference=newstest2014.de +``` + +The translated German text will be in the file `translate.txt` located at `~/models/benchmarks/common/tensorflow/logs` + +3.2.
*Max Throughput inference* (using `--socket-id 0` and `--batch-size 64` for throughput) + +```bash +python launch_benchmark.py \ + --model-name transformer_lt_official \ + --precision fp32 \ + --mode inference \ + --framework tensorflow \ + --batch-size 64 \ + --socket-id 0 \ + --docker-image intelaipg/intel-optimized-tensorflow:latest \ + --model-source-dir ~/tensorflow-models/models \ + --in-graph ~/transformer_LT_german/transformer_lt_official_fp32_pretrained_model/graph/fp32_graphdef.pb \ + --data-location ~/transformer_LT_german/transformer_lt_official_fp32_pretrained_model/data \ + -- file=newstest2014.en \ + vocab_file=vocab.txt \ + file_out=translate.txt \ + reference=newstest2014.de +``` +console out: +``` +Graph parsed in ..... s +import_graph_def took .....s +tokenizer took ..... s +Translating 3003 sentences from English to German. +Total inferencing time:.... +Throughput:.... sentences/second +Total number of sentences translated:3003 +I0419 22:50:49.856748 140013257643776 compute_bleu.py:106] Case-insensitive results: 27.510020 +I0419 22:50:51.203501 140013257643776 compute_bleu.py:110] Case-sensitive results: 26.964748 +Ran inference with batch size 64 +Log location outside container: /~/models/benchmarks/common/tensorflow/logs/benchmark_transformer_lt_official_inference_fp32_20190419_224047.log +``` + +The logs are captured in a directory outside of the container.
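To compare runs, the throughput and BLEU numbers can be scraped from that log file. The helper below is a hypothetical post-processing sketch, not part of the Model Zoo; its regular expressions assume the exact line formats shown in the sample console output above.

```python
import re

# Assumed log line formats, based on the sample console output above.
LOG_PATTERNS = {
    "throughput_sentences_per_sec": re.compile(r"Throughput:\s*([\d.]+)"),
    "bleu_case_insensitive": re.compile(r"Case-insensitive results:\s*([\d.]+)"),
    "bleu_case_sensitive": re.compile(r"Case-sensitive results:\s*([\d.]+)"),
}


def summarize_log(log_path):
    """Collect the metric lines from a single benchmark log into a dict."""
    results = {}
    with open(log_path) as log_file:
        for line in log_file:
            for name, pattern in LOG_PATTERNS.items():
                match = pattern.search(line)
                if match:
                    results[name] = float(match.group(1))
    return results


# Example usage with the log path printed at the end of a run:
# print(summarize_log("benchmark_transformer_lt_official_inference_fp32_20190419_224047.log"))
```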
+ +4. If you want to run the ```launch_benchmark.py``` interactively from within the docker container, add the flag ```--debug```. This will launch a docker container based on the ```--docker_image```, +perform the necessary installs, run the ```launch_benchmark.py``` script, and leave the container process running. As an example, this step will demonstrate real-time inference (--batch-size 1), but you can implement the same strategy for max throughput (--batch-size 64). + +console in: +```bash +python launch_benchmark.py \ + --model-name transformer_lt_official \ + --precision fp32 \ + --mode inference \ + --framework tensorflow \ + --batch-size 64 \ + --socket-id 0 \ + --docker-image intelaipg/intel-optimized-tensorflow:latest \ + --model-source-dir ~/tensorflow-models/models \ + --in-graph ~/transformer_LT_german/transformer_lt_official_fp32_pretrained_model/graph/fp32_graphdef.pb \ + --data-location ~/transformer_LT_german/transformer_lt_official_fp32_pretrained_model/data \ + --debug \ + -- file=newstest2014.en \ + vocab_file=vocab.txt \ + file_out=translate.txt \ + reference=newstest2014.de + +``` +console out: +```bash + lscpu_path_cmd = command -v lscpu + lscpu located here: b'/usr/bin/lscpu' + root@a78677f56d69:/workspace/benchmarks/common/tensorflow# +``` + +To rerun the benchmarking script, execute the ```start.sh``` bash script from your existing directory with the available flags, which in turn will run ```launch_benchmark.py```. For example, to rerun with a different batch size setting (batch size=64), run with ```BATCH_SIZE```, +and to skip reinstalling packages, pass ```True``` to ```NOINSTALL```. + +```bash + chmod +x ./start.sh +``` +```bash + NOINSTALL=True BATCH_SIZE=64 ./start.sh +``` + +All other flags will be defaulted to values passed in the first ```launch_benchmark.py``` that starts the container. [See here](/docs/general/tensorflow/LaunchBenchmark.md) to get the full list of flags. + + From 9f0ee3d24991e9713d548adc11efe26f5f4ec6b5 Mon Sep 17 00:00:00 2001 From: Wafaa Taie Date: Fri, 3 May 2019 13:49:49 -0700 Subject: [PATCH 29/62] Add instructions to download and convert coco dataset to TF records using a custom script (SSD-VGG16 model). (#298) * add instructions for how to download the coco dataset and convert it to tf records (using a custom script). --- .../tensorflow/ssd_vgg16/README.md | 195 +++++------------- .../inference/generate_coco_records.py | 185 +++++++++++++++++ 2 files changed, 237 insertions(+), 143 deletions(-) create mode 100755 models/object_detection/tensorflow/ssd_vgg16/inference/generate_coco_records.py diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md b/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md index 653d6a3ce..320223c95 100644 --- a/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md +++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md @@ -22,93 +22,80 @@ $ cd SSD.TensorFlow $ git checkout 2d8b0cb9b2e70281bf9dce438ff17ffa5e59075c ``` -2. Download the 2017 validation +2. Clone the [intelai/models](https://github.com/intelai/models) repository. +It will be used to run the SSD-VGG16 model accuracy and benchmark tests. + +3. Download the 2017 validation images and annotations of the [COCO dataset](http://cocodataset.org/#home): This is required if you would like to run the accuracy test, or the throughput and latency benchmark with real data. -The [TensorFlow models](https://github.com/tensorflow/models) repo will be used for -converting the coco dataset to the TF records format.
-Follow [instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md#dependencies) to install the required dependencies (`cocoapi` and `Protobuf 3.0.0`). ``` -$ mkdir val -$ cd val $ wget http://images.cocodataset.org/zips/val2017.zip $ unzip val2017.zip -$ cd .. ``` -Continue the instructions below to generate the -TF record file. +Download the validation annotations file: ``` -$ mkdir annotations -$ cd annotations $ wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip $ unzip annotations_trainval2017.zip -$ cd .. ``` -Since we are only using the validation dataset in this example, we will -create an empty directory and empty annotations json file to pass as the -train and test directories in the next step. -``` -$ mkdir empty_dir +4. Convert the COCO dataset to TF records format: -$ cd annotations -$ echo "{ \"images\": {}, \"categories\": {}}" > empty.json -$ cd .. -``` +We provide a script `generate_coco_records.py` to convert the raw dataset to the required TF records format. +* Some dependencies are required to run the script, such as `python3`, `TensorFlow`, and `tqdm`; the script also uses `SSD.TensorFlow/dataset` from the original model directory (from step 1). -3. Now that you have the raw COCO dataset, we need to convert it to the -TF records format in order to use it with the inference script. We will -do this by running the `create_coco_tf_record.py` file in the TensorFlow -models repo. +Follow the steps below to get the COCO TF records: -Follow the steps below to navigate to the proper directory and point the -script to the raw COCO dataset files that you have downloaded in step 2. -The `--output_dir` is the location where the TF record files will be -located after the script has completed. +* Copy the `generate_coco_records.py` script from `models/object_detection/tensorflow/ssd_vgg16/inference/generate_coco_records.py` +from the `models` directory (step 2) to `SSD.TensorFlow/dataset` in the original model directory (step 1). ``` -# We are going to use an older version of the conversion script to checkout the git commit -$ git clone https://github.com/tensorflow/models.git -$ cd models -$ git checkout 7a9934df2afdf95be9405b4e9f1f2480d748dc40 +$ cp /home//models/models/object_detection/tensorflow/ssd_vgg16/inference/generate_coco_records.py /home//SSD.TensorFlow/dataset +``` -$ cd research/object_detection/dataset_tools/ -$ python create_coco_tf_record.py --logtostderr \ - --train_image_dir="/home//coco/empty_dir" \ - --val_image_dir="/home//coco/val/val2017" \ - --test_image_dir="/home//coco/empty_dir" \ - --train_annotations_file="/home//coco/annotations/empty.json" \ - --val_annotations_file="/home//coco/annotations/instances_val2017.json" \ - --testdev_annotations_file="/home//coco/annotations/empty.json" \ - --output_dir="/home//coco/output" +* Create a directory for the output TF records: +``` +$ mkdir tf_records +``` -$ ll /home/myuser/coco/output -total 1598276 --rw-rw-r--. 1 0 Nov 2 21:46 coco_testdev.record --rw-rw-r--. 1 0 Nov 2 21:46 coco_train.record --rw-rw-r--. 1 818336740 Nov 2 21:46 coco_val.record +* Run the script to generate the TF records, passing the required `val` prefix, the raw COCO dataset, and the annotations file (step 3): +``` +$ cd /home//SSD.TensorFlow/dataset +$ python generate_coco_records.py \ +--image_path /home//val2017/ \ +--annotations_file /home//annotations/instances_val2017.json \ +--output_prefix val \ +--output_path /home//tf_records/ ``` -4.
Download the pretrained model: +Now, you can use the `/home//tf_records/` as the dataset location to run inference with real data, and test the model accuracy. +``` +$ ls -l /home//tf_records +total 792084 +-rw-r--r--. 1 170038836 Mar 17 21:35 val-00000-of-00005 +-rw-r--r--. 1 167260232 Mar 17 21:35 val-00001-of-00005 +-rw-r--r--. 1 167326957 Mar 17 21:35 val-00002-of-00005 +-rw-r--r--. 1 166289231 Mar 17 21:35 val-00003-of-00005 +-rw-r--r--. 1 140168531 Mar 17 21:35 val-00004-of-00005 +``` + +5. Download the pretrained model: ``` $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/ssdvgg16_int8_pretrained_model.pb ``` -5. Clone the [intelai/models](https://github.com/intelai/models) repo -and then run the benchmarking scripts for either benchmarking throughput +6. Navigate to the `benchmarks` directory (step 2), and run the benchmarking scripts for either benchmarking throughput and latency or accuracy. ``` -$ git clone git@github.com:IntelAI/models.git -$ cd benchmarks +$ cd models/benchmarks ``` * Run benchmarking for throughput and latency where the `--model-source-dir` is the model source directory from step 1, -and the `--in-graph` is the pretrained model graph from step 4, -if you specify the `--data-location` which is the path to the tf record file that you generated in step 3, +and the `--in-graph` is the pretrained model graph from step 5, +if you specify the `--data-location` which is the path to the tf record file that you generated in step 4, the benchmark will run with real data, otherwise dummy data will be used: ``` python launch_benchmark.py \ @@ -118,7 +105,7 @@ python launch_benchmark.py \ --framework tensorflow \ --docker-image intelaipg/intel-optimized-tensorflow:nightly-master-devel-mkl-py3 \ --model-source-dir /home//SSD.TensorFlow \ - --data-location /home//coco/output \ + --data-location /home//tf_records \ --in-graph /home//ssdvgg16_int8_pretrained_model.pb \ --batch-size 1 \ --socket-id 0 \ @@ -137,7 +124,7 @@ the model directory `SSD.TensorFlow` from step 1. $ git clone https://github.com/waleedka/coco.git ``` - * The `--data-location` is required, which is the path to the tf record file that you generated in step 3. + * The `--data-location` is required, which is the path to the tf record file that you generated in step 4. * Copy the annotation file `instances_val2017.json` (from step 3) to the dataset directory `/home//coco/output`. * Use the `--accuracy-only` flag: ``` @@ -148,7 +135,7 @@ python launch_benchmark.py \ --framework tensorflow \ --docker-image intelaipg/intel-optimized-tensorflow:nightly-master-devel-mkl-py3 \ --model-source-dir /home//SSD.TensorFlow \ - --data-location /home//coco/output \ + --data-location /home//tf_records \ --in-graph /home//ssdvgg16_int8_pretrained_model.pb \ --accuracy-only \ --batch-size 1 @@ -195,106 +182,28 @@ And here is a sample log file tail when running for accuracy: ## FP32 Inference Instructions -1. Clone the [original model](https://github.com/HiKapok/SSD.TensorFlow) repository: -``` -$ git clone https://github.com/HiKapok/SSD.TensorFlow.git -$ cd SSD.TensorFlow -$ git checkout 2d8b0cb9b2e70281bf9dce438ff17ffa5e59075c -``` - -2. Download the 2017 validation -[COCO dataset](http://cocodataset.org/#home) and annotations: - -This is required if you would like to run the accuracy test, -or the throughput and latency benchmark with real data. - -The [TensorFlow models](https://github.com/tensorflow/models) repo will be used for -converting the coco dataset to the TF records format. 
-Follow [instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md#dependencies) to install the required dependencies (`cocoapi` and `Protobuf 3.0.0`). -``` -$ mkdir val -$ cd val -$ wget http://images.cocodataset.org/zips/val2017.zip -$ unzip val2017.zip -$ cd .. -``` - -Continue the instructions below to generate the -TF record file. -``` -$ mkdir annotations -$ cd annotations -$ wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip -$ unzip annotations_trainval2017.zip -$ cd .. -``` - -Since we are only using the validation dataset in this example, we will -create an empty directory and empty annotations json file to pass as the -train and test directories in the next step. -``` -$ mkdir empty_dir - -$ cd annotations -$ echo "{ \"images\": {}, \"categories\": {}}" > empty.json -$ cd .. -``` - -3. Now that you have the raw COCO dataset, we need to convert it to the -TF records format in order to use it with the inference script. We will -do this by running the `create_coco_tf_record.py` file in the TensorFlow -models repo. - -Follow the steps below to navigate to the proper directory and point the -script to the raw COCO dataset files that you have downloaded in step 2. -The `--output_dir` is the location where the TF record files will be -located after the script has completed. - -``` -# We are going to use an older version of the conversion script to checkout the git commit -$ git clone https://github.com/tensorflow/models.git -$ cd models -$ git checkout 7a9934df2afdf95be9405b4e9f1f2480d748dc40 - -$ cd research/object_detection/dataset_tools/ -$ python create_coco_tf_record.py --logtostderr \ - --train_image_dir="/home//coco/empty_dir" \ - --val_image_dir="/home//coco/val/val2017" \ - --test_image_dir="/home//coco/empty_dir" \ - --train_annotations_file="/home//coco/annotations/empty.json" \ - --val_annotations_file="/home//coco/annotations/instances_val2017.json" \ - --testdev_annotations_file="/home//coco/annotations/empty.json" \ - --output_dir="/home//coco/output" - -$ ll /home/myuser/coco/output -total 1598276 --rw-rw-r--. 1 0 Nov 2 21:46 coco_testdev.record --rw-rw-r--. 1 0 Nov 2 21:46 coco_train.record --rw-rw-r--. 1 818336740 Nov 2 21:46 coco_val.record -``` +Use steps 1, 2, 3, and 4 above. -4. Download the pretrained model: +5. Download the pretrained model: ``` $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/ssdvgg16_fp32_pretrained_model.pb ``` -5. Clone the [intelai/models](https://github.com/intelai/models) repo -and then run the benchmarking scripts for either benchmarking throughput +6. Navigate to the `benchmarks` directory (step 2), and run the benchmarking scripts for either benchmarking throughput and latency or accuracy.
``` -$ git clone git@github.com:IntelAI/models.git -$ cd benchmarks +$ cd models/benchmarks ``` * Run benchmarking for throughput and latency where the `--model-source-dir` is the model source directory from step 1, -and the `--in-graph` is the pretrained model graph from step 4, -if you specify the `--data-location` which is the path to the tf record file that you generated in step 3, +and the `--in-graph` is the pretrained model graph from step 5, +if you specify the `--data-location` which is the path to the tf record file that you generated in step 4, the benchmark will run with real data, otherwise dummy data will be used: ``` $ cd /home//models/benchmarks $ python launch_benchmark.py \ - --data-location /home//coco/output \ + --data-location /home//tf_records \ --in-graph /home//ssdvgg16_fp32_pretrained_model.pb \ --model-source-dir /home//SSD.TensorFlow \ --model-name ssd_vgg16 \ @@ -330,7 +239,7 @@ python launch_benchmark.py \ --framework tensorflow \ --docker-image intelaipg/intel-optimized-tensorflow:nightly-master-devel-mkl-py3 \ --model-source-dir /home//SSD.TensorFlow \ - --data-location /home//coco/output \ + --data-location /home//tf_records \ --in-graph /home//ssdvgg16_fp32_pretrained_model.pb \ --accuracy-only \ --batch-size 1 diff --git a/models/object_detection/tensorflow/ssd_vgg16/inference/generate_coco_records.py b/models/object_detection/tensorflow/ssd_vgg16/inference/generate_coco_records.py new file mode 100755 index 000000000..6badc74a9 --- /dev/null +++ b/models/object_detection/tensorflow/ssd_vgg16/inference/generate_coco_records.py @@ -0,0 +1,185 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: EPL-2.0 +# + +import argparse +import os +import json +import numpy as np +from tqdm import tqdm +import tensorflow as tf +from convert_tfrecords import ImageCoder, _process_image, _int64_feature, _float_feature, _bytes_feature, _bytes_list_feature + + +def load_annotation_data(annotations_filename): + + # Load annotation data + with open(annotations_filename, 'r') as annotations_file: + data = json.load(annotations_file) + + # Create map of category IDs to category names + category_map = {} + for category_datum in data['categories']: + category_map[category_datum['id']] = category_datum['name'] + + # Create map of file IDs to annotation data + annotation_map = {} + for annotation_datum in data['annotations']: + image_id = annotation_datum['image_id'] + if (image_id not in annotation_map): + annotation_map[image_id] = [] + + # Add annotation datum for current image ID + annotation_map[image_id].append(annotation_datum) + + # Create map of file IDs to image data + image_map = {} + for image_datum in data['images']: + image_id = image_datum['id'] + if (image_id in annotation_map): + image_map[image_id] = image_datum + + return image_map, annotation_map, category_map + + +def get_annotation_data(image_data, annotation_data, category_map): + + LABEL_MAP = {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, + 13: 12, 14: 13, 15: 14, 16: 15, 17: 16, 18: 17, 19: 18, 20: 19, 21: 20, 22: 21, + 23: 22, 24: 23, 25: 24, 27: 25, 28: 26, 31: 27, 32: 28, 33: 29, 34: 30, 35: 31, + 36: 32, 37: 33, 38: 34, 39: 35, 40: 36, 41: 37, 42: 38, 43: 39, 44: 40, 46: 41, + 47: 42, 48: 43, 49: 44, 50: 45, 51: 46, 52: 47, 53: 48, 54: 49, 55: 50, 56: 51, + 57: 52, 58: 53, 59: 54, 60: 55, 61: 56, 62: 57, 63: 58, 64: 59, 65: 60, 67: 61, + 70: 62, 72: 63, 73: 64, 74: 65, 75: 66, 76: 67, 77: 68, 78: 69, 79: 70, 80: 71, + 81: 72, 82: 73, 84: 74, 85: 75, 86: 76, 87: 77, 88: 78, 89: 79, 90: 80} + + # Retrieve image width and height + image_width = image_data['width'] + image_height = image_data['height'] + + bboxes = [] + labels = [] + label_names = [] + difficult = [] + truncated = [] + for annotation_datum in annotation_data: + # Scale bounding box coordinates + # COCO bounding boxes are [x, y, width, height] but https://github.com/HiKapok/SSD.TensorFlow.git expects [ymin, xmin, ymax, xmax] + bbox = annotation_datum['bbox'] + ymin = bbox[1] / image_height + xmin = bbox[0] / image_width + ymax = (bbox[1] + bbox[3]) / image_height + xmax = (bbox[0] + bbox[2]) / image_width + bboxes.append([ymin, xmin, ymax, xmax]) + + labels.append(LABEL_MAP[annotation_datum['category_id']]) + label_names.append(category_map[annotation_datum['category_id']].encode('ascii')) + + # Append difficult and truncated flags + difficult.append(0) + truncated.append(0) + + return bboxes, labels, label_names, difficult, truncated + + +def get_record(filename, buffer, width, height, bboxes, labels, label_names, difficult, truncated): + + CHANNEL_COUNT = 3 + IMAGE_FORMAT = 'JPEG' + + # Extract bounding box coordinates + ymin = [] + xmin = [] + ymax = [] + xmax = [] + for bbox in bboxes: + ymin.append(bbox[0]) + xmin.append(bbox[1]) + ymax.append(bbox[2]) + xmax.append(bbox[3]) + + # Create record features + features = { + 'image/width': _int64_feature(width), + 'image/height': _int64_feature(height), + 'image/channels': _int64_feature(CHANNEL_COUNT), + 'image/shape': _int64_feature([height, width, CHANNEL_COUNT]), + 'image/object/bbox/xmin': _float_feature(xmin), + 'image/object/bbox/xmax': 
+        'image/object/bbox/ymin': _float_feature(ymin),
+        'image/object/bbox/ymax': _float_feature(ymax),
+        'image/object/bbox/label': _int64_feature(labels),
+        'image/object/bbox/label_text': _bytes_list_feature(label_names),
+        'image/object/bbox/difficult': _int64_feature(difficult),
+        'image/object/bbox/truncated': _int64_feature(truncated),
+        'image/format': _bytes_feature(IMAGE_FORMAT),
+        'image/filename': _bytes_feature(filename.encode('utf8')),
+        'image/encoded': _bytes_feature(buffer)}
+
+    return tf.train.Example(features = tf.train.Features(feature = features))
+
+
+def main():
+
+    RECORDS_PER_FILE = 1024
+    RECORD_FILENAME_FORMAT = '%s-%.5d-of-%.5d'
+
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--image_path', type = str, required = True, help = 'path to the input validation image files')
+    parser.add_argument('--annotations_file', type = str, required = True, help = 'name of the input validation annotations file')
+    parser.add_argument('--output_prefix', type = str, required = True, help = 'prefix of the output TensorFlow record files')
+    parser.add_argument('--output_path', type = str, required = True, help = 'path to the output TensorFlow record files')
+
+    args = parser.parse_args()
+
+    # Load annotation data
+    image_map, annotation_map, category_map = load_annotation_data(args.annotations_file)
+
+    # Create output path if necessary
+    if (not os.path.exists(args.output_path)):
+        os.makedirs(args.output_path)
+
+    # Create image coder
+    image_coder = ImageCoder()
+
+    record_file_index = 0
+    record_file_count = np.ceil(len(image_map) / RECORDS_PER_FILE).astype(int)
+    for index, image_id in tqdm(enumerate(image_map), desc = 'Generating', total = len(image_map), unit = ' file'):
+        # Create record writer, closing the previous one so its buffered records are flushed
+        if (index % RECORDS_PER_FILE == 0):
+            if (index > 0):
+                writer.close()
+            output_filename = os.path.join(args.output_path, RECORD_FILENAME_FORMAT % (args.output_prefix, record_file_index, record_file_count))
+            writer = tf.python_io.TFRecordWriter(output_filename)
+            record_file_index += 1
+
+        # Extract image data from current image file
+        image_filename = image_map[image_id]['file_name']
+        image_buffer, _, _ = _process_image(os.path.join(args.image_path, image_filename), image_coder)
+
+        # Retrieve annotation data associated with current image file
+        bboxes, labels, label_names, difficult, truncated = get_annotation_data(image_map[image_id], annotation_map[image_id], category_map)
+
+        # Write TF record for current image file
+        image_width, image_height = image_map[image_id]['width'], image_map[image_id]['height']
+        record = get_record(image_filename, image_buffer, image_width, image_height, bboxes, labels, label_names, difficult, truncated)
+        writer.write(record.SerializeToString())
+
+    # Close the last record writer so the final records are flushed to disk
+    writer.close()
+
+
+if __name__ == '__main__':
+
+    main()
\ No newline at end of file
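A quick worked example of the bounding-box conversion that `get_annotation_data` performs above: COCO annotations store `[x, y, width, height]` in pixels, while the SSD.TensorFlow record reader expects `[ymin, xmin, ymax, xmax]` normalized to the image size. The image size and box below are made-up numbers, not taken from the dataset:

```python
# Hypothetical 640x480 image with a COCO bbox of [x=100, y=120, w=200, h=150]
image_width, image_height = 640, 480
x, y, w, h = 100, 120, 200, 150

# Same conversion as get_annotation_data: normalize and reorder the corners
ymin = y / image_height          # 0.25
xmin = x / image_width           # 0.15625
ymax = (y + h) / image_height    # 0.5625
xmax = (x + w) / image_width     # 0.46875

print([ymin, xmin, ymax, xmax])  # [0.25, 0.15625, 0.5625, 0.46875]
```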
From 339e8bab17ad82d87a9ed328c7f6182dc37f7585 Mon Sep 17 00:00:00 2001
From: Wafaa Taie
Date: Mon, 6 May 2019 09:43:07 -0700
Subject: [PATCH 30/62] Use model-based JSON files for unit tests args (#294)

* add an example for a resnet50 json file.
* add a method to parse the models args json files, add resnet50 and facenet model args, tested and validated it.
* clean up parse csv code.
* convert some models args to json.
* modify the parsing method doc.
* add rfcn test and delete tf_model_args.txt file.
* add faster_rcnn test.
* add unit tests for more models.
* remove white spaces.
* add more tests.
* add more tests.
* fix int8 unit tests to work with memory alloc enabled, add ssdvgg16 unit test.
* update the Contribute.md
* add empty line at the end of the file.
* fix rfcn performance script.
* add densenet169 unit test.
* print out comments for each test.
* add the two missing tests.
---
 Contribute.md | 9 +-
 .../rfcn/inference/int8/model_init.py | 5 +-
 tests/test_utils/io.py | 22 +++-
 .../tensorflow/test_run_tf_benchmarks.py | 13 +--
 .../unit/common/tensorflow/tf_model_args.txt | 95 -------------------
 .../tf_model_args/tf_dcgan_args.json | 5 +
 .../tf_model_args/tf_densenet169_args.json | 15 +++
 .../tf_model_args/tf_draw_args.json | 15 +++
 .../tf_model_args/tf_facenet_args.json | 13 +++
 .../tf_model_args/tf_faster_rcnn_args.json | 28 ++++++
 .../tf_model_args/tf_gnmt_args.json | 11 +++
 .../tf_inception_resnet_v2_args.json | 27 ++++++
 .../tf_model_args/tf_inceptionv3_args.json | 44 +++++++++
 .../tf_model_args/tf_inceptionv4_args.json | 19 ++++
 .../tf_model_args/tf_lm_1b_args.json | 7 ++
 .../tf_model_args/tf_maskrcnn_args.json | 11 +++
 .../tf_model_args/tf_mobilenet_v1_args.json | 36 +++++++
 .../tf_model_args/tf_mtcc_args.json | 5 +
 .../tensorflow/tf_model_args/tf_ncf_args.json | 15 +++
 .../tf_model_args/tf_resnet101_args.json | 17 ++++
 .../tf_model_args/tf_resnet50_args.json | 40 ++++++++
 .../tf_model_args/tf_rfcn_args.json | 17 ++++
 .../tf_model_args/tf_squeezenet_args.json | 11 +++
 .../tf_model_args/tf_ssd_mobilenet_args.json | 17 ++++
 .../tf_model_args/tf_ssd_resnet34_args.json | 11 +++
 .../tf_model_args/tf_ssd_vgg16_args.json | 17 ++++
 .../tf_transformer_language_args.json | 9 ++
 .../tf_transformer_lt_official_args.json | 9 ++
 .../tf_model_args/tf_unet_args.json | 7 ++
 .../tf_model_args/tf_wavenet_args.json | 7 ++
 .../tf_model_args/tf_wide_deep_args.json | 5 +
 .../tf_wide_deep_large_ds_args.json | 27 ++++++
 32 files changed, 471 insertions(+), 118 deletions(-)
 delete mode 100644 tests/unit/common/tensorflow/tf_model_args.txt
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_dcgan_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_densenet169_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_draw_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_facenet_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_faster_rcnn_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_gnmt_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_inception_resnet_v2_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_inceptionv3_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_inceptionv4_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_lm_1b_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_maskrcnn_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_mobilenet_v1_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_mtcc_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_ncf_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_resnet101_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_resnet50_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_rfcn_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_squeezenet_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_ssd_mobilenet_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_ssd_resnet34_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_ssd_vgg16_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_transformer_language_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_transformer_lt_official_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_unet_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_wavenet_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_wide_deep_args.json
 create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_wide_deep_large_ds_args.json

diff --git a/Contribute.md b/Contribute.md
index 55d2b7e44..73c58e8af 100644
--- a/Contribute.md
+++ b/Contribute.md
@@ -177,10 +177,11 @@ developing new scripts:
arguments.

To add a new parameterized instance of the test for your
-new model, update the [tf_models_args.txt](/tests/unit/common/tensorflow/tf_model_args.txt)
-file.  This file has comma-separated values where each row has two
-items: (1) the `run_tf_benchmarks.py` command with the appropriate
-flags to run the model (2) the expected inference or training
+new model, add a new JSON file `tf_<model>_args.json` to the [tf_model_args](/tests/unit/common/tensorflow/tf_model_args)
+directory.  Each file has a list of dictionaries; each dictionary has three
+items: (1) `_comment`, a comment that describes the command,
+(2) `input`, the `run_tf_benchmarks.py` command with the appropriate
+flags to run the model, and (3) `output`, the expected inference or training
command that should get run by the `model_init.py` file.
* If any launch script or base class files were changed, then additional unit tests should be added.

diff --git a/benchmarks/object_detection/tensorflow/rfcn/inference/int8/model_init.py b/benchmarks/object_detection/tensorflow/rfcn/inference/int8/model_init.py
index 4f2a29ab4..f52eed9b4 100755
--- a/benchmarks/object_detection/tensorflow/rfcn/inference/int8/model_init.py
+++ b/benchmarks/object_detection/tensorflow/rfcn/inference/int8/model_init.py
@@ -54,9 +54,6 @@ def __init__(self, args, custom_args=[], platform_util=None):
        self.parse_args()

-        # Get the command previx, but numactl is added later in run_perf_command()
-        self.command.append(self.get_command_prefix(self.args.socket_id, numactl=False))
-
        # Set KMP env vars, if they haven't already been set
        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
        self.set_kmp_vars(config_file_path)
@@ -114,6 +111,8 @@ def validate_args(self):
                format(self.args.model_source_dir))

    def run_perf_command(self):
+        # Get the command prefix; numactl gets added to the command separately below
+        self.command.append(self.get_command_prefix(self.args.socket_id, numactl=False))
        num_cores = str(self.platform_util.num_cores_per_socket)
        if self.args.num_cores != -1:
            num_cores = str(self.args.num_cores)
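To make the Contribute.md description above concrete, a `tf_<model>_args.json` file is just a JSON list of such dictionaries. A minimal sketch of one entry, abridged from one of the resnet50 rows in the deleted `tf_model_args.txt` further below (the `_comment` text here is illustrative, and the real `tf_resnet50_args.json` holds several such entries):

```json
[
    {
        "_comment": "resnet50 fp32 inference, batch size 128, frozen graph",
        "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 128 --in-graph /freezed_resnet50.pb --intelai-models . --socket-id 0 --verbose",
        "output": "numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28"
    }
]
```

The `parse_json_files` helper in the io.py diff that follows flattens every such entry into an `(input, output, comment)` tuple, which `test_run_tf_benchmarks.py` then feeds to `pytest.mark.parametrize`.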
diff --git a/tests/test_utils/io.py b/tests/test_utils/io.py
index 50f8e5e61..5ec580f94 100644
--- a/tests/test_utils/io.py
+++ b/tests/test_utils/io.py
@@ -18,19 +18,21 @@
# SPDX-License-Identifier: EPL-2.0
#

-import csv
+import os
+import json


-def parse_csv_file(file_path, expected_num_columns):
+def parse_json_files(json_dir_path):
    """
-    Reads the specified csv file.  Checks for a value number of columns in
-    each row.  Returns the csv file values as a list of tuples.
+    Reads the JSON files in the specified directory and returns their
+    values as a list of (input, output, comment) tuples.
    """
    values = []
-    with open(file_path) as csv_file:
-        csv_reader = csv.reader(csv_file, delimiter=',',
-                                skipinitialspace=True)
-        for row in csv_reader:
-            assert len(row) == expected_num_columns
-            values.append(tuple(row))
+    for model_file in os.listdir(json_dir_path):
+        file_path = os.path.join(json_dir_path, model_file)
+        with open(file_path) as f:
+            data = json.load(f)
+            for x in data:
+                values.append(
+                    tuple((x['input'], x['output'], model_file + " :: " + x['_comment'])))

    return values
diff --git a/tests/unit/common/tensorflow/test_run_tf_benchmarks.py b/tests/unit/common/tensorflow/test_run_tf_benchmarks.py
index dba3e6ca6..0b58ac411 100644
--- a/tests/unit/common/tensorflow/test_run_tf_benchmarks.py
+++ b/tests/unit/common/tensorflow/test_run_tf_benchmarks.py
@@ -28,19 +28,19 @@
from benchmarks.common.tensorflow.run_tf_benchmark import ModelBenchmarkUtil
from test_utils import platform_config
-from test_utils.io import parse_csv_file
+from test_utils.io import parse_json_files


def parse_model_args_file():
    """
-    Gets test args from the tf_model_args.txt file to use as parameters
+    Gets test args from the model args JSON files in the tf_model_args directory to use as parameters
    for testing model benchmarking scripts.  The file has a
    run_tf_benchmarks.py command with args with the corresponding run
    command that should get called from model_init.py
    """
    current_dir = os.path.dirname(os.path.realpath(__file__))
-    csv_file_path = os.path.join(current_dir, "tf_model_args.txt")
-    return parse_csv_file(csv_file_path, 2)
+    models_args_path = os.path.join(current_dir, "tf_model_args")
+    return parse_json_files(models_args_path)


def delete_env_var(env_var):
@@ -63,7 +63,7 @@ def clear_kmp_env_vars():
test_arg_values = parse_model_args_file()


-@pytest.mark.parametrize("test_args,expected_cmd", test_arg_values)
+@pytest.mark.parametrize("test_args,expected_cmd,comment", test_arg_values)
@patch("os.mkdir")
@patch("shutil.rmtree")
@patch("os.listdir")
@@ -81,12 +81,13 @@ def clear_kmp_env_vars():
def test_run_benchmark(mock_run_command, mock_subprocess, mock_platform,
                       mock_os, mock_glob, mock_remove, mock_chdir, mock_stat,
                       mock_path_exists, mock_is_file, mock_is_dir, mock_listdir, mock_rmtree, mock_mkdir,
-                       test_args, expected_cmd):
+                       test_args, expected_cmd, comment):
    """
    Runs through executing the specified run_tf_benchmarks.py command
    from the test_args and verifying that the model_init file calls
    run_command with the expected_cmd string.
    """
+    print("****** Running the {} test ******".format(comment))
    os.environ["PYTHON_EXE"] = "python"
    mock_path_exists.return_value = True
    mock_is_dir.return_value = True
diff --git a/tests/unit/common/tensorflow/tf_model_args.txt b/tests/unit/common/tensorflow/tf_model_args.txt
deleted file mode 100644
index 386d1185c..000000000
--- a/tests/unit/common/tensorflow/tf_model_args.txt
+++ /dev/null
@@ -1,95 +0,0 @@
-run_tf_benchmark.py --framework tensorflow --use-case recommendation --precision fp32 --mode inference --model-name wide_deep --batch-size 1024 --data-location /dataset --checkpoint /checkpoints --intelai-models .
--verbose,OMP_NUM_THREADS=1 numactl --cpunodebind=0 --membind=0 python inference/fp32/wide_deep_inference.py --data_dir=/dataset --model_dir=/checkpoints --batch_size=1024 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb, numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/benchmark.py --num_intra_threads=28 --num_inter_threads=1 --batch_size=100 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb, numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/benchmark.py --num_intra_threads=28 --num_inter_threads=1 --batch_size=1 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb --data-location=/dataset, numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/accuracy.py --num_intra_threads=28 --num_inter_threads=1 --batch_size=100 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb --data_location=/dataset -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 100 --in-graph /final_int8_inceptionv3.pb --intelai-models . --accuracy-only --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python ./int8/accuracy.py --num_intra_threads=56 --num_inter_threads=2 --batch_size=100 --input_graph=/final_int8_inceptionv3.pb -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 1 --in-graph /final_int8_inceptionv3.pb --intelai-models . --benchmark-only --socket-id 0 --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./int8/benchmark.py --warmup_steps=10 --num_intra_threads=28 --num_inter_threads=1 --batch_size=1 --input_graph=/final_int8_inceptionv3.pb --steps=50 --num_cores=28 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /final_int8_inceptionv3.pb --intelai-models . 
--benchmark-only --socket-id 0 --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./int8/benchmark.py --warmup_steps=10 --num_intra_threads=28 --num_inter_threads=1 --batch_size=128 --input_graph=/final_int8_inceptionv3.pb --steps=50 --num_cores=28 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /final_int8_inceptionv3.pb --intelai-models . --benchmark-only --socket-id 0 --disable-tcmalloc=True,numactl --cpunodebind=0 --membind=0 python ./int8/benchmark.py --warmup_steps=10 --num_intra_threads=28 --num_inter_threads=1 --batch_size=128 --input_graph=/final_int8_inceptionv3.pb --steps=50 --num_cores=28 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inceptionv3 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/inception_frozen_max_min.pb --steps=200 --warmup-steps=20,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/int8/benchmark.py --warmup_steps=20 --num_intra_threads=28 --num_inter_threads=1 --batch_size=128 --input_graph=/in_graph/inception_frozen_max_min.pb --steps=200 --num_cores=28 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inceptionv3 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/inception_frozen_max_min.pb --steps=200 --warmup-steps=20,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/int8/benchmark.py --warmup_steps=20 --num_intra_threads=28 --num_inter_threads=1 --batch_size=1 --input_graph=/in_graph/inception_frozen_max_min.pb --steps=200 --num_cores=28 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 1 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --socket-id 0 --verbose,numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --socket-id 0 --verbose,numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . 
--socket-id 0 --verbose --num-inter-threads 4 --num-intra-threads 16,numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=4 --num-intra-threads=16 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 100 --accuracy-only --data-location /dataset --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --verbose,python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=2 --num-intra-threads=56 --num-cores=28 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/accuracy.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28 --data_location=/dataset -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=fp32 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/inceptionv4_fp32_pretrained_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/accuracy.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_fp32_pretrained_model.pb --num_intra_threads=28 --data_location=/dataset -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/benchmark.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/benchmark.py --batch_size=1 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models 
--num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --data-location=/dataset,python /workspace/intelai_models/eval_image_classifier_accuracy.py --input_graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --data_location=/dataset --input_height=299 --input_width=299 --num_inter_threads=2 --num_intra_threads=56 --output_layer=InceptionResnetV2/Logits/Predictions --batch_size=100 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --inter-op-parallelism-threads=2 --intra-op-parallelism-threads=28 --batch-size=1 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --inter-op-parallelism-threads=2 --intra-op-parallelism-threads=28 --batch-size=128 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/eval_image_classifier_accuracy.py --input_graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data_location=/dataset --input_height=299 --input_width=299 --num_inter_threads=2 --num_intra_threads=56 --output_layer=InceptionResnetV2/Logits/Predictions --batch_size=100 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --inter-op-parallelism-threads=1 --intra-op-parallelism-threads=28 --batch-size=1 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 
--batch-size=128 --socket-id=0 --benchmark-only --verbose --in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --inter-op-parallelism-threads=1 --intra-op-parallelism-threads=28 --batch-size=128 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/resnet50_int8_pretrained_model.pb --data-location=/dataset --calibration-only,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/int8/generate_calibration_data.py --num_intra_threads=56 --num_inter_threads=2 --batch_size=100 --input_graph=/in_graph/resnet50_int8_pretrained_model.pb --data_location=/dataset -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name resnet50 --batch-size 100 --data-location /dataset --in-graph /final_int8_resnet50.pb --intelai-models . --accuracy-only --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python ./inference/eval_image_classifier_inference.py --input-graph=/final_int8_resnet50.pb --num-inter-threads=2 --num-intra-threads=56 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name resnet50 --batch-size 128 --in-graph /final_int8_resnet50.pb --intelai-models . --benchmark-only --verbose,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python ./inference/eval_image_classifier_inference.py --input-graph=/final_int8_resnet50.pb --num-inter-threads=2 --num-intra-threads=56 --batch-size=128 --warmup-steps=10 --steps=50 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/resnet50_int8_pretrained_model.pb --steps=200 --warmup-steps=20,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/resnet50_int8_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=20 --steps=200 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 1 --in-graph /freezed_resnet50.pb --intelai-models . 
--socket-id 0 --verbose,numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 128 --in-graph /freezed_resnet50.pb --intelai-models . --socket-id 0 --verbose,numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 1 --in-graph /freezed_resnet50.pb --intelai-models . --socket-id 0 --verbose --num-inter-threads 4 --num-intra-threads 16,numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=4 --num-intra-threads=16 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --batch-size 100 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/freezed_resnet50.pb --accuracy-only --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=100 --data-location=/dataset --accuracy-only --num-cores=28 --warmup-steps=10 --steps=50 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --output-results --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/resnet50_fp32_pretrained_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/resnet50_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --num-cores=28 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --results-file-path /workspace/benchmarks/common/tensorflow/logs/resnet50_fp32_inference_results*.txt -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name squeezenet --batch-size 64 --checkpoint /checkpoints --intelai-models . --socket-id 0 --verbose,taskset -c 0-27 python ./fp32/train_squeezenet.py --data_location None --batch_size 64 --num_inter_threads 1 --num_intra_threads 28 --model_dir /checkpoints --inference-only --verbose -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name squeezenet --batch-size 1 --checkpoint /checkpoints --intelai-models . 
--socket-id 0 --verbose,taskset -c 0-27 python ./fp32/train_squeezenet.py --data_location None --batch_size 1 --num_inter_threads 1 --num_intra_threads 28 --model_dir /checkpoints --inference-only --verbose -run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name faster_rcnn --checkpoint /checkpoints --intelai-models . --model-source-dir . --socket-id 0 --verbose --config_file=pipeline.config,numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --num_inter_threads 1 --num_intra_threads 28 --pipeline_config_path /checkpoints/pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/log/eval -run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name faster_rcnn --checkpoint /checkpoints --intelai-models . --model-source-dir . --socket-id 0 --verbose --config_file=pipeline.config --num-inter-threads 4 --num-intra-threads 16,numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --num_inter_threads 4 --num_intra_threads 16 --pipeline_config_path /checkpoints/pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/log/eval -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=-1 --accuracy-only --verbose --checkpoint=/checkpoints --data-location=/dataset --in-graph=/in_graph/frozen_inference_graph.pb,sh /workspace/intelai_models/inference/fp32/coco_accuracy.sh /in_graph/frozen_inference_graph.pb /dataset/coco_val.record /workspace/models -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb --data-location=/dataset, sh /workspace/intelai_models/inference/int8/coco_int8.sh /in_graph/ssdmobilenet_int8_pretrained_model.pb /dataset -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --batch-size=1 --socket-id 0 --data-location=/dataset --verbose --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb --benchmark-only --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/run_frozen_graph_ssdmob.py -g /in_graph/ssdmobilenet_int8_pretrained_model.pb -n 5000 -d /dataset -x --num-inter-threads 2 --num-intra-threads 28 -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 --benchmark-only --verbose --in-graph=/in_graph/frozen_inference_graph.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python 
/workspace/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/infer_detections.py --input_tfrecord_paths=/dataset --output_tfrecord_path=/SSD-mobilenet-out.tfrecord --inference_graph=/in_graph/frozen_inference_graph.pb --discard_image_pixels=True --num_inter_threads=2 --num_intra_threads=28 -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 --accuracy-only --verbose --in-graph=/in_graph/frozen_inference_graph.pb --benchmark-dir=/workspace/benchmarks --data-location=/dataset,sh /workspace/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/ssdmobilenet_accuracy.sh /in_graph/frozen_inference_graph.pb /dataset -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-resnet34 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssd_resnet34_bs1.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/infer_detections.py --input-graph /in_graph/ssd_resnet34_bs1.pb --batch-size 1 --inter-op-parallelism-threads 1 --intra-op-parallelism-threads 28 --accuracy-only --data-location /dataset -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-resnet34 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssd_resnet34_bs1.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/infer_detections.py --input-graph /in_graph/ssd_resnet34_bs1.pb --batch-size 1 --inter-op-parallelism-threads 1 --intra-op-parallelism-threads 28 -run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name rfcn --checkpoint /checkpoints --intelai-models . --model-source-dir . 
--socket-id 0 --verbose --config_file=rfcn_pipeline.config,numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --inter_op 1 --intra_op 28 --omp 28 --pipeline_config_path /checkpoints/rfcn_pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/models/rfcn/eval --logtostderr --blocktime=0 --run_once=True -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=rfcn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --verbose --in-graph=/in_graph/frozen_inference_graph.pb --data-location=/dataset --accuracy-only --split=accuracy_message,FROZEN_GRAPH=/in_graph/frozen_inference_graph.pb TF_RECORD_FILE=/dataset SPLIT=accuracy_message TF_MODELS_ROOT=/workspace/models /workspace/intelai_models/inference/fp32/coco_mAP.sh -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=rfcn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --verbose --in-graph=/in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb --data-location=/dataset --benchmark-only --number_of_steps=500,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/int8/run_rfcn_inference.py -m /workspace/models -g /in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb -x 500 -d /dataset --num-inter-threads 2 --num-intra-threads 56 -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=rfcn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --verbose --in-graph=/in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb --data-location=/dataset --accuracy-only --split=accuracy_message,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 FROZEN_GRAPH=/in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb TF_RECORD_FILE=/dataset SPLIT=accuracy_message TF_MODELS_ROOT=/workspace/models /workspace/intelai_models/inference/int8/coco_mAP.sh -run_tf_benchmark.py --framework tensorflow --use-case text_to_speech --precision fp32 --mode inference --model-name wavenet --num-cores 1 --checkpoint /checkpoints --intelai-models . --model-source-dir . 
--socket-id 0 --verbose --checkpoint_name=model.ckpt-99 --sample=8510,numactl --physcpubind=0-0 --membind=0 python generate.py /checkpoints/model.ckpt-99 --num_inter_threads=1 --num_intra_threads=1 --sample=8510 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=128 --num-inter-threads=1 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=28 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --accuracy-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb --data-location=/dataset,python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=128 --num-inter-threads=2 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=56 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=128 --num-inter-threads=1 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=28 --warmup-steps=10 --steps=50 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=1 --num-inter-threads=1 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=28 --warmup-steps=10 --steps=50 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/resnet101_int8_model.pb,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=1 --num-intra-threads=28 --num-inter-threads=1 --input-graph=/in_graph/resnet101_int8_model.pb --warmup-steps=40 --steps=100 -"run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_language --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only 
--verbose --checkpoint=/checkpoints --data-location=/dataset --decode_from_file=/checkpoints/newstest2015.en --reference=/checkpoints/newstest2015.de","numactl --cpunodebind=0 --membind=0 python /workspace/models/tensor2tensor/bin/t2t_decoder.py --problem=translate_ende_wmt32k --model=transformer --hparams_set=transformer_base_single_gpu --decode_hparams=beam_size=4,alpha=0.6,batch_size=1 --data_dir=/dataset --output_dir=/checkpoints --decode_from_file=/checkpoints/newstest2015.en --decode_to_file=/workspace/models/out_dir/output_infer --reference=/checkpoints/newstest2015.de --inter_op_parallelism_threads=1 --intra_op_parallelism_threads=28" -"run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_language --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=32 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --decode_from_file=/checkpoints/newstest2015.en --reference=/checkpoints/newstest2015.de","numactl --cpunodebind=0 --membind=0 python /workspace/models/tensor2tensor/bin/t2t_decoder.py --problem=translate_ende_wmt32k --model=transformer --hparams_set=transformer_base_single_gpu --decode_hparams=beam_size=4,alpha=0.6,batch_size=32 --data_dir=/dataset --output_dir=/checkpoints --decode_from_file=/checkpoints/newstest2015.en --decode_to_file=/workspace/models/out_dir/output_infer --reference=/checkpoints/newstest2015.de --inter_op_parallelism_threads=1 --intra_op_parallelism_threads=28" -run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_lt_official --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --file=/dataset/newstest2014.en --reference=/dataset/newstest2014.de --vocab_file=/dataset/vocab.txt --in_graph=/in_graph/fp32_graphdef.pb,numactl --cpunodebind=0 --membind=0 python /workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=1 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt -run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_lt_official --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=64 --socket-id=0 --benchmark-only --verbose --file=/dataset/newstest2014.en --reference=/dataset/newstest2014.de --vocab_file=/dataset/vocab.txt --in_graph=/in_graph/fp32_graphdef.pb,numactl --cpunodebind=0 --membind=0 python /workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=64 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models 
--in-graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --data-location=/dataset --input_height=224 --input_width=224,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/accuracy.py --input_height=224 --input_width=224 --num_intra_threads=28 --output_layer=MobilenetV1/Predictions/Reshape_1 --num_inter_threads=1 --batch_size=100 --input_graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --data_location=/dataset --input_layer=input -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_height=224 --input_width=224 --warmup_steps=10 --steps=50,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/benchmark.py --input_height=224 --input_width=224 --warmup_steps=10 --num_intra_threads=28 --output_layer=MobilenetV1/Predictions/Reshape_1 --num_inter_threads=1 --batch_size=1 --input_graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_layer=input --steps=50 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_height=224 --input_width=224 --warmup_steps=10 --steps=50,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/benchmark.py --input_height=224 --input_width=224 --warmup_steps=10 --num_intra_threads=28 --output_layer=MobilenetV1/Predictions/Reshape_1 --num_inter_threads=1 --batch_size=240 --input_graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_layer=input --steps=50 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 -l python /workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_dir /dataset --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 1 -python common/tensorflow/run_tf_benchmark.py --framework=tensorflow --use-case=language_modeling --model-name=lm-1b --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models,numactl --cpunodebind=0 --membind=0 python /workspace/models/benchmark.py -b=1 -I=100 --inter=1 --intra=28 -run_tf_benchmark.py 
--framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 -l python /workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_dir /dataset --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 100
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints,numactl --cpunodebind=0 -l python /workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 100
-run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --checkpoint=/checkpoints --in-graph=/in_graph/mobilenet_v1_1.0_224_frozen.pb --data-location=/dataset,python /workspace/intelai_models/inference/fp32/accuracy.py --batch_size=100 --num_inter_threads=2 --input_graph=/in_graph/mobilenet_v1_1.0_224_frozen.pb --num_intra_threads=56 --data_location=/dataset
-run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=256 --socket-id 0 --accuracy-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=256 --inference_only --accuracy_only
-run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=256 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=256 --inference_only --benchmark_only
-run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=1 --inference_only --benchmark_only
-run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=maskrcnn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/coco.py evaluate --dataset=/dataset --num_inter_threads 1 --num_intra_threads 28 --nw 5 --nb 50 --model=coco --infbs 1
-run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=maskrcnn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --data-location=/dataset --num-inter-threads 4 --num-intra-threads 16,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/coco.py evaluate --dataset=/dataset --num_inter_threads 4 --num_intra_threads 16 --nw 5 --nb 50 --model=coco --infbs 1
-run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=-1 --accuracy-only --verbose --in-graph=/in_graph/pretrained_int8_faster_rcnn_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 /workspace/intelai_models/inference/int8/coco_int8.sh /in_graph/pretrained_int8_faster_rcnn_model.pb /dataset /workspace/models
-run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=-1 --benchmark-only --verbose --in-graph=/in_graph/pretrained_int8_faster_rcnn_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/int8/run_frozen_graph_rcnn.py -g /in_graph/pretrained_int8_faster_rcnn_model.pb -d /dataset --num-inter-threads 2 --num-intra-threads 56
-run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=-1 --benchmark-only --verbose --in-graph=/in_graph/pretrained_int8_faster_rcnn_model.pb --data-location=/dataset --number-of-steps=500,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/int8/run_frozen_graph_rcnn.py -g /in_graph/pretrained_int8_faster_rcnn_model.pb -n 500 -d /dataset --num-inter-threads 2 --num-intra-threads 56
-run_tf_benchmark.py --framework=tensorflow --use-case=adversarial_networks --model-name=dcgan --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/inference_bench.py -ckpt /checkpoints -dl /dataset --num_inter_threads 1 --num_intra_threads 28 -nw 100 -nb 500 --bs 100 --kmp_blocktime 1 --kmp_settings 1
-run_tf_benchmark.py --framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 1 --num_intra_threads 28 --bs 1 --dl /dataset --nw 100 --nb 200
-run_tf_benchmark.py --framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 1 --num_intra_threads 28 --bs 100 --dl /dataset --nw 100 --nb 200
-run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=gnmt --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --infer_mode=beam_search,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/fp32/nmt.py --src=de --tgt=en --hparams_path=/workspace/intelai_models/fp32/standard_hparams/wmt16_gnmt_4_layer_internal.json --out_dir=/workspace/benchmarks/common/tensorflow/logs --vocab_prefix=/dataset/vocab.bpe.32000 --ckpt=/checkpoints/translate.ckpt --infer_batch_size=1 --inference_input_file=/dataset/newstest2015.tok.bpe.32000.de --inference_output_file=/workspace/benchmarks/common/tensorflow/logs/output_infer --inference_ref_file=/dataset/newstest2015.tok.bpe.32000.en --num_inter_threads=1 --num_intra_threads=28 --infer_mode=beam_search
-run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=gnmt --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=32 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --infer_mode=beam_search,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/fp32/nmt.py --src=de --tgt=en --hparams_path=/workspace/intelai_models/fp32/standard_hparams/wmt16_gnmt_4_layer_internal.json --out_dir=/workspace/benchmarks/common/tensorflow/logs --vocab_prefix=/dataset/vocab.bpe.32000 --ckpt=/checkpoints/translate.ckpt --infer_batch_size=32 --inference_input_file=/dataset/newstest2015.tok.bpe.32000.de --inference_output_file=/workspace/benchmarks/common/tensorflow/logs/output_infer --inference_ref_file=/dataset/newstest2015.tok.bpe.32000.en --num_inter_threads=1 --num_intra_threads=28 --infer_mode=beam_search
-run_tf_benchmark.py --framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --num-inter-threads 4 --num-intra-threads 16,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 4 --num_intra_threads 16 --bs 100 --dl /dataset --nw 100 --nb 200
-run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=unet --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --checkpoint_name=model.ckpt,numactl --cpunodebind=0 --membind=0 python /workspace/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/unet_infer.py -bs 1 -cp /checkpoints/model.ckpt --num_inter_threads 1 --num_intra_threads 28 -nw 80 -nb 400
-run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14
-run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=1 --input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14
-run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=512 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=512 --input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14
-run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_int8_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --input_graph=/in_graph/wide_deep_int8_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14
-run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_int8_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=1 --input_graph=/in_graph/wide_deep_int8_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14
-run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=512 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_int8_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=512 --input_graph=/in_graph/wide_deep_int8_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14
-run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=mtcc --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/one_image_test.py --num_inter_threads 1 --num_intra_threads 28 -ckpt /checkpoints -dl /dataset
-run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset, numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=1 --num_intra_threads=28 --lfw_batch_size=1 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=200 --max_steps=1000
-run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset, numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=2 --num_intra_threads=28 --lfw_batch_size=100 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=40 --max_steps=1000
-run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset, numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=2 --num_intra_threads=28 --lfw_batch_size=100 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=40 --max_steps=1000
-run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd_vgg16 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --data-location=/dataset --num-inter-threads=11 --num-intra-threads=21 --data-num-inter-threads=21 --data-num-intra-threads=28 --steps=500 --warmup-steps=100,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_ssd.py --input-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --num-inter-threads=11 --num-intra-threads=21 --data-num-inter-threads=21 --data-num-intra-threads=28 --warmup-steps=100 --steps=500 --data-location=/dataset
-run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd_vgg16 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=-1 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --data-location=/dataset,LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/eval_ssd.py --input-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --num-inter-threads=2 --num-intra-threads=56 --data-location=/dataset --accuracy-only
-run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd_vgg16 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=-1 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdvgg16_fp32_pretrained_model.pb --data-location=/dataset,python /workspace/intelai_models/inference/eval_ssd.py --input-graph=/in_graph/ssdvgg16_fp32_pretrained_model.pb --num-inter-threads=2 --num-intra-threads=56 --data-location=/dataset --accuracy-only
-run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd_vgg16 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --data-location=/dataset --num-inter-threads=11 --num-intra-threads=21 --data-num-inter-threads=21 --data-num-intra-threads=28 --steps=500 --warmup-steps=100,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_ssd.py --input-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --num-inter-threads=11 --num-intra-threads=21 --data-num-inter-threads=21 --data-num-intra-threads=28 --warmup-steps=100 --steps=500 --data-location=/dataset
\ No newline at end of file
diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_dcgan_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_dcgan_args.json
new file mode 100644
index 000000000..e5802f700
--- /dev/null
+++ b/tests/unit/common/tensorflow/tf_model_args/tf_dcgan_args.json
@@ -0,0 +1,5 @@
+[
+  { "_comment": "FP32 benchmark",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=adversarial_networks --model-name=dcgan --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/inference_bench.py -ckpt /checkpoints -dl /dataset --num_inter_threads 1 --num_intra_threads 28 -nw 100 -nb 500 --bs 100 --kmp_blocktime 1 --kmp_settings 1"}
+]
diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_densenet169_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_densenet169_args.json
new file mode 100644
index 000000000..a5d665547
--- /dev/null
+++ b/tests/unit/common/tensorflow/tf_model_args/tf_densenet169_args.json
@@ -0,0 +1,15 @@
+[
+  { "_comment": "FP32 latency benchmark",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/benchmark.py --num_intra_threads=28 --num_inter_threads=1 --batch_size=1 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb"},
+
+  { "_comment": "Fp32 accuracy",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb --data-location=/dataset",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/accuracy.py --num_intra_threads=28 --num_inter_threads=1 --batch_size=100 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb --data_location=/dataset"},
+
+  { "_comment": "FP32 Throughput benchmark",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/benchmark.py --num_intra_threads=28 --num_inter_threads=1 --batch_size=100 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb"}
+]
+
+
diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_draw_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_draw_args.json
new file mode 100644
index 000000000..d638d7492
--- /dev/null
+++ b/tests/unit/common/tensorflow/tf_model_args/tf_draw_args.json
@@ -0,0 +1,15 @@
+[
+  { "_comment": "FP32 latency benchmark",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 1 --num_intra_threads 28 --bs 1 --dl /dataset --nw 100 --nb 200"},
+
+  { "_comment": "FP32 throughput benchmark with --num-inter-threads 4 --num-intra-threads 16",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --num-inter-threads 4 --num-intra-threads 16",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 4 --num_intra_threads 16 --bs 100 --dl /dataset --nw 100 --nb 200"},
+
+  { "_comment": "FP32 Throughput benchmark",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 1 --num_intra_threads 28 --bs 100 --dl /dataset --nw 100 --nb 200"}
+]
+
+
diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_facenet_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_facenet_args.json
new file mode 100644
index 000000000..34b5af1fe
--- /dev/null
+++ b/tests/unit/common/tensorflow/tf_model_args/tf_facenet_args.json
@@ -0,0 +1,13 @@
+[
+  { "_comment": "FP32 latency benchmark",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=1 --num_intra_threads=28 --lfw_batch_size=1 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=200 --max_steps=1000"},
+
+  { "_comment": "Fp32 accuracy",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=2 --num_intra_threads=28 --lfw_batch_size=100 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=40 --max_steps=1000"},
+
+  { "_comment": "FP32 Throughput benchmark",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=2 --num_intra_threads=28 --lfw_batch_size=100 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=40 --max_steps=1000"}
+]
diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_faster_rcnn_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_faster_rcnn_args.json
new file mode 100644
index 000000000..ea6c0a75a
--- /dev/null
+++ b/tests/unit/common/tensorflow/tf_model_args/tf_faster_rcnn_args.json
@@ -0,0 +1,28 @@
+[
+  { "_comment": "FP32 accuracy command",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=-1 --accuracy-only --verbose --checkpoint=/checkpoints --data-location=/dataset --in-graph=/in_graph/frozen_inference_graph.pb",
+    "output": "sh /workspace/intelai_models/inference/fp32/coco_accuracy.sh /in_graph/frozen_inference_graph.pb /dataset/coco_val.record /workspace/models"},
+
+  { "_comment": "FP32 benchmark command",
+    "input": "run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name faster_rcnn --checkpoint /checkpoints --intelai-models . --model-source-dir . --socket-id 0 --verbose --config_file=pipeline.config",
+    "output": "numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --num_inter_threads 1 --num_intra_threads 28 --pipeline_config_path /checkpoints/pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/log/eval"},
+
+  { "_comment": "FP32 benchmark command with custom --num_inter_threads 4 --num_intra_threads 16",
+    "input": "run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name faster_rcnn --checkpoint /checkpoints --intelai-models . --model-source-dir . --socket-id 0 --verbose --config_file=pipeline.config --num-inter-threads 4 --num-intra-threads 16",
+    "output": "numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --num_inter_threads 4 --num_intra_threads 16 --pipeline_config_path /checkpoints/pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/log/eval"},
+
+  { "_comment": "Int8 command for throughput benchmark with --number-of-steps enabled.",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=-1 --benchmark-only --verbose --in-graph=/in_graph/pretrained_int8_faster_rcnn_model.pb --data-location=/dataset --number-of-steps=500",
+    "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/int8/run_frozen_graph_rcnn.py -g /in_graph/pretrained_int8_faster_rcnn_model.pb -n 500 -d /dataset --num-inter-threads 2 --num-intra-threads 56"},
+
+  { "_comment": "Int8 accuracy command",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=-1 --accuracy-only --verbose --in-graph=/in_graph/pretrained_int8_faster_rcnn_model.pb --data-location=/dataset",
+    "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 /workspace/intelai_models/inference/int8/coco_int8.sh /in_graph/pretrained_int8_faster_rcnn_model.pb /dataset /workspace/models"},
+
+  { "_comment": "FP32 benchmark command",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=-1 --benchmark-only --verbose --in-graph=/in_graph/pretrained_int8_faster_rcnn_model.pb --data-location=/dataset",
+    "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/int8/run_frozen_graph_rcnn.py -g /in_graph/pretrained_int8_faster_rcnn_model.pb -d /dataset --num-inter-threads 2 --num-intra-threads 56"
+  }
+]
+
+
diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_gnmt_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_gnmt_args.json
new file mode 100644
index 000000000..7fe7db376
--- /dev/null
+++ b/tests/unit/common/tensorflow/tf_model_args/tf_gnmt_args.json
@@ -0,0 +1,11 @@
+[
+  { "_comment": "FP32 latency benchmark",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=gnmt --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --infer_mode=beam_search",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/fp32/nmt.py --src=de --tgt=en --hparams_path=/workspace/intelai_models/fp32/standard_hparams/wmt16_gnmt_4_layer_internal.json --out_dir=/workspace/benchmarks/common/tensorflow/logs --vocab_prefix=/dataset/vocab.bpe.32000 --ckpt=/checkpoints/translate.ckpt --infer_batch_size=1 --inference_input_file=/dataset/newstest2015.tok.bpe.32000.de --inference_output_file=/workspace/benchmarks/common/tensorflow/logs/output_infer --inference_ref_file=/dataset/newstest2015.tok.bpe.32000.en --num_inter_threads=1 --num_intra_threads=28 --infer_mode=beam_search"},
+
+  { "_comment": "FP32 Throughput benchmark",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=gnmt --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=32 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --infer_mode=beam_search",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/fp32/nmt.py --src=de --tgt=en --hparams_path=/workspace/intelai_models/fp32/standard_hparams/wmt16_gnmt_4_layer_internal.json --out_dir=/workspace/benchmarks/common/tensorflow/logs --vocab_prefix=/dataset/vocab.bpe.32000 --ckpt=/checkpoints/translate.ckpt --infer_batch_size=32 --inference_input_file=/dataset/newstest2015.tok.bpe.32000.de --inference_output_file=/workspace/benchmarks/common/tensorflow/logs/output_infer --inference_ref_file=/dataset/newstest2015.tok.bpe.32000.en --num_inter_threads=1 --num_intra_threads=28 --infer_mode=beam_search"}
+]
+
+
diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_inception_resnet_v2_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_inception_resnet_v2_args.json
new file mode 100644
index 000000000..c1a59e0b5
--- /dev/null
+++ b/tests/unit/common/tensorflow/tf_model_args/tf_inception_resnet_v2_args.json
@@ -0,0 +1,27 @@
+[
+  { "_comment": "Int8 accuracy command",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset",
+    "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/eval_image_classifier_accuracy.py --input_graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data_location=/dataset --input_height=299 --input_width=299 --num_inter_threads=2 --num_intra_threads=56 --output_layer=InceptionResnetV2/Logits/Predictions --batch_size=100"},
+
+  { "_comment": "Int8 command for latency benchmark",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset",
+    "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --inter-op-parallelism-threads=1 --intra-op-parallelism-threads=28 --batch-size=1"},
+
+  { "_comment": "Int8 command for throughput benchmark",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --benchmark-only --verbose --in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset",
+    "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --inter-op-parallelism-threads=1 --intra-op-parallelism-threads=28 --batch-size=128"},
+
+  { "_comment": "Fp32 accuracy command",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --data-location=/dataset",
+    "output": "python /workspace/intelai_models/eval_image_classifier_accuracy.py --input_graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --data_location=/dataset --input_height=299 --input_width=299 --num_inter_threads=2 --num_intra_threads=56 --output_layer=InceptionResnetV2/Logits/Predictions --batch_size=100"},
+
+  { "_comment": "FP32 command for latency benchmark",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --inter-op-parallelism-threads=2 --intra-op-parallelism-threads=28 --batch-size=1"},
+
+  { "_comment": "FP32 command for throughput benchmark",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --inter-op-parallelism-threads=2 --intra-op-parallelism-threads=28 --batch-size=128"}
+]
+
+
diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_inceptionv3_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_inceptionv3_args.json
new file mode 100644
index 000000000..733b691ee
--- /dev/null
+++ b/tests/unit/common/tensorflow/tf_model_args/tf_inceptionv3_args.json
@@ -0,0 +1,44 @@
+[
+  { "_comment": "Int8 accuracy command",
+    "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 100 --in-graph /final_int8_inceptionv3.pb --intelai-models . --accuracy-only --verbose",
+    "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python ./int8/accuracy.py --num_intra_threads=56 --num_inter_threads=2 --batch_size=100 --input_graph=/final_int8_inceptionv3.pb"},
+
+  { "_comment": "Int8 command for latency benchmark with default --num-inter-threads, --num-intra-threads.",
+    "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 1 --in-graph /final_int8_inceptionv3.pb --intelai-models . --benchmark-only --socket-id 0 --verbose",
+    "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./int8/benchmark.py --warmup_steps=10 --num_intra_threads=28 --num_inter_threads=1 --batch_size=1 --input_graph=/final_int8_inceptionv3.pb --steps=50 --num_cores=28"},
+
+  { "_comment": "Int8 command for throughput benchmark",
+    "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /final_int8_inceptionv3.pb --intelai-models . --benchmark-only --socket-id 0 --verbose",
+    "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./int8/benchmark.py --warmup_steps=10 --num_intra_threads=28 --num_inter_threads=1 --batch_size=128 --input_graph=/final_int8_inceptionv3.pb --steps=50 --num_cores=28"},
+
+  { "_comment": "Int8 command for throughput benchmark with --steps=200 --warmup-steps=20",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inceptionv3 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/inception_frozen_max_min.pb --steps=200 --warmup-steps=20",
+    "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/int8/benchmark.py --warmup_steps=20 --num_intra_threads=28 --num_inter_threads=1 --batch_size=128 --input_graph=/in_graph/inception_frozen_max_min.pb --steps=200 --num_cores=28"},
+
+  { "_comment": "Int8 command for latency benchmark with --steps=200 --warmup-steps=20",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inceptionv3 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/inception_frozen_max_min.pb --steps=200 --warmup-steps=20",
+    "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/int8/benchmark.py --warmup_steps=20 --num_intra_threads=28 --num_inter_threads=1 --batch_size=1 --input_graph=/in_graph/inception_frozen_max_min.pb --steps=200 --num_cores=28"},
+
+  { "_comment": "Int8 command for throughput benchmark with --disable-tcmalloc=True",
+    "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /final_int8_inceptionv3.pb --intelai-models . --benchmark-only --socket-id 0 --disable-tcmalloc=True",
+    "output": "numactl --cpunodebind=0 --membind=0 python ./int8/benchmark.py --warmup_steps=10 --num_intra_threads=28 --num_inter_threads=1 --batch_size=128 --input_graph=/final_int8_inceptionv3.pb --steps=50 --num_cores=28"},
+
+  { "_comment": "Fp32 accuracy command",
+    "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 100 --accuracy-only --data-location /dataset --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --verbose",
+    "output": "python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=2 --num-intra-threads=56 --num-cores=28 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only"},
+
+  { "_comment": "FP32 command for latency benchmark",
+    "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 1 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --socket-id 0 --verbose",
+    "output": "numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28"
+  },
+
+  { "_comment": "FP32 command for throughput benchmark",
+    "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --socket-id 0 --verbose",
+    "output": "numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28"},
+
+  { "_comment": "FP32 command for throughput benchmark with --num-inter-threads 4 --num-intra-threads 16",
+    "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --socket-id 0 --verbose --num-inter-threads 4 --num-intra-threads 16",
+    "output": "numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=4 --num-intra-threads=16 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28"}
+]
+
+
diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_inceptionv4_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_inceptionv4_args.json
new file mode 100644
index 000000000..0535c2eef
--- /dev/null
+++ b/tests/unit/common/tensorflow/tf_model_args/tf_inceptionv4_args.json
@@ -0,0 +1,19 @@
+[
+  { "_comment": "Int8 accuracy command",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset",
+    "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/accuracy.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28 --data_location=/dataset"},
+
+  { "_comment": "Int8 command for latency benchmark with default --num-inter-threads, --num-intra-threads.",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset",
+    "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/benchmark.py --batch_size=1 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28"},
+
+  { "_comment": "Int8 command for throughput benchmark",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset",
+    "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/benchmark.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28"},
+
+  { "_comment": "Fp32 accuracy command",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=fp32 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/inceptionv4_fp32_pretrained_model.pb --data-location=/dataset",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/accuracy.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_fp32_pretrained_model.pb --num_intra_threads=28 --data_location=/dataset"}
+]
+
+
diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_lm_1b_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_lm_1b_args.json
new file mode 100644
index 000000000..26d11e1c3
--- /dev/null
+++ b/tests/unit/common/tensorflow/tf_model_args/tf_lm_1b_args.json
@@ -0,0 +1,7 @@
+[
+  { "_comment": "FP32 benchmark",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=language_modeling --model-name=lm-1b --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/benchmark.py -b=1 -I=100 --inter=1 --intra=28"}
+]
+
+
diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_maskrcnn_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_maskrcnn_args.json
new file mode 100644
index 000000000..5900877d3
--- /dev/null
+++ b/tests/unit/common/tensorflow/tf_model_args/tf_maskrcnn_args.json
@@ -0,0 +1,11 @@
+[
+  { "_comment": "FP32 benchmark",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=maskrcnn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --data-location=/dataset",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/coco.py evaluate --dataset=/dataset --num_inter_threads 1 --num_intra_threads 28 --nw 5 --nb 50 --model=coco --infbs 1"},
+
+  { "_comment": "FP32 benchmark with --num-inter-threads 4 --num-intra-threads 16",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=maskrcnn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --data-location=/dataset --num-inter-threads 4 --num-intra-threads 16",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/coco.py evaluate --dataset=/dataset --num_inter_threads 4 --num_intra_threads 16 --nw 5 --nb 50 --model=coco --infbs 1"}
+]
+
+
diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_mobilenet_v1_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_mobilenet_v1_args.json
new file mode 100644
index 000000000..c98ada086
--- /dev/null
+++ b/tests/unit/common/tensorflow/tf_model_args/tf_mobilenet_v1_args.json
@@ -0,0 +1,36 @@
+[
+  { "_comment": "FP32 accuracy command",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --checkpoint=/checkpoints --in-graph=/in_graph/mobilenet_v1_1.0_224_frozen.pb --data-location=/dataset",
+    "output": "python /workspace/intelai_models/inference/fp32/accuracy.py --batch_size=100 --num_inter_threads=2 --input_graph=/in_graph/mobilenet_v1_1.0_224_frozen.pb --num_intra_threads=56 --data_location=/dataset"},
+
+  { "_comment": "FP32 latency benchmark command",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset",
+    "output": "numactl --cpunodebind=0 -l python /workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_dir /dataset --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 1"},
+
+  { "_comment": "FP32 throughput benchmark command",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset",
+    "output": "numactl --cpunodebind=0 -l python /workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_dir /dataset --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 100"},
+
+  { "_comment": "FP32 benchmark command with dummy data and --output-dir specified",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints",
+    "output": "numactl --cpunodebind=0 -l python /workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 100"},
+
+  { "_comment": "Int8 command for throughput benchmark with --number-of-steps enabled.",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=-1 --benchmark-only --verbose --in-graph=/in_graph/pretrained_int8_faster_rcnn_model.pb --data-location=/dataset --number-of-steps=500",
+    "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/int8/run_frozen_graph_rcnn.py -g /in_graph/pretrained_int8_faster_rcnn_model.pb -n 500 -d /dataset --num-inter-threads 2 --num-intra-threads 56"},
+
+  { "_comment": "Int8 accuracy command",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --data-location=/dataset --input_height=224 --input_width=224",
+    "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/accuracy.py --input_height=224 --input_width=224 --num_intra_threads=28 --output_layer=MobilenetV1/Predictions/Reshape_1 --num_inter_threads=1 --batch_size=100 --input_graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --data_location=/dataset --input_layer=input"},
+
+  { "_comment": "Int8 latency benchmark command",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_height=224 --input_width=224 --warmup_steps=10 --steps=50",
+    "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/benchmark.py --input_height=224 --input_width=224 --warmup_steps=10 --num_intra_threads=28 --output_layer=MobilenetV1/Predictions/Reshape_1 --num_inter_threads=1 --batch_size=1 --input_graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_layer=input --steps=50"},
+
+
+  { "_comment": "Int8 throughput benchmark command",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_height=224 --input_width=224 --warmup_steps=10 --steps=50",
+    "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/benchmark.py --input_height=224 --input_width=224 --warmup_steps=10 --num_intra_threads=28 --output_layer=MobilenetV1/Predictions/Reshape_1 --num_inter_threads=1 --batch_size=240 --input_graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_layer=input --steps=50"}
+]
+
+
diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_mtcc_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_mtcc_args.json
new file mode 100644
index 000000000..b0093db93
--- /dev/null
+++ b/tests/unit/common/tensorflow/tf_model_args/tf_mtcc_args.json
@@ -0,0 +1,5 @@
+[
+  { "_comment": "FP32 benchmark",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=mtcc --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/one_image_test.py --num_inter_threads 1 --num_intra_threads 28 -ckpt /checkpoints -dl /dataset"}
+]
diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_ncf_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_ncf_args.json
new file mode 100644
index 000000000..67fa8402c
--- /dev/null
+++ b/tests/unit/common/tensorflow/tf_model_args/tf_ncf_args.json
@@ -0,0 +1,15 @@
+[
+  { "_comment": "FP32 latency benchmark",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=1 --inference_only --benchmark_only"},
+
+  { "_comment": "Fp32 accuracy",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=256 --socket-id 0 --accuracy-only --verbose --checkpoint=/checkpoints --data-location=/dataset",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=256 --inference_only --accuracy_only"},
+
+  { "_comment": "FP32 Throughput benchmark",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=256 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=256 --inference_only --benchmark_only"}
+]
+
+
diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_resnet101_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_resnet101_args.json
new file mode 100644
index 000000000..4c9132a79
--- /dev/null
+++ b/tests/unit/common/tensorflow/tf_model_args/tf_resnet101_args.json
@@ -0,0 +1,17 @@
+[
+  { "_comment": "FP32 accuracy command",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --accuracy-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb --data-location=/dataset",
+    "output": "python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=128 --num-inter-threads=2 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=56 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only"},
+
+  { "_comment": "FP32 command for throughput benchmark",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=128 --num-inter-threads=1 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=28 --warmup-steps=10 --steps=50"},
+
+  { "_comment": "Int8 latency benchmark command",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/resnet101_int8_model.pb",
+    "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=1 --num-intra-threads=28 --num-inter-threads=1 --input-graph=/in_graph/resnet101_int8_model.pb --warmup-steps=40 --steps=100"},
+
+  { "_comment": "FP32 command for latency benchmark",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=1 --num-inter-threads=1 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=28 --warmup-steps=10 --steps=50"}
+]
diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_resnet50_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_resnet50_args.json
new file mode 100644
index 000000000..199ae2c0f
--- /dev/null
+++ b/tests/unit/common/tensorflow/tf_model_args/tf_resnet50_args.json
@@ -0,0 +1,40 @@
+[
+  { "_comment": "FP32 accuracy command",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --batch-size 100 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/freezed_resnet50.pb --accuracy-only --data-location=/dataset",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=100 --data-location=/dataset --accuracy-only --num-cores=28 --warmup-steps=10 --steps=50"},
+
+  { "_comment": "FP32 command for latency benchmark with default --num-inter-threads, --num-intra-threads.",
+    "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 128 --in-graph /freezed_resnet50.pb --intelai-models . --socket-id 0 --verbose",
+    "output": "numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28"},
+
+  { "_comment": "FP32 command for latency benchmark with --num-inter-threads 4 --num-intra-threads 16",
+    "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 1 --in-graph /freezed_resnet50.pb --intelai-models . --socket-id 0 --verbose --num-inter-threads 4 --num-intra-threads 16",
+    "output": "numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=4 --num-intra-threads=16 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28"},
+
+  { "_comment": "FP32 command for throughput benchmark with --num-inter-threads=1 --num-intra-threads=28",
+    "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 128 --in-graph /freezed_resnet50.pb --intelai-models . --socket-id 0 --verbose",
+    "output": "numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28"},
+
+  { "_comment": "Int8 command for throughput benchmark with --output-dir enabled.",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/resnet50_int8_pretrained_model.pb --steps=200 --warmup-steps=20",
+    "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/resnet50_int8_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=20 --steps=200"},
+
+  { "_comment": "Int8 command for data calibration with --calibration-only",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/resnet50_int8_pretrained_model.pb --data-location=/dataset --calibration-only",
+    "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/int8/generate_calibration_data.py --num_intra_threads=56 --num_inter_threads=2 --batch_size=100 --input_graph=/in_graph/resnet50_int8_pretrained_model.pb --data_location=/dataset"},
+
+  { "_comment": "Fp32 command for throughput benchmark with --output-results enabled.",
+    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --output-results --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/resnet50_fp32_pretrained_model.pb --data-location=/dataset",
+    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/resnet50_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --num-cores=28 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --results-file-path
/workspace/benchmarks/common/tensorflow/logs/resnet50_fp32_inference_results*.txt"}, + + { "_comment": "Int8 accuracy command", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name resnet50 --batch-size 100 --data-location /dataset --in-graph /final_int8_resnet50.pb --intelai-models . --accuracy-only --verbose", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python ./inference/eval_image_classifier_inference.py --input-graph=/final_int8_resnet50.pb --num-inter-threads=2 --num-intra-threads=56 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only"}, + + { "_comment": "Int8 command for throughput benchmark with --steps=200 --warmup-steps=20", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/resnet50_int8_pretrained_model.pb --steps=200 --warmup-steps=20", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/resnet50_int8_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=20 --steps=200" + } +] + + diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_rfcn_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_rfcn_args.json new file mode 100644 index 000000000..f8dc9b0a0 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_rfcn_args.json @@ -0,0 +1,17 @@ +[ + { "_comment": "FP32 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=rfcn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --verbose --in-graph=/in_graph/frozen_inference_graph.pb --data-location=/dataset --accuracy-only --split=accuracy_message", + "output": "FROZEN_GRAPH=/in_graph/frozen_inference_graph.pb TF_RECORD_FILE=/dataset SPLIT=accuracy_message TF_MODELS_ROOT=/workspace/models /workspace/intelai_models/inference/fp32/coco_mAP.sh"}, + + { "_comment": "FP32 command for benchmark", + "input": "run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name rfcn --checkpoint /checkpoints --intelai-models . --model-source-dir . 
--socket-id 0 --verbose --config_file=rfcn_pipeline.config", + "output": "numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --inter_op 1 --intra_op 28 --omp 28 --pipeline_config_path /checkpoints/rfcn_pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/models/rfcn/eval --logtostderr --blocktime=0 --run_once=True"}, + + { "_comment": "Int8 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=rfcn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --verbose --in-graph=/in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb --data-location=/dataset --accuracy-only --split=accuracy_message", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 FROZEN_GRAPH=/in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb TF_RECORD_FILE=/dataset SPLIT=accuracy_message TF_MODELS_ROOT=/workspace/models /workspace/intelai_models/inference/int8/coco_mAP.sh"}, + + { "_comment": "Int8 benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=rfcn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --verbose --in-graph=/in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb --data-location=/dataset --benchmark-only --number_of_steps=500", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/int8/run_rfcn_inference.py -m /workspace/models -g /in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb -x 500 -d /dataset --num-inter-threads 2 --num-intra-threads 56"} +] diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_squeezenet_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_squeezenet_args.json new file mode 100644 index 000000000..9232b10fe --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_squeezenet_args.json @@ -0,0 +1,11 @@ +[ + { "_comment": "FP32 command for latency benchmark", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name squeezenet --batch-size 1 --checkpoint /checkpoints --intelai-models . --socket-id 0 --verbose", + "output": "taskset -c 0-27 python ./fp32/train_squeezenet.py --data_location None --batch_size 1 --num_inter_threads 1 --num_intra_threads 28 --model_dir /checkpoints --inference-only --verbose"}, + + { "_comment": "FP32 command for throughput benchmark", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name squeezenet --batch-size 64 --checkpoint /checkpoints --intelai-models . 
--socket-id 0 --verbose", + "output": "taskset -c 0-27 python ./fp32/train_squeezenet.py --data_location None --batch_size 64 --num_inter_threads 1 --num_intra_threads 28 --model_dir /checkpoints --inference-only --verbose"} +] + + diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_ssd_mobilenet_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_ssd_mobilenet_args.json new file mode 100644 index 000000000..fc4a7b1d9 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_ssd_mobilenet_args.json @@ -0,0 +1,17 @@ +[ + { "_comment": "FP32 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 --accuracy-only --verbose --in-graph=/in_graph/frozen_inference_graph.pb --benchmark-dir=/workspace/benchmarks --data-location=/dataset", + "output": "sh /workspace/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/ssdmobilenet_accuracy.sh /in_graph/frozen_inference_graph.pb /dataset"}, + + { "_comment": "FP32 benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 --benchmark-only --verbose --in-graph=/in_graph/frozen_inference_graph.pb --data-location=/dataset", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/infer_detections.py --input_tfrecord_paths=/dataset --output_tfrecord_path=/SSD-mobilenet-out.tfrecord --inference_graph=/in_graph/frozen_inference_graph.pb --discard_image_pixels=True --num_inter_threads=2 --num_intra_threads=28"}, + + { "_comment": "Int8 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb --data-location=/dataset", + "output": "sh /workspace/intelai_models/inference/int8/coco_int8.sh /in_graph/ssdmobilenet_int8_pretrained_model.pb /dataset"}, + + { "_comment": "Int8 benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --batch-size=1 --socket-id 0 --data-location=/dataset --verbose --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb --benchmark-only --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/run_frozen_graph_ssdmob.py -g /in_graph/ssdmobilenet_int8_pretrained_model.pb -n 5000 -d /dataset -x --num-inter-threads 2 --num-intra-threads 28"} +] diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_ssd_resnet34_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_ssd_resnet34_args.json new file mode 
100644 index 000000000..0aa2ca495 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_ssd_resnet34_args.json @@ -0,0 +1,11 @@ +[ + { "_comment": "FP32 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-resnet34 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssd_resnet34_bs1.pb --data-location=/dataset", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/infer_detections.py --input-graph /in_graph/ssd_resnet34_bs1.pb --batch-size 1 --inter-op-parallelism-threads 1 --intra-op-parallelism-threads 28 --accuracy-only --data-location /dataset"}, + + { "_comment": "FP32 benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-resnet34 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssd_resnet34_bs1.pb --data-location=/dataset", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/infer_detections.py --input-graph /in_graph/ssd_resnet34_bs1.pb --batch-size 1 --inter-op-parallelism-threads 1 --intra-op-parallelism-threads 28"} +] + + diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_ssd_vgg16_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_ssd_vgg16_args.json new file mode 100644 index 000000000..37d478e1f --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_ssd_vgg16_args.json @@ -0,0 +1,17 @@ +[ + { "_comment": "FP32 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd_vgg16 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=-1 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdvgg16_fp32_pretrained_model.pb --data-location=/dataset", + "output": "python /workspace/intelai_models/inference/eval_ssd.py --input-graph=/in_graph/ssdvgg16_fp32_pretrained_model.pb --num-inter-threads=2 --num-intra-threads=56 --data-location=/dataset --accuracy-only"}, + + { "_comment": "FP32 benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd_vgg16 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --data-location=/dataset --num-inter-threads=11 --num-intra-threads=21 --data-num-inter-threads=21 --data-num-intra-threads=28 --steps=500 --warmup-steps=100", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_ssd.py --input-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --num-inter-threads=11 
--num-intra-threads=21 --data-num-inter-threads=21 --data-num-intra-threads=28 --warmup-steps=100 --steps=500 --data-location=/dataset"}, + + { "_comment": "Int8 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd_vgg16 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=-1 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --data-location=/dataset", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/eval_ssd.py --input-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --num-inter-threads=2 --num-intra-threads=56 --data-location=/dataset --accuracy-only"}, + + { "_comment": "Int8 benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd_vgg16 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --data-location=/dataset --num-inter-threads=11 --num-intra-threads=21 --data-num-inter-threads=21 --data-num-intra-threads=28 --steps=500 --warmup-steps=100", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_ssd.py --input-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --num-inter-threads=11 --num-intra-threads=21 --data-num-inter-threads=21 --data-num-intra-threads=28 --warmup-steps=100 --steps=500 --data-location=/dataset"} +] diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_transformer_language_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_transformer_language_args.json new file mode 100644 index 000000000..5662ad83e --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_transformer_language_args.json @@ -0,0 +1,9 @@ +[ + { "_comment": "FP32 latency benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_language --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --decode_from_file=/checkpoints/newstest2015.en --reference=/checkpoints/newstest2015.de", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/tensor2tensor/bin/t2t_decoder.py --problem=translate_ende_wmt32k --model=transformer --hparams_set=transformer_base_single_gpu --decode_hparams=beam_size=4,alpha=0.6,batch_size=1 --data_dir=/dataset --output_dir=/checkpoints --decode_from_file=/checkpoints/newstest2015.en --decode_to_file=/workspace/models/out_dir/output_infer --reference=/checkpoints/newstest2015.de --inter_op_parallelism_threads=1 --intra_op_parallelism_threads=28"}, + + { "_comment": "Fp32 throughput", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_language --precision=fp32 --mode=inference --model-source-dir=/workspace/models 
--benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=32 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --decode_from_file=/checkpoints/newstest2015.en --reference=/checkpoints/newstest2015.de", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/tensor2tensor/bin/t2t_decoder.py --problem=translate_ende_wmt32k --model=transformer --hparams_set=transformer_base_single_gpu --decode_hparams=beam_size=4,alpha=0.6,batch_size=32 --data_dir=/dataset --output_dir=/checkpoints --decode_from_file=/checkpoints/newstest2015.en --decode_to_file=/workspace/models/out_dir/output_infer --reference=/checkpoints/newstest2015.de --inter_op_parallelism_threads=1 --intra_op_parallelism_threads=28"} +] diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_transformer_lt_official_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_transformer_lt_official_args.json new file mode 100644 index 000000000..1ccbf4bc4 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_transformer_lt_official_args.json @@ -0,0 +1,9 @@ +[ + { "_comment": "FP32 latency benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_lt_official --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --file=/dataset/newstest2014.en --reference=/dataset/newstest2014.de --vocab_file=/dataset/vocab.txt --in_graph=/in_graph/fp32_graphdef.pb", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=1 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt"}, + + { "_comment": "FP32 throughput benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_lt_official --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=64 --socket-id=0 --benchmark-only --verbose --file=/dataset/newstest2014.en --reference=/dataset/newstest2014.de --vocab_file=/dataset/vocab.txt --in_graph=/in_graph/fp32_graphdef.pb", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=64 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt"} +] diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_unet_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_unet_args.json new file mode 100644 index 000000000..cbbe2f3f4 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_unet_args.json @@ -0,0 +1,7 @@ +[ + { "_comment": "FP32 benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=unet --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --checkpoint_name=model.ckpt", + 
"output": "numactl --cpunodebind=0 --membind=0 python /workspace/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/unet_infer.py -bs 1 -cp /checkpoints/model.ckpt --num_inter_threads 1 --num_intra_threads 28 -nw 80 -nb 400"} +] + + diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_wavenet_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_wavenet_args.json new file mode 100644 index 000000000..49ea2e09e --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_wavenet_args.json @@ -0,0 +1,7 @@ +[ + { "_comment": "FP32 benchmark command", + "input": "run_tf_benchmark.py --framework tensorflow --use-case text_to_speech --precision fp32 --mode inference --model-name wavenet --num-cores 1 --checkpoint /checkpoints --intelai-models . --model-source-dir . --socket-id 0 --verbose --checkpoint_name=model.ckpt-99 --sample=8510", + "output": "numactl --physcpubind=0-0 --membind=0 python generate.py /checkpoints/model.ckpt-99 --num_inter_threads=1 --num_intra_threads=1 --sample=8510"} +] + + diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_wide_deep_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_wide_deep_args.json new file mode 100644 index 000000000..64fddac5b --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_wide_deep_args.json @@ -0,0 +1,5 @@ +[ + { "_comment": "FP32 benchmark", + "input": "run_tf_benchmark.py --framework tensorflow --use-case recommendation --precision fp32 --mode inference --model-name wide_deep --batch-size 1024 --data-location /dataset --checkpoint /checkpoints --intelai-models . --verbose", + "output": "OMP_NUM_THREADS=1 numactl --cpunodebind=0 --membind=0 python inference/fp32/wide_deep_inference.py --data_dir=/dataset --model_dir=/checkpoints --batch_size=1024"} +] diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_wide_deep_large_ds_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_wide_deep_large_ds_args.json new file mode 100644 index 000000000..3d2297515 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_wide_deep_large_ds_args.json @@ -0,0 +1,27 @@ +[ + { "_comment": "Int8 benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_int8_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --input_graph=/in_graph/wide_deep_int8_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14"}, + + { "_comment": "Int8 latency benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_int8_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 
numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=1 --input_graph=/in_graph/wide_deep_int8_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14"}, + + { "_comment": "Int8 command for throughput benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=512 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_int8_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=512 --input_graph=/in_graph/wide_deep_int8_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14"}, + + { "_comment": "FP32 benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14"}, + + { "_comment": "Fp32 command for throughput benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=512 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=512 --input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14"}, + + { "_comment": "Fp32 latency benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=1 
--input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14"} +] + + From 850a00372c63878c46e8e66184b4b1f163b4d67d Mon Sep 17 00:00:00 2001 From: Dina Suehiro Jones Date: Tue, 7 May 2019 09:51:00 -0700 Subject: [PATCH 31/62] Update docker images in README files to use TF 1.14 (#297) --- .../tensorflow/dcgan/README.md | 2 +- .../tensorflow/draw/README.md | 4 ++-- .../tensorflow/facenet/README.md | 6 ++--- .../tensorflow/mtcc/README.md | 2 +- .../tensorflow/densenet169/README.md | 6 ++--- .../tensorflow/inception_resnet_v2/README.md | 12 +++++----- .../tensorflow/inceptionv3/README.md | 22 +++++++------------ .../tensorflow/inceptionv4/README.md | 18 +++++---------- .../tensorflow/mobilenet_v1/README.md | 12 +++++----- .../tensorflow/resnet101/README.md | 21 ++++++------------ .../tensorflow/resnet50/README.md | 18 +++++---------- .../tensorflow/squeezenet/README.md | 4 ++-- .../tensorflow/maskrcnn/README.md | 2 +- .../tensorflow/unet/README.md | 2 +- .../tensorflow/lm-1b/README.md | 4 ++-- .../tensorflow/gnmt/README.md | 4 ++-- .../tensorflow/transformer_language/README.md | 4 ++-- .../tensorflow/faster_rcnn/README.md | 14 ++++-------- .../tensorflow/rfcn/README.md | 14 ++++-------- .../tensorflow/ssd-mobilenet/README.md | 13 ++++------- .../tensorflow/ssd-resnet34/README.md | 4 ++-- .../tensorflow/ssd_vgg16/README.md | 8 +++---- .../recommendation/tensorflow/ncf/README.md | 6 ++--- .../tensorflow/wavenet/README.md | 2 +- docs/general/tensorflow/LaunchBenchmark.md | 4 ++-- 25 files changed, 83 insertions(+), 125 deletions(-) diff --git a/benchmarks/adversarial_networks/tensorflow/dcgan/README.md b/benchmarks/adversarial_networks/tensorflow/dcgan/README.md index d552ac46d..77953b78b 100644 --- a/benchmarks/adversarial_networks/tensorflow/dcgan/README.md +++ b/benchmarks/adversarial_networks/tensorflow/dcgan/README.md @@ -61,7 +61,7 @@ $ python launch_benchmark.py \ --socket-id 0 \ --checkpoint /home//dcgan_fp32_unconditional_cifar10_pretrained_model \ --data-location /home//cifar10 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image intelaipg/intel-optimized-tensorflow:1.14 ``` 5. Log files are located at the value of `--output-dir`.
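The `tf_*_args.json` fixtures added above each pair a `run_tf_benchmark.py` invocation (`input`) with the exact command string the launcher is expected to build (`output`). A minimal sketch of a parametrized test that could consume them, assuming a hypothetical `construct_model_command()` helper (not the suite's real API):

```python
import glob
import json
import os

import pytest

# Hypothetical helper: stands in for whatever the test suite actually uses
# to turn launcher args into the final model command string.
from test_helpers import construct_model_command

ARGS_DIR = os.path.join(os.path.dirname(__file__), "tf_model_args")


def load_cases():
    # Collect (input, expected output) pairs from every fixture file.
    cases = []
    for json_file in sorted(glob.glob(os.path.join(ARGS_DIR, "*.json"))):
        with open(json_file) as f:
            cases.extend((e["input"], e["output"]) for e in json.load(f))
    return cases


@pytest.mark.parametrize("input_args,expected_cmd", load_cases())
def test_tf_model_args(input_args, expected_cmd):
    # Each fixture entry asserts the launcher builds exactly this command.
    assert construct_model_command(input_args) == expected_cmd
```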
diff --git a/benchmarks/content_creation/tensorflow/draw/README.md b/benchmarks/content_creation/tensorflow/draw/README.md index 159f8de7b..fdf18fade 100644 --- a/benchmarks/content_creation/tensorflow/draw/README.md +++ b/benchmarks/content_creation/tensorflow/draw/README.md @@ -48,7 +48,7 @@ modes/precisions: --model-name draw \ --mode inference \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl-py3 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14-py3 \ --checkpoint /home//draw_fp32_pretrained_model \ --data-location /home//mnist \ --batch-size 1 \ @@ -61,7 +61,7 @@ modes/precisions: --model-name draw \ --mode inference \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl-py3 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14-py3 \ --checkpoint /home//draw_fp32_pretrained_model \ --data-location /home//mnist \ --batch-size 100 \ diff --git a/benchmarks/face_detection_and_alignment/tensorflow/facenet/README.md b/benchmarks/face_detection_and_alignment/tensorflow/facenet/README.md index 0e7e0d307..db5706b5d 100644 --- a/benchmarks/face_detection_and_alignment/tensorflow/facenet/README.md +++ b/benchmarks/face_detection_and_alignment/tensorflow/facenet/README.md @@ -61,7 +61,7 @@ python launch_benchmark.py \ --checkpoint /home//checkpoints \ --data-location /home//dataset \ --model-source-dir /home//facenet/ \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image intelaipg/intel-optimized-tensorflow:1.14 ``` Example log tail when benchmarking for latency: ``` @@ -96,7 +96,7 @@ python launch_benchmark.py \ --checkpoint /home//checkpoints \ --data-location /home//dataset \ --model-source-dir /home//facenet/ \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image intelaipg/intel-optimized-tensorflow:1.14 ``` Example log tail when benchmarking for throughput: ``` @@ -128,7 +128,7 @@ python launch_benchmark.py \ --checkpoint /home//checkpoints \ --data-location /home//dataset \ --model-source-dir /home//facenet/ \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image intelaipg/intel-optimized-tensorflow:1.14 ``` Example log tail when benchmarking for accuracy: ``` diff --git a/benchmarks/face_detection_and_alignment/tensorflow/mtcc/README.md b/benchmarks/face_detection_and_alignment/tensorflow/mtcc/README.md index a659f397f..9095110f9 100644 --- a/benchmarks/face_detection_and_alignment/tensorflow/mtcc/README.md +++ b/benchmarks/face_detection_and_alignment/tensorflow/mtcc/README.md @@ -56,7 +56,7 @@ Run benchmarking: --mode inference \ --socket-id 0 \ --checkpoint /home//MTCNN_model \ - --docker-image intelaipg/intel-optimized-tensorflow:nightly-latestprs-bdw + --docker-image intelaipg/intel-optimized-tensorflow:1.14 ``` 6. The log file is saved to the value of `--output-dir`.
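A pattern worth noting in the expected commands recorded in the fixtures above: int8 runs are prefixed with an `LD_PRELOAD` of tcmalloc, and a non-negative `--socket-id` adds `numactl` CPU/memory pinning. A simplified sketch of that convention follows; it is an illustration only, not the actual logic in `start.sh`, and it ignores the `--num-cores`/`--physcpubind` variant seen in the wavenet fixture:

```python
def launch_prefix(precision, socket_id,
                  tcmalloc="/usr/lib/libtcmalloc.so.4.2.6"):
    """Illustrative only: rebuilds the command prefix seen in the fixture
    outputs (int8 -> tcmalloc preload; socket pinning via numactl)."""
    parts = []
    if precision == "int8":
        parts.append("LD_PRELOAD=" + tcmalloc)
    if socket_id >= 0:
        # Pin both CPU and memory allocation to the requested socket.
        parts.append("numactl --cpunodebind={0} --membind={0}".format(socket_id))
    return " ".join(parts)


# launch_prefix("int8", 0)
# -> 'LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0'
# launch_prefix("fp32", -1)
# -> ''
```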
diff --git a/benchmarks/image_recognition/tensorflow/densenet169/README.md b/benchmarks/image_recognition/tensorflow/densenet169/README.md index bf6b1f84f..fa02b7a80 100644 --- a/benchmarks/image_recognition/tensorflow/densenet169/README.md +++ b/benchmarks/image_recognition/tensorflow/densenet169/README.md @@ -56,7 +56,7 @@ following modes/precisions: --batch-size 100 \ --socket-id 0 \ --in-graph /home//densenet169_fp32_pretrained_model.pb \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ -- input_height=224 input_width=224 warmup_steps=20 steps=100 \ input_layer="input" output_layer="densenet169/predictions/Reshape_1" ``` @@ -72,7 +72,7 @@ following modes/precisions: --batch-size 1 \ --socket-id 0 \ --in-graph /home//densenet169_fp32_pretrained_model.pb \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ -- input_height=224 input_width=224 warmup_steps=20 steps=100 \ input_layer="input" output_layer="densenet169/predictions/Reshape_1" ``` @@ -89,7 +89,7 @@ following modes/precisions: --batch-size 100 \ --socket-id 0 \ --in-graph /home//densenet169_fp32_pretrained_model.pb \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --data-location /home//imagenet_validation_dataset \ -- input_height=224 input_width=224 \ input_layer="input" output_layer="densenet169/predictions/Reshape_1" diff --git a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md index 4b0543e56..a55413eb3 100644 --- a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md +++ b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md @@ -87,7 +87,7 @@ python launch_benchmark.py \ --framework tensorflow \ --accuracy-only \ --batch-size 100 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-prs-b5d67b7-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph /home//inception_resnet_v2_int8_pretrained_model.pb \ --data-location /home//datasets/ImageNet_TFRecords ``` @@ -103,7 +103,7 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-prs-b5d67b7-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph /home//inception_resnet_v2_int8_pretrained_model.pb ``` @@ -118,7 +118,7 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 128 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-prs-b5d67b7-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph /home//inception_resnet_v2_int8_pretrained_model.pb ``` @@ -247,7 +247,7 @@ python launch_benchmark.py \ --framework tensorflow \ --accuracy-only \ --batch-size 100 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph /home//inception_resnet_v2_fp32_pretrained_model.pb \ --data-location /home//datasets/ImageNet_TFRecords ``` @@ -264,7 +264,7 @@ python launch_benchmark.py \ --batch-size 1 \ --socket-id 0 \ --in-graph /home//inception_resnet_v2_fp32_pretrained_model.pb \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image intelaipg/intel-optimized-tensorflow:1.14 ``` For
throughput (using `--benchmark-only`, `--socket-id 0` and `--batch-size 128`): @@ -279,7 +279,7 @@ python launch_benchmark.py \ --batch-size 128 \ --socket-id 0 \ --in-graph /home//inception_resnet_v2_fp32_pretrained_model.pb \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image intelaipg/intel-optimized-tensorflow:1.14 ``` Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands diff --git a/benchmarks/image_recognition/tensorflow/inceptionv3/README.md b/benchmarks/image_recognition/tensorflow/inceptionv3/README.md index 3e8cf2f0b..e02c73331 100644 --- a/benchmarks/image_recognition/tensorflow/inceptionv3/README.md +++ b/benchmarks/image_recognition/tensorflow/inceptionv3/README.md @@ -100,7 +100,7 @@ python launch_benchmark.py \ --framework tensorflow \ --accuracy-only \ --batch-size 100 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph /home//inceptionv3_int8_pretrained_model.pb \ --data-location /home//datasets/ImageNet_TFRecords ``` @@ -121,7 +121,7 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph /home//inceptionv3_int8_pretrained_model.pb \ --data-location /home//datasets/ImageNet_TFRecords \ -- warmup_steps=50 steps=500 @@ -138,7 +138,7 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph /home//inceptionv3_int8_pretrained_model.pb \ -- warmup_steps=50 steps=500 ``` @@ -154,7 +154,7 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 128 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph /home//inceptionv3_int8_pretrained_model.pb \ --data-location /home//datasets/ImageNet_TFRecords \ -- warmup_steps=50 steps=500 @@ -171,17 +171,11 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 128 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph /home//inceptionv3_int8_pretrained_model.pb \ -- warmup_steps=50 steps=500 ``` -The docker image (`intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl`) -used in the commands above were built using -[TensorFlow](git@github.com:tensorflow/tensorflow.git) master -([e889ea1](https://github.com/tensorflow/tensorflow/commit/e889ea1dd965c31c391106aa3518fc23d2689954)) and -[PR #25765](https://github.com/tensorflow/tensorflow/pull/25765). - Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands to get additional debug output or change the default output location.. 
@@ -267,7 +261,7 @@ python launch_benchmark.py \ --framework tensorflow \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph /home//inceptionv3_fp32_pretrained_model.pb ``` Example log tail when benchmarking for latency: @@ -298,7 +292,7 @@ python launch_benchmark.py \ --framework tensorflow \ --batch-size 128 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph /home//inceptionv3_fp32_pretrained_model.pb ``` Example log tail when benchmarking for throughput: @@ -330,7 +324,7 @@ python launch_benchmark.py \ --accuracy-only \ --batch-size 100 \ --data-location /dataset/Imagenet_Validation \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph /home//inceptionv3_fp32_pretrained_model.pb ``` Example log tail when benchmarking for accuracy: diff --git a/benchmarks/image_recognition/tensorflow/inceptionv4/README.md b/benchmarks/image_recognition/tensorflow/inceptionv4/README.md index edb391d84..e89d13dee 100644 --- a/benchmarks/image_recognition/tensorflow/inceptionv4/README.md +++ b/benchmarks/image_recognition/tensorflow/inceptionv4/README.md @@ -57,7 +57,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. --accuracy-only \ --batch-size 100 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph /home//inceptionv4_int8_pretrained_model.pb \ --data-location /home//ImageNet_TFRecords ``` @@ -72,7 +72,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. --benchmark-only \ --batch-size 240 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph /home//inceptionv4_int8_pretrained_model.pb ``` @@ -86,16 +86,10 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph /home//inceptionv4_int8_pretrained_model.pb ``` - The docker image (`intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl`) - used in the commands above were built using - [TensorFlow](git@github.com:tensorflow/tensorflow.git) master - ([e889ea1](https://github.com/tensorflow/tensorflow/commit/e889ea1dd965c31c391106aa3518fc23d2689954)) and - [PR #25765](https://github.com/tensorflow/tensorflow/pull/25765). - Note that the `--verbose` flag can be added to any of the above commands to get additional debug output. @@ -188,7 +182,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. --accuracy-only \ --batch-size 100 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph /home//inceptionv4_fp32_pretrained_model.pb \ --data-location /home//ImageNet_TFRecords ``` @@ -203,7 +197,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. 
--benchmark-only \ --batch-size 240 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph /home//inceptionv4_fp32_pretrained_model.pb ``` @@ -217,7 +211,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph /home//inceptionv4_fp32_pretrained_model.pb ``` diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md index bc84ba6c2..af5947e20 100644 --- a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md +++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md @@ -65,7 +65,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. --batch-size 240 \ --socket-id 0 \ --in-graph /home//mobilenetv1_int8_pretrained_model.pb \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-prs-b5d67b7-avx2-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ -- input_height=224 input_width=224 warmup_steps=10 steps=50 \ input_layer="input" output_layer="MobilenetV1/Predictions/Reshape_1" ``` @@ -81,7 +81,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. --batch-size 1 \ --socket-id 0 \ --in-graph /home//mobilenetv1_int8_pretrained_model.pb \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-prs-b5d67b7-avx2-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ -- input_height=224 input_width=224 warmup_steps=10 steps=50 \ input_layer="input" output_layer="MobilenetV1/Predictions/Reshape_1" ``` @@ -98,7 +98,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. --batch-size 100 \ --socket-id 0 \ --in-graph /home//mobilenetv1_int8_pretrained_model.pb \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-prs-b5d67b7-avx2-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --data-location /home//imagenet_validation_dataset \ -- input_height=224 input_width=224 \ input_layer="input" output_layer="MobilenetV1/Predictions/Reshape_1" @@ -216,7 +216,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. --model-name mobilenet_v1 \ --mode inference \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --model-source-dir /home//tensorflow/models \ --batch-size 1 \ --socket-id 0 \ @@ -232,7 +232,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. --model-name mobilenet_v1 \ --mode inference \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --model-source-dir /home//tensorflow/models \ --batch-size 100 \ --socket-id 0 \ @@ -246,7 +246,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. 
--model-name mobilenet_v1 \ --mode inference \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --model-source-dir /home//tensorflow/models \ --batch-size 100 \ --accuracy-only \ diff --git a/benchmarks/image_recognition/tensorflow/resnet101/README.md b/benchmarks/image_recognition/tensorflow/resnet101/README.md index 4bb6a8ded..2c87b7ea1 100644 --- a/benchmarks/image_recognition/tensorflow/resnet101/README.md +++ b/benchmarks/image_recognition/tensorflow/resnet101/README.md @@ -85,7 +85,7 @@ $ python launch_benchmark.py \ --framework tensorflow \ --accuracy-only \ --batch-size 100 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --data-location /home//dataset/FullImageNetData_directory \ --in-graph=/home//resnet101_int8_pretrained_model.pb ``` @@ -106,7 +106,7 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph=/home//resnet101_int8_pretrained_model.pb \ -- warmup_steps=50 steps=500 ``` @@ -123,7 +123,7 @@ python launch_benchmark.py \ --batch-size 1 \ --socket-id 0 \ --data-location /home//dataset/FullImageNetData_directory \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph=/home//resnet101_int8_pretrained_model.pb \ -- warmup_steps=50 steps=500 ``` @@ -139,7 +139,7 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 128 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph=/home//resnet101_int8_pretrained_model.pb \ -- warmup_steps=50 steps=500 ``` @@ -156,18 +156,11 @@ python launch_benchmark.py \ --batch-size 128 \ --data-location /home//dataset/FullImageNetData_directory \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph=/home//resnet101_int8_pretrained_model.pb \ -- warmup_steps=50 steps=500 ``` - -The docker image (`intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl`) -used in the commands above were built using -[TensorFlow](git@github.com:tensorflow/tensorflow.git) master -([e889ea1](https://github.com/tensorflow/tensorflow/commit/e889ea1dd965c31c391106aa3518fc23d2689954)) and -[PR #25765](https://github.com/tensorflow/tensorflow/pull/25765). - Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands to get additional debug output or change the default output location.. 
@@ -257,7 +250,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/resnet10 --mode inference \ --model-name resnet101 \ --batch-size 128 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph /home//trained_models/resnet101_fp32_pretrained_model.pb \ --socket-id 0 ``` @@ -284,7 +277,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/resnet10 --mode inference \ --model-name resnet101 \ --batch-size 100 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --in-graph /home//trained_models/resnet101_fp32_pretrained_model.pb \ --data-location /home//imagenet_validation_dataset \ --accuracy-only \ diff --git a/benchmarks/image_recognition/tensorflow/resnet50/README.md b/benchmarks/image_recognition/tensorflow/resnet50/README.md index 5a666c6dd..31c06609a 100644 --- a/benchmarks/image_recognition/tensorflow/resnet50/README.md +++ b/benchmarks/image_recognition/tensorflow/resnet50/README.md @@ -43,12 +43,6 @@ $ git clone https://github.com/IntelAI/models.git The optimized ResNet50 model files are attached to the [intelai/models](https://github.com/intelai/models) repo and located at `models/models/image_recognition/tensorflow/resnet50/`. - The docker image (`intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl`) - used in the commands above were built using - [TensorFlow](git@github.com:tensorflow/tensorflow.git) master - ([e889ea1](https://github.com/tensorflow/tensorflow/commit/e889ea1dd965c31c391106aa3518fc23d2689954)) and - [PR #25765](https://github.com/tensorflow/tensorflow/pull/25765). - * Calculate the model accuracy, the required parameters parameters include: the `ImageNet` dataset location (from step 1), the pre-trained `final_int8_resnet50.pb` input graph file (from step 2), and the `--accuracy-only` flag. @@ -64,7 +58,7 @@ $ python launch_benchmark.py \ --mode inference \ --batch-size=100 \ --accuracy-only \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl + --docker-image intelaipg/intel-optimized-tensorflow:1.14 ``` The log file is saved to the value of `--output-dir`. @@ -100,7 +94,7 @@ $ python launch_benchmark.py \ --mode inference \ --batch-size=128 \ --benchmark-only \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl + --docker-image intelaipg/intel-optimized-tensorflow:1.14 -- warmup_steps=50 steps=500 ``` The tail of the log output when the benchmarking completes should look @@ -161,7 +155,7 @@ $ python launch_benchmark.py \ --mode inference \ --batch-size=1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image intelaipg/intel-optimized-tensorflow:1.14 ``` The log file is saved to the value of `--output-dir`. @@ -196,7 +190,7 @@ $ python launch_benchmark.py \ --mode inference \ --batch-size=128 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image intelaipg/intel-optimized-tensorflow:1.14 ``` The log file is saved to the value of `--output-dir`. @@ -234,7 +228,7 @@ $ python launch_benchmark.py \ --batch-size 100 \ --socket-id 0 \ --data-location /home//dataset/ImageNetData_directory \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image intelaipg/intel-optimized-tensorflow:1.14 ``` The log file is saved to the value of `--output-dir`. 
@@ -268,7 +262,7 @@ $ python launch_benchmark.py \ --batch-size 100 \ --socket-id 0 \ --data-location /home//dataset/ImageNetData_directory \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image intelaipg/intel-optimized-tensorflow:1.14 ``` The results file will be written to the `models/benchmarks/common/tensorflow/logs` directory, unless another diff --git a/benchmarks/image_recognition/tensorflow/squeezenet/README.md b/benchmarks/image_recognition/tensorflow/squeezenet/README.md index 355efca72..1cf855218 100644 --- a/benchmarks/image_recognition/tensorflow/squeezenet/README.md +++ b/benchmarks/image_recognition/tensorflow/squeezenet/README.md @@ -79,7 +79,7 @@ $ python launch_benchmark.py \ --framework tensorflow \ --socket-id 0 \ --batch-size 64 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --checkpoint /home//squeezenet_checkpoints \ --data-location /home//datasets/ImageNet_TFRecords ``` @@ -94,7 +94,7 @@ $ python launch_benchmark.py \ --framework tensorflow \ --socket-id 0 \ --batch-size 1 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --checkpoint /home//squeezenet_checkpoints \ --data-location /home//datasets/ImageNet_TFRecords ``` diff --git a/benchmarks/image_segmentation/tensorflow/maskrcnn/README.md b/benchmarks/image_segmentation/tensorflow/maskrcnn/README.md index c862032f7..bdf3cdae1 100644 --- a/benchmarks/image_segmentation/tensorflow/maskrcnn/README.md +++ b/benchmarks/image_segmentation/tensorflow/maskrcnn/README.md @@ -62,7 +62,7 @@ $ python launch_benchmark.py \ --batch-size 1 \ --socket-id 0 \ --data-location /home//COCO2014 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl-py3 + --docker-image intelaipg/intel-optimized-tensorflow:1.14-py3 ``` 5. Log files are located at the value of `--output-dir`. 
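All of these models go through the same `launch_benchmark.py` entry point, so repeated runs are easy to script. A sketch only (the run list and image tags are placeholders, and each model still needs its model-specific flags such as `--data-location` or `--checkpoint` from its README):

```
# Sketch: drive several launch_benchmark.py runs from Python.
import subprocess

runs = [
    {"model": "maskrcnn", "image": "intelaipg/intel-optimized-tensorflow:1.14-py3", "batch": "1"},
    {"model": "unet", "image": "intelaipg/intel-optimized-tensorflow:1.14", "batch": "1"},
]

for run in runs:
    subprocess.check_call([
        "python", "launch_benchmark.py",
        "--model-name", run["model"],
        "--framework", "tensorflow",
        "--precision", "fp32",
        "--mode", "inference",
        "--socket-id", "0",
        "--batch-size", run["batch"],
        "--docker-image", run["image"],
    ])
```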
diff --git a/benchmarks/image_segmentation/tensorflow/unet/README.md b/benchmarks/image_segmentation/tensorflow/unet/README.md index 6f6671e66..fd5968ad6 100644 --- a/benchmarks/image_segmentation/tensorflow/unet/README.md +++ b/benchmarks/image_segmentation/tensorflow/unet/README.md @@ -57,7 +57,7 @@ modes/precisions: --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --checkpoint /home//unet_trained \ --model-source-dir /home//tf_unet \ -- checkpoint_name=model.cpkt diff --git a/benchmarks/language_modeling/tensorflow/lm-1b/README.md b/benchmarks/language_modeling/tensorflow/lm-1b/README.md index 525ff352b..82b42cac9 100644 --- a/benchmarks/language_modeling/tensorflow/lm-1b/README.md +++ b/benchmarks/language_modeling/tensorflow/lm-1b/README.md @@ -54,7 +54,7 @@ python launch_benchmark.py \ --framework tensorflow \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --model-source-dir /inference/cloud/language_modeling ``` @@ -69,7 +69,7 @@ python launch_benchmark.py \ --framework tensorflow \ --batch-size 1024 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --model-source-dir /inference/cloud/language_modeling \ -- steps=4 \ ``` diff --git a/benchmarks/language_translation/tensorflow/gnmt/README.md b/benchmarks/language_translation/tensorflow/gnmt/README.md index f52bcdfc6..00bc1807f 100644 --- a/benchmarks/language_translation/tensorflow/gnmt/README.md +++ b/benchmarks/language_translation/tensorflow/gnmt/README.md @@ -82,7 +82,7 @@ python launch_benchmark.py \ --socket-id 0 \ --checkpoint /home//gnmt_checkpoints \ --data-location /home//wmt16 \ ---docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ +--docker-image intelaipg/intel-optimized-tensorflow:1.14 \ -- infer_mode=beam_search ``` @@ -99,7 +99,7 @@ python launch_benchmark.py \ --socket-id 0 \ --checkpoint /home//gnmt_checkpoints \ --data-location /home//wmt16 \ ---docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ +--docker-image intelaipg/intel-optimized-tensorflow:1.14 \ -- infer_mode=beam_search ``` diff --git a/benchmarks/language_translation/tensorflow/transformer_language/README.md b/benchmarks/language_translation/tensorflow/transformer_language/README.md index abc931d51..a21a4fbd6 100644 --- a/benchmarks/language_translation/tensorflow/transformer_language/README.md +++ b/benchmarks/language_translation/tensorflow/transformer_language/README.md @@ -81,7 +81,7 @@ python launch_benchmark.py \ --framework tensorflow \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --checkpoint /home//transformer_lt_fp32_pretrained_model \ --data-location /home//t2t_data \ --model-source-dir /home//tensor2tensor/ \ @@ -98,7 +98,7 @@ python launch_benchmark.py \ --framework tensorflow \ --batch-size 32 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --checkpoint /home//transformer_lt_fp32_pretrained_model \ --data-location /home//t2t_data \ --model-source-dir /home//tensor2tensor/ \ diff --git 
a/benchmarks/object_detection/tensorflow/faster_rcnn/README.md b/benchmarks/object_detection/tensorflow/faster_rcnn/README.md index e69fba728..072d91d2c 100644 --- a/benchmarks/object_detection/tensorflow/faster_rcnn/README.md +++ b/benchmarks/object_detection/tensorflow/faster_rcnn/README.md @@ -152,7 +152,7 @@ $ python launch_benchmark.py \ --mode inference \ --socket-id 0 \ --checkpoint /home//faster_rcnn_resnet50_fp32_coco \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ -- config_file=pipeline.config ``` @@ -165,7 +165,7 @@ python launch_benchmark.py \ --mode inference \ --precision fp32 \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --model-source-dir /home//tensorflow/models \ --data-location /home//coco/output \ --in-graph /home//faster_rcnn_resnet50_fp32_coco/frozen_inference_graph.pb \ @@ -257,7 +257,7 @@ $ python launch_benchmark.py \ --mode inference \ --socket-id 0 \ --in-graph /home//faster_rcnn_int8_pretrained_model.pb \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --benchmark-only \ -- number_of_steps=5000 ``` @@ -272,19 +272,13 @@ python launch_benchmark.py \ --precision int8 \ --framework tensorflow \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --model-source-dir /home//tensorflow/models \ --data-location /home//coco_dataset/coco_val.record \ --in-graph /home//faster_rcnn_int8_pretrained_model.pb \ --accuracy-only ``` -The docker image (`intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl`) -used in the commands above were built using -[TensorFlow](git@github.com:tensorflow/tensorflow.git) master -([e889ea1](https://github.com/tensorflow/tensorflow/commit/e889ea1dd965c31c391106aa3518fc23d2689954)) and -[PR #25765](https://github.com/tensorflow/tensorflow/pull/25765). - 5. The log file is saved to the value of `--output-dir`. 
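Arguments that follow the `--` separator (such as `config_file=pipeline.config` and `number_of_steps=5000`) are forwarded to the model's own scripts rather than consumed by `launch_benchmark.py` itself. Schematically, such `key=value` pairs can be split like this (an illustrative sketch, not the repo's actual argument plumbing):

```
# Sketch: parse "key=value" custom args passed after the "--" separator.
def parse_custom_args(custom_args):
    parsed = {}
    for arg in custom_args:
        key, _, value = arg.partition("=")
        parsed[key] = int(value) if value.isdigit() else value
    return parsed

print(parse_custom_args(["number_of_steps=5000", "config_file=pipeline.config"]))
# -> {'number_of_steps': 5000, 'config_file': 'pipeline.config'}
```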
Below is a sample log file tail when running benchmarking for throughput diff --git a/benchmarks/object_detection/tensorflow/rfcn/README.md b/benchmarks/object_detection/tensorflow/rfcn/README.md index f42ab9313..efe0a9489 100644 --- a/benchmarks/object_detection/tensorflow/rfcn/README.md +++ b/benchmarks/object_detection/tensorflow/rfcn/README.md @@ -140,7 +140,7 @@ python launch_benchmark.py \ --mode inference \ --precision int8 \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --model-source-dir /home//tensorflow/models \ --data-location /home//val/val2017 \ --in-graph /home//rfcn_resnet101_int8_coco_pretrained_model.pb \ @@ -157,7 +157,7 @@ python launch_benchmark.py \ --mode inference \ --precision int8 \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --model-source-dir /home//tensorflow/models \ --data-location /home//coco/output/coco_val.record-00000-of-00001 \ --in-graph /home//rfcn_resnet101_int8_coco_pretrained_model.pb \ @@ -165,12 +165,6 @@ python launch_benchmark.py \ -- split="accuracy_message" ``` -The docker image (`intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl`) -used in the commands above were built using -[TensorFlow](git@github.com:tensorflow/tensorflow.git) master -([e889ea1](https://github.com/tensorflow/tensorflow/commit/e889ea1dd965c31c391106aa3518fc23d2689954)) and -[PR #25765](https://github.com/tensorflow/tensorflow/pull/25765). - Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands to get additional debug output or change the default output location. @@ -338,7 +332,7 @@ $ python launch_benchmark.py \ --mode inference \ --socket-id 0 \ --checkpoint /home//rfcn_resnet101_fp32_coco \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ -- config_file=rfcn_pipeline.config ``` @@ -351,7 +345,7 @@ python launch_benchmark.py \ --mode inference \ --precision fp32 \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --model-source-dir /home//tensorflow/models \ --data-location /home//coco/output/coco_val.record \ --in-graph /home//rfcn_resnet101_fp32_coco/frozen_inference_graph.pb \ diff --git a/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md b/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md index 33ac1d237..571fdeedd 100644 --- a/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md +++ b/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md @@ -121,7 +121,7 @@ python launch_benchmark.py \ --precision int8 \ --framework tensorflow \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-prs-b5d67b7-avx2-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --model-source-dir /home//tensorflow/models \ --data-location /home//val/val2017 \ --in-graph /home//ssdmobilenet_int8_pretrained_model.pb \ @@ -138,7 +138,7 @@ python launch_benchmark.py \ --precision int8 \ --framework tensorflow \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-prs-b5d67b7-avx2-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --model-source-dir /home//tensorflow/models \ --data-location 
/home//coco/output/coco_val.record \ --in-graph /home//ssdmobilenet_int8_pretrained_model.pb \ @@ -146,11 +146,6 @@ python launch_benchmark.py \ --batch-size 1 ``` -Note that it is required to use the docker image specified in the -commands above (`intelaipg/intel-optimized-tensorflow:latest-prs-b5d67b7`) -to run SSD-MobileNet Int8, as it includes PRs that are required to run -this model. - Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands to get additional debug output or change the default output location. @@ -354,7 +349,7 @@ $ python launch_benchmark.py \ --precision fp32 \ --mode inference \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.12.0-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --benchmark-only ``` @@ -373,7 +368,7 @@ $ python launch_benchmark.py \ --precision fp32 \ --mode inference \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.12.0-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --accuracy-only ``` diff --git a/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md b/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md index f4e419f79..c5dac8657 100644 --- a/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md +++ b/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md @@ -131,7 +131,7 @@ $ python launch_benchmark.py \ --mode inference \ --socket-id 0 \ --batch-size=1 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.13.1-devel-mkl-py3 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14-py3 \ --benchmark-only ``` @@ -151,7 +151,7 @@ $ python launch_benchmark.py \ --mode inference \ --socket-id 0 \ --batch-size=1 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.13.1-devel-mkl-py3 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14-py3 \ --accuracy-only ``` diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md b/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md index 320223c95..9d2cb7b3c 100644 --- a/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md +++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md @@ -103,7 +103,7 @@ python launch_benchmark.py \ --mode inference \ --precision int8 \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:nightly-master-devel-mkl-py3 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14-py3 \ --model-source-dir /home//SSD.TensorFlow \ --data-location /home//tf_records \ --in-graph /home//ssdvgg16_int8_pretrained_model.pb \ @@ -133,7 +133,7 @@ python launch_benchmark.py \ --mode inference \ --precision int8 \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:nightly-master-devel-mkl-py3 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14-py3 \ --model-source-dir /home//SSD.TensorFlow \ --data-location /home//tf_records \ --in-graph /home//ssdvgg16_int8_pretrained_model.pb \ @@ -210,7 +210,7 @@ $ python launch_benchmark.py \ --framework tensorflow \ --precision fp32 \ --mode inference \ - --docker-image intelaipg/intel-optimized-tensorflow:nightly-master-devel-mkl-py3 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14-py3 \ --batch-size 1 \ --socket-id 0 \ --num-inter-threads 11 \ @@ -237,7 +237,7 @@ python launch_benchmark.py \ --mode inference \ --precision fp32 \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:nightly-master-devel-mkl-py3 \ + --docker-image 
intelaipg/intel-optimized-tensorflow:1.14-py3 \ --model-source-dir /home//SSD.TensorFlow \ --data-location /home//tf_records \ --in-graph /home//ssdvgg16_fp32_pretrained_model.pb \ diff --git a/benchmarks/recommendation/tensorflow/ncf/README.md b/benchmarks/recommendation/tensorflow/ncf/README.md index c6b92d938..73efa89a8 100644 --- a/benchmarks/recommendation/tensorflow/ncf/README.md +++ b/benchmarks/recommendation/tensorflow/ncf/README.md @@ -53,7 +53,7 @@ $ python launch_benchmark.py \ --framework tensorflow \ --precision fp32 \ --mode inference \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image intelaipg/intel-optimized-tensorflow:1.14 ``` The tail of Throughput log, looks as below. @@ -83,7 +83,7 @@ $ python launch_benchmark.py \ --framework tensorflow \ --precision fp32 \ --mode inference \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image intelaipg/intel-optimized-tensorflow:1.14 ``` The tail of Latency log, looks as below. @@ -115,7 +115,7 @@ $ python launch_benchmark.py \ --framework tensorflow \ --precision fp32 \ --mode inference \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image intelaipg/intel-optimized-tensorflow:1.14 ``` The tail of accuracy log, looks as below. diff --git a/benchmarks/text_to_speech/tensorflow/wavenet/README.md b/benchmarks/text_to_speech/tensorflow/wavenet/README.md index 340736a6e..782a55964 100644 --- a/benchmarks/text_to_speech/tensorflow/wavenet/README.md +++ b/benchmarks/text_to_speech/tensorflow/wavenet/README.md @@ -71,7 +71,7 @@ python launch_benchmark.py \ --framework tensorflow \ --socket-id 0 \ --num-cores 1 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --model-source-dir /home//wavenet/tensorflow-wavenet \ --checkpoint /home//wavenet_checkpoints \ -- checkpoint_name=model.ckpt-99 sample=8510 diff --git a/docs/general/tensorflow/LaunchBenchmark.md b/docs/general/tensorflow/LaunchBenchmark.md index 59b9eb68d..139070e5f 100644 --- a/docs/general/tensorflow/LaunchBenchmark.md +++ b/docs/general/tensorflow/LaunchBenchmark.md @@ -164,7 +164,7 @@ $ python launch_benchmark.py \ --batch-size 1 \ --socket-id 0 \ --data-location /home//Imagenet_Validation \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --volume /home//custom_folder_1:/custom_folder_1 \ --volume /home//custom_folder_2:/custom_folder_2 ``` @@ -201,7 +201,7 @@ Below is an example showing how to use the `--debug` flag: --batch-size=1 \ --socket-id 0 \ --data-location /home//Imagenet_Validation \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ --debug # ls From 37958bdb7a8d5a55fe15662f94d91bbf8de32373 Mon Sep 17 00:00:00 2001 From: Dina Suehiro Jones Date: Thu, 9 May 2019 13:07:54 -0700 Subject: [PATCH 32/62] Update FasterRCNN Int8 README file to note benchmarking uses raw images (#300) * Update FasterRCNN Int8 README file to note benchmarking uses raw images * Fixing 'TR' records to 'TF' records * Reformatting FP32 steps so that the raw image download is one step * Adding missing parentheses --- .../tensorflow/faster_rcnn/README.md | 42 ++++++++++++------- 1 file changed, 27 insertions(+), 15 deletions(-) diff --git a/benchmarks/object_detection/tensorflow/faster_rcnn/README.md 
b/benchmarks/object_detection/tensorflow/faster_rcnn/README.md index 072d91d2c..b5ee91e71 100644 --- a/benchmarks/object_detection/tensorflow/faster_rcnn/README.md +++ b/benchmarks/object_detection/tensorflow/faster_rcnn/README.md @@ -44,8 +44,8 @@ sed -i.bak 95s/input_config/input_config[0]/ offline_eval_map_corloc.py ``` -2. Download the 2017 validation -[COCO dataset](http://cocodataset.org/#home) and annotations: +2. Download and unzip the 2017 validation +[COCO dataset](http://cocodataset.org/#home) images: ``` $ mkdir val @@ -53,7 +53,10 @@ $ cd val $ wget http://images.cocodataset.org/zips/val2017.zip $ unzip val2017.zip $ cd .. +``` +3. Download and unzip the coco dataset annotations file: +``` $ mkdir annotations $ cd annotations $ wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip @@ -73,14 +76,15 @@ $ echo "{ \"images\": {}, \"categories\": {}}" > empty.json $ cd .. ``` -3. Now that you have the raw COCO dataset, we need to convert it to the +4. Now that you have the raw COCO dataset and annotations files, we need to convert it to the TF records format in order to use it with the inference script. We will do this by running the `create_coco_tf_record.py` file in the TensorFlow models repo. Follow [instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md#dependencies) to install the required dependencies (`cocoapi` and `Protobuf 3.0.0`). Follow the steps below to navigate to the proper directory and point the -script to the raw COCO dataset files that you have downloaded in step 2. +script to the raw COCO dataset files that you have downloaded in step 2 +and the annotations files that you downloaded and created in step 3. The `--output_dir` is the location where the TF record files will be located after the script has completed. @@ -113,13 +117,13 @@ $ git checkout master The `coco_val.record` file is what we will use in this inference example. -4. Download and extract the pre-trained model. +5. Download and extract the pre-trained model. ``` $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/faster_rcnn_resnet50_fp32_coco_pretrained_model.tar.gz $ tar -xzvf faster_rcnn_resnet50_fp32_coco_pretrained_model.tar.gz ``` -5. Clone the [intelai/models](https://github.com/intelai/models) repo. +6. Clone the [intelai/models](https://github.com/intelai/models) repo. This repo has the launch script for running benchmarking. ``` @@ -133,10 +137,10 @@ Receiving objects: 100% (11/11), done. Resolving deltas: 100% (3/3), done. ``` -6. Run the `launch_benchmark.py` script from the intelai/models repo +7. Run the `launch_benchmark.py` script from the intelai/models repo , with the appropriate parameters including: the -`coco_val.record` data location (from step 3), the pre-trained model -`pipeline.config` file and the checkpoint location (from step 4, and the +`coco_val.record` data location (from step 4), the pre-trained model +`pipeline.config` file and the checkpoint location (from step 5), and the location of your `tensorflow/models` clone (from step 1). 
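Before launching, you can sanity-check the conversion output. One quick way (an optional sketch using the TF 1.x record iterator, not part of the steps above) is to count the examples in the generated `coco_val.record` file:

```
# Sketch: count the examples in the generated TF records file.
import tensorflow as tf

count = sum(1 for _ in tf.python_io.tf_record_iterator("coco_val.record"))
print("coco_val.record contains %d examples" % count)  # val2017 has 5,000 images
```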
Run benchmarking for throughput and latency:
@@ -158,7 +162,7 @@ $ python launch_benchmark.py \
     --mode inference \
     --socket-id 0 \
     --checkpoint /home//faster_rcnn_resnet50_fp32_coco \
-    --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \
+    --docker-image intelaipg/intel-optimized-tensorflow:1.14 \
     -- config_file=pipeline.config
```

Or for accuracy where the `--data-location` is the path the directory
where your `coco_val.record` file is located and the `--in-graph` is
-the pre-trained graph located in the pre-trained model directory (from step 4):
+the pre-trained graph located in the pre-trained model directory (from step 5):
```
python launch_benchmark.py \
    --model-name faster_rcnn \
@@ -172,7 +176,7 @@ python launch_benchmark.py \
    --accuracy-only
```

-7. The log file is saved to the value of `--output-dir`.
+8. The log file is saved to the value of `--output-dir`.

Below is a sample log file tail when running benchmarking for throughput
and latency:
@@ -218,7 +222,13 @@ better performance results for Int8 precision models with smaller batch
sizes. If you want to disable the use of TCMalloc, set `--disable-tcmalloc=True`
when calling `launch_benchmark.py` and the script will run without TCMalloc.

-1. Please follow step 1, 2 and 3 of Faster R-CNN FP32 instructions written above.
+1. Please follow the steps from the
+[Faster R-CNN FP32 instructions](#fp32-inference-instructions) written
+above for cloning dependency repositories and getting the COCO dataset:
+* Performance benchmarking uses the raw COCO dataset images. Follow steps
+1 and 2 from the FP32 instructions.
+* Accuracy testing requires the COCO dataset to be in the TF records
+format. Follow steps 1, 2, 3, and 4 from the FP32 instructions.

2. Download the pre-trained model.
```
@@ -244,12 +254,14 @@ with the appropriate parameters. To run on single socket use `--socket_id` switc
by default it will be using all available sockets. Optional parameter
`number_of_steps` (default value = 5000) can be added at the end of command
after `--` as shown below:

-Run benchmarking for throughput and latency:
+Run benchmarking for throughput and latency using the following command.
+The `--data-location` is the path to the directory that contains the
+raw COCO dataset validation images which you downloaded and unzipped:
```
$ cd /home//models/benchmarks
$ python launch_benchmark.py \
-    --data-location /home//coco/output/ \
+    --data-location /home//val2017 \
    --model-source-dir /home//tensorflow/models \
    --model-name faster_rcnn \
    --framework tensorflow \
@@ -274,7 +286,7 @@ python launch_benchmark.py \
    --socket-id 0 \
    --docker-image intelaipg/intel-optimized-tensorflow:1.14 \
    --model-source-dir /home//tensorflow/models \
-    --data-location /home//coco_dataset/coco_val.record \
+    --data-location /home//output/coco_val.record \
    --in-graph /home//faster_rcnn_int8_pretrained_model.pb \
    --accuracy-only
```

From 94edbc7287efd1387694c123a8dc3132b989d9ff Mon Sep 17 00:00:00 2001
From: Jitendra Patil
Date: Mon, 13 May 2019 09:53:58 -0700
Subject: [PATCH 33/62] fix docker build command (#306)

---
 docs/general/tensorflow_serving/InstallationGuide.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/general/tensorflow_serving/InstallationGuide.md b/docs/general/tensorflow_serving/InstallationGuide.md
index 3c6b84c2f..bc557d040 100644
--- a/docs/general/tensorflow_serving/InstallationGuide.md
+++ b/docs/general/tensorflow_serving/InstallationGuide.md
@@ -54,6 +54,7 @@ The recommended way to use TensorFlow Serving is with Docker images. Let's bui
 $ cd $TF_SERVING_ROOT/tensorflow_serving/tools/docker/
 $ docker build \
     -f Dockerfile.devel-mkl \
+    --build-arg TF_SERVING_BAZEL_OPTIONS="--incompatible_disallow_data_transition=false --incompatible_disallow_filetype=false" \
     --build-arg TF_SERVING_VERSION_GIT_BRANCH="1.13.0" \
     -t tensorflow/serving:latest-devel-mkl .
 ```

From 5e19f8a9f2744dbf1ba91adb18acc7a9fd1f11fc Mon Sep 17 00:00:00 2001
From: Karthik Vadla
Date: Thu, 16 May 2019 16:52:48 -0700
Subject: [PATCH 34/62] ADD: Tensorflow Serving Benchmarking (#307)

---
 benchmarks/README.md                          |  11 +-
 .../common/tensorflow_serving/__init__.py     |  19 +++
 .../build_tfserving_image.sh                  |  73 ++++++++
 benchmarks/common/tensorflow_serving/start.sh | 161 ++++++++++++++++++
 .../tensorflow_serving/__init__.py            |  19 +++
 .../tensorflow_serving/inceptionv3/README.md  |  91 ++++++++++
 .../inceptionv3/__init__.py                   |  19 +++
 .../inceptionv3/inference/__init__.py         |  19 +++
 .../inceptionv3/inference/fp32/__init__.py    |  19 +++
 .../fp32/image_recognition_benchmark.py       | 117 +++++++++++++
 .../fp32}/image_recognition_client.py         |  76 ++++-----
 .../fp32}/model_graph_to_saved_model.py       |   2 -
 .../inceptionv3/inference/fp32/util.py        |  61 +++++++
 .../inceptionv3/inference/int8/__init__.py    |  19 +++
 benchmarks/launch_benchmark.py                |  74 ++++++--
 .../tensorflow_serving/InstallationGuide.md   |   6 +-
 .../tensorflow_serving/Tutorial.md            |  35 ++--
 .../src/image_recognition_benchmark.py        | 117 -------------
 .../tensorflow_serving/src/util.py            |  61 -------
 tests/unit/test_launch_benchmark.py           |  72 +++++++-
 tox.ini                                       |   1 +
 21 files changed, 813 insertions(+), 259 deletions(-)
 create mode 100644 benchmarks/common/tensorflow_serving/__init__.py
 create mode 100644 benchmarks/common/tensorflow_serving/build_tfserving_image.sh
 create mode 100644 benchmarks/common/tensorflow_serving/start.sh
 create mode 100644 benchmarks/image_recognition/tensorflow_serving/__init__.py
 create mode 100644 benchmarks/image_recognition/tensorflow_serving/inceptionv3/README.md
 create mode 100644 benchmarks/image_recognition/tensorflow_serving/inceptionv3/__init__.py
 create mode 100644 benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/__init__.py
 create mode 100644 benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/__init__.py
 create mode 100644 benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/image_recognition_benchmark.py
 rename {docs/image_recognition/tensorflow_serving/src => benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32}/image_recognition_client.py (52%)
 rename {docs/image_recognition/tensorflow_serving/src => benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32}/model_graph_to_saved_model.py (99%)
 create mode 100644 benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/util.py
 create mode 100644 benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/int8/__init__.py
 delete mode 100644 docs/image_recognition/tensorflow_serving/src/image_recognition_benchmark.py
 delete mode 100644 docs/image_recognition/tensorflow_serving/src/util.py

diff --git a/benchmarks/README.md b/benchmarks/README.md
index d0f2a15b5..0875f2baa 100644
--- a/benchmarks/README.md
+++ b/benchmarks/README.md
@@ -11,7 +11,7 @@ dependencies to be installed:
 * [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git)
 * `wget` for downloading pre-trained models

-## Use Cases
+## TensorFlow Use Cases

 | Use Case                | Framework     | Model               | Mode      | Benchmarking Instructions    |
 |
-----------------------| --------------| ------------------- | --------- |------------------------------| @@ -42,3 +42,12 @@ dependencies to be installed: | Recommendation | TensorFlow | [Wide & Deep Large Dataset](https://arxiv.org/pdf/1606.07792.pdf) | Inference | [Int8](recommendation/tensorflow/wide_deep_large_ds/README.md#int8-inference-instructions) [FP32](recommendation/tensorflow/wide_deep_large_ds/README.md#fp32-inference-instructions) | | Recommendation | TensorFlow | [Wide & Deep](https://arxiv.org/pdf/1606.07792.pdf) | Inference | [FP32](recommendation/tensorflow/wide_deep/README.md#fp32-inference-instructions) | | Text-to-Speech | TensorFlow | [WaveNet](https://arxiv.org/pdf/1609.03499.pdf) | Inference | [FP32](text_to_speech/tensorflow/wavenet/README.md#fp32-inference-instructions) | + + +## TensorFlow Serving Use Cases + + +| Use Case | Framework | Model | Mode | Benchmarking Instructions | +| -----------------------| --------------| ------------------- | --------- |------------------------------| +| Image Recognition | TensorFlow Serving | [Inception V3](https://arxiv.org/pdf/1512.00567.pdf) | Inference | [FP32](image_recognition/tensorflow_serving/inceptionv3/README.md#fp32-inference-instructions) | + diff --git a/benchmarks/common/tensorflow_serving/__init__.py b/benchmarks/common/tensorflow_serving/__init__.py new file mode 100644 index 000000000..cf793ec6a --- /dev/null +++ b/benchmarks/common/tensorflow_serving/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/common/tensorflow_serving/build_tfserving_image.sh b/benchmarks/common/tensorflow_serving/build_tfserving_image.sh new file mode 100644 index 000000000..a47505f88 --- /dev/null +++ b/benchmarks/common/tensorflow_serving/build_tfserving_image.sh @@ -0,0 +1,73 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +# Bash script to build tensorflow serving image +# Setup proxy on your terminal before running the script. 
+ +# To build image separately +# TF_SERVING_VERSION=1.13.0 MKL_IMAGE_TAG=tensorflow/serving:latest-mkl bash build_tfserving_image.sh + +#!/usr/bin/env bash +set -e +set -x + +WORKDIR=serving_workspace + +if [ -d ${WORKDIR} ]; then + rm -rf ${WORKDIR} +fi + +pushd $(pwd) + +mkdir -p ${WORKDIR} +cd ${WORKDIR} + +# Build Tensorflow Serving image +TF_SERVING_VERSION=${TF_SERVING_VERSION:-"1.13.0"} +echo "Using TF_SERVING_VERSION=${TF_SERVING_VERSION} to build docker image" + +# Clone official tensorflow serving repo +git clone https://github.com/tensorflow/serving.git + +TF_SERVING_ROOT=$(pwd)/serving +cd ${TF_SERVING_ROOT}/tensorflow_serving/tools/docker/ + +# Build Dockerfile.devel-mkl +docker build \ + --build-arg TF_SERVING_BAZEL_OPTIONS="--incompatible_disallow_data_transition=false --incompatible_disallow_filetype=false" \ + --build-arg TF_SERVING_VERSION_GIT_BRANCH=${TF_SERVING_VERSION} \ + --build-arg HTTP_PROXY=${HTTP_PROXY} \ + --build-arg HTTPS_PROXY=${HTTPS_PROXY} \ + --build-arg http_proxy=${http_proxy} \ + --build-arg https_proxy=${https_proxy} \ + -f Dockerfile.devel-mkl -t tensorflow/serving:latest-devel-mkl . + +# Build Dockerfile.mkl, which uses above image as base_image +docker build \ + --build-arg TF_SERVING_VERSION_GIT_BRANCH=${TF_SERVING_VERSION} \ + --build-arg HTTP_PROXY=${HTTP_PROXY} \ + --build-arg HTTPS_PROXY=${HTTPS_PROXY} \ + --build-arg http_proxy=${http_proxy} \ + --build-arg https_proxy=${https_proxy} \ + -f Dockerfile.mkl -t ${MKL_IMAGE_TAG} . + +popd + +rm -rf ${WORKDIR} + +echo "Image built with tag: ${MKL_IMAGE_TAG}" diff --git a/benchmarks/common/tensorflow_serving/start.sh b/benchmarks/common/tensorflow_serving/start.sh new file mode 100644 index 000000000..e611cc931 --- /dev/null +++ b/benchmarks/common/tensorflow_serving/start.sh @@ -0,0 +1,161 @@ +#!/usr/bin/env bash +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# +#!/usr/bin/env bash +set -e +set -x + +echo 'Running with parameters:' +echo " USE_CASE: ${USE_CASE}" +echo " FRAMEWORK: ${FRAMEWORK}" +echo " WORKSPACE: ${WORKSPACE}" +echo " IN_GRAPH: ${IN_GRAPH}" +echo " MODEL_NAME: ${MODEL_NAME}" +echo " MODE: ${MODE}" +echo " PRECISION: ${PRECISION}" +echo " BATCH_SIZE: ${BATCH_SIZE}" +echo " BENCHMARK_ONLY: ${BENCHMARK_ONLY}" +echo " ACCURACY_ONLY: ${ACCURACY_ONLY}" +echo " OMP_NUM_THREADS: ${OMP_NUM_THREADS}" +echo " NUM_INTRA_THREADS: ${NUM_INTRA_THREADS}" +echo " NUM_INTER_THREADS: ${NUM_INTER_THREADS}" +echo " OUTPUT_DIR: ${OUTPUT_DIR}" +echo " TF_SERVING_VERSION: ${TF_SERVING_VERSION}" + + +if [ ${ACCURACY_ONLY} == "True" ]; then + echo "Accuracy is not supported with Tensorflow Serving" + exit 1 +fi + +WORKDIR=workspace + +if [ -d ${WORKDIR} ]; then + rm -rf ${WORKDIR} +fi + +pushd $(pwd) + +mkdir -p ${WORKDIR} +cd ${WORKDIR} + +# Check docker +if ! [[ $(which docker) && $(docker --version) ]]; then + echo "Docker not found, please install docker to proceed." 
+    exit 1
+fi
+
+# Check for pip
+if ! [[ $(which pip) && $(pip --version) ]]; then
+    echo "pip not found, please install pip to proceed."
+    exit 1
+fi
+
+timestamp=`date +%Y%m%d_%H%M%S`
+LOG_FILENAME="benchmark_${MODEL_NAME}_${MODE}_${PRECISION}_${timestamp}.log"
+if [ ! -d "${OUTPUT_DIR}" ]; then
+    mkdir ${OUTPUT_DIR}
+fi
+
+MKL_IMAGE_TAG=tensorflow/serving:latest-mkl
+
+# Build Tensorflow Serving docker image
+echo "Building tensorflow serving image..."
+echo "The first time it takes a few minutes to build the images; consecutive builds are much faster"
+
+TF_SERVING_VERSION=${TF_SERVING_VERSION} MKL_IMAGE_TAG=${MKL_IMAGE_TAG} bash ${WORKSPACE}/build_tfserving_image.sh
+
+function docker_run(){
+    docker run \
+        --name=${CONTAINER_NAME} \
+        --rm \
+        -d \
+        -p 8500:8500 \
+        -v /tmp:/models/${MODEL_NAME} \
+        -e MODEL_NAME=${MODEL_NAME} \
+        -e OMP_NUM_THREADS=${OMP_NUM_THREADS} \
+        -e TENSORFLOW_INTER_OP_PARALLELISM=${NUM_INTER_THREADS} \
+        -e TENSORFLOW_INTRA_OP_PARALLELISM=${NUM_INTRA_THREADS} \
+        ${MKL_IMAGE_TAG}
+}
+
+
+function resnet50_or_inceptionv3(){
+    # Setup virtual env
+    pip install virtualenv
+    virtualenv venv
+
+    source venv/bin/activate
+    pip install grpcio \
+            requests \
+            intel-tensorflow \
+            tensorflow-serving-api
+
+    # cd to image recognition tfserving scripts
+    cd ${WORKSPACE}/../../${USE_CASE}/${FRAMEWORK}/${MODEL_NAME}/${MODE}/${PRECISION}
+
+    # by default converted model is saved at /tmp/1
+    rm -rf /tmp/1
+
+    # convert pretrained model to savedmodel
+    python model_graph_to_saved_model.py --import_path ${IN_GRAPH}
+
+    RUNNING=$(docker ps --filter="expose=8501/tcp" -q | xargs)
+    if [[ -n ${RUNNING} ]]; then
+        docker rm -f ${RUNNING}
+    fi
+
+    CONTAINER_NAME=tfserving_${RANDOM}
+
+    # Run container
+    MKL_IMAGE_TAG=${MKL_IMAGE_TAG} CONTAINER_NAME=${CONTAINER_NAME} docker_run
+
+    # Test
+    python image_recognition_client.py --model ${MODEL_NAME}
+
+
+    if [ ${BATCH_SIZE} == 1 ];then
+        # Test Average latency
+        python image_recognition_benchmark.py --batch_size ${BATCH_SIZE} --model ${MODEL_NAME}
+    else
+        # Test max throughput
+        python image_recognition_benchmark.py --batch_size ${BATCH_SIZE} --model ${MODEL_NAME}
+    fi
+
+    # Clean up
+    docker rm -f ${CONTAINER_NAME}
+}
+
+LOGFILE=${OUTPUT_DIR}/${LOG_FILENAME}
+
+MODEL_NAME=$(echo ${MODEL_NAME} | tr 'A-Z' 'a-z')
+if [ ${MODEL_NAME} == "inceptionv3" ] || [ ${MODEL_NAME} == "resnet50" ] && [ ${PRECISION} == "fp32" ]; then
+    resnet50_or_inceptionv3 | tee -a ${LOGFILE}
+else
+    echo "Unsupported Model: ${MODEL_NAME} or Precision: ${PRECISION}"
+    exit 1
+fi
+
+popd
+
+# Clean up work directory
+rm -rf ${WORKDIR}
+
+echo "Log output location: ${LOGFILE}" | tee -a ${LOGFILE}
diff --git a/benchmarks/image_recognition/tensorflow_serving/__init__.py b/benchmarks/image_recognition/tensorflow_serving/__init__.py
new file mode 100644
index 000000000..cf793ec6a
--- /dev/null
+++ b/benchmarks/image_recognition/tensorflow_serving/__init__.py
@@ -0,0 +1,19 @@
+#
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: EPL-2.0
+#
diff --git a/benchmarks/image_recognition/tensorflow_serving/inceptionv3/README.md b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/README.md
new file mode 100644
index 000000000..1ddb7bb14
--- /dev/null
+++ b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/README.md
@@ -0,0 +1,91 @@
+# Inception V3
+
+This document has instructions for how to run Inception V3 for the
+following modes/precisions:
+* [FP32 inference](#fp32-inference-instructions)
+
+## FP32 Inference Instructions
+
+1. Clone this [intelai/models](https://github.com/IntelAI/models)
+repository:
+
+```
+$ git clone https://github.com/IntelAI/models.git
+```
+
+2. Download the pre-trained model.
+```
+$ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/inceptionv3_fp32_pretrained_model.pb
+```
+
+3. Navigate to the `benchmarks` directory in your local clone of
+the [intelai/models](https://github.com/IntelAI/models) repo from step 1.
+The `launch_benchmark.py` script in the `benchmarks` directory is
+used to start a TensorFlow Serving benchmark run in an optimized
+TensorFlow Serving docker container. It has arguments to specify the
+model, framework, mode, precision, and input graph.
+
+Substitute in your own `--in-graph` pretrained model file path (from step 2).
+
+4. Inception V3 can be run for `latency` benchmarking and `throughput`
+benchmarking. Use one of the examples below, depending on your use case.
+
+* For latency with dummy data (using `--batch-size 1`):
+
+```
+python launch_benchmark.py \
+    --in-graph /home//inceptionv3_fp32_pretrained_model.pb \
+    --model-name inceptionv3 \
+    --framework tensorflow_serving \
+    --precision fp32 \
+    --mode inference \
+    --batch-size=1 \
+    --benchmark-only
+```
+Example log tail when benchmarking for latency:
+```
+Iteration 35: 0.019 sec
+Iteration 36: 0.020 sec
+Iteration 37: 0.018 sec
+Iteration 38: 0.018 sec
+Iteration 39: 0.019 sec
+Iteration 40: 0.018 sec
+Average time: 0.019 sec
+Batch size = 1
+Latency: 18.801 ms
+Throughput: 53.189 images/sec
+tfserving_3784
+Log output location: {--output-dir value}/benchmark_inceptionv3_inference_fp32_20190516_103531.log
+```
+
+* For throughput with dummy data (using `--batch-size 128`):
+
+```
+python launch_benchmark.py \
+    --in-graph /home//inceptionv3_fp32_pretrained_model.pb \
+    --model-name inceptionv3 \
+    --framework tensorflow_serving \
+    --precision fp32 \
+    --mode inference \
+    --batch-size=128 \
+    --benchmark-only
+```
+Example log tail when benchmarking for throughput:
+```
+Iteration 34: 0.779 sec
+Iteration 35: 0.916 sec
+Iteration 36: 0.809 sec
+Iteration 37: 0.793 sec
+Iteration 38: 0.813 sec
+Iteration 39: 0.796 sec
+Iteration 40: 0.796 sec
+Average time: 0.817 sec
+Batch size = 128
+Throughput: 156.752 images/sec
+tfserving_5299
+Log output location: {--output-dir value}/benchmark_inceptionv3_inference_fp32_20190516_103958.log
+```
+
+Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands
+to get additional debug output or change the default output location.
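The reported metrics are related by `throughput = batch_size / average_time`, which gives a quick way to sanity-check a log tail. Checking the sample values above:

```
# The metrics in the sample logs are consistent with
# throughput = batch_size / average_time.
batch1_avg = 0.018801    # sec, i.e. "Latency: 18.801 ms"
batch128_avg = 0.817     # sec, the "Average time" for batch size 128
print(1 / batch1_avg)      # ~53.189 images/sec, matches the batch-size-1 log
print(128 / batch128_avg)  # ~156.7 images/sec; the log reports 156.752
                           # (the printed average time is rounded)
```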
diff --git a/benchmarks/image_recognition/tensorflow_serving/inceptionv3/__init__.py b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/__init__.py new file mode 100644 index 000000000..cf793ec6a --- /dev/null +++ b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/__init__.py b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/__init__.py new file mode 100644 index 000000000..cf793ec6a --- /dev/null +++ b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/__init__.py b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/__init__.py new file mode 100644 index 000000000..cf793ec6a --- /dev/null +++ b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/image_recognition_benchmark.py b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/image_recognition_benchmark.py new file mode 100644 index 000000000..3178741db --- /dev/null +++ b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/image_recognition_benchmark.py @@ -0,0 +1,117 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +"""Send simulated image data to tensorflow_model_server loaded with ResNet50 or InceptionV3 model. + +""" + +from __future__ import print_function + +import os +import random + +import grpc +import numpy as np +import sys +import tensorflow as tf +import time +from tensorflow_serving.apis import predict_pb2 +from tensorflow_serving.apis import prediction_service_pb2_grpc + +from util import preprocess_image, parse_example_proto + +tf.app.flags.DEFINE_string('server', 'localhost:8500', + 'PredictionService host:port') +tf.app.flags.DEFINE_integer('batch_size', 1, 'Batch size to use') +tf.app.flags.DEFINE_string('data_dir', '', 'path to images in TF records format') +tf.app.flags.DEFINE_string('model', 'resnet50', 'Name of model (resnet50 or inceptionv3).') +FLAGS = tf.app.flags.FLAGS + + +def sample_images(image_size): + """Pull a random batch of images from FLAGS.data_dir containing TF record formatted ImageNet validation set + Returns: + ndarray of float32 with shape [FLAGS.batch_size, image_size, image_size, 3] + """ + + sample_file = random.choice(os.listdir(FLAGS.data_dir)) + dataset = tf.data.TFRecordDataset(os.path.join(FLAGS.data_dir, sample_file)) + dataset = dataset.map(lambda x: parse_example_proto(x)).shuffle(True).batch(FLAGS.batch_size) + iterator = dataset.make_one_shot_iterator() + next_element = iterator.get_next() + with tf.Session() as sess: + images, labels = sess.run(next_element) + images = np.array([sess.run(preprocess_image(x, FLAGS.model, image_size)) for x in images]) + + return images + + +def main(_): + if FLAGS.model == 'resnet50': + image_size = 224 + elif FLAGS.model == 'inceptionv3': + image_size = 299 + else: + print('Please specify model as either resnet50 or inceptionv3.') + sys.exit(-1) + + channel = grpc.insecure_channel(FLAGS.server) + stub = prediction_service_pb2_grpc.PredictionServiceStub(channel) + i = 0 + num_iteration = 40 + warm_up_iteration = 10 + total_time = 0 + for _ in range(num_iteration): + i += 1 + if FLAGS.data_dir: + image_np = sample_images(image_size) + else: + image_np = np.random.rand(FLAGS.batch_size, image_size, image_size, 3).astype(np.float32) + if FLAGS.model == 'resnet50': + # For ResNet50, rescale to [0, 256] + image_np *= 256.0 + elif FLAGS.model == 'inceptionv3': + # For InceptionV3, rescale to [-1, 1] + image_np = (image_np - 0.5) * 2.0 + + request = predict_pb2.PredictRequest() + request.model_spec.name = 
FLAGS.model + request.model_spec.signature_name = 'serving_default' + request.inputs['input'].CopyFrom( + tf.contrib.util.make_tensor_proto(image_np, shape=[FLAGS.batch_size, image_size, image_size, 3])) + start_time = time.time() + stub.Predict(request, 10.0) # 10 secs timeout + time_consume = time.time() - start_time + print('Iteration %d: %.3f sec' % (i, time_consume)) + if i > warm_up_iteration: + total_time += time_consume + + time_average = total_time / (num_iteration - warm_up_iteration) + print('Average time: %.3f sec' % (time_average)) + + print('Batch size = %d' % FLAGS.batch_size) + if (FLAGS.batch_size == 1): + print('Latency: %.3f ms' % (time_average * 1000)) + + print('Throughput: %.3f images/sec' % (FLAGS.batch_size / time_average)) + + +if __name__ == '__main__': + tf.app.run() diff --git a/docs/image_recognition/tensorflow_serving/src/image_recognition_client.py b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/image_recognition_client.py similarity index 52% rename from docs/image_recognition/tensorflow_serving/src/image_recognition_client.py rename to benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/image_recognition_client.py index abdc77d05..2926f4621 100644 --- a/docs/image_recognition/tensorflow_serving/src/image_recognition_client.py +++ b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/image_recognition_client.py @@ -24,12 +24,11 @@ from __future__ import print_function -import sys import grpc -import requests import numpy as np +import requests +import sys import tensorflow as tf - from tensorflow_serving.apis import predict_pb2 from tensorflow_serving.apis import prediction_service_pb2_grpc @@ -41,45 +40,46 @@ tf.app.flags.DEFINE_string('server', 'localhost:8500', 'PredictionService host:port') tf.app.flags.DEFINE_string('image', '', 'path to image in JPEG format') -tf.app.flags.DEFINE_string('model', 'resnet50', 'Name of model (resnet50 or inceptionv3).') +tf.app.flags.DEFINE_string('model', 'resnet50', 'Name of model (resnet50 or Inceptionv3).') FLAGS = tf.app.flags.FLAGS def main(_): - if FLAGS.model == 'resnet50': - image_size = 224 - elif FLAGS.model == 'inceptionv3': - image_size = 299 - else: - print('Please specify model as either resnet50 or inceptionv3.') - sys.exit(-1) - - if FLAGS.image: - with open(FLAGS.image, 'rb') as f: - data = f.read() - else: - # Download the image URL if a path is not provided as input - dl_request = requests.get(IMAGE_URL, stream=True) - dl_request.raise_for_status() - data = dl_request.content - - channel = grpc.insecure_channel(FLAGS.server) - stub = prediction_service_pb2_grpc.PredictionServiceStub(channel) - request = predict_pb2.PredictRequest() - request.model_spec.name = FLAGS.model - request.model_spec.signature_name = 'serving_default' - image_data = tf.reshape(preprocess_image(data, FLAGS.model, image_size), [1, image_size, image_size, 3]) - - # Run the graph - with tf.Session() as sess: - sess.run(tf.global_variables_initializer()) - image_data = (sess.run(image_data)) - - request.inputs['input'].CopyFrom(tf.contrib.util.make_tensor_proto(image_data, shape=[1, image_size, image_size, 3])) - result = stub.Predict(request) - print(result) - print('Predicted class: ', str(np.argmax(result.outputs['predict'].float_val))) + if FLAGS.model == 'resnet50': + image_size = 224 + elif FLAGS.model == 'inceptionv3': + image_size = 299 + else: + print('Please specify model as either resnet50 or Inceptionv3.') + sys.exit(-1) + + if FLAGS.image: + with 
open(FLAGS.image, 'rb') as f: + data = f.read() + else: + # Download the image URL if a path is not provided as input + dl_request = requests.get(IMAGE_URL, stream=True) + dl_request.raise_for_status() + data = dl_request.content + + channel = grpc.insecure_channel(FLAGS.server) + stub = prediction_service_pb2_grpc.PredictionServiceStub(channel) + request = predict_pb2.PredictRequest() + request.model_spec.name = FLAGS.model + request.model_spec.signature_name = 'serving_default' + image_data = tf.reshape(preprocess_image(data, FLAGS.model, image_size), [1, image_size, image_size, 3]) + + # Run the graph + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + image_data = (sess.run(image_data)) + + request.inputs['input'].CopyFrom( + tf.contrib.util.make_tensor_proto(image_data, shape=[1, image_size, image_size, 3])) + result = stub.Predict(request) + print(result) + print('Predicted class: ', str(np.argmax(result.outputs['predict'].float_val))) if __name__ == '__main__': - tf.app.run() + tf.app.run() diff --git a/docs/image_recognition/tensorflow_serving/src/model_graph_to_saved_model.py b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/model_graph_to_saved_model.py similarity index 99% rename from docs/image_recognition/tensorflow_serving/src/model_graph_to_saved_model.py rename to benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/model_graph_to_saved_model.py index ca4f8092c..a593539ca 100644 --- a/docs/image_recognition/tensorflow_serving/src/model_graph_to_saved_model.py +++ b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/model_graph_to_saved_model.py @@ -25,9 +25,7 @@ from __future__ import print_function -import os import sys - import tensorflow as tf import tensorflow.tools.graph_transforms as graph_transforms diff --git a/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/util.py b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/util.py new file mode 100644 index 000000000..70eaba0de --- /dev/null +++ b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/util.py @@ -0,0 +1,61 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +from __future__ import print_function + +import tensorflow as tf + + +def preprocess_image(image_buffer, model, image_size): + """Preprocess JPEG encoded bytes to 3D float Tensor.""" + + # Decode the string as an RGB JPEG of unknown height and width. + image = tf.image.decode_jpeg(image_buffer, channels=3) + # Convert pixels to [0, 1) + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + # Crop the central region to 87.5% of the original image. + image = tf.image.central_crop(image, central_fraction=0.875) + # Resize the image to image_size x image_size. 
+    image = tf.expand_dims(image, 0)
+    image = tf.image.resize_bilinear(image, [image_size, image_size], align_corners=False)
+    image = tf.squeeze(image, [0])
+    if model == 'resnet50':
+        # For ResNet50, rescale to [0, 256]
+        image = tf.multiply(image, 256.0)
+    elif model == 'inceptionv3':
+        # For InceptionV3, rescale to [-1, 1]
+        image = tf.subtract(image, 0.5)
+        image = tf.multiply(image, 2.0)
+    return image
+
+
+def parse_example_proto(example_serialized):
+    # Dense features in Example proto.
+    feature_map = {
+        'image/encoded': tf.FixedLenFeature([], dtype=tf.string,
+                                            default_value=''),
+        'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64,
+                                                default_value=-1),
+    }
+
+    features = tf.parse_single_example(example_serialized, feature_map)
+    label = tf.cast(features['image/class/label'], dtype=tf.int32)
+
+    return features['image/encoded'], label
diff --git a/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/int8/__init__.py b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/int8/__init__.py
new file mode 100644
index 000000000..cf793ec6a
--- /dev/null
+++ b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/int8/__init__.py
@@ -0,0 +1,19 @@
+#
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2018 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: EPL-2.0
+#
diff --git a/benchmarks/launch_benchmark.py b/benchmarks/launch_benchmark.py
index 32c0f68ae..6da9d7cb6 100644
--- a/benchmarks/launch_benchmark.py
+++ b/benchmarks/launch_benchmark.py
@@ -29,7 +29,9 @@ import sys
 from argparse import ArgumentParser

 from common import base_benchmark_util
+from common import platform_util
 from common.utils.validators import check_no_spaces, check_volume_mount
+from common.base_model_init import BaseModelInitializer


 class LaunchBenchmark(base_benchmark_util.BaseBenchmarkUtil):
@@ -213,13 +215,66 @@ def run_bare_metal(self, benchmark_scripts, intelai_models, env_var_dict):
         # setup volume directories to be the local system directories, since we aren't
         # mounting volumes when running bare metal, but start.sh expects these args
         args = self.args
-        mount_benchmark = benchmark_scripts
-        mount_external_models_source = args.model_source_dir
-        mount_intelai_models = intelai_models
         workspace = os.path.join(benchmark_scripts, "common", args.framework)
+        mount_benchmark = benchmark_scripts
         in_graph_path = args.input_graph
-        dataset_path = args.data_location
         checkpoint_path = args.checkpoint
+        dataset_path = args.data_location
+
+        # To Launch Tensorflow Serving benchmark we need only --in-graph arg.
+        # It does not support checkpoint files.
+ if args.framework == "tensorflow_serving": + if args.docker_image: + raise ValueError("--docker-image arg is not supported with tensorflow serving benchmarking, " + "as the script automatically builds and supplies the image.") + + if checkpoint_path: + raise ValueError("--checkpoint-path arg is not supported with tensorflow serving benchmarking") + + if args.mode != "inference": + raise ValueError("--mode arg should be set to inference") + + if in_graph_path: + env_var_dict["IN_GRAPH"] = in_graph_path + else: + raise ValueError("--in-graph arg is required to run tensorflow serving benchmarking") + + for env_var_name in env_var_dict: + os.environ[env_var_name] = str(env_var_dict[env_var_name]) + + # We need this env to be set for the platform util + os.environ["PYTHON_EXE"] = str(sys.executable if not args.docker_image else "python") + + # Get PlatformUtil + platform_util_obj = platform_util.PlatformUtil(self.args) + + # Configure num_inter_threads and num_intra_threads + base_obj = BaseModelInitializer(args=self.args, custom_args=[], platform_util=platform_util_obj) + base_obj.set_num_inter_intra_threads() + + # Update num_inter_threads and num_intra_threads in env dictionary + env_var_dict["NUM_INTER_THREADS"] = self.args.num_inter_threads + env_var_dict["NUM_INTRA_THREADS"] = self.args.num_intra_threads + + # Set OMP_NUM_THREADS + env_var_dict["OMP_NUM_THREADS"] = self.args.num_intra_threads + + else: + mount_external_models_source = args.model_source_dir + mount_intelai_models = intelai_models + + # Add env vars with bare metal settings + env_var_dict["MOUNT_EXTERNAL_MODELS_SOURCE"] = mount_external_models_source + env_var_dict["MOUNT_INTELAI_MODELS_SOURCE"] = mount_intelai_models + + if in_graph_path: + env_var_dict["IN_GRAPH"] = in_graph_path + + if checkpoint_path: + env_var_dict["CHECKPOINT_DIRECTORY"] = checkpoint_path + + if dataset_path: + env_var_dict["DATASET_LOCATION"] = dataset_path # if using the default output directory, get the full path if args.output_dir == "/models/benchmarks/common/tensorflow/logs": @@ -228,19 +283,8 @@ def run_bare_metal(self, benchmark_scripts, intelai_models, env_var_dict): # Add env vars with bare metal settings env_var_dict["WORKSPACE"] = workspace env_var_dict["MOUNT_BENCHMARK"] = mount_benchmark - env_var_dict["MOUNT_EXTERNAL_MODELS_SOURCE"] = mount_external_models_source - env_var_dict["MOUNT_INTELAI_MODELS_SOURCE"] = mount_intelai_models env_var_dict["OUTPUT_DIR"] = args.output_dir - if in_graph_path: - env_var_dict["IN_GRAPH"] = in_graph_path - - if checkpoint_path: - env_var_dict["CHECKPOINT_DIRECTORY"] = checkpoint_path - - if dataset_path: - env_var_dict["DATASET_LOCATION"] = dataset_path - # Set env vars for bare metal for env_var_name in env_var_dict: os.environ[env_var_name] = str(env_var_dict[env_var_name]) diff --git a/docs/general/tensorflow_serving/InstallationGuide.md b/docs/general/tensorflow_serving/InstallationGuide.md index bc557d040..f8a30f2f2 100644 --- a/docs/general/tensorflow_serving/InstallationGuide.md +++ b/docs/general/tensorflow_serving/InstallationGuide.md @@ -36,7 +36,7 @@ We will break down the installation into 2 steps: * Step 1: Build the Intel Optimized TensorFlow Serving Docker image * Step 2: Verify the Docker image by serving a simple model - half_plus_two -### Step 1: Build TensorFlow Serving Docker image +### Step 1: Build TensorFlow Serving Docker image. The recommended way to use TensorFlow Serving is with Docker images.
Let's build a docker image with TensorFlow Serving optimized for Intel® Processors. * Log in to your machine via SSH and clone the [Tensorflow Serving](https://github.com/tensorflow/serving/) repository and save the path of this cloned directory (also adding it to `.bashrc`) for ease of use for the remainder of this tutorial. @@ -45,7 +45,9 @@ The recommended way to use TensorFlow Serving is with Docker images. Let's bui $ export TF_SERVING_ROOT=$(pwd)/serving $ echo "export TF_SERVING_ROOT=$(pwd)/serving" >> ~/.bashrc ``` - + +* You can also build the image using [this](/benchmarks/common/tensorflow_serving/build_tfserving_image.sh) script; run it as described in its comments, or continue with the manual steps below. + * Using `Dockerfile.devel-mkl`, build an image with Intel optimized ModelServer. This creates an image with all the required development tools and builds from sources. The image size will be around 5GB and will take some time. On AWS c5.4xlarge instance (16 logical cores), it took about 25min. **NOTE**: It is recommended that you build an official release version using `--build-arg TF_SERVING_VERSION_GIT_BRANCH=""`, but if you wish to build the (unstable) head of master, omit the build argument and master will be used by default. diff --git a/docs/image_recognition/tensorflow_serving/Tutorial.md b/docs/image_recognition/tensorflow_serving/Tutorial.md index f7c325686..f94350da7 100644 --- a/docs/image_recognition/tensorflow_serving/Tutorial.md +++ b/docs/image_recognition/tensorflow_serving/Tutorial.md @@ -1,6 +1,6 @@ # Image Recognition with TensorFlow Serving on CPU ### Real-time and Max Throughput Inference -Models: ResNet50, InceptionV3 +Models: InceptionV3 and ResNet50 ## Goal @@ -27,21 +27,23 @@ Tuning TensorFlow Serving to take full advantage of your hardware for image reco 3. Running a client script to measure latency and throughput 4. Experimenting with the TensorFlow Serving settings on your own to further optimize for your model and use case -## Hands-on Tutorial - ResNet50 or InceptionV3 +## Hands-on Tutorial - InceptionV3 and ResNet50 For steps 1 and 2, refer to the Intel Model Zoo FP32 benchmarks: -* [ResNet50 README](/benchmarks/image_recognition/tensorflow/resnet50#fp32-inference-instructions) * [InceptionV3 README](/benchmarks/image_recognition/tensorflow/inceptionv3#fp32-inference-instructions) +* [ResNet50 README](/benchmarks/image_recognition/tensorflow/resnet50#fp32-inference-instructions) + +NOTE: The below example shows InceptionV3. The same code snippets will work for ResNet50 by replacing the model name with `resnet50`. -1. **Download the Model**: Download and extract the ResNet50 or InceptionV3 pre-trained model (FP32), using the instructions in one of the READMEs above. +1. **Download the Model**: Download and extract the InceptionV3 pre-trained model (FP32), using the instructions in the above README. 2. **(Optional) Download Data**: If you are interested only in testing latency and throughput, not accuracy, you can skip this step and use synthetic data. - If you want to verify prediction accuracy by testing on real data, follow the instructions in one of the READMEs above to download the ImageNet dataset. + If you want to verify prediction accuracy by testing on real data, follow the instructions in the above README to download the ImageNet dataset. -3. **Clone this repository**: Clone the [intelai/models](https://github.com/intelai/models) repository and `cd` into the `docs/image_recognition/tensorflow_serving/src` directory. +3. 
**Clone this repository**: Clone the [intelai/models](https://github.com/intelai/models) repository and `cd` into the `models/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32` directory. ``` $ git clone https://github.com/IntelAI/models.git - $ cd models/docs/image_recognition/tensorflow_serving/src + $ cd models/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32 ``` 4. **Set up your environment**: In this tutorial, we use a virtual environment to install a few required Python packages. @@ -60,6 +62,7 @@ For steps 1 and 2, refer to the Intel Model Zoo FP32 benchmarks: (venv)$ pip install tensorflow-serving-api ``` 5. **Create a SavedModel**: Using the conversion script `model_graph_to_saved_model.py`, convert the pre-trained model graph to a SavedModel. + (For ResNet50, substitute the name of the ResNet50 FP32 pre-trained model.) Example: ``` @@ -118,13 +121,13 @@ For steps 1 and 2, refer to the Intel Model Zoo FP32 benchmarks: To see average inference latency (in ms), run the benchmark script `image_recognition_benchmark.py` using batch_size 1: ``` (venv)$ python image_recognition_benchmark.py --batch_size 1 --model inceptionv3 - Iteration 1: 0.017 sec + Iteration 1: ... sec ... - Iteration 40: 0.016 sec - Average time: 0.016 sec + Iteration 40: ... sec + Average time: ... sec Batch size = 1 - Latency: 16.496 ms - Throughput: 60.619 images/sec + Latency: ... ms + Throughput: ... images/sec ``` In some cases, it is desirable to constrain the inference server to a single core or socket. @@ -156,12 +159,12 @@ For steps 1 and 2, refer to the Intel Model Zoo FP32 benchmarks: To see average throughput (in images/sec), run the benchmark script `image_recognition_benchmark.py` using batch_size 128: ``` (venv)$ python image_recognition_benchmark.py --batch_size 128 --model inceptionv3 - Iteration 1: 1.706 sec + Iteration 1: ... sec ... - Iteration 40: 0.707 sec - Average time: 0.693 sec + Iteration 40: ... sec + Average time: ... sec Batch size = 128 - Throughput: 184.669 images/sec + Throughput: ... images/sec ``` 11. **Clean up**: diff --git a/docs/image_recognition/tensorflow_serving/src/image_recognition_benchmark.py b/docs/image_recognition/tensorflow_serving/src/image_recognition_benchmark.py deleted file mode 100644 index 658812cd9..000000000 --- a/docs/image_recognition/tensorflow_serving/src/image_recognition_benchmark.py +++ /dev/null @@ -1,117 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -# Copyright (c) 2019 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: EPL-2.0 -# - -"""Send simulated image data to tensorflow_model_server loaded with ResNet50 or InceptionV3 model. 
- -""" - -from __future__ import print_function - -import os -import sys -import random -import time -import grpc -import tensorflow as tf -import numpy as np - -from tensorflow_serving.apis import predict_pb2 -from tensorflow_serving.apis import prediction_service_pb2_grpc - -from util import preprocess_image, parse_example_proto - -tf.app.flags.DEFINE_string('server', 'localhost:8500', - 'PredictionService host:port') -tf.app.flags.DEFINE_integer('batch_size', 1, 'Batch size to use') -tf.app.flags.DEFINE_string('data_dir', '', 'path to images in TF records format') -tf.app.flags.DEFINE_string('model', 'resnet50', 'Name of model (resnet50 or inceptionv3).') -FLAGS = tf.app.flags.FLAGS - - -def sample_images(image_size): - """Pull a random batch of images from FLAGS.data_dir containing TF record formatted ImageNet validation set - - Returns: - ndarray of float32 with shape [FLAGS.batch_size, image_size, image_size, 3] - """ - - sample_file = random.choice(os.listdir(FLAGS.data_dir)) - dataset = tf.data.TFRecordDataset(os.path.join(FLAGS.data_dir, sample_file)) - dataset = dataset.map(lambda x: parse_example_proto(x)).shuffle(True).batch(FLAGS.batch_size) - iterator = dataset.make_one_shot_iterator() - next_element = iterator.get_next() - with tf.Session() as sess: - images, labels = sess.run(next_element) - images = np.array([sess.run(preprocess_image(x, FLAGS.model, image_size)) for x in images]) - - return images - -def main(_): - if FLAGS.model == 'resnet50': - image_size = 224 - elif FLAGS.model == 'inceptionv3': - image_size = 299 - else: - print('Please specify model as either resnet50 or inceptionv3.') - sys.exit(-1) - - channel = grpc.insecure_channel(FLAGS.server) - stub = prediction_service_pb2_grpc.PredictionServiceStub(channel) - i = 0 - num_iteration = 40 - warm_up_iteration = 10 - total_time = 0 - for _ in range(num_iteration): - i += 1 - if FLAGS.data_dir: - image_np = sample_images(image_size) - else: - image_np = np.random.rand(FLAGS.batch_size, image_size, image_size, 3).astype(np.float32) - if FLAGS.model == 'resnet50': - # For ResNet50, rescale to [0, 256] - image_np *= 256.0 - elif FLAGS.model == 'inceptionv3': - # For InceptionV3, rescale to [-1, 1] - image_np = (image_np - 0.5) * 2.0 - - request = predict_pb2.PredictRequest() - request.model_spec.name = FLAGS.model - request.model_spec.signature_name = 'serving_default' - request.inputs['input'].CopyFrom( - tf.contrib.util.make_tensor_proto(image_np, shape=[FLAGS.batch_size, image_size, image_size, 3])) - start_time = time.time() - result = stub.Predict(request, 10.0) # 10 secs timeout - time_consume = time.time() - start_time - print('Iteration %d: %.3f sec' % (i, time_consume)) - if i > warm_up_iteration: - total_time += time_consume - - time_average = total_time / (num_iteration - warm_up_iteration) - print('Average time: %.3f sec' % (time_average)) - - print('Batch size = %d' % FLAGS.batch_size) - if (FLAGS.batch_size == 1): - print('Latency: %.3f ms' % (time_average * 1000)) - - print('Throughput: %.3f images/sec' % (FLAGS.batch_size / time_average)) - - -if __name__ == '__main__': - tf.app.run() diff --git a/docs/image_recognition/tensorflow_serving/src/util.py b/docs/image_recognition/tensorflow_serving/src/util.py deleted file mode 100644 index 8877e932d..000000000 --- a/docs/image_recognition/tensorflow_serving/src/util.py +++ /dev/null @@ -1,61 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -# Copyright (c) 2019 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not 
use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: EPL-2.0 -# - -from __future__ import print_function - -import tensorflow as tf - -def preprocess_image(image_buffer, model, image_size): - """Preprocess JPEG encoded bytes to 3D float Tensor.""" - - # Decode the string as an RGB JPEG of unknown height and width. - image = tf.image.decode_jpeg(image_buffer, channels=3) - # Convert pixels to [0, 1) - image = tf.image.convert_image_dtype(image, dtype=tf.float32) - # Crop the central region to 87.5% of the original image. - image = tf.image.central_crop(image, central_fraction=0.875) - # Resize the image to image_size x image_size. - image = tf.expand_dims(image, 0) - image = tf.image.resize_bilinear(image, [image_size, image_size], align_corners=False) - image = tf.squeeze(image, [0]) - if model == 'resnet50': - # For ResNet50, rescale to [0, 256] - image = tf.multiply(image, 256.0) - elif model == 'inceptionv3': - # For InceptionV3, rescale to [-1, 1] - image = tf.subtract(image, 0.5) - image = tf.multiply(image, 2.0) - return image - -def parse_example_proto(example_serialized): - - # Dense features in Example proto. - feature_map = { - 'image/encoded': tf.FixedLenFeature([], dtype=tf.string, - default_value=''), - 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, - default_value=-1), - } - - features = tf.parse_single_example(example_serialized, feature_map) - label = tf.cast(features['image/class/label'], dtype=tf.int32) - - return features['image/encoded'], label - diff --git a/tests/unit/test_launch_benchmark.py b/tests/unit/test_launch_benchmark.py index 32a9eaec1..6145cf614 100644 --- a/tests/unit/test_launch_benchmark.py +++ b/tests/unit/test_launch_benchmark.py @@ -37,6 +37,9 @@ test_docker_image = "foo" test_batch_size = "100" test_num_cores = "1" +# need a valid file for tests to work, see conftest.py for where this is managed +test_input_graph = "test.pb" +test_tfserving_framework = "tensorflow_serving" @pytest.fixture @@ -66,8 +69,35 @@ def mock_system_platform(patch): return patch("common.base_benchmark_util.platform_util.system_platform") +@pytest.fixture +def mock_path_exists(patch): + return patch("os.path.exists", MagicMock(return_value=True)) + + +@pytest.fixture +def mock_isfile(patch): + return patch("os.path.isfile", MagicMock(return_value=True)) + + +@pytest.fixture +def mock_isdir(patch): + return patch("os.path.isdir", MagicMock(return_value=True)) + + +@pytest.fixture +def mock_islink(patch): + return patch("os.path.islink", MagicMock(return_value=False)) + + +@pytest.fixture +def mock_stat(patch): + stat = MagicMock() + stat.return_value.st_nlink = 0 + return patch("os.stat", stat) + + @pytest.fixture(autouse=True) -def launch_benchmark(mock_platform_util, request): +def launch_benchmark(mock_platform_util, request, mock_isdir, mock_isfile, mock_islink, mock_stat, mock_path_exists): """sets up launch_benchmark obj for every test case and handles catching errors if we wanna test that To catch errors called when running launch_benchmark, use something like: ['catch_error', SystemExit, [{args}], {error_message}] in 
parametrize @@ -113,8 +143,10 @@ def launch_benchmark(mock_platform_util, request): req_args = request.param[2] error_message = request.param[3] if len(request.param) == 4 else '' else: + # add extra arguments to the default ones when calling LaunchBenchmark req_args = request.param + example_req_args else: + # only use default arguments when calling LaunchBenchmark req_args = example_req_args with mock_patch.object(sys, "argv", ['run_tf_benchmark.py'] + req_args): @@ -168,12 +200,13 @@ def test_launch_benchmark_parse_unknown_args(launch_benchmark): "--output-results"], "--output-results can only be used when running " "inference with a dataset"], - ['catch_error', SystemExit, ["--model-name", test_model_name, - "--framework", test_framework, - "--mode", test_mode, - "--precision", test_precision, - "--volume", "~:test"], - "Volume mounts can only be used when running in a docker container"], + ['catch_error_override_all_params', SystemExit, + ["--model-name", test_model_name, + "--framework", test_framework, + "--mode", test_mode, + "--precision", test_precision, + "--volume", "~:test"], + "Volume mounts can only be used when running in a docker container"] ], indirect=True) def test_launch_benchmark_parse_bad_args(launch_benchmark): """ @@ -224,6 +257,31 @@ def test_bare_metal(launch_benchmark, mock_popen): assert os.environ["TEST_ENV_VAR_2"] == test_env_vars["TEST_ENV_VAR_2"] +@pytest.mark.parametrize('launch_benchmark', [["--in-graph", test_input_graph]], indirect=True) +def test_launch_benchmark_tensorflow_serving_framework(launch_benchmark, mock_popen): + """ + Tests that the launch script works for tensorflow serving framework + """ + test_env_vars = {"TEST_ENV_VAR_1": "a", "TEST_ENV_VAR_2": "b"} + # Override framework and docker image. + launch_benchmark.args.framework = test_tfserving_framework + launch_benchmark.args.docker_image = None + launch_benchmark.run_bare_metal("/foo", "/bar", test_env_vars) + assert mock_popen.called + args, kwargs = mock_popen.call_args + + assert launch_benchmark.args.input_graph == test_input_graph + assert launch_benchmark.args.framework == test_tfserving_framework + + # make sure that the start script is run + assert "bash" == args[0][0] + assert "start.sh" in args[0][1] + + # ensure env vars are set + assert os.environ["TEST_ENV_VAR_1"] == test_env_vars["TEST_ENV_VAR_1"] + assert os.environ["TEST_ENV_VAR_2"] == test_env_vars["TEST_ENV_VAR_2"] + + def test_help(mock_platform_util, capsys): """ Tests `launch_benchmark.py --help` output and ensures there is no error """ with mock_patch.object(sys, 'argv', ["launch_benchmark.py", "--help"]): diff --git a/tox.ini b/tox.ini index 90ac004f4..20ae07d16 100644 --- a/tox.ini +++ b/tox.ini @@ -37,6 +37,7 @@ omit = .tox/* .pytest_cache/* __pycache__/* + benchmarks/image_recognition/tensorflow_serving/* benchmarks/image_segmentation/tensorflow/unet/inference/fp32/unet_infer.py benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/infer_detections.py benchmarks/recommendation/tensorflow/wide_deep/inference/fp32/data_download.py From cb2bb07dcb31c5f0aa6894024e75275da2c83a26 Mon Sep 17 00:00:00 2001 From: Dina Suehiro Jones Date: Wed, 22 May 2019 10:52:36 -0700 Subject: [PATCH 35/62] Make reference file optional for Transformer LT benchmarking (#312) --- benchmarks/common/tensorflow/start.sh | 11 +++++----- .../tensorflow/transformer_language/README.md | 3 ++- .../inference/fp32/model_init.py | 21 +++++++++++-------- .../tf_transformer_language_args.json | 9 ++++++-- 4 files changed, 26 
insertions(+), 18 deletions(-) diff --git a/benchmarks/common/tensorflow/start.sh b/benchmarks/common/tensorflow/start.sh index 26d25af86..ac6297d93 100755 --- a/benchmarks/common/tensorflow/start.sh +++ b/benchmarks/common/tensorflow/start.sh @@ -696,10 +696,6 @@ function transformer_language() { echo "transformer-language requires -- decode_from_file arg to be defined" exit 1 fi - if [[ -z "${reference}" ]]; then - echo "transformer-language requires -- reference arg to be defined" - exit 1 - fi if [[ -z "${CHECKPOINT_DIRECTORY}" ]]; then echo "transformer-language requires --checkpoint arg to be defined" exit 1 @@ -717,8 +713,11 @@ function transformer_language() { cp ${MOUNT_INTELAI_MODELS_SOURCE}/${MODE}/${PRECISION}/decoding.py ${MOUNT_EXTERNAL_MODELS_SOURCE}/tensor2tensor/utils/decoding.py - CMD="${CMD} --decode_from_file=${CHECKPOINT_DIRECTORY}/${decode_from_file} \ - --reference=${CHECKPOINT_DIRECTORY}/${reference}" + CMD="${CMD} --decode_from_file=${CHECKPOINT_DIRECTORY}/${decode_from_file}" + + if [[ -n "${reference}" ]]; then + CMD="${CMD} --reference=${CHECKPOINT_DIRECTORY}/${reference}" + fi PYTHONPATH=${PYTHONPATH} CMD=${CMD} run_model else diff --git a/benchmarks/language_translation/tensorflow/transformer_language/README.md b/benchmarks/language_translation/tensorflow/transformer_language/README.md index a21a4fbd6..cb3a80e17 100644 --- a/benchmarks/language_translation/tensorflow/transformer_language/README.md +++ b/benchmarks/language_translation/tensorflow/transformer_language/README.md @@ -69,7 +69,8 @@ Substitute the `--model-source-dir` for the location where you cloned the Transformer Language can run for latency or throughput benchmarking. Use one of the following examples below, depending on -your use case. +your use case. Note that if no `reference` file is provided in the +launch script parameters, then the BLEU score cannot be calculated. 
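A condensed sketch of the behavior this change introduces: the BLEU step is built, and later run, only when a reference file is given. The helper name and paths below are illustrative, not code from this patch; the real logic is in `model_init.py` below:

```
# Illustrative only: mirrors how the t2t_bleu.py step becomes conditional
# on the optional --reference arg. Paths are placeholders.
import os

def bleu_command(model_source_dir, decode_to_file, reference=None):
    if not reference:
        return None  # no reference file, so BLEU is not computed
    script = os.path.join(model_source_dir, "tensor2tensor/bin/t2t_bleu.py")
    return ("python " + script +
            " --translation=" + decode_to_file +
            " --reference=" + reference)

print(bleu_command("/workspace/models", "out_dir/output_infer"))  # None
print(bleu_command("/workspace/models", "out_dir/output_infer",
                   "/checkpoints/newstest2015.de"))
```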
For latency (using `--socket-id 0` and `--batch-size 1`): diff --git a/benchmarks/language_translation/tensorflow/transformer_language/inference/fp32/model_init.py b/benchmarks/language_translation/tensorflow/transformer_language/inference/fp32/model_init.py index b4fd1bc30..8d01493ae 100644 --- a/benchmarks/language_translation/tensorflow/transformer_language/inference/fp32/model_init.py +++ b/benchmarks/language_translation/tensorflow/transformer_language/inference/fp32/model_init.py @@ -98,14 +98,15 @@ def __init__(self, args, custom_args, platform_util=None): " --output_dir=" + self.args.checkpoint + \ " --decode_from_file=" + self.args.decode_from_file + \ " --decode_to_file=" + self.args.decode_to_file + \ - " --reference=" + self.args.reference + \ " --inter_op_parallelism_threads=" + \ str(self.args.num_inter_threads) + \ " --intra_op_parallelism_threads=" + \ str(self.args.num_intra_threads) - self.bleu_params += " --translation=" + self.args.decode_to_file + \ - " --reference=" + self.args.reference + # If a reference file was provided, also calculate the BLEU score + if self.args.reference: + self.bleu_params += " --translation=" + self.args.decode_to_file + \ + " --reference=" + self.args.reference self.cmd = self.cmd + run_script + cmd_args @@ -114,10 +115,12 @@ def run(self): os.chdir(self.args.model_source_dir) self.run_command(self.cmd) - # calculate the bleu number after inference is done - bleucmd = "python " + \ - os.path.join(self.args.model_source_dir, - "tensor2tensor/bin/t2t_bleu.py") + \ - self.bleu_params - os.system(bleucmd) + # calculate the BLEU score after inference is done (this is skipped if no reference file is provided) + if self.bleu_params: + bleucmd = "python " + \ + os.path.join(self.args.model_source_dir, + "tensor2tensor/bin/t2t_bleu.py") + \ + self.bleu_params + os.system(bleucmd) + os.chdir(original_dir) diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_transformer_language_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_transformer_language_args.json index 5662ad83e..bf5759531 100644 --- a/tests/unit/common/tensorflow/tf_model_args/tf_transformer_language_args.json +++ b/tests/unit/common/tensorflow/tf_model_args/tf_transformer_language_args.json @@ -1,9 +1,14 @@ [ { "_comment": "FP32 latency benchmark", "input": "run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_language --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --decode_from_file=/checkpoints/newstest2015.en --reference=/checkpoints/newstest2015.de", - "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/tensor2tensor/bin/t2t_decoder.py --problem=translate_ende_wmt32k --model=transformer --hparams_set=transformer_base_single_gpu --decode_hparams=beam_size=4,alpha=0.6,batch_size=1 --data_dir=/dataset --output_dir=/checkpoints --decode_from_file=/checkpoints/newstest2015.en --decode_to_file=/workspace/models/out_dir/output_infer --reference=/checkpoints/newstest2015.de --inter_op_parallelism_threads=1 --intra_op_parallelism_threads=28"}, + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/tensor2tensor/bin/t2t_decoder.py --problem=translate_ende_wmt32k --model=transformer --hparams_set=transformer_base_single_gpu
--decode_hparams=beam_size=4,alpha=0.6,batch_size=1 --data_dir=/dataset --output_dir=/checkpoints --decode_from_file=/checkpoints/newstest2015.en --decode_to_file=/workspace/models/out_dir/output_infer --inter_op_parallelism_threads=1 --intra_op_parallelism_threads=28"}, { "_comment": "Fp32 throughput", "input": "run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_language --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=32 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --decode_from_file=/checkpoints/newstest2015.en --reference=/checkpoints/newstest2015.de", - "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/tensor2tensor/bin/t2t_decoder.py --problem=translate_ende_wmt32k --model=transformer --hparams_set=transformer_base_single_gpu --decode_hparams=beam_size=4,alpha=0.6,batch_size=32 --data_dir=/dataset --output_dir=/checkpoints --decode_from_file=/checkpoints/newstest2015.en --decode_to_file=/workspace/models/out_dir/output_infer --reference=/checkpoints/newstest2015.de --inter_op_parallelism_threads=1 --intra_op_parallelism_threads=28"} + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/tensor2tensor/bin/t2t_decoder.py --problem=translate_ende_wmt32k --model=transformer --hparams_set=transformer_base_single_gpu --decode_hparams=beam_size=4,alpha=0.6,batch_size=32 --data_dir=/dataset --output_dir=/checkpoints --decode_from_file=/checkpoints/newstest2015.en --decode_to_file=/workspace/models/out_dir/output_infer --inter_op_parallelism_threads=1 --intra_op_parallelism_threads=28" + }, + { "_comment": "Fp32 benchmarking with no reference file", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_language --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=32 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --decode_from_file=/checkpoints/newstest2015.en", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/tensor2tensor/bin/t2t_decoder.py --problem=translate_ende_wmt32k --model=transformer --hparams_set=transformer_base_single_gpu --decode_hparams=beam_size=4,alpha=0.6,batch_size=32 --data_dir=/dataset --output_dir=/checkpoints --decode_from_file=/checkpoints/newstest2015.en --decode_to_file=/workspace/models/out_dir/output_infer --inter_op_parallelism_threads=1 --intra_op_parallelism_threads=28" + } ] From dbc54be50ebed9923bfeacdb9f2ce82ada89a3ff Mon Sep 17 00:00:00 2001 From: "Li, Guizi" Date: Fri, 24 May 2019 01:18:46 +0800 Subject: [PATCH 36/62] Add SSD-ResNet34 Int8 benchmarking and refactor FP32 code (#301) * refactor ssd-resnet34 fp32 code * add ssd-resnet34 int8 * fix for style check * remove debug code and remove data-location for benchmark mode * update tf to 1.14 and remove data location for benchmark * update pre-trained model link * update pb file name --- benchmarks/README.md | 2 +- benchmarks/common/tensorflow/start.sh | 8 +- .../tensorflow/ssd-resnet34/README.md | 198 ++- .../ssd-resnet34/inference/fp32/model_init.py | 2 +- .../ssd-resnet34/inference/int8/__init__.py | 19 + .../ssd-resnet34/inference/int8/config.json | 7 + 
.../ssd-resnet34/inference/int8/model_init.py | 76 + .../inference/fp32/coco_metric.py | 193 --- .../ssd-resnet34/inference/fp32/datasets.py | 251 ---- .../inference/fp32/infer_detections.py | 6 +- .../inference/fp32/preprocessing.py | 1259 ----------------- .../inference/fp32/ssd_constants.py | 118 -- .../inference/fp32/ssd_dataloader.py | 382 ----- .../ssd-resnet34/inference/fp32/ssd_model.py | 171 --- .../ssd-resnet34/inference/int8/__init__.py | 20 + .../inference/int8/infer_detections.py | 211 +++ 16 files changed, 534 insertions(+), 2389 deletions(-) create mode 100644 benchmarks/object_detection/tensorflow/ssd-resnet34/inference/int8/__init__.py create mode 100644 benchmarks/object_detection/tensorflow/ssd-resnet34/inference/int8/config.json create mode 100644 benchmarks/object_detection/tensorflow/ssd-resnet34/inference/int8/model_init.py delete mode 100644 models/object_detection/tensorflow/ssd-resnet34/inference/fp32/coco_metric.py delete mode 100644 models/object_detection/tensorflow/ssd-resnet34/inference/fp32/datasets.py delete mode 100644 models/object_detection/tensorflow/ssd-resnet34/inference/fp32/preprocessing.py delete mode 100644 models/object_detection/tensorflow/ssd-resnet34/inference/fp32/ssd_constants.py delete mode 100644 models/object_detection/tensorflow/ssd-resnet34/inference/fp32/ssd_dataloader.py delete mode 100644 models/object_detection/tensorflow/ssd-resnet34/inference/fp32/ssd_model.py create mode 100644 models/object_detection/tensorflow/ssd-resnet34/inference/int8/__init__.py create mode 100644 models/object_detection/tensorflow/ssd-resnet34/inference/int8/infer_detections.py diff --git a/benchmarks/README.md b/benchmarks/README.md index 0875f2baa..25cea61b6 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -36,8 +36,8 @@ dependencies to be installed: | Object Detection | TensorFlow | [R-FCN](https://arxiv.org/pdf/1605.06409.pdf) | Inference | [Int8](object_detection/tensorflow/rfcn/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/rfcn/README.md#fp32-inference-instructions) | | Object Detection | TensorFlow | [Faster R-CNN](https://arxiv.org/pdf/1506.01497.pdf) | Inference | [Int8](object_detection/tensorflow/faster_rcnn/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/faster_rcnn/README.md#fp32-inference-instructions) | | Object Detection | TensorFlow | [SSD-MobileNet](https://arxiv.org/pdf/1704.04861.pdf) | Inference | [Int8](object_detection/tensorflow/ssd-mobilenet/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/ssd-mobilenet/README.md#fp32-inference-instructions) | +| Object Detection | TensorFlow | [SSD-ResNet34](https://arxiv.org/pdf/1512.02325.pdf) | Inference | [INT8](object_detection/tensorflow/ssd-resnet34/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/ssd-resnet34/README.md#fp32-inference-instructions) | | Object Detection | TensorFlow | [SSD-VGG16](https://arxiv.org/pdf/1512.02325.pdf) | Inference | [Int8](object_detection/tensorflow/ssd_vgg16/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/ssd_vgg16/README.md#fp32-inference-instructions) | -| Object Detection | TensorFlow | [SSD-ResNet34](https://arxiv.org/pdf/1512.02325.pdf) | Inference | [FP32](object_detection/tensorflow/ssd-resnet34/README.md#fp32-inference-instructions) | | Recommendation | TensorFlow | [NCF](https://arxiv.org/pdf/1708.05031.pdf) | Inference | [FP32](recommendation/tensorflow/ncf/README.md#fp32-inference-instructions) | | 
Recommendation | TensorFlow | [Wide & Deep Large Dataset](https://arxiv.org/pdf/1606.07792.pdf) | Inference | [Int8](recommendation/tensorflow/wide_deep_large_ds/README.md#int8-inference-instructions) [FP32](recommendation/tensorflow/wide_deep_large_ds/README.md#fp32-inference-instructions) | | Recommendation | TensorFlow | [Wide & Deep](https://arxiv.org/pdf/1606.07792.pdf) | Inference | [FP32](recommendation/tensorflow/wide_deep/README.md#fp32-inference-instructions) | diff --git a/benchmarks/common/tensorflow/start.sh b/benchmarks/common/tensorflow/start.sh index ac6297d93..be2d30f5c 100755 --- a/benchmarks/common/tensorflow/start.sh +++ b/benchmarks/common/tensorflow/start.sh @@ -627,12 +627,18 @@ function ssd_mobilenet() { # SSD-ResNet34 model function ssd-resnet34() { - if [ ${PRECISION} == "fp32" ]; then + if [ ${PRECISION} == "fp32" ] || [ ${PRECISION} == "int8" ]; then if [ ${NOINSTALL} != "True" ]; then for line in $(cat ${MOUNT_BENCHMARK}/object_detection/tensorflow/ssd-resnet34/requirements.txt) do pip install $line done + old_dir=${PWD} + cd /tmp + git clone --single-branch https://github.com/tensorflow/benchmarks.git + cd benchmarks + git checkout 1e7d788042dfc6d5e5cd87410c57d5eccee5c664 + cd ${old_dir} fi CMD=${CMD} run_model diff --git a/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md b/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md index c5dac8657..a16d716ae 100644 --- a/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md +++ b/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md @@ -3,6 +3,7 @@ This document has instructions for how to run SSD-ResNet34 for the following modes/precisions: * [FP32 inference](#fp32-inference-instructions) +* [INT8 inference](#int8-inference-instructions) Benchmarking instructions and scripts for model training and inference other precisions are coming later. @@ -96,7 +97,11 @@ $ git checkout f505cecde2d8ebf6fe15f40fb8bc350b2b1ed5dc The `coco_val.record` file is what we will use in this inference example. -5. A link to download the pre-trained model is coming soon. +5. Download the pretrained model: + +``` +$ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/ssd_resnet34_fp32_bs1_pretrained_model.pb +``` 6. Clone the [intelai/models](https://github.com/intelai/models) repo. This repo has the launch script for running benchmarking, which we will @@ -110,20 +115,18 @@ $ git clone https://github.com/IntelAI/models.git [intelai/models](https://github.com/intelai/models) repo that was just cloned in the previous step. SSD-ResNet34 can be run for benchmarking throughput and latency, or testing accuracy. Note that we are running -SSD-ResNet34 with a TensorFlow 1.13 docker image. +SSD-ResNet34 with a TensorFlow 1.14 docker image. 
To benchmark throughput and latency, use the following command, -but replace in your path to the unzipped coco dataset images from step 3 -for the `--dataset-location`, the path to the frozen graph that you -downloaded in step 5 as the `--in-graph`, and use the `--benchmark-only` +passing the path to the frozen graph that you downloaded in step 5 as +the `--in-graph`, and use the `--benchmark-only` flag: ``` $ cd /home//models/benchmarks $ python launch_benchmark.py \ - --data-location /home//coco/output/ \ - --in-graph /home//ssd_resnet34_coco_pretained_model/ssd_resnet34_bs1.pb \ + --in-graph /home//ssd_resnet34_fp32_bs1_pretrained_model.pb \ --model-source-dir /home//tensorflow/models \ --model-name ssd-resnet34 \ --framework tensorflow \ @@ -143,7 +146,7 @@ the path to the frozen graph that you downloaded in step 5 as the ``` $ python launch_benchmark.py \ --data-location /home//coco/output/ \ - --in-graph /home//ssd_resnet34_coco_pretained_model/ssd_resnet34_bs1.pb \ + --in-graph /home//ssd_resnet34_fp32_bs1_pretrained_model.pb \ --model-source-dir /home//tensorflow/models \ --model-name ssd-resnet34 \ --framework tensorflow \ @@ -181,6 +184,181 @@ Below is a sample log file tail when testing accuracy: Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.334 Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.494 Current AP: 0.21082 -Ran inference with batch size -1 -Log location outside container: {--output-dir value}/benchmark_ssd-mobilenet_inference_fp32_20190123_225145.log +``` + +## INT8 Inference Instructions + +1. Clone the `tensorflow/models` repository with the specified SHA, +since we are using an older version of the models repo for +SSD-ResNet34. + +``` +$ git clone https://github.com/tensorflow/models.git +$ cd models +$ git checkout f505cecde2d8ebf6fe15f40fb8bc350b2b1ed5dc +$ git clone https://github.com/cocodataset/cocoapi.git +``` + +The TensorFlow models repo will be used for running inference as well as +converting the COCO dataset to the TF records format. + +2. Follow the TensorFlow models object detection +[installation instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md#installation) +to get your environment set up with the required dependencies. + +3. Download the 2017 validation +[COCO dataset](http://cocodataset.org/#home) and annotations: + +``` +$ mkdir val +$ cd val +$ wget http://images.cocodataset.org/zips/val2017.zip +$ unzip val2017.zip +$ cd .. + +$ mkdir annotations +$ cd annotations +$ wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip +$ unzip annotations_trainval2017.zip +$ cd .. +``` + +Since we are only using the validation dataset in this example, we will +create an empty directory and empty annotations json file to pass as the +train and test directories in the next step. + +``` +$ mkdir empty_dir + +$ cd annotations +$ echo "{ \"images\": {}, \"categories\": {}}" > empty.json +$ cd .. +``` + +4. Now that you have the raw COCO dataset, we need to convert it to the +TF records format in order to use it with the inference script. We will +do this by running the `create_coco_tf_record.py` file in the TensorFlow +models repo. + +Follow the steps below to navigate to the proper directory and point the +script to the raw COCO dataset files that you have downloaded in step 3. +The `--output_dir` is the location where the TF record files will be +located after the script has completed. 
+ +``` + +# Check out an older commit to get a previous version of the conversion script +$ cd models +$ git checkout 7a9934df2afdf95be9405b4e9f1f2480d748dc40 + +$ cd research/object_detection/dataset_tools/ +$ python create_coco_tf_record.py --logtostderr \ + --train_image_dir="/home//coco/empty_dir" \ + --val_image_dir="/home//coco/val/val2017" \ + --test_image_dir="/home//coco/empty_dir" \ + --train_annotations_file="/home//coco/annotations/empty.json" \ + --val_annotations_file="/home//coco/annotations/instances_val2017.json" \ + --testdev_annotations_file="/home//coco/annotations/empty.json" \ + --output_dir="/home//coco/output" + +$ ll /home//coco/output +total 1598276 +-rw-rw-r--. 1 0 Nov 2 21:46 coco_testdev.record +-rw-rw-r--. 1 0 Nov 2 21:46 coco_train.record +-rw-rw-r--. 1 818336740 Nov 2 21:46 coco_val.record + +# Go back to the main models directory and checkout the SHA that we are using for SSD-ResNet34 +$ cd /home//models +$ git checkout f505cecde2d8ebf6fe15f40fb8bc350b2b1ed5dc +``` + +The `coco_val.record` file is what we will use in this inference example. + +5. Download the pretrained model: + +``` +$ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/ssd_resnet34_int8_bs1_pretrained_model.pb +``` + +6. Clone the [intelai/models](https://github.com/intelai/models) repo. +This repo has the launch script for running benchmarking, which we will +use in the next step. + +``` +$ git clone https://github.com/IntelAI/models.git +``` + +7. Next, navigate to the `benchmarks` directory of the +[intelai/models](https://github.com/intelai/models) repo that was just +cloned in the previous step. SSD-ResNet34 can be run for benchmarking +throughput and latency, or testing accuracy. Note that we are running +SSD-ResNet34 with a TensorFlow 1.14 docker image. + +To benchmark throughput and latency, use the following command, +passing the path to the frozen graph that you downloaded in step 5 as +the `--in-graph`, and use the `--benchmark-only` +flag: + +``` +$ cd /home//models/benchmarks + +$ python launch_benchmark.py \ + --in-graph /home//ssd_resnet34_int8_bs1_pretrained_model.pb \ + --model-source-dir /home//tensorflow/models \ + --model-name ssd-resnet34 \ + --framework tensorflow \ + --precision int8 \ + --mode inference \ + --socket-id 0 \ + --batch-size=1 \ + --docker-image intelaipg/intel-optimized-tensorflow:latest-prs-b5d67b7-devel-mkl-py3 \ + --benchmark-only +``` + +To test accuracy, use the following command, but fill in your path to +the tf record file that you generated in step 4 for the `--data-location`, +the path to the frozen graph that you downloaded in step 5 as the +`--in-graph`, and use the `--accuracy-only` flag: + +``` +$ python launch_benchmark.py \ + --data-location /home//coco/output/ \ + --in-graph /home//ssd_resnet34_int8_bs1_pretrained_model.pb \ + --model-source-dir /home//tensorflow/models \ + --model-name ssd-resnet34 \ + --framework tensorflow \ + --precision int8 \ + --mode inference \ + --socket-id 0 \ + --batch-size=1 \ + --docker-image intelaipg/intel-optimized-tensorflow:latest-prs-b5d67b7-devel-mkl-py3 \ + --accuracy-only +``` + +8. The log file is saved to the value of `--output-dir`. 
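The sample logs that follow report both per-batch time and throughput; the two are tied by `samples/sec = batch_size / seconds_per_batch`. A quick check using the numbers from the benchmarking log below:

```
# Sanity-check the metrics in the sample benchmarking log below (batch size 1)
time_per_batch_ms = 12.0245
batch_size = 1
samples_per_sec = batch_size / (time_per_batch_ms / 1000.0)
print("%.4f samples/s" % samples_per_sec)  # 83.1635, matching the log
```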
+ +Below is a sample log file tail when running benchmarking: + +``` +Batchsize: 1 +Time spent per BATCH: 12.0245 ms +Total samples/sec: 83.1635 samples/s +``` + +Below is a sample log file tail when testing accuracy: + +``` + Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.204 + Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.360 + Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.208 + Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.051 + Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.213 + Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.335 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.210 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.294 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.301 + Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.083 + Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.327 + Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.484 +Current AP: 0.20408 ``` diff --git a/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/fp32/model_init.py b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/fp32/model_init.py index 20bfcccf5..0b53a0112 100644 --- a/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/fp32/model_init.py +++ b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/fp32/model_init.py @@ -69,8 +69,8 @@ def __init__(self, args, custom_args, platform_util): self.run_cmd += " --data-location {0}".format(self.args.data_location) def run(self): - print(self.run_cmd) old_python_path = os.environ["PYTHONPATH"] os.environ["PYTHONPATH"] = os.path.join(self.args.model_source_dir, "research") + os.environ["PYTHONPATH"] += ":/tmp/benchmarks/scripts/tf_cnn_benchmarks/" self.run_command(self.run_cmd) os.environ["PYTHONPATH"] = old_python_path diff --git a/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/int8/__init__.py b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/int8/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/int8/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/int8/config.json b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/int8/config.json new file mode 100644 index 000000000..273b45b40 --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/int8/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/int8/model_init.py b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/int8/model_init.py new file mode 100644 index 000000000..0b53a0112 --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/int8/model_init.py @@ -0,0 +1,76 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +import os +import sys + +from common.base_model_init import BaseModelInitializer +from common.base_model_init import set_env_var + + +class ModelInitializer(BaseModelInitializer): + def run_inference_sanity_checks(self, args, custom_args): + if not args.input_graph: + sys.exit("Please provide a path to the frozen graph directory" + " via the '--in-graph' flag.") + if not args.data_location and self.args.accuracy_only: + sys.exit("Please provide a path to the data directory via the " + "'--data-location' flag.") + if args.socket_id == -1 and args.num_cores == -1: + print("***Warning***: Running inference on all cores could degrade" + " performance. 
Pass a '--socket-id' to specify running on a" + " single socket instead.\n") + + def __init__(self, args, custom_args, platform_util): + super(ModelInitializer, self).__init__(args, custom_args, platform_util) + + self.run_inference_sanity_checks(self.args, self.custom_args) + + # Set KMP env vars, if they haven't already been set + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) + + self.set_num_inter_intra_threads() + + set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) + + self.model_dir = os.path.join(self.args.intelai_models, self.args.mode, self.args.precision) + + # get benchmark command + benchmark_script = os.path.join(self.model_dir, "infer_detections.py") + + # get command with numactl + self.run_cmd = self.get_command_prefix(self.args.socket_id) + self.run_cmd += "{0} {1}".format(self.python_exe, benchmark_script) + self.run_cmd += " --input-graph {0}".format(self.args.input_graph) + self.run_cmd += " --batch-size {0}".format(args.batch_size) + self.run_cmd += " --inter-op-parallelism-threads {0}".format(self.args.num_inter_threads) + self.run_cmd += " --intra-op-parallelism-threads {0}".format(self.args.num_intra_threads) + + if self.args.accuracy_only: + self.run_cmd += " --accuracy-only " + self.run_cmd += " --data-location {0}".format(self.args.data_location) + + def run(self): + old_python_path = os.environ["PYTHONPATH"] + os.environ["PYTHONPATH"] = os.path.join(self.args.model_source_dir, "research") + os.environ["PYTHONPATH"] += ":/tmp/benchmarks/scripts/tf_cnn_benchmarks/" + self.run_command(self.run_cmd) + os.environ["PYTHONPATH"] = old_python_path diff --git a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/coco_metric.py b/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/coco_metric.py deleted file mode 100644 index 08f3b7e5a..000000000 --- a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/coco_metric.py +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright 2018 Google. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""COCO-style evaluation metrics. - -Forked from reference model implementation. 
- -COCO API: github.com/cocodataset/cocoapi/ -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import atexit -import tempfile - -from absl import flags - -import numpy as np -from pycocotools.coco import COCO -from pycocotools.cocoeval import COCOeval -import six - -import tensorflow as tf - -import ssd_constants - -FLAGS = flags.FLAGS - - -# https://github.com/cocodataset/cocoapi/issues/49 -if six.PY3: - import pycocotools.coco - pycocotools.coco.unicode = str - - -def async_eval_runner(queue_predictions, queue_results, val_json_file): - """Load intermediate eval results and get COCO metrics.""" - while True: - message = queue_predictions.get() - if message == 'STOP': # poison pill - break - step, predictions = message - results = compute_map(predictions, val_json_file) - queue_results.put((step, results)) - - -def compute_map(predictions, val_json_file): - """Use model predictions to compute mAP. - - Args: - predictions: a list of tuples returned by decoded_predictions function, - each containing the following elements: - image source_id, box coordinates in XYWH order, probability score, label - val_json_file: path to COCO annotation file - Returns: - A dictionary that maps all COCO metrics (keys) to their values - """ - - if val_json_file.startswith("gs://"): - _, local_val_json = tempfile.mkstemp(suffix=".json") - tf.gfile.Remove(local_val_json) - - tf.gfile.Copy(val_json_file, local_val_json) - atexit.register(tf.gfile.Remove, local_val_json) - else: - local_val_json = val_json_file - - cocoGt = COCO(local_val_json) - cocoDt = cocoGt.loadRes(np.array(predictions)) - E = COCOeval(cocoGt, cocoDt, iouType='bbox') - E.evaluate() - E.accumulate() - E.summarize() - print("Current AP: {:.5f}".format(E.stats[0])) - metric_names = ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'ARmax1', - 'ARmax10', 'ARmax100', 'ARs', 'ARm', 'ARl'] - - # Prefix with "COCO" to group in TensorBoard. - return {"COCO/" + key: value for key, value in zip(metric_names, E.stats)} - - -def calc_iou(target, candidates): - target_tiled = np.tile(target[np.newaxis, :], (candidates.shape[0], 1)) - # Left Top & Right Bottom - lt = np.maximum(target_tiled[:,:2], candidates[:,:2]) - - rb = np.minimum(target_tiled[:,2:], candidates[:,2:]) - - delta = np.maximum(rb - lt, 0) - - intersect = delta[:,0] * delta[:,1] - - delta1 = target_tiled[:,2:] - candidates[:,:2] - area1 = delta1[:,0] * delta1[:,1] - delta2 = target_tiled[:,2:] - candidates[:,:2] - area2 = delta2[:,0] * delta2[:,1] - - iou = intersect/(area1 + area2 - intersect) - return iou - - -# TODO(haoyuzhang): Rewrite this NumPy based implementation to TensorFlow based -# implementation under ssd_model.py accuracy_function. 
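As a toy check of the corner-coordinate convention used by `calc_iou` above (the boxes are pairs of corners; the values below are made up, and each box's area is computed from its own corners):

```
import numpy as np

# Two 2x2 boxes, [y1, x1, y2, x2], overlapping on a 1x1 patch
target = np.array([0.0, 0.0, 2.0, 2.0])
candidates = np.array([[1.0, 1.0, 3.0, 3.0]])
lt = np.maximum(target[:2], candidates[:, :2])  # intersection top-left
rb = np.minimum(target[2:], candidates[:, 2:])  # intersection bottom-right
delta = np.maximum(rb - lt, 0)
intersect = delta[:, 0] * delta[:, 1]  # 1.0
area_target = (target[2] - target[0]) * (target[3] - target[1])  # 4.0
area_cand = (candidates[:, 2] - candidates[:, 0]) * \
    (candidates[:, 3] - candidates[:, 1])  # 4.0
print(intersect / (area_target + area_cand - intersect))  # [0.14285714] = 1/7
```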
-def decode_predictions(labels_and_predictions): - """Decode predictions and remove unused boxes and labels.""" - predictions = [] - for example in labels_and_predictions: - source_id = int(example[ssd_constants.SOURCE_ID]) - pred_box = example[ssd_constants.PRED_BOXES] - pred_scores = example[ssd_constants.PRED_SCORES] - - locs, labels, probs = decode_single( - pred_box, pred_scores, ssd_constants.OVERLAP_CRITERIA, - ssd_constants.MAX_NUM_EVAL_BOXES, ssd_constants.MAX_NUM_EVAL_BOXES) - - raw_height, raw_width, _ = example[ssd_constants.RAW_SHAPE] - for loc, label, prob in zip(locs, labels, probs): - # Ordering convention differs, hence [1], [0] rather than [0], [1] - x, y = loc[1] * raw_width, loc[0] * raw_height - w, h = (loc[3] - loc[1]) * raw_width, (loc[2] - loc[0]) * raw_height - predictions.append( - [source_id, x, y, w, h, prob, ssd_constants.CLASS_INV_MAP[label]]) - return predictions - - -def decode_single(bboxes_in, scores_in, criteria, max_output, max_num=200): - # Reference to https://github.com/amdegroot/ssd.pytorch - - bboxes_out = [] - scores_out = [] - labels_out = [] - - for i, score in enumerate(np.split(scores_in, scores_in.shape[1], 1)): - score = np.squeeze(score, 1) - - # skip background - if i == 0: - continue - - mask = score > ssd_constants.MIN_SCORE - if not np.any(mask): - continue - - bboxes, score = bboxes_in[mask, :], score[mask] - - score_idx_sorted = np.argsort(score) - score_sorted = score[score_idx_sorted] - - score_idx_sorted = score_idx_sorted[-max_num:] - candidates = [] - - # perform non-maximum suppression - while len(score_idx_sorted): - idx = score_idx_sorted[-1] - bboxes_sorted = bboxes[score_idx_sorted, :] - bboxes_idx = bboxes[idx, :] - iou = calc_iou(bboxes_idx, bboxes_sorted) - - score_idx_sorted = score_idx_sorted[iou < criteria] - candidates.append(idx) - - bboxes_out.append(bboxes[candidates, :]) - scores_out.append(score[candidates]) - labels_out.extend([i]*len(candidates)) - - if len(scores_out) == 0: - tf.logging.info("No objects detected. Returning dummy values.") - return ( - np.zeros(shape=(1, 4), dtype=np.float32), - np.zeros(shape=(1,), dtype=np.int32), - np.ones(shape=(1,), dtype=np.float32) * ssd_constants.DUMMY_SCORE, - ) - - bboxes_out = np.concatenate(bboxes_out, axis=0) - scores_out = np.concatenate(scores_out, axis=0) - labels_out = np.array(labels_out) - - max_ids = np.argsort(scores_out)[-max_output:] - - return bboxes_out[max_ids, :], labels_out[max_ids], scores_out[max_ids] diff --git a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/datasets.py b/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/datasets.py deleted file mode 100644 index 58c0f0dff..000000000 --- a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/datasets.py +++ /dev/null @@ -1,251 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Benchmark dataset utilities. 
-""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from abc import abstractmethod -import os - -import numpy as np -import six -from six.moves import cPickle -from six.moves import xrange # pylint: disable=redefined-builtin -import tensorflow as tf - -from tensorflow.python.platform import gfile -import preprocessing - -IMAGENET_NUM_TRAIN_IMAGES = 1281167 -IMAGENET_NUM_VAL_IMAGES = 50000 - -COCO_NUM_TRAIN_IMAGES = 118287 -COCO_NUM_VAL_IMAGES = 4952 - - -class Dataset(object): - """Abstract class for cnn benchmarks dataset.""" - - def __init__(self, - name, - data_dir=None, - queue_runner_required=False, - num_classes=None): - self.name = name - self.data_dir = data_dir - self._queue_runner_required = queue_runner_required - self._num_classes = num_classes - - def tf_record_pattern(self, subset): - return os.path.join(self.data_dir, '%s-*-of-*' % subset) - - def reader(self): - return tf.TFRecordReader() - - @property - def num_classes(self): - return self._num_classes - - @num_classes.setter - def num_classes(self, val): - self._num_classes = val - - @abstractmethod - def num_examples_per_epoch(self, subset): - pass - - def __str__(self): - return self.name - - def get_input_preprocessor(self, input_preprocessor='default'): - assert not self.use_synthetic_gpu_inputs() - return _SUPPORTED_INPUT_PREPROCESSORS[self.name][input_preprocessor] - - def queue_runner_required(self): - return self._queue_runner_required - - def use_synthetic_gpu_inputs(self): - return not self.data_dir - - -class LibrispeechDataset(Dataset): - """Configuration for LibriSpeech dataset.""" - - def __init__(self, data_dir=None): - super(LibrispeechDataset, self).__init__( - 'librispeech', data_dir, num_classes=29) - - def tf_record_pattern(self, subset): - if subset == 'train': - return os.path.join(self.data_dir, 'train-clean-*.tfrecords') - elif subset == 'validation': - return os.path.join(self.data_dir, 'test-clean.tfrecords') - else: - return '' - - def num_examples_per_epoch(self, subset='train'): - del subset - return 2 # TODO(laigd): currently this is an arbitrary number. - - -class ImageDataset(Dataset): - """Abstract class for image datasets.""" - - def __init__(self, - name, - height, - width, - depth=None, - data_dir=None, - queue_runner_required=False, - num_classes=1001): - super(ImageDataset, self).__init__(name, data_dir, queue_runner_required, - num_classes) - self.height = height - self.width = width - self.depth = depth or 3 - - -class ImagenetDataset(ImageDataset): - """Configuration for Imagenet dataset.""" - - def __init__(self, data_dir=None): - super(ImagenetDataset, self).__init__( - 'imagenet', 300, 300, data_dir=data_dir) - - def num_examples_per_epoch(self, subset='train'): - if subset == 'train': - return IMAGENET_NUM_TRAIN_IMAGES - elif subset == 'validation': - return IMAGENET_NUM_VAL_IMAGES - else: - raise ValueError('Invalid data subset "%s"' % subset) - - -class Cifar10Dataset(ImageDataset): - """Configuration for cifar 10 dataset. - - It will mount all the input images to memory. 
-  """
-
-  def __init__(self, data_dir=None):
-    super(Cifar10Dataset, self).__init__(
-        'cifar10',
-        32,
-        32,
-        data_dir=data_dir,
-        queue_runner_required=True,
-        num_classes=11)
-
-  def read_data_files(self, subset='train'):
-    """Reads from data file and returns images and labels in a numpy array."""
-    assert self.data_dir, ('Cannot call `read_data_files` when using synthetic '
-                           'data')
-    if subset == 'train':
-      filenames = [
-          os.path.join(self.data_dir, 'data_batch_%d' % i)
-          for i in xrange(1, 6)
-      ]
-    elif subset == 'validation':
-      filenames = [os.path.join(self.data_dir, 'test_batch')]
-    else:
-      raise ValueError('Invalid data subset "%s"' % subset)
-
-    inputs = []
-    for filename in filenames:
-      with gfile.Open(filename, 'rb') as f:
-        # python2 does not have the encoding parameter
-        encoding = {} if six.PY2 else {'encoding': 'bytes'}
-        inputs.append(cPickle.load(f, **encoding))
-    # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
-    # input format.
-    all_images = np.concatenate(
-        [each_input[b'data'] for each_input in inputs]).astype(np.float32)
-    all_labels = np.concatenate(
-        [each_input[b'labels'] for each_input in inputs])
-    return all_images, all_labels
-
-  def num_examples_per_epoch(self, subset='train'):
-    if subset == 'train':
-      return 50000
-    elif subset == 'validation':
-      return 10000
-    else:
-      raise ValueError('Invalid data subset "%s"' % subset)
-
-
-class COCODataset(ImageDataset):
-  """Configuration for COCO dataset."""
-
-  def __init__(self, data_dir=None, image_size=300):
-    super(COCODataset, self).__init__(
-        'coco', image_size, image_size, data_dir=data_dir, num_classes=81)
-
-  def num_examples_per_epoch(self, subset='train'):
-    if subset == 'train':
-      return COCO_NUM_TRAIN_IMAGES
-    elif subset == 'validation':
-      return COCO_NUM_VAL_IMAGES
-    else:
-      raise ValueError('Invalid data subset "%s"' % subset)
-
-
-_SUPPORTED_DATASETS = {
-    'imagenet': ImagenetDataset,
-    'cifar10': Cifar10Dataset,
-    'librispeech': LibrispeechDataset,
-    'coco': COCODataset,
-}
-
-_SUPPORTED_INPUT_PREPROCESSORS = {
-    'imagenet': {
-        'default': preprocessing.RecordInputImagePreprocessor,
-        'official_models_imagenet': preprocessing.ImagenetPreprocessor,
-    },
-    'cifar10': {
-        'default': preprocessing.Cifar10ImagePreprocessor
-    },
-    'librispeech': {
-        'default': preprocessing.LibrispeechPreprocessor
-    },
-    'coco': {
-        'default': preprocessing.COCOPreprocessor
-    },
-}
-
-
-def create_dataset(data_dir, data_name):
-  """Create a Dataset instance based on data_dir and data_name."""
-  if not data_dir and not data_name:
-    # When using synthetic data, use synthetic imagenet images by default.
-    data_name = 'imagenet'
-
-  # Infer dataset name from data_dir if data_name is not provided.
-  if data_name is None:
-    for supported_name in _SUPPORTED_DATASETS:
-      if supported_name in data_dir:
-        data_name = supported_name
-        break
-    else:  # Failed to identify dataset name from data dir.
-      raise ValueError('Could not identify name of dataset. '
-                       'Please specify with --data_name option.')
-  if data_name not in _SUPPORTED_DATASETS:
-    raise ValueError('Unknown dataset. 
Must be one of %s' % ', '.join( - [key for key in sorted(_SUPPORTED_DATASETS.keys())])) - - return _SUPPORTED_DATASETS[data_name](data_dir) diff --git a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/infer_detections.py b/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/infer_detections.py index f2666a94c..657469658 100644 --- a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/infer_detections.py +++ b/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/infer_detections.py @@ -23,9 +23,10 @@ from argparse import ArgumentParser +import benchmark_cnn import datasets import ssd_constants -import ssd_model +from models import ssd_model from preprocessing import COCOPreprocessor IMAGE_SIZE = 300 @@ -168,7 +169,8 @@ def accuracy_check(self): ds_init = tf.get_collection(tf.GraphKeys.TABLE_INITIALIZERS) ds_sess = tf.Session() - self.model = ssd_model.SSD300Model(self.args.data_location) + params = benchmark_cnn.make_params(data_dir=self.args.data_location) + self.model = ssd_model.SSD300Model(params=params) print("Inference for accuracy check.") with tf.Session(graph=self.freeze_graph, config=self.config) as sess: diff --git a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/preprocessing.py b/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/preprocessing.py deleted file mode 100644 index 6814a48cd..000000000 --- a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/preprocessing.py +++ /dev/null @@ -1,1259 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Image pre-processing utilities. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import math -from six.moves import xrange # pylint: disable=redefined-builtin -import tensorflow as tf - -from tensorflow.contrib.data.python.ops import threadpool -from tensorflow.contrib.image.python.ops import distort_image_ops -from tensorflow.contrib.data.python.ops import interleave_ops -from tensorflow.contrib.data.python.ops import batching -from tensorflow.python.framework import function -from tensorflow.python.layers import utils -from tensorflow.python.ops import data_flow_ops -from tensorflow.python.platform import gfile - - -def parse_example_proto(example_serialized): - """Parses an Example proto containing a training example of an image. - - The output of the build_image_data.py image preprocessing script is a dataset - containing serialized Example protocol buffers. 
Each Example proto contains - the following fields: - - image/height: 462 - image/width: 581 - image/colorspace: 'RGB' - image/channels: 3 - image/class/label: 615 - image/class/synset: 'n03623198' - image/class/text: 'knee pad' - image/object/bbox/xmin: 0.1 - image/object/bbox/xmax: 0.9 - image/object/bbox/ymin: 0.2 - image/object/bbox/ymax: 0.6 - image/object/bbox/label: 615 - image/format: 'JPEG' - image/filename: 'ILSVRC2012_val_00041207.JPEG' - image/encoded: - - Args: - example_serialized: scalar Tensor tf.string containing a serialized - Example protocol buffer. - - Returns: - image_buffer: Tensor tf.string containing the contents of a JPEG file. - label: Tensor tf.int32 containing the label. - bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] - where each coordinate is [0, 1) and the coordinates are arranged as - [ymin, xmin, ymax, xmax]. - text: Tensor tf.string containing the human-readable label. - """ - # Dense features in Example proto. - feature_map = { - 'image/encoded': tf.FixedLenFeature([], dtype=tf.string, - default_value=''), - 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, - default_value=-1), - 'image/class/text': tf.FixedLenFeature([], dtype=tf.string, - default_value=''), - } - sparse_float32 = tf.VarLenFeature(dtype=tf.float32) - # Sparse features in Example proto. - feature_map.update( - {k: sparse_float32 for k in ['image/object/bbox/xmin', - 'image/object/bbox/ymin', - 'image/object/bbox/xmax', - 'image/object/bbox/ymax']}) - - features = tf.parse_single_example(example_serialized, feature_map) - label = tf.cast(features['image/class/label'], dtype=tf.int32) - - xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0) - ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0) - xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0) - ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0) - - # Note that we impose an ordering of (y, x) just to make life difficult. - bbox = tf.concat([ymin, xmin, ymax, xmax], 0) - - # Force the variable number of bounding boxes into the shape - # [1, num_boxes, coords]. - bbox = tf.expand_dims(bbox, 0) - bbox = tf.transpose(bbox, [0, 2, 1]) - - return features['image/encoded'], label, bbox, features['image/class/text'] - - -_RESIZE_METHOD_MAP = { - 'nearest': tf.image.ResizeMethod.NEAREST_NEIGHBOR, - 'bilinear': tf.image.ResizeMethod.BILINEAR, - 'bicubic': tf.image.ResizeMethod.BICUBIC, - 'area': tf.image.ResizeMethod.AREA -} - - -def get_image_resize_method(resize_method, batch_position=0): - """Get tensorflow resize method. - - If resize_method is 'round_robin', return different methods based on batch - position in a round-robin fashion. NOTE: If the batch size is not a multiple - of the number of methods, then the distribution of methods will not be - uniform. - - Args: - resize_method: (string) nearest, bilinear, bicubic, area, or round_robin. - batch_position: position of the image in a batch. NOTE: this argument can - be an integer or a tensor - Returns: - one of resize type defined in tf.image.ResizeMethod. - """ - - if resize_method != 'round_robin': - return _RESIZE_METHOD_MAP[resize_method] - - # return a resize method based on batch position in a round-robin fashion. 
-  resize_methods = list(_RESIZE_METHOD_MAP.values())
-  def lookup(index):
-    return resize_methods[index]
-
-  def resize_method_0():
-    return utils.smart_cond(batch_position % len(resize_methods) == 0,
-                            lambda: lookup(0), resize_method_1)
-
-  def resize_method_1():
-    return utils.smart_cond(batch_position % len(resize_methods) == 1,
-                            lambda: lookup(1), resize_method_2)
-
-  def resize_method_2():
-    return utils.smart_cond(batch_position % len(resize_methods) == 2,
-                            lambda: lookup(2), lambda: lookup(3))
-
-  # NOTE(jsimsa): Unfortunately, we cannot use a single recursive function here
-  # because TF would not be able to construct a finite graph.
-
-  return resize_method_0()
-
-
-def decode_jpeg(image_buffer, scope=None):  # , dtype=tf.float32):
-  """Decode a JPEG string into one 3-D float image Tensor.
-
-  Args:
-    image_buffer: scalar string Tensor.
-    scope: Optional scope for op_scope.
-  Returns:
-    3-D float Tensor with values ranging from [0, 1).
-  """
-  # with tf.op_scope([image_buffer], scope, 'decode_jpeg'):
-  # with tf.name_scope(scope, 'decode_jpeg', [image_buffer]):
-  with tf.name_scope(scope or 'decode_jpeg'):
-    # Decode the string as an RGB JPEG.
-    # Note that the resulting image contains an unknown height and width
-    # that is set dynamically by decode_jpeg. In other words, the height
-    # and width of image is unknown at compile-time.
-    image = tf.image.decode_jpeg(image_buffer, channels=3,
-                                 fancy_upscaling=False,
-                                 dct_method='INTEGER_FAST')
-
-    # image = tf.Print(image, [tf.shape(image)], 'Image shape: ')
-
-    return image
-
-
-_R_MEAN = 123.68
-_G_MEAN = 116.78
-_B_MEAN = 103.94
-_CHANNEL_MEANS = [_R_MEAN, _G_MEAN, _B_MEAN]
-
-
-def normalized_image(images):
-  # Rescale from [0, 255] to [0, 2]
-  images = tf.multiply(images, 1. / 127.5)
-  # Rescale to [-1, 1]
-  return tf.subtract(images, 1.0)
-
-
-def eval_image(image,
-               height,
-               width,
-               batch_position,
-               resize_method,
-               summary_verbosity=0):
-  """Get the image for model evaluation.
-
-  We preprocess the image similarly to Slim, see
-  https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/vgg_preprocessing.py
-  Validation images do not have bounding boxes, so to crop the image, we first
-  resize the image such that the aspect ratio is maintained and the resized
-  height and width are both at least 1.145 times `height` and `width`
-  respectively. Then, we do a central crop to size (`height`, `width`).
-
-  Args:
-    image: 3-D float Tensor representing the image.
-    height: The height of the image that will be returned.
-    width: The width of the image that will be returned.
-    batch_position: position of the image in a batch, which affects how images
-      are distorted and resized. NOTE: this argument can be an integer or a
-      tensor
-    resize_method: one of the strings 'round_robin', 'nearest', 'bilinear',
-      'bicubic', or 'area'.
-    summary_verbosity: Verbosity level for summary ops. Pass 0 to disable both
-      summaries and checkpoints.
-  Returns:
-    An image of size (output_height, output_width, 3) that is resized and
-    cropped as described above.
-  """
-  # TODO(reedwm): Currently we resize then crop. Investigate if it's faster to
-  # crop then resize.
-  with tf.name_scope('eval_image'):
-    if summary_verbosity >= 3:
-      tf.summary.image(
-          'original_image', tf.expand_dims(image, 0))
-
-    shape = tf.shape(image)
-    image_height = shape[0]
-    image_width = shape[1]
-    image_height_float = tf.cast(image_height, tf.float32)
-    image_width_float = tf.cast(image_width, tf.float32)
-
-    # This value is chosen so that in resnet, images are cropped to a size of
-    # 256 x 256, which matches what other implementations do. The final image
-    # size for resnet is 224 x 224, and floor(224 * 1.145) = 256.
-    scale_factor = 1.145
-
-    # Compute resize_height and resize_width to be the minimum values such that
-    #   1. The aspect ratio is maintained (i.e. resize_height / resize_width is
-    #      image_height / image_width), and
-    #   2. resize_height >= height * `scale_factor`, and
-    #   3. resize_width >= width * `scale_factor`
-    max_ratio = tf.maximum(height / image_height_float,
-                           width / image_width_float)
-    resize_height = tf.cast(image_height_float * max_ratio * scale_factor,
-                            tf.int32)
-    resize_width = tf.cast(image_width_float * max_ratio * scale_factor,
-                           tf.int32)
-
-    # Resize the image to shape (`resize_height`, `resize_width`)
-    image_resize_method = get_image_resize_method(resize_method, batch_position)
-    distorted_image = tf.image.resize_images(image,
-                                             [resize_height, resize_width],
-                                             image_resize_method,
-                                             align_corners=False)
-
-    # Do a central crop of the image to size (height, width).
-    # MLPerf requires us to log (height, width) with two different keys.
-    total_crop_height = (resize_height - height)
-    crop_top = total_crop_height // 2
-    total_crop_width = (resize_width - width)
-    crop_left = total_crop_width // 2
-    distorted_image = tf.slice(distorted_image, [crop_top, crop_left, 0],
-                               [height, width, 3])
-
-    distorted_image.set_shape([height, width, 3])
-    if summary_verbosity >= 3:
-      tf.summary.image(
-          'cropped_resized_image', tf.expand_dims(distorted_image, 0))
-    image = distorted_image
-  return image
-
-
-def train_image(image_buffer,
-                height,
-                width,
-                bbox,
-                batch_position,
-                resize_method,
-                distortions,
-                scope=None,
-                summary_verbosity=0,
-                distort_color_in_yiq=False,
-                fuse_decode_and_crop=False):
-  """Distort one image for training a network.
-
-  Distorting images provides a useful technique for augmenting the data
-  set during training in order to make the network invariant to aspects
-  of the image that do not affect the label.
-
-  Args:
-    image_buffer: scalar string Tensor representing the raw JPEG image buffer.
-    height: integer
-    width: integer
-    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
-      where each coordinate is [0, 1) and the coordinates are arranged
-      as [ymin, xmin, ymax, xmax].
-    batch_position: position of the image in a batch, which affects how images
-      are distorted and resized. NOTE: this argument can be an integer or a
-      tensor
-    resize_method: round_robin, nearest, bilinear, bicubic, or area.
-    distortions: If true, apply full distortions for image colors.
-    scope: Optional scope for op_scope.
-    summary_verbosity: Verbosity level for summary ops. Pass 0 to disable both
-      summaries and checkpoints.
-    distort_color_in_yiq: distort color of input images in YIQ space.
-    fuse_decode_and_crop: fuse the decode/crop operation.
-  Returns:
-    3-D float Tensor of distorted image used for training.
- """ - # with tf.op_scope([image, height, width, bbox], scope, 'distort_image'): - # with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]): - with tf.name_scope(scope or 'distort_image'): - # A large fraction of image datasets contain a human-annotated bounding box - # delineating the region of the image containing the object of interest. We - # choose to create a new bounding box for the object which is a randomly - # distorted version of the human-annotated bounding box that obeys an - # allowed range of aspect ratios, sizes and overlap with the human-annotated - # bounding box. If no box is supplied, then we assume the bounding box is - # the entire image. - min_object_covered = 0.1 - aspect_ratio_range = [0.75, 1.33] - area_range = [0.05, 1.0] - max_attempts = 100 - - sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( - tf.image.extract_jpeg_shape(image_buffer), - bounding_boxes=bbox, - min_object_covered=min_object_covered, - aspect_ratio_range=aspect_ratio_range, - area_range=area_range, - max_attempts=max_attempts, - use_image_if_no_bounding_boxes=True) - bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box - if summary_verbosity >= 3: - image = tf.image.decode_jpeg(image_buffer, channels=3, - dct_method='INTEGER_FAST') - image = tf.image.convert_image_dtype(image, dtype=tf.float32) - image_with_distorted_box = tf.image.draw_bounding_boxes( - tf.expand_dims(image, 0), distort_bbox) - tf.summary.image( - 'images_with_distorted_bounding_box', - image_with_distorted_box) - - # Crop the image to the specified bounding box. - if fuse_decode_and_crop: - offset_y, offset_x, _ = tf.unstack(bbox_begin) - target_height, target_width, _ = tf.unstack(bbox_size) - crop_window = tf.stack([offset_y, offset_x, target_height, target_width]) - image = tf.image.decode_and_crop_jpeg( - image_buffer, crop_window, channels=3) - else: - image = tf.image.decode_jpeg(image_buffer, channels=3, - dct_method='INTEGER_FAST') - image = tf.slice(image, bbox_begin, bbox_size) - - distorted_image = tf.image.random_flip_left_right(image) - - # This resizing operation may distort the images because the aspect - # ratio is not respected. - image_resize_method = get_image_resize_method(resize_method, batch_position) - distorted_image = tf.image.resize_images( - distorted_image, [height, width], - image_resize_method, - align_corners=False) - # Restore the shape since the dynamic slice based upon the bbox_size loses - # the third dimension. - distorted_image.set_shape([height, width, 3]) - if summary_verbosity >= 3: - tf.summary.image('cropped_resized_maybe_flipped_image', - tf.expand_dims(distorted_image, 0)) - - if distortions: - distorted_image = tf.cast(distorted_image, dtype=tf.float32) - # Images values are expected to be in [0,1] for color distortion. - distorted_image /= 255. - # Randomly distort the colors. - distorted_image = distort_color(distorted_image, batch_position, - distort_color_in_yiq=distort_color_in_yiq) - - # Note: This ensures the scaling matches the output of eval_image - distorted_image *= 255 - - if summary_verbosity >= 3: - tf.summary.image( - 'final_distorted_image', - tf.expand_dims(distorted_image, 0)) - return distorted_image - - -def distort_color(image, batch_position=0, distort_color_in_yiq=False, - scope=None): - """Distort the color of the image. - - Each color distortion is non-commutative and thus ordering of the color ops - matters. Ideally we would randomly permute the ordering of the color ops. 
-  Rather than adding that level of complication, we select a distinct ordering
-  of color ops based on the position of the image in a batch.
-
-  Args:
-    image: float32 Tensor containing single image. Tensor values should be in
-      range [0, 1].
-    batch_position: the position of the image in a batch. NOTE: this argument
-      can be an integer or a tensor
-    distort_color_in_yiq: distort color of input images in YIQ space.
-    scope: Optional scope for op_scope.
-  Returns:
-    color-distorted image
-  """
-  with tf.name_scope(scope or 'distort_color'):
-
-    def distort_fn_0(image=image):
-      """Variant 0 of distort function."""
-      image = tf.image.random_brightness(image, max_delta=32. / 255.)
-      if distort_color_in_yiq:
-        image = distort_image_ops.random_hsv_in_yiq(
-            image, lower_saturation=0.5, upper_saturation=1.5,
-            max_delta_hue=0.2 * math.pi)
-      else:
-        image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
-        image = tf.image.random_hue(image, max_delta=0.2)
-      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
-      return image
-
-    def distort_fn_1(image=image):
-      """Variant 1 of distort function."""
-      image = tf.image.random_brightness(image, max_delta=32. / 255.)
-      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
-      if distort_color_in_yiq:
-        image = distort_image_ops.random_hsv_in_yiq(
-            image, lower_saturation=0.5, upper_saturation=1.5,
-            max_delta_hue=0.2 * math.pi)
-      else:
-        image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
-        image = tf.image.random_hue(image, max_delta=0.2)
-      return image
-
-    image = utils.smart_cond(batch_position % 2 == 0, distort_fn_0,
-                             distort_fn_1)
-    # The random_* ops do not necessarily clamp.
-    image = tf.clip_by_value(image, 0.0, 1.0)
-    return image
-
-
-class InputPreprocessor(object):
-  """Base class for all model preprocessors."""
-
-  def __init__(self, batch_size, output_shapes):
-    self.batch_size = batch_size
-    self.output_shapes = output_shapes
-
-  def supports_datasets(self):
-    """Whether this preprocessor supports dataset."""
-    return False
-
-  def minibatch(self, dataset, subset, params, shift_ratio=-1):
-    """Returns tensors representing a minibatch of all the input."""
-    raise NotImplementedError('Must be implemented by subclass.')
-
-  # The methods added below are only supported/used if supports_datasets()
-  # returns True.
-  # TODO(laigd): refactor benchmark_cnn.py and put the logic of
-  # _build_input_processing() into InputPreprocessor.
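The position-based dispatch that `distort_color` performs with `utils.smart_cond` reduces to a simple even/odd choice; a rough pure-Python analogue, where the strings stand in for the corresponding `tf.image.random_*` ops and the function name is illustrative only:

```
def distort_color_variant(batch_position):
    # Even batch positions use variant 0, odd positions variant 1,
    # mirroring distort_fn_0 / distort_fn_1 above.
    variant_0 = ['brightness', 'saturation', 'hue', 'contrast']
    variant_1 = ['brightness', 'contrast', 'saturation', 'hue']
    return variant_0 if batch_position % 2 == 0 else variant_1

assert distort_color_variant(0) == distort_color_variant(2)  # ordering reused
assert distort_color_variant(0) != distort_color_variant(1)  # neighbors differ
```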
-
-  def parse_and_preprocess(self, value, batch_position):
-    """Function to parse and preprocess an Example proto in input pipeline."""
-    raise NotImplementedError('Must be implemented by subclass.')
-
-  def build_prefetch_input_processing(self, batch_size, model_input_shapes,
-                                      num_splits, cpu_device, params,
-                                      gpu_devices, model_input_data_types,
-                                      dataset, doing_eval):
-    """Returns FunctionBufferingResources that do input preprocessing."""
-    assert self.supports_datasets()
-    with tf.device(cpu_device):
-      if doing_eval:
-        subset = 'validation'
-      else:
-        subset = 'train'
-
-      function_buffering_resources = []
-      remote_fn, args = self.minibatch_fn(
-          batch_size=batch_size,
-          model_input_shapes=model_input_shapes,
-          num_splits=num_splits,
-          dataset=dataset,
-          subset=subset,
-          train=(not doing_eval),
-          datasets_repeat_cached_sample=params.datasets_repeat_cached_sample,
-          num_threads=params.datasets_num_private_threads,
-          datasets_use_caching=params.datasets_use_caching,
-          datasets_parallel_interleave_cycle_length=(
-              params.datasets_parallel_interleave_cycle_length),
-          datasets_sloppy_parallel_interleave=(
-              params.datasets_sloppy_parallel_interleave),
-          datasets_parallel_interleave_prefetch=(
-              params.datasets_parallel_interleave_prefetch))
-      for device_num in range(len(gpu_devices)):
-        with tf.device(gpu_devices[device_num]):
-          buffer_resource_handle = prefetching_ops.function_buffering_resource(
-              f=remote_fn,
-              output_types=model_input_data_types,
-              target_device=cpu_device,
-              string_arg=args[0],
-              buffer_size=params.datasets_prefetch_buffer_size,
-              shared_name=None)
-          function_buffering_resources.append(buffer_resource_handle)
-      return function_buffering_resources
-
-  # TODO(laigd): figure out how to remove these parameters, since the
-  # preprocessor itself has self.batch_size, self.num_splits, etc defined.
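Both `build_prefetch_input_processing` above and `build_multi_device_iterator` below rely on the same batch-splitting rule: the global batch divides evenly across input splits, one per device. A tiny standalone restatement (the `split_batch` name is ours; the error text mirrors the check in `BaseImagePreprocessor.__init__` later in this file):

```
def split_batch(batch_size, num_splits):
    # The global batch must divide evenly across input splits, one per device.
    if batch_size % num_splits != 0:
        raise ValueError('batch_size must be a multiple of num_splits: '
                         'batch_size %d, num_splits: %d'
                         % (batch_size, num_splits))
    return batch_size // num_splits

assert split_batch(32, 4) == 8  # each of 4 devices consumes 8 examples per step
```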
- def build_multi_device_iterator(self, batch_size, num_splits, cpu_device, - params, gpu_devices, dataset, doing_eval): - """Creates a MultiDeviceIterator.""" - assert self.supports_datasets() - assert num_splits == len(gpu_devices) - with tf.name_scope('batch_processing'): - if doing_eval: - subset = 'validation' - else: - subset = 'train' - batch_size_per_split = batch_size // num_splits - ds = self.create_dataset( - batch_size, - num_splits, - batch_size_per_split, - dataset, - subset, - train=(not doing_eval), - datasets_repeat_cached_sample=params.datasets_repeat_cached_sample, - num_threads=params.datasets_num_private_threads, - datasets_use_caching=params.datasets_use_caching, - datasets_parallel_interleave_cycle_length=( - params.datasets_parallel_interleave_cycle_length), - datasets_sloppy_parallel_interleave=( - params.datasets_sloppy_parallel_interleave), - datasets_parallel_interleave_prefetch=( - params.datasets_parallel_interleave_prefetch)) - multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator( - ds, - gpu_devices, - source_device=cpu_device, - max_buffer_size=params.multi_device_iterator_max_buffer_size) - tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, - multi_device_iterator.initializer) - return multi_device_iterator - - def create_dataset(self, - batch_size, - num_splits, - batch_size_per_split, - dataset, - subset, - train, - datasets_repeat_cached_sample, - num_threads=None, - datasets_use_caching=False, - datasets_parallel_interleave_cycle_length=None, - datasets_sloppy_parallel_interleave=False, - datasets_parallel_interleave_prefetch=None): - """Creates a dataset for the benchmark.""" - raise NotImplementedError('Must be implemented by subclass.') - - def create_iterator(self, ds): - ds_iterator = ds.make_initializable_iterator() - tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, - ds_iterator.initializer) - return ds_iterator - - def minibatch_fn(self, batch_size, model_input_shapes, num_splits, - dataset, subset, train, datasets_repeat_cached_sample, - num_threads, datasets_use_caching, - datasets_parallel_interleave_cycle_length, - datasets_sloppy_parallel_interleave, - datasets_parallel_interleave_prefetch): - """Returns a function and list of args for the fn to create a minibatch.""" - assert self.supports_datasets() - batch_size_per_split = batch_size // num_splits - assert batch_size_per_split == model_input_shapes[0][0] - with tf.name_scope('batch_processing'): - ds = self.create_dataset(batch_size, num_splits, batch_size_per_split, - dataset, subset, train, - datasets_repeat_cached_sample, num_threads, - datasets_use_caching, - datasets_parallel_interleave_cycle_length, - datasets_sloppy_parallel_interleave, - datasets_parallel_interleave_prefetch) - ds_iterator = self.create_iterator(ds) - - ds_iterator_string_handle = ds_iterator.string_handle() - - @function.Defun(tf.string) - def _fn(h): - remote_iterator = tf.data.Iterator.from_string_handle( - h, ds_iterator.output_types, ds_iterator.output_shapes) - input_list = remote_iterator.get_next() - reshaped_input_list = [ - tf.reshape(input_list[i], shape=model_input_shapes[i]) - for i in range(len(input_list)) - ] - return reshaped_input_list - - return _fn, [ds_iterator_string_handle] - - -class BaseImagePreprocessor(InputPreprocessor): - """Base class for all image model preprocessors.""" - - def __init__(self, - batch_size, - output_shapes, - num_splits, - dtype, - train, - distortions, - resize_method, - shift_ratio=-1, - summary_verbosity=0, - distort_color_in_yiq=True, - 
fuse_decode_and_crop=True, - match_mlperf=False): - super(BaseImagePreprocessor, self).__init__(batch_size, output_shapes) - image_shape = output_shapes[0] - # image_shape is in form (batch_size, height, width, depth) - self.height = image_shape[1] - self.width = image_shape[2] - self.depth = image_shape[3] - self.num_splits = num_splits - self.dtype = dtype - self.train = train - self.resize_method = resize_method - self.shift_ratio = shift_ratio - self.distortions = distortions - self.distort_color_in_yiq = distort_color_in_yiq - self.fuse_decode_and_crop = fuse_decode_and_crop - if self.batch_size % self.num_splits != 0: - raise ValueError( - ('batch_size must be a multiple of num_splits: ' - 'batch_size %d, num_splits: %d') % - (self.batch_size, self.num_splits)) - self.batch_size_per_split = self.batch_size // self.num_splits - self.summary_verbosity = summary_verbosity - self.match_mlperf = match_mlperf - - def parse_and_preprocess(self, value, batch_position): - assert self.supports_datasets() - image_buffer, label_index, bbox, _ = parse_example_proto(value) - if self.match_mlperf: - bbox = tf.zeros((1, 0, 4), dtype=bbox.dtype) - image = self.preprocess(image_buffer, bbox, batch_position) - return (image, label_index) - - def preprocess(self, image_buffer, bbox, batch_position): - raise NotImplementedError('Must be implemented by subclass.') - - def create_dataset(self, - batch_size, - num_splits, - batch_size_per_split, - dataset, - subset, - train, - datasets_repeat_cached_sample, - num_threads=None, - datasets_use_caching=False, - datasets_parallel_interleave_cycle_length=None, - datasets_sloppy_parallel_interleave=False, - datasets_parallel_interleave_prefetch=None): - """Creates a dataset for the benchmark.""" - assert self.supports_datasets() - glob_pattern = dataset.tf_record_pattern(subset) - file_names = gfile.Glob(glob_pattern) - if not file_names: - raise ValueError('Found no files in --data_dir matching: {}' - .format(glob_pattern)) - ds = tf.data.TFRecordDataset.list_files(file_names) - ds = ds.apply( - interleave_ops.parallel_interleave( - tf.data.TFRecordDataset, - cycle_length=datasets_parallel_interleave_cycle_length or 10, - sloppy=datasets_sloppy_parallel_interleave, - prefetch_input_elements=datasets_parallel_interleave_prefetch)) - if datasets_repeat_cached_sample: - # Repeat a single sample element indefinitely to emulate memory-speed IO. 
- ds = ds.take(1).cache().repeat() - counter = tf.data.Dataset.range(batch_size) - counter = counter.repeat() - ds = tf.data.Dataset.zip((ds, counter)) - ds = ds.prefetch(buffer_size=batch_size) - if datasets_use_caching: - ds = ds.cache() - if train: - buffer_size = 10000 - ds = ds.apply( - tf.data.experimental.shuffle_and_repeat(buffer_size=buffer_size)) - else: - ds = ds.repeat() - ds = ds.apply( - batching.map_and_batch( - map_func=self.parse_and_preprocess, - batch_size=batch_size_per_split, - num_parallel_batches=num_splits)) - ds = ds.prefetch(buffer_size=num_splits) - if num_threads: - ds = threadpool.override_threadpool( - ds, - threadpool.PrivateThreadPool( - num_threads, display_name='input_pipeline_thread_pool')) - return ds - - -class RecordInputImagePreprocessor(BaseImagePreprocessor): - """Preprocessor for images with RecordInput format.""" - - def preprocess(self, image_buffer, bbox, batch_position): - """Preprocessing image_buffer as a function of its batch position.""" - if self.train: - image = train_image(image_buffer, self.height, self.width, bbox, - batch_position, self.resize_method, self.distortions, - None, summary_verbosity=self.summary_verbosity, - distort_color_in_yiq=self.distort_color_in_yiq, - fuse_decode_and_crop=self.fuse_decode_and_crop) - else: - image = tf.image.decode_jpeg( - image_buffer, channels=3, dct_method='INTEGER_FAST') - image = eval_image(image, self.height, self.width, batch_position, - self.resize_method, - summary_verbosity=self.summary_verbosity) - # Note: image is now float32 [height,width,3] with range [0, 255] - - # image = tf.cast(image, tf.uint8) # HACK TESTING - - if self.match_mlperf: - normalized = image - _CHANNEL_MEANS - else: - normalized = normalized_image(image) - return tf.cast(normalized, self.dtype) - - def minibatch(self, - dataset, - subset, - params, - shift_ratio=-1): - if shift_ratio < 0: - shift_ratio = self.shift_ratio - with tf.name_scope('batch_processing'): - # Build final results per split. - images = [[] for _ in range(self.num_splits)] - labels = [[] for _ in range(self.num_splits)] - if params.use_datasets: - ds = self.create_dataset( - self.batch_size, self.num_splits, self.batch_size_per_split, - dataset, subset, self.train, - datasets_repeat_cached_sample=params.datasets_repeat_cached_sample, - num_threads=params.datasets_num_private_threads, - datasets_use_caching=params.datasets_use_caching, - datasets_parallel_interleave_cycle_length=( - params.datasets_parallel_interleave_cycle_length), - datasets_sloppy_parallel_interleave=( - params.datasets_sloppy_parallel_interleave), - datasets_parallel_interleave_prefetch=( - params.datasets_parallel_interleave_prefetch)) - ds_iterator = self.create_iterator(ds) - for d in xrange(self.num_splits): - images[d], labels[d] = ds_iterator.get_next() - - # TODO(laigd): consider removing the --use_datasets option, it should - # always use datasets. 
- else: - record_input = data_flow_ops.RecordInput( - file_pattern=dataset.tf_record_pattern(subset), - seed=301, - parallelism=64, - buffer_size=10000, - batch_size=self.batch_size, - shift_ratio=shift_ratio, - name='record_input') - records = record_input.get_yield_op() - records = tf.split(records, self.batch_size, 0) - records = [tf.reshape(record, []) for record in records] - for idx in xrange(self.batch_size): - value = records[idx] - (image, label) = self.parse_and_preprocess(value, idx) - split_index = idx % self.num_splits - labels[split_index].append(label) - images[split_index].append(image) - - for split_index in xrange(self.num_splits): - if not params.use_datasets: - images[split_index] = tf.parallel_stack(images[split_index]) - labels[split_index] = tf.concat(labels[split_index], 0) - images[split_index] = tf.reshape( - images[split_index], - shape=[self.batch_size_per_split, self.height, self.width, - self.depth]) - labels[split_index] = tf.reshape(labels[split_index], - [self.batch_size_per_split]) - return images, labels - - def supports_datasets(self): - return True - - -class ImagenetPreprocessor(RecordInputImagePreprocessor): - - def preprocess(self, image_buffer, bbox, batch_position): - # pylint: disable=g-import-not-at-top - try: - from official.resnet.imagenet_preprocessing import preprocess_image - except ImportError: - tf.logging.fatal('Please include tensorflow/models to the PYTHONPATH.') - raise - if self.train: - image = preprocess_image( - image_buffer, bbox, self.height, self.width, self.depth, - is_training=True) - else: - image = preprocess_image( - image_buffer, bbox, self.height, self.width, self.depth, - is_training=False) - return tf.cast(image, self.dtype) - - -class Cifar10ImagePreprocessor(BaseImagePreprocessor): - """Preprocessor for Cifar10 input images.""" - - def _distort_image(self, image): - """Distort one image for training a network. - - Adopted the standard data augmentation scheme that is widely used for - this dataset: the images are first zero-padded with 4 pixels on each side, - then randomly cropped to again produce distorted images; half of the images - are then horizontally mirrored. - - Args: - image: input image. - Returns: - distorted image. - """ - image = tf.image.resize_image_with_crop_or_pad( - image, self.height + 8, self.width + 8) - distorted_image = tf.random_crop(image, - [self.height, self.width, self.depth]) - # Randomly flip the image horizontally. 
- distorted_image = tf.image.random_flip_left_right(distorted_image) - if self.summary_verbosity >= 3: - tf.summary.image('distorted_image', tf.expand_dims(distorted_image, 0)) - return distorted_image - - def _eval_image(self, image): - """Get the image for model evaluation.""" - distorted_image = tf.image.resize_image_with_crop_or_pad( - image, self.width, self.height) - if self.summary_verbosity >= 3: - tf.summary.image('cropped.image', tf.expand_dims(distorted_image, 0)) - return distorted_image - - def preprocess(self, raw_image): - """Preprocessing raw image.""" - if self.summary_verbosity >= 3: - tf.summary.image('raw.image', tf.expand_dims(raw_image, 0)) - if self.train and self.distortions: - image = self._distort_image(raw_image) - else: - image = self._eval_image(raw_image) - normalized = normalized_image(image) - return tf.cast(normalized, self.dtype) - - def minibatch(self, - dataset, - subset, - params, - shift_ratio=-1): - # TODO(jsimsa): Implement datasets code path - del shift_ratio, params - with tf.name_scope('batch_processing'): - all_images, all_labels = dataset.read_data_files(subset) - all_images = tf.constant(all_images) - all_labels = tf.constant(all_labels) - input_image, input_label = tf.train.slice_input_producer( - [all_images, all_labels]) - input_image = tf.cast(input_image, self.dtype) - input_label = tf.cast(input_label, tf.int32) - # Ensure that the random shuffling has good mixing properties. - min_fraction_of_examples_in_queue = 0.4 - min_queue_examples = int(dataset.num_examples_per_epoch(subset) * - min_fraction_of_examples_in_queue) - raw_images, raw_labels = tf.train.shuffle_batch( - [input_image, input_label], batch_size=self.batch_size, - capacity=min_queue_examples + 3 * self.batch_size, - min_after_dequeue=min_queue_examples) - - images = [[] for i in range(self.num_splits)] - labels = [[] for i in range(self.num_splits)] - - # Create a list of size batch_size, each containing one image of the - # batch. Without the unstack call, raw_images[i] would still access the - # same image via a strided_slice op, but would be slower. - raw_images = tf.unstack(raw_images, axis=0) - raw_labels = tf.unstack(raw_labels, axis=0) - for i in xrange(self.batch_size): - split_index = i % self.num_splits - # The raw image read from data has the format [depth, height, width] - # reshape to the format returned by minibatch. - raw_image = tf.reshape(raw_images[i], - [dataset.depth, dataset.height, dataset.width]) - raw_image = tf.transpose(raw_image, [1, 2, 0]) - image = self.preprocess(raw_image) - images[split_index].append(image) - - labels[split_index].append(raw_labels[i]) - - for split_index in xrange(self.num_splits): - images[split_index] = tf.parallel_stack(images[split_index]) - labels[split_index] = tf.parallel_stack(labels[split_index]) - return images, labels - - -class COCOPreprocessor(BaseImagePreprocessor): - """Preprocessor for COCO dataset input images, boxes, and labels.""" - - def minibatch(self, - dataset, - subset, - params, - shift_ratio=-1): - del shift_ratio # Not used when using datasets instead of data_flow_ops - with tf.name_scope('batch_processing'): - ds = self.create_dataset( - self.batch_size, self.num_splits, self.batch_size_per_split, - dataset, subset, self.train, params.datasets_repeat_cached_sample) - ds_iterator = self.create_iterator(ds) - - # Training data: 4 tuple - # Validation data: 5 tuple - # See get_input_shapes in models/ssd_model.py for details. 
- input_len = 4 if subset == 'train' else 5 - input_lists = [[None for _ in range(self.num_splits)] - for _ in range(input_len)] - for d in xrange(self.num_splits): - input_list = ds_iterator.get_next() - for i in range(input_len): - input_lists[i][d] = input_list[i] - return input_lists - - def preprocess(self, data): - try: - import ssd_dataloader # pylint: disable=g-import-not-at-top - import ssd_constants # pylint: disable=g-import-not-at-top - from object_detection.core import preprocessor # pylint: disable=g-import-not-at-top - except ImportError: - raise ImportError('To use the COCO dataset, you must clone the ' - 'repo https://github.com/tensorflow/models and add ' - 'tensorflow/models and tensorflow/models/research to ' - 'the PYTHONPATH, and compile the protobufs by ' - 'following https://github.com/tensorflow/models/blob/' - 'master/research/object_detection/g3doc/installation.md' - '#protobuf-compilation') - image_buffer = data['image_buffer'] - boxes = data['groundtruth_boxes'] - classes = tf.reshape(data['groundtruth_classes'], [-1, 1]) - source_id = tf.string_to_number(data['source_id']) - raw_shape = data['raw_shape'] - - ssd_encoder = ssd_dataloader.Encoder() - - # Only 80 of the 90 COCO classes are used. - class_map = tf.convert_to_tensor(ssd_constants.CLASS_MAP) - classes = tf.gather(class_map, classes) - classes = tf.cast(classes, dtype=tf.float32) - - if self.train: - image, boxes, classes = ssd_dataloader.ssd_decode_and_crop( - image_buffer, boxes, classes, raw_shape) - # ssd_crop resizes and returns image of dtype float32 and does not change - # its range (i.e., value in between 0--255). Divide by 255. converts it - # to [0, 1] range. Not doing this before cropping to avoid dtype cast - # (which incurs additional memory copy). - image /= 255. - - image, boxes = preprocessor.random_horizontal_flip( - image=image, boxes=boxes) - # Random horizontal flip probability is 50% - # See https://github.com/tensorflow/models/blob/master/research/object_detection/core/preprocessor.py # pylint: disable=line-too-long - - image = ssd_dataloader.color_jitter( - image, brightness=0.125, contrast=0.5, saturation=0.5, hue=0.05) - image = ssd_dataloader.normalize_image(image) - image = tf.cast(image, self.dtype) - - encoded_returns = ssd_encoder.encode_labels(boxes, classes) - encoded_classes, encoded_boxes, num_matched_boxes = encoded_returns - - # Shape of image: [width, height, channel] - # Shape of encoded_boxes: [NUM_SSD_BOXES, 4] - # Shape of encoded_classes: [NUM_SSD_BOXES, 1] - # Shape of num_matched_boxes: [1] - return (image, encoded_boxes, encoded_classes, num_matched_boxes) - - else: - image = tf.image.decode_jpeg(image_buffer) - image = tf.image.resize_images( - image, size=(ssd_constants.IMAGE_SIZE, ssd_constants.IMAGE_SIZE)) - # resize_image returns image of dtype float32 and does not change its - # range. Divide by 255 to convert image to [0, 1] range. - image /= 255. 
- - image = ssd_dataloader.normalize_image(image) - image = tf.cast(image, self.dtype) - - def trim_and_pad(inp_tensor): - """Limit the number of boxes, and pad if necessary.""" - inp_tensor = inp_tensor[:ssd_constants.MAX_NUM_EVAL_BOXES] - num_pad = ssd_constants.MAX_NUM_EVAL_BOXES - tf.shape(inp_tensor)[0] - inp_tensor = tf.pad(inp_tensor, [[0, num_pad], [0, 0]]) - return tf.reshape(inp_tensor, [ssd_constants.MAX_NUM_EVAL_BOXES, - inp_tensor.get_shape()[1]]) - - boxes, classes = trim_and_pad(boxes), trim_and_pad(classes) - - # Shape of boxes: [MAX_NUM_EVAL_BOXES, 4] - # Shape of classes: [MAX_NUM_EVAL_BOXES, 1] - # Shape of source_id: [] (scalar tensor) - # Shape of raw_shape: [3] - return (image, boxes, classes, source_id, raw_shape) - - def create_dataset(self, - batch_size, - num_splits, - batch_size_per_split, - dataset, - subset, - train, - datasets_repeat_cached_sample, - num_threads=None, - datasets_use_caching=False, - datasets_parallel_interleave_cycle_length=None, - datasets_sloppy_parallel_interleave=False, - datasets_parallel_interleave_prefetch=None): - """Creates a dataset for the benchmark.""" - try: - import ssd_dataloader # pylint: disable=g-import-not-at-top - except ImportError: - raise ImportError('To use the COCO dataset, you must clone the ' - 'repo https://github.com/tensorflow/models and add ' - 'tensorflow/models and tensorflow/models/research to ' - 'the PYTHONPATH, and compile the protobufs by ' - 'following https://github.com/tensorflow/models/blob/' - 'master/research/object_detection/g3doc/installation.md' - '#protobuf-compilation') - assert self.supports_datasets() - - glob_pattern = dataset.tf_record_pattern(subset) - file_names = gfile.Glob(glob_pattern) - if not file_names: - raise ValueError('Found no files in --data_dir matching: {}' - .format(glob_pattern)) - - ds = tf.data.TFRecordDataset.list_files(file_names) - # TODO(haoyuzhang): Enable map+filter fusion after cl/218399112 in release - # options = tf.data.Options() - # options.experimental_map_and_filter_fusion = True - # ds = ds.with_options(options) - - ds = ds.apply( - interleave_ops.parallel_interleave( - tf.data.TFRecordDataset, - cycle_length=datasets_parallel_interleave_cycle_length or 10, - sloppy=datasets_sloppy_parallel_interleave)) - if datasets_repeat_cached_sample: - # Repeat a single sample element indefinitely to emulate memory-speed IO. 
-      ds = ds.take(1).cache().repeat()
-    ds = ds.prefetch(buffer_size=batch_size)
-    if datasets_use_caching:
-      ds = ds.cache()
-    if train:
-      ds = ds.apply(tf.data.experimental.shuffle_and_repeat(buffer_size=10000))
-    else:
-      ds = ds.repeat()
-
-    ds = ds.map(ssd_dataloader.ssd_parse_example_proto, num_parallel_calls=64)
-    ds = ds.filter(
-        lambda data: tf.greater(tf.shape(data['groundtruth_boxes'])[0], 0))
-    ds = ds.apply(
-        batching.map_and_batch(
-            map_func=self.preprocess,
-            batch_size=batch_size_per_split,
-            num_parallel_batches=num_splits,
-            drop_remainder=train))
-    ds = ds.prefetch(buffer_size=num_splits)
-    if num_threads:
-      ds = threadpool.override_threadpool(
-          ds,
-          threadpool.PrivateThreadPool(
-              num_threads, display_name='input_pipeline_thread_pool'))
-    return ds
-
-  def supports_datasets(self):
-    return True
-
-
-class LibrispeechPreprocessor(InputPreprocessor):
-  """Preprocessor for the LibriSpeech dataset."""
-
-  def __init__(self, batch_size, output_shapes, num_splits, dtype, train,
-               **kwargs):
-    del kwargs
-    super(LibrispeechPreprocessor, self).__init__(batch_size, output_shapes)
-    self.num_splits = num_splits
-    self.dtype = dtype
-    self.is_train = train
-    if self.batch_size % self.num_splits != 0:
-      raise ValueError(('batch_size must be a multiple of num_splits: '
-                        'batch_size %d, num_splits: %d') % (self.batch_size,
-                                                            self.num_splits))
-    self.batch_size_per_split = self.batch_size // self.num_splits
-
-  def create_dataset(self,
-                     batch_size,
-                     num_splits,
-                     batch_size_per_split,
-                     dataset,
-                     subset,
-                     train,
-                     datasets_repeat_cached_sample,
-                     num_threads=None,
-                     datasets_use_caching=False,
-                     datasets_parallel_interleave_cycle_length=None,
-                     datasets_sloppy_parallel_interleave=False,
-                     datasets_parallel_interleave_prefetch=None):
-    """Creates a dataset for the benchmark."""
-    # TODO(laigd): currently the only difference between this and the one in
-    # BaseImagePreprocessor is, this uses map() and padded_batch() while the
-    # latter uses tf.data.experimental.map_and_batch(). Try to merge them.
-    assert self.supports_datasets()
-    glob_pattern = dataset.tf_record_pattern(subset)
-    file_names = gfile.Glob(glob_pattern)
-    if not file_names:
-      raise ValueError('Found no files in --data_dir matching: {}'
-                       .format(glob_pattern))
-    ds = tf.data.TFRecordDataset.list_files(file_names)
-    ds = ds.apply(
-        tf.data.experimental.parallel_interleave(
-            tf.data.TFRecordDataset,
-            cycle_length=datasets_parallel_interleave_cycle_length or 10,
-            sloppy=datasets_sloppy_parallel_interleave,
-            prefetch_input_elements=datasets_parallel_interleave_prefetch))
-    if datasets_repeat_cached_sample:
-      # Repeat a single sample element indefinitely to emulate memory-speed IO.
-      ds = ds.take(1).cache().repeat()
-    counter = tf.data.Dataset.range(batch_size)
-    counter = counter.repeat()
-    ds = tf.data.Dataset.zip((ds, counter))
-    ds = ds.prefetch(buffer_size=batch_size)
-    if datasets_use_caching:
-      ds = ds.cache()
-    if train:
-      ds = ds.apply(tf.data.experimental.shuffle_and_repeat(buffer_size=10000))
-    else:
-      ds = ds.repeat()
-    ds = ds.map(map_func=self.parse_and_preprocess,
-                num_parallel_calls=batch_size_per_split*num_splits)
-    ds = ds.padded_batch(
-        batch_size=batch_size_per_split,
-        padded_shapes=tuple([
-            tf.TensorShape(output_shape[1:])
-            for output_shape in self.output_shapes
-        ]),
-        drop_remainder=True)
-    ds = ds.prefetch(buffer_size=num_splits)
-    if num_threads:
-      ds = threadpool.override_threadpool(
-          ds,
-          threadpool.PrivateThreadPool(
-              num_threads, display_name='input_pipeline_thread_pool'))
-    return ds
-
-  def minibatch(self, dataset, subset, params, shift_ratio=-1):
-    assert params.use_datasets
-    # TODO(laigd): unify this with CNNModel's minibatch()
-    # TODO(laigd): in distributed mode we use shift_ratio so different workers
-    # won't work on same inputs, so we should respect that.
-    del shift_ratio
-    with tf.name_scope('batch_processing'):
-      ds = self.create_dataset(
-          self.batch_size,
-          self.num_splits,
-          self.batch_size_per_split,
-          dataset,
-          subset,
-          self.is_train,
-          datasets_repeat_cached_sample=params.datasets_repeat_cached_sample,
-          num_threads=params.datasets_num_private_threads,
-          datasets_use_caching=params.datasets_use_caching,
-          datasets_parallel_interleave_cycle_length=(
-              params.datasets_parallel_interleave_cycle_length),
-          datasets_sloppy_parallel_interleave=(
-              params.datasets_sloppy_parallel_interleave),
-          datasets_parallel_interleave_prefetch=(
-              params.datasets_parallel_interleave_prefetch))
-      ds_iterator = self.create_iterator(ds)
-
-      # The four lists are: input spectrogram feature, labels, input lengths,
-      # label lengths
-      input_lists = [[None for _ in range(self.num_splits)] for _ in range(4)]
-      for d in xrange(self.num_splits):
-        input_list = ds_iterator.get_next()
-        for i in range(4):
-          input_lists[i][d] = input_list[i]
-
-      assert self.output_shapes == [
-          input_lists[i][0].shape.as_list() for i in range(4)
-      ]
-      return tuple(input_lists)
-
-  def supports_datasets(self):
-    return True
-
-  def parse_and_preprocess(self, value, batch_position):
-    """Parse a TFRecord."""
-    del batch_position
-    assert self.supports_datasets()
-    context_features = {
-        'labels': tf.VarLenFeature(dtype=tf.int64),
-        'input_length': tf.FixedLenFeature([], dtype=tf.int64),
-        'label_length': tf.FixedLenFeature([], dtype=tf.int64),
-    }
-    sequence_features = {
-        'features': tf.FixedLenSequenceFeature([161], dtype=tf.float32)
-    }
-    context_parsed, sequence_parsed = tf.parse_single_sequence_example(
-        serialized=value,
-        context_features=context_features,
-        sequence_features=sequence_features,
-    )
-
-    return [
-        # Input
-        tf.expand_dims(sequence_parsed['features'], axis=2),
-        # Label
-        tf.cast(
-            tf.reshape(
-                tf.sparse_tensor_to_dense(context_parsed['labels']), [-1]),
-            dtype=tf.int32),
-        # Input length
-        tf.cast(
-            tf.reshape(context_parsed['input_length'], [1]),
-            dtype=tf.int32),
-        # Label length
-        tf.cast(
-            tf.reshape(context_parsed['label_length'], [1]),
-            dtype=tf.int32),
-    ]
diff --git a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/ssd_constants.py b/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/ssd_constants.py
deleted file mode 100644
index 77fa0149b..000000000
--- 
a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/ssd_constants.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright 2018 Google. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Central location for all constants related to MLPerf SSD.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -# ============================================================================== -# == Model ===================================================================== -# ============================================================================== -IMAGE_SIZE = 300 - -# TODO(taylorrobie): MLPerf uses 80, but COCO documents 90. (RetinaNet uses 90) -# Update(taylorrobie): Labels > 81 show up in the pipeline. This will need to -# be resolved. -NUM_CLASSES = 81 # Including "no class". Not all COCO classes are used. - -# Note: Zero is special. (Background class) CLASS_INV_MAP[0] must be zero. -CLASS_INV_MAP = ( - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, - 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, - 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, - 88, 89, 90) -_MAP = {j: i for i, j in enumerate(CLASS_INV_MAP)} -CLASS_MAP = tuple(_MAP.get(i, -1) for i in range(max(CLASS_INV_MAP) + 1)) - -NUM_SSD_BOXES = 8732 - -RESNET_DEPTH = 34 - -"""SSD specific""" -MIN_LEVEL = 3 -MAX_LEVEL = 8 - -FEATURE_SIZES = (38, 19, 10, 5, 3, 1) -STEPS = (8, 16, 32, 64, 100, 300) - -# https://github.com/amdegroot/ssd.pytorch/blob/master/data/config.py -SCALES = (21, 45, 99, 153, 207, 261, 315) -ASPECT_RATIOS = ((2,), (2, 3), (2, 3), (2, 3), (2,), (2,)) -NUM_DEFAULTS = (4, 6, 6, 6, 4, 4) -NUM_DEFAULTS_BY_LEVEL = {3: 4, 4: 6, 5: 6, 6: 6, 7: 4, 8: 4} -SCALE_XY = 0.1 -SCALE_HW = 0.2 -BOX_CODER_SCALES = (1 / SCALE_XY, 1 / SCALE_XY, 1 / SCALE_HW, 1 / SCALE_HW) -MATCH_THRESHOLD = 0.5 - -# https://discuss.pytorch.org/t/how-to-preprocess-input-for-pre-trained-networks/683 -NORMALIZATION_MEAN = (0.485, 0.456, 0.406) -NORMALIZATION_STD = (0.229, 0.224, 0.225) - -# SSD Cropping -NUM_CROP_PASSES = 50 -CROP_MIN_IOU_CHOICES = (0, 0.1, 0.3, 0.5, 0.7, 0.9) -P_NO_CROP_PER_PASS = 1 / (len(CROP_MIN_IOU_CHOICES) + 1) - -# Hard example mining -NEGS_PER_POSITIVE = 3 - -# Batch normalization -BATCH_NORM_DECAY = 0.997 -BATCH_NORM_EPSILON = 1e-4 - - -# ============================================================================== -# == Optimizer ================================================================= -# ============================================================================== -LEARNING_RATE_SCHEDULE = ( - (0, 1e-3), - (160000, 1e-4), - (200000, 1e-5), -) -MOMENTUM = 0.9 -WEIGHT_DECAY = 5e-4 - - -# ============================================================================== -# == Keys 
====================================================================== -# ============================================================================== -BOXES = "boxes" -CLASSES = "classes" -NUM_MATCHED_BOXES = "num_matched_boxes" -IMAGE = "image" -SOURCE_ID = "source_id" -RAW_SHAPE = "raw_shape" -PRED_BOXES = "pred_boxes" -PRED_SCORES = "pred_scores" - - -# ============================================================================== -# == Evaluation ================================================================ -# ============================================================================== - -# Note: This is based on a batch size of 32 -# https://github.com/mlperf/reference/blob/master/single_stage_detector/ssd/train.py#L21-L37 -CHECKPOINT_FREQUENCY = 20000 -MAX_NUM_EVAL_BOXES = 200 -OVERLAP_CRITERIA = 0.5 # Used for nonmax supression -MIN_SCORE = 0.05 # Minimum score to be considered during evaluation. -DUMMY_SCORE = -1e5 # If no boxes are matched. - -ANNOTATION_FILE = "annotations/instances_val2017.json" -COCO_NUM_TRAIN_IMAGES = 118287 -COCO_NUM_VAL_IMAGES = 4952 diff --git a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/ssd_dataloader.py b/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/ssd_dataloader.py deleted file mode 100644 index 2f291fd85..000000000 --- a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/ssd_dataloader.py +++ /dev/null @@ -1,382 +0,0 @@ -# Copyright 2018 Google. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Data loader and processing.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import itertools as it -import math - -import numpy as np -import tensorflow as tf - -from object_detection.box_coders import faster_rcnn_box_coder -from object_detection.core import box_list -from object_detection.core import region_similarity_calculator -from object_detection.core import target_assigner -from object_detection.matchers import argmax_matcher -import ssd_constants - - -class DefaultBoxes(object): - """Default bounding boxes for 300x300 5 layer SSD. - - Default bounding boxes generation follows the order of (W, H, anchor_sizes). - Therefore, the tensor converted from DefaultBoxes has a shape of - [anchor_sizes, H, W, 4]. The last dimension is the box coordinates; 'ltrb' - is [ymin, xmin, ymax, xmax] while 'xywh' is [cy, cx, h, w]. 
- """ - - def __init__(self): - fk = ssd_constants.IMAGE_SIZE / np.array(ssd_constants.STEPS) - - self.default_boxes = [] - # size of feature and number of feature - for idx, feature_size in enumerate(ssd_constants.FEATURE_SIZES): - sk1 = ssd_constants.SCALES[idx] / ssd_constants.IMAGE_SIZE - sk2 = ssd_constants.SCALES[idx+1] / ssd_constants.IMAGE_SIZE - sk3 = math.sqrt(sk1*sk2) - all_sizes = [(sk1, sk1), (sk3, sk3)] - - for alpha in ssd_constants.ASPECT_RATIOS[idx]: - w, h = sk1 * math.sqrt(alpha), sk1 / math.sqrt(alpha) - all_sizes.append((w, h)) - all_sizes.append((h, w)) - - assert len(all_sizes) == ssd_constants.NUM_DEFAULTS[idx] - - for w, h in all_sizes: - for i, j in it.product(range(feature_size), repeat=2): - cx, cy = (j + 0.5) / fk[idx], (i + 0.5) / fk[idx] - box = tuple(np.clip(k, 0, 1) for k in (cy, cx, h, w)) - self.default_boxes.append(box) - - assert len(self.default_boxes) == ssd_constants.NUM_SSD_BOXES - - def to_ltrb(cy, cx, h, w): - return cy - h / 2, cx - w / 2, cy + h / 2, cx + w / 2 - - # For IoU calculation - self.default_boxes_ltrb = tuple(to_ltrb(*i) for i in self.default_boxes) - - def __call__(self, order='ltrb'): - if order == 'ltrb': return self.default_boxes_ltrb - if order == 'xywh': return self.default_boxes - - -def calc_iou_tensor(boxes1, boxes2): - """Calculation of IoU based on two boxes tensor. - - Reference to https://github.com/kuangliu/pytorch-ssd - - Args: - boxes1: shape (N, 4), four coordinates of N boxes - boxes2: shape (M, 4), four coordinates of M boxes - Returns: - IoU: shape (N, M), IoU of the i-th box in `boxes1` and j-th box in `boxes2` - """ - b1_left, b1_top, b1_right, b1_bottom = tf.split(boxes1, 4, axis=1) - b2_left, b2_top, b2_right, b2_bottom = tf.split(boxes2, 4, axis=1) - - # Shape of intersect_* (N, M) - intersect_left = tf.maximum(b1_left, tf.transpose(b2_left)) - intersect_top = tf.maximum(b1_top, tf.transpose(b2_top)) - intersect_right = tf.minimum(b1_right, tf.transpose(b2_right)) - intersect_bottom = tf.minimum(b1_bottom, tf.transpose(b2_bottom)) - - boxes1_area = (b1_right - b1_left) * (b1_bottom - b1_top) - boxes2_area = (b2_right - b2_left) * (b2_bottom - b2_top) - - intersect = tf.multiply(tf.maximum((intersect_right - intersect_left), 0), - tf.maximum((intersect_bottom - intersect_top), 0)) - union = boxes1_area + tf.transpose(boxes2_area) - intersect - iou = intersect / union - - return iou - - -def ssd_parse_example_proto(example_serialized): - """Parses an Example proto containing a training example of an image. - - Each Example proto contains the following fields that we care about: - - image/encoded: - image/source_id: tf.string - image/height: tf.int64 - image/width: tf.int64 - image/object/bbox/xmin: tf.VarLenFeature(tf.float32) - image/object/bbox/xmax: tf.VarLenFeature(tf.float32) - image/object/bbox/ymin: tf.VarLenFeature(tf.float32 - image/object/bbox/ymax: tf.VarLenFeature(tf.float32) - image/object/class/label: tf.VarLenFeature(tf.int64) - image/object/class/text: tf.VarLenFeature(tf.string) - - Complete decoder can be found in: - https://github.com/tensorflow/models/blob/master/research/object_detection/data_decoders/tf_example_decoder.py - - Args: - example_serialized: scalar Tensor tf.string containing a serialized - Example protocol buffer. - - Returns: - A dictionary with the following key-values: - image_buffer: Tensor tf.string containing the contents of a JPEG file. - groundtruth_boxes: Tensor tf.float32 of shape [num_boxes, 4], containing - coordinates of object bounding boxes. 
- groundtruth_classeS: Tensor tf.int64 of shape [num_boxes, 1], containing - class labels of objects. - source_id: unique image identifier. - raw_shape: [height, width, 3]. - """ - feature_map = { - 'image/encoded': tf.FixedLenFeature( - (), dtype=tf.string, default_value=''), - 'image/source_id': tf.FixedLenFeature((), tf.string, default_value=''), - 'image/height': tf.FixedLenFeature((), tf.int64, default_value=1), - 'image/width': tf.FixedLenFeature((), tf.int64, default_value=1), - 'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32), - 'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32), - 'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32), - 'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32), - 'image/object/class/label': tf.VarLenFeature(dtype=tf.int64), - } - features = tf.parse_single_example(example_serialized, feature_map) - - xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 1) - ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 1) - xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 1) - ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 1) - - image_buffer = features['image/encoded'] - # Bounding box coordinates should be in ltrb order - boxes = tf.concat([ymin, xmin, ymax, xmax], 1) - classes = tf.expand_dims(features['image/object/class/label'].values, 1) - source_id = features['image/source_id'] - raw_shape = tf.stack([features['image/height'], features['image/width'], 3]) - - return {'image_buffer': image_buffer, - 'groundtruth_boxes': boxes, - 'groundtruth_classes': classes, - 'source_id': source_id, - 'raw_shape': raw_shape} - - -def ssd_decode_and_crop(image_buffer, boxes, classes, raw_shape): - """Crop image randomly and decode the cropped region. - - This function will crop an image to meet the following requirements: - 1. height to width ratio between 0.5 and 2; - 2. IoUs of some boxes exceed specified threshold; - 3. At least one box center is in the cropped region. - We defer the jpeg decoding task until after the crop to avoid wasted work. - - Reference: https://github.com/chauhan-utk/ssd.DomainAdaptation - - Args: - image_buffer: Tensor tf.string containing the contents of a JPEG file. - boxes: Tensor tf.float32 of shape [num_boxes, 4], containing coordinates of - object bounding boxes. - classes: Tensor tf.int64 of shape [num_boxes, 1], containing class labels - of objects. - raw_shape: [height, width, 3]. - - Returns: - resized_image: decoded, cropped, and resized image Tensor tf.float32 of - shape [ssd_constants.IMAGE_SIZE, ssd_constants.IMAGE_SIZE, 3], value - range 0--255. - cropped_boxes: box coordinates for objects in the cropped region. - cropped_classes: class labels for objects in the cropped region. 
- """ - - num_boxes = tf.shape(boxes)[0] - - def no_crop_check(): - return (tf.random_uniform(shape=(), minval=0, maxval=1, dtype=tf.float32) - < ssd_constants.P_NO_CROP_PER_PASS) - - def no_crop_proposal(): - return ( - tf.ones((), tf.bool), - tf.convert_to_tensor([0, 0, 1, 1], dtype=tf.float32), - tf.ones((num_boxes,), tf.bool), - ) - - def crop_proposal(): - rand_vec = lambda minval, maxval: tf.random_uniform( - shape=(ssd_constants.NUM_CROP_PASSES, 1), minval=minval, maxval=maxval, - dtype=tf.float32) - - width, height = rand_vec(0.3, 1), rand_vec(0.3, 1) - left, top = rand_vec(0, 1-width), rand_vec(0, 1-height) - - right = left + width - bottom = top + height - - ltrb = tf.concat([left, top, right, bottom], axis=1) - - min_iou = tf.random_shuffle(ssd_constants.CROP_MIN_IOU_CHOICES)[0] - ious = calc_iou_tensor(ltrb, boxes) - - # discard any bboxes whose center not in the cropped image - xc, yc = [tf.tile(0.5 * (boxes[:, i + 0] + boxes[:, i + 2])[tf.newaxis, :], - (ssd_constants.NUM_CROP_PASSES, 1)) for i in range(2)] - - masks = tf.reduce_all(tf.stack([ - tf.greater(xc, tf.tile(left, (1, num_boxes))), - tf.less(xc, tf.tile(right, (1, num_boxes))), - tf.greater(yc, tf.tile(top, (1, num_boxes))), - tf.less(yc, tf.tile(bottom, (1, num_boxes))), - ], axis=2), axis=2) - - # Checks of whether a crop is valid. - valid_aspect = tf.logical_and(tf.less(height/width, 2), - tf.less(width/height, 2)) - valid_ious = tf.reduce_all(tf.greater(ious, min_iou), axis=1, keepdims=True) - valid_masks = tf.reduce_any(masks, axis=1, keepdims=True) - - valid_all = tf.cast(tf.reduce_all(tf.concat( - [valid_aspect, valid_ious, valid_masks], axis=1), axis=1), tf.int32) - - # One indexed, as zero is needed for the case of no matches. - index = tf.range(1, 1 + ssd_constants.NUM_CROP_PASSES, dtype=tf.int32) - - # Either one-hot, or zeros if there is no valid crop. - selection = tf.equal(tf.reduce_max(index * valid_all), index) - - use_crop = tf.reduce_any(selection) - output_ltrb = tf.reduce_sum(tf.multiply(ltrb, tf.tile(tf.cast( - selection, tf.float32)[:, tf.newaxis], (1, 4))), axis=0) - output_masks = tf.reduce_any(tf.logical_and(masks, tf.tile( - selection[:, tf.newaxis], (1, num_boxes))), axis=0) - - return use_crop, output_ltrb, output_masks - - def proposal(*args): - return tf.cond( - pred=no_crop_check(), - true_fn=no_crop_proposal, - false_fn=crop_proposal, - ) - - _, crop_bounds, box_masks = tf.while_loop( - cond=lambda x, *_: tf.logical_not(x), - body=proposal, - loop_vars=[tf.zeros((), tf.bool), tf.zeros((4,), tf.float32), tf.zeros((num_boxes,), tf.bool)], - ) - - filtered_boxes = tf.boolean_mask(boxes, box_masks, axis=0) - - # Clip boxes to the cropped region. - filtered_boxes = tf.stack([ - tf.maximum(filtered_boxes[:, 0], crop_bounds[0]), - tf.maximum(filtered_boxes[:, 1], crop_bounds[1]), - tf.minimum(filtered_boxes[:, 2], crop_bounds[2]), - tf.minimum(filtered_boxes[:, 3], crop_bounds[3]), - ], axis=1) - - left = crop_bounds[0] - top = crop_bounds[1] - width = crop_bounds[2] - left - height = crop_bounds[3] - top - - cropped_boxes = tf.stack([ - (filtered_boxes[:, 0] - left) / width, - (filtered_boxes[:, 1] - top) / height, - (filtered_boxes[:, 2] - left) / width, - (filtered_boxes[:, 3] - top) / height, - ], axis=1) - - # crop_window containing integer coordinates of cropped region. A normalized - # coordinate value of y should be mapped to the image coordinate at - # y * (height - 1). 
- raw_shape = tf.cast(raw_shape, tf.float32) - crop_window = tf.stack([left * (raw_shape[0] - 1), - top * (raw_shape[1] - 1), - width * raw_shape[0], - height * raw_shape[1]]) - crop_window = tf.cast(crop_window, tf.int32) - - # Fused op only decodes the cropped portion of an image - cropped_image = tf.image.decode_and_crop_jpeg( - image_buffer, crop_window, channels=3) - - # Resize converts image dtype from uint8 to float32, without rescaling values. - resized_image = tf.image.resize_images( - cropped_image, [ssd_constants.IMAGE_SIZE, ssd_constants.IMAGE_SIZE]) - - cropped_classes = tf.boolean_mask(classes, box_masks, axis=0) - - return resized_image, cropped_boxes, cropped_classes - - -def color_jitter(image, brightness=0, contrast=0, saturation=0, hue=0): - """Distort the color of the image.""" - with tf.name_scope('distort_color'): - if brightness > 0: - image = tf.image.random_brightness(image, max_delta=brightness) - if contrast > 0: - image = tf.image.random_contrast( - image, lower=1-contrast, upper=1+contrast) - if saturation > 0: - image = tf.image.random_saturation( - image, lower=1-saturation, upper=1+saturation) - if hue > 0: - image = tf.image.random_hue(image, max_delta=hue) - return image - - -def normalize_image(image): - """Normalize the image to zero mean and unit variance. - - Args: - image: 3D tensor of type float32, value in [0, 1] - Returns: - image normalized by mean and stdev. - """ - image = tf.subtract(image, ssd_constants.NORMALIZATION_MEAN) - image = tf.divide(image, ssd_constants.NORMALIZATION_STD) - - return image - - -class Encoder(object): - """Encoder for SSD boxes and labels.""" - - def __init__(self): - similarity_calc = region_similarity_calculator.IouSimilarity() - matcher = argmax_matcher.ArgMaxMatcher( - matched_threshold=ssd_constants.MATCH_THRESHOLD, - unmatched_threshold=ssd_constants.MATCH_THRESHOLD, - negatives_lower_than_unmatched=True, - force_match_for_each_row=True) - - box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder( - scale_factors=ssd_constants.BOX_CODER_SCALES) - - self.default_boxes = DefaultBoxes()('ltrb') - self.default_boxes = box_list.BoxList( - tf.convert_to_tensor(self.default_boxes)) - self.assigner = target_assigner.TargetAssigner( - similarity_calc, matcher, box_coder) - - def encode_labels(self, gt_boxes, gt_labels): - target_boxes = box_list.BoxList(gt_boxes) - encoded_classes, _, encoded_boxes, _, matches = self.assigner.assign( - self.default_boxes, target_boxes, gt_labels) - num_matched_boxes = tf.reduce_sum( - tf.cast(tf.not_equal(matches.match_results, -1), tf.float32)) - return encoded_classes, encoded_boxes, num_matched_boxes diff --git a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/ssd_model.py b/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/ssd_model.py deleted file mode 100644 index c8d67c24d..000000000 --- a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/ssd_model.py +++ /dev/null @@ -1,171 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -# Copyright (c) 2019 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: EPL-2.0 -# - -# Copyright 2018 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - - -"""SSD300 Model Configuration. - -References: - Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, - Cheng-Yang Fu, Alexander C. Berg - SSD: Single Shot MultiBox Detector - arXiv:1512.02325 - -Ported from MLPerf reference implementation: - https://github.com/mlperf/reference/tree/ssd/single_stage_detector/ssd - -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import multiprocessing -import os -import re -import threading -import tensorflow as tf - -import ssd_constants - - -class SSD300Model(): - """Single Shot Multibox Detection (SSD) model for 300x300 image datasets.""" - - def __init__(self, data_dir, label_num=ssd_constants.NUM_CLASSES): - # For COCO dataset, 80 categories + 1 background = 81 labels - self.label_num = label_num - self.data_dir = data_dir - - # Collected predictions for eval stage. It maps each image id in eval - # dataset to a dict containing the following information: - # source_id: raw ID of image - # raw_shape: raw shape of image - # pred_box: encoded box coordinates of prediction - # pred_scores: scores of classes in prediction - self.predictions = {} - - # Global step when predictions are collected. - self.eval_global_step = 0 - - # Average precision. In asynchronous eval mode, this is the latest AP we - # get so far and may not be the results at current eval step. - self.eval_coco_ap = 0 - - # Process, queues, and thread for asynchronous evaluation. When enabled, - # create a separte process (async_eval_process) that continously pull - # intermediate results from the predictions queue (a multiprocessing queue), - # process them, and push final results into results queue (another - # multiprocessing queue). The main thread is responsible to push message - # into predictions queue, and start a separate thread to continuously pull - # messages from results queue to update final results. - # Message in predictions queue should be a tuple of two elements: - # (evaluation step, predictions) - # Message in results queue should be a tuple of two elements: - # (evaluation step, final results) - self.async_eval_process = None - self.async_eval_predictions_queue = None - self.async_eval_results_queue = None - self.async_eval_results_getter_thread = None - - # The MLPerf reference uses a starting lr of 1e-3 at bs=32. 
- self.base_lr_batch_size = 32 - - def skip_final_affine_layer(self): - return True - - def postprocess(self, results): - """Postprocess results returned from model.""" - try: - import coco_metric # pylint: disable=g-import-not-at-top - except ImportError: - raise ImportError('To use the COCO dataset, you must clone the ' - 'repo https://github.com/tensorflow/models and add ' - 'tensorflow/models and tensorflow/models/research to ' - 'the PYTHONPATH, and compile the protobufs by ' - 'following https://github.com/tensorflow/models/blob/' - 'master/research/object_detection/g3doc/installation.md' - '#protobuf-compilation ; To evaluate using COCO' - 'metric, download and install Python COCO API from' - 'https://github.com/cocodataset/cocoapi') - - pred_boxes = results[ssd_constants.PRED_BOXES] - pred_scores = results[ssd_constants.PRED_SCORES] - # TODO(haoyuzhang): maybe use these values for visualization. - # gt_boxes = results['gt_boxes'] - # gt_classes = results['gt_classes'] - source_id = results[ssd_constants.SOURCE_ID] - raw_shape = results[ssd_constants.RAW_SHAPE] - - # COCO evaluation requires processing COCO_NUM_VAL_IMAGES exactly once. Due - # to rounding errors (i.e., COCO_NUM_VAL_IMAGES % batch_size != 0), setting - # `num_eval_epochs` to 1 is not enough and will often miss some images. We - # expect user to set `num_eval_epochs` to >1, which will leave some unused - # images from previous steps in `predictions`. Here we check if we are doing - # eval at a new global step. - if results['global_step'] > self.eval_global_step: - self.eval_global_step = results['global_step'] - self.predictions.clear() - - for i, sid in enumerate(source_id): - self.predictions[int(sid)] = { - ssd_constants.PRED_BOXES: pred_boxes[i], - ssd_constants.PRED_SCORES: pred_scores[i], - ssd_constants.SOURCE_ID: source_id[i], - ssd_constants.RAW_SHAPE: raw_shape[i] - } - - # COCO metric calculates mAP only after a full epoch of evaluation. Return - # dummy results for top_N_accuracy to be compatible with benchmar_cnn.py. - if len(self.predictions) >= ssd_constants.COCO_NUM_VAL_IMAGES: - print('Got results for all {:d} eval examples. Calculate mAP...'.format( - ssd_constants.COCO_NUM_VAL_IMAGES)) - - annotation_file = os.path.join(self.data_dir, - ssd_constants.ANNOTATION_FILE) - # Size of predictions before decoding about 15--30GB, while size after - # decoding is 100--200MB. When using async eval mode, decoding takes - # 20--30 seconds of main thread time but is necessary to avoid OOM during - # inter-process communication. - decoded_preds = coco_metric.decode_predictions(self.predictions.values()) - self.predictions.clear() - - eval_results = coco_metric.compute_map(decoded_preds, annotation_file) - self.eval_coco_ap = eval_results['COCO/AP'] - ret = {'top_1_accuracy': self.eval_coco_ap, 'top_5_accuracy': 0.} - return ret - print('Got {:d} out of {:d} eval examples.' 
- ' Waiting for the remaining to calculate mAP...'.format( - len(self.predictions), ssd_constants.COCO_NUM_VAL_IMAGES)) - return {'top_1_accuracy': self.eval_coco_ap, 'top_5_accuracy': 0.} diff --git a/models/object_detection/tensorflow/ssd-resnet34/inference/int8/__init__.py b/models/object_detection/tensorflow/ssd-resnet34/inference/int8/__init__.py new file mode 100644 index 000000000..159180624 --- /dev/null +++ b/models/object_detection/tensorflow/ssd-resnet34/inference/int8/__init__.py @@ -0,0 +1,20 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + diff --git a/models/object_detection/tensorflow/ssd-resnet34/inference/int8/infer_detections.py b/models/object_detection/tensorflow/ssd-resnet34/inference/int8/infer_detections.py new file mode 100644 index 000000000..657469658 --- /dev/null +++ b/models/object_detection/tensorflow/ssd-resnet34/inference/int8/infer_detections.py @@ -0,0 +1,211 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +import tensorflow as tf +import time + +from argparse import ArgumentParser + +import benchmark_cnn +import datasets +import ssd_constants +from models import ssd_model +from preprocessing import COCOPreprocessor + +IMAGE_SIZE = 300 + +import os + +class ssd_resnet34_infer: + + def __init__(self): + arg_parser = ArgumentParser(description='Parse args') + + arg_parser.add_argument('-b', "--batch-size", + help="Specify the batch size. If this " \ + "parameter is not specified or is -1, the " \ + "largest ideal batch size for the model will " \ + "be used.", + dest="batch_size", type=int, default=-1) + + arg_parser.add_argument('-e', "--inter-op-parallelism-threads", + help='The number of inter-thread.', + dest='num_inter_threads', type=int, default=0) + + arg_parser.add_argument('-a', "--intra-op-parallelism-threads", + help='The number of intra-thread.', + dest='num_intra_threads', type=int, default=0) + + arg_parser.add_argument('-g', "--input-graph", + help='Specify the input graph.', + dest='input_graph') + + arg_parser.add_argument('-d', "--data-location", + help='Specify the location of the data. 
' + 'If this parameter is not specified, ' + 'the benchmark will use random/dummy data.', + dest="data_location", default=None) + + arg_parser.add_argument('-r', "--accuracy-only", + help='For accuracy measurement only.', + dest='accuracy_only', action='store_true') + + arg_parser.add_argument("--results-file-path", + help="File path for the inference results", + dest="results_file_path", default=None) + + # parse the arguments + self.args = arg_parser.parse_args() + + self.freeze_graph = self.load_graph(self.args.input_graph) + self.config = tf.ConfigProto() + self.config.intra_op_parallelism_threads = self.args.num_intra_threads + self.config.inter_op_parallelism_threads = self.args.num_inter_threads + + if self.args.batch_size == -1: + self.args.batch_size = 64 + + self.num_batches = (ssd_constants.COCO_NUM_VAL_IMAGES // self.args.batch_size) + \ + (ssd_constants.COCO_NUM_VAL_IMAGES % self.args.batch_size > 0) + + input_layer = 'input' + output_layers = ['v/stack', 'v/Softmax'] + self.input_tensor = self.freeze_graph.get_tensor_by_name(input_layer + ":0") + self.output_tensors = [self.freeze_graph.get_tensor_by_name(x + ":0") for x in output_layers] + + + def load_graph(self, frozen_graph_filename): + print('load graph from: ' + frozen_graph_filename) + with tf.gfile.GFile(frozen_graph_filename, "rb") as f: + graph_def = tf.GraphDef() + graph_def.ParseFromString(f.read()) + + # Then, we import the graph_def into a new Graph and returns it + with tf.Graph().as_default() as graph: + # Since we load everything in a new graph, this is not needed + tf.import_graph_def(graph_def, name='') + return graph + + def run_benchmark(self): + print("Inference with dummy data.") + with tf.Session(graph=self.freeze_graph, config=self.config) as sess: + + input_images = sess.run(tf.truncated_normal( + [self.args.batch_size, IMAGE_SIZE, IMAGE_SIZE, 3], + dtype=tf.float32, + stddev=10, + name='synthetic_images')) + + total_iter = 1000 + warmup_iter = 200 + ttime = 0.0 + + print('total iteration is {0}'.format(str(total_iter))) + print('warm up iteration is {0}'.format(str(warmup_iter))) + + for step in range(total_iter): + start_time = time.time() + _ = sess.run(self.output_tensors, {self.input_tensor: input_images}) + end_time = time.time() + + duration = end_time - start_time + if (step + 1) % 10 == 0: + print('steps = {0}, {1} sec'.format(str(step), str(duration))) + + if step + 1 > warmup_iter: + ttime += duration + + total_batches = total_iter - warmup_iter + print ('Batchsize: {0}'.format(str(self.args.batch_size))) + print ('Time spent per BATCH: {0:10.4f} ms'.format(ttime / total_batches * 1000)) + print ('Total samples/sec: {0:10.4f} samples/s'.format(total_batches * self.args.batch_size / ttime)) + + + def __get_input(self): + preprocessor = COCOPreprocessor( + batch_size=self.args.batch_size, + output_shapes=[[self.args.batch_size, IMAGE_SIZE, IMAGE_SIZE, 3]], + num_splits=1, + dtype=tf.float32, + train=False, + distortions=True, + resize_method=None, + shift_ratio=0 + ) + + class params: + datasets_repeat_cached_sample = False + + self.params = params() + self.dataset = datasets.create_dataset(self.args.data_location, 'coco') + + return preprocessor.minibatch( + self.dataset, + subset='validation', + params=self.params, + shift_ratio=0) + + + def accuracy_check(self): + print(self.args) + input_list = self.__get_input() + ds_init = tf.get_collection(tf.GraphKeys.TABLE_INITIALIZERS) + + ds_sess = tf.Session() + params = benchmark_cnn.make_params(data_dir=self.args.data_location) + self.model = 
ssd_model.SSD300Model(params=params) + + print("Inference for accuracy check.") + with tf.Session(graph=self.freeze_graph, config=self.config) as sess: + ds_sess.run(ds_init) + global_step = 0 + + for _ in range(self.num_batches): + results = {} + input_lists = ds_sess.run(input_list) + input_images = input_lists[0][0] + input_ids = input_lists[3][0] + input_raw_shapes = input_lists[4][0] + + result = sess.run(self.output_tensors, {self.input_tensor: input_images}) + # Make global_step available in results for postprocessing. + results['global_step'] = global_step + results[ssd_constants.SOURCE_ID] = input_ids + results[ssd_constants.RAW_SHAPE] = input_raw_shapes + + results[ssd_constants.PRED_BOXES] = result[0] + results[ssd_constants.PRED_SCORES] = result[1] + + results = self.model.postprocess(results) + + + + def run(self): + if self.args.accuracy_only: + self.accuracy_check() + else: + self.run_benchmark() + + + +if __name__ == "__main__": + infer = ssd_resnet34_infer() + infer.run() + From 1ecd87bebcfdcbd7e5522e112aba7f0643cb6cfa Mon Sep 17 00:00:00 2001 From: Niranjan Hasabnis Date: Thu, 23 May 2019 11:59:05 -0700 Subject: [PATCH 37/62] Enabling ResNet50v1.5 model for FP32 and INT8 (#309) * fixed docker build command in installation guide (#303) * Revert "fixed docker build command in installation guide (#303)" (#305) This reverts commit 6c17f8e45a09a6867328ee419791c204e1aaf0f1. * Adding ResNet50v1.5 model (and steps to get FP32 numbers) * Adding INT8 support * Addressing review comments This commit addresses review comments. It also adds unit tests. * Updating README for ResNet50v1.5 with latest perf numbers * Adding URL for INT8 model --- benchmarks/README.md | 1 + benchmarks/common/base_benchmark_util.py | 3 +- benchmarks/common/tensorflow/start.sh | 2 + .../tensorflow/resnet50v1_5/README.md | 295 ++++++++++++ .../tensorflow/resnet50v1_5/__init__.py | 19 + .../resnet50v1_5/inference/__init__.py | 19 + .../resnet50v1_5/inference/fp32/__init__.py | 19 + .../resnet50v1_5/inference/fp32/config.json | 7 + .../resnet50v1_5/inference/fp32/model_init.py | 115 +++++ .../resnet50v1_5/inference/int8/__init__.py | 19 + .../resnet50v1_5/inference/int8/config.json | 7 + .../resnet50v1_5/inference/int8/model_init.py | 123 +++++ .../tensorflow/resnet50v1_5/__init__.py | 20 + .../resnet50v1_5/inference/__init__.py | 20 + .../resnet50v1_5/inference/datasets.py | 96 ++++ .../eval_image_classifier_inference.py | 268 +++++++++++ .../resnet50v1_5/inference/preprocessing.py | 177 ++++++++ .../tensorflow/resnet50v1_5/int8/__init__.py | 20 + .../tensorflow/resnet50v1_5/int8/benchmark.py | 213 +++++++++ .../tensorflow/resnet50v1_5/int8/cnn_util.py | 51 +++ .../tensorflow/resnet50v1_5/int8/datasets.py | 114 +++++ .../int8/generate_calibration_data.py | 183 ++++++++ .../resnet50v1_5/int8/preprocessing.py | 419 ++++++++++++++++++ .../int8/preprocessing_benchmark.py | 173 ++++++++ .../tf_model_args/tf_resnet50v1_5_args.json | 40 ++ 25 files changed, 2422 insertions(+), 1 deletion(-) create mode 100644 benchmarks/image_recognition/tensorflow/resnet50v1_5/README.md create mode 100644 benchmarks/image_recognition/tensorflow/resnet50v1_5/__init__.py create mode 100644 benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/__init__.py create mode 100644 benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/fp32/__init__.py create mode 100644 benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/fp32/config.json create mode 100644 
benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/fp32/model_init.py create mode 100644 benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/__init__.py create mode 100644 benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/config.json create mode 100644 benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/model_init.py create mode 100644 models/image_recognition/tensorflow/resnet50v1_5/__init__.py create mode 100644 models/image_recognition/tensorflow/resnet50v1_5/inference/__init__.py create mode 100644 models/image_recognition/tensorflow/resnet50v1_5/inference/datasets.py create mode 100644 models/image_recognition/tensorflow/resnet50v1_5/inference/eval_image_classifier_inference.py create mode 100644 models/image_recognition/tensorflow/resnet50v1_5/inference/preprocessing.py create mode 100644 models/image_recognition/tensorflow/resnet50v1_5/int8/__init__.py create mode 100644 models/image_recognition/tensorflow/resnet50v1_5/int8/benchmark.py create mode 100644 models/image_recognition/tensorflow/resnet50v1_5/int8/cnn_util.py create mode 100644 models/image_recognition/tensorflow/resnet50v1_5/int8/datasets.py create mode 100644 models/image_recognition/tensorflow/resnet50v1_5/int8/generate_calibration_data.py create mode 100644 models/image_recognition/tensorflow/resnet50v1_5/int8/preprocessing.py create mode 100644 models/image_recognition/tensorflow/resnet50v1_5/int8/preprocessing_benchmark.py create mode 100644 tests/unit/common/tensorflow/tf_model_args/tf_resnet50v1_5_args.json diff --git a/benchmarks/README.md b/benchmarks/README.md index 25cea61b6..414e344e5 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -26,6 +26,7 @@ dependencies to be installed: | Image Recognition | TensorFlow | [MobileNet V1](https://arxiv.org/pdf/1704.04861.pdf) | Inference | [Int8](image_recognition/tensorflow/mobilenet_v1/README.md#int8-inference-instructions) [FP32](image_recognition/tensorflow/mobilenet_v1/README.md#fp32-inference-instructions) | | Image Recognition | TensorFlow | [ResNet 101](https://arxiv.org/pdf/1512.03385.pdf) | Inference | [Int8](image_recognition/tensorflow/resnet101/README.md#int8-inference-instructions) [FP32](image_recognition/tensorflow/resnet101/README.md#fp32-inference-instructions) | | Image Recognition | TensorFlow | [ResNet 50](https://arxiv.org/pdf/1512.03385.pdf) | Inference | [Int8](image_recognition/tensorflow/resnet50/README.md#int8-inference-instructions) [FP32](image_recognition/tensorflow/resnet50/README.md#fp32-inference-instructions) | +| Image Recognition | TensorFlow | [ResNet 50v1.5](https://github.com/tensorflow/models/tree/master/official/resnet) | Inference | [Int8](image_recognition/tensorflow/resnet50v1_5/README.md#int8-inference-instructions) [FP32](image_recognition/tensorflow/resnet50v1_5/README.md#fp32-inference-instructions) | | Image Recognition | TensorFlow | [SqueezeNet](https://arxiv.org/pdf/1602.07360.pdf) | Inference | [FP32](image_recognition/tensorflow/squeezenet/README.md#fp32-inference-instructions) | | Image Segmentation | TensorFlow | [Mask R-CNN](https://arxiv.org/pdf/1703.06870.pdf) | Inference | [FP32](image_segmentation/tensorflow/maskrcnn/README.md#fp32-inference-instructions) | | Image Segmentation | TensorFlow | [UNet](https://arxiv.org/pdf/1505.04597.pdf) | Inference | [FP32](image_segmentation/tensorflow/unet/README.md#fp32-inference-instructions) | diff --git a/benchmarks/common/base_benchmark_util.py b/benchmarks/common/base_benchmark_util.py index 
e4c92639d..1aefdebd0 100644
--- a/benchmarks/common/base_benchmark_util.py
+++ b/benchmarks/common/base_benchmark_util.py
@@ -228,7 +228,8 @@ def _validate_args(self):
                 raise ValueError("Number of cores exceeds system core number: {}".
                                  format(system_num_cores))
 
-        if args.output_results and (args.model_name != "resnet50" or args.precision != "fp32"):
+        if args.output_results and ((args.model_name != "resnet50" and
+                                     args.model_name != "resnet50v1_5") or args.precision != "fp32"):
             raise ValueError("--output-results is currently only supported for resnet50 FP32 inference.")
         elif args.output_results and (args.mode != "inference" or not args.data_location):
             raise ValueError("--output-results can only be used when running inference with a dataset.")
diff --git a/benchmarks/common/tensorflow/start.sh b/benchmarks/common/tensorflow/start.sh
index be2d30f5c..5884cfac9 100755
--- a/benchmarks/common/tensorflow/start.sh
+++ b/benchmarks/common/tensorflow/start.sh
@@ -891,6 +891,8 @@ elif [ ${MODEL_NAME} == "resnet101" ]; then
   resnet50_101_inceptionv3
 elif [ ${MODEL_NAME} == "resnet50" ]; then
   resnet50_101_inceptionv3
+elif [ ${MODEL_NAME} == "resnet50v1_5" ]; then
+  resnet50_101_inceptionv3
 elif [ ${MODEL_NAME} == "rfcn" ]; then
   rfcn
 elif [ ${MODEL_NAME} == "squeezenet" ]; then
diff --git a/benchmarks/image_recognition/tensorflow/resnet50v1_5/README.md b/benchmarks/image_recognition/tensorflow/resnet50v1_5/README.md
new file mode 100644
index 000000000..cc1f255a6
--- /dev/null
+++ b/benchmarks/image_recognition/tensorflow/resnet50v1_5/README.md
@@ -0,0 +1,295 @@
+# ResNet50 (v1.5)
+
+This document has instructions for how to run ResNet50 (v1.5) for the
+following precisions:
+* [Int8 inference](#int8-inference-instructions)
+* [FP32 inference](#fp32-inference-instructions)
+
+The original ResNet model has multiple versions that have shown better accuracy
+and/or throughput. As mentioned in TensorFlow's [official ResNet
+model page](https://github.com/tensorflow/models/tree/master/official/resnet), three different
+versions of the original ResNet model exist - ResNet50v1, ResNet50v1.5, and ResNet50v2.
+As a side note, ResNet50v1.5 is also in MLPerf's [cloud inference benchmark for
+image classification](https://github.com/mlperf/inference/tree/master/cloud/image_classification).
+
+## Int8 Inference Instructions
+
+1. Download the full ImageNet dataset and convert it to the TF records format.
+
+* Clone the tensorflow/models repository:
+```
+$ git clone https://github.com/tensorflow/models.git
+```
+The TensorFlow models repo provides
+[scripts and instructions](https://github.com/tensorflow/models/tree/master/research/slim#an-automated-script-for-processing-imagenet-data)
+to download, process, and convert the ImageNet dataset to the TF records format.
+
+* The ImageNet dataset directory location is only required to calculate the model accuracy.
+
+2. Download the pre-trained model.
+```
+wget https://storage.googleapis.com/intel-optimized-tensorflow/models/resnet50v1_5_int8_pretrained_model.pb
+```
+
+3. Clone the
+[intelai/models](https://github.com/intelai/models)
+repository:
+```
+$ git clone https://github.com/IntelAI/models.git
+```
+
+4. Run the inference script `launch_benchmark.py` with the appropriate parameters to evaluate the model performance and/or calculate the accuracy.
+The optimized ResNet50v1.5 model files are included in the [intelai/models](https://github.com/intelai/models) repo and
+located at `models/models/image_recognition/tensorflow/resnet50v1_5/`.
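+
+Before launching a benchmark, it can be useful to double-check that a downloaded
+frozen graph loads cleanly. The snippet below is only a sketch, assuming TensorFlow 1.x
+and the Int8 graph from step 2 in the current directory; the printed op names will
+vary by graph:
+```
+import tensorflow as tf
+
+# Read the frozen graph downloaded in step 2 (example path).
+graph_def = tf.GraphDef()
+with tf.gfile.GFile("resnet50v1_5_int8_pretrained_model.pb", "rb") as f:
+    graph_def.ParseFromString(f.read())
+
+with tf.Graph().as_default() as graph:
+    tf.import_graph_def(graph_def, name="")
+
+# List a few op names to confirm the import and to help locate the
+# input/output tensors that the inference scripts expect.
+for op in graph.get_operations()[:5]:
+    print(op.name)
+print("total ops: {}".format(len(graph.get_operations())))
+```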
+
+ The docker image (`intelaipg/intel-optimized-tensorflow:1.14`)
+ used in the commands below was built using
+ [TensorFlow](git@github.com:tensorflow/tensorflow.git) master for TensorFlow
+ version 1.14.
+
+* To calculate the model accuracy, the required parameters include: the `ImageNet` dataset location (from step 1),
+the pre-trained `resnet50v1_5_int8_pretrained_model.pb` input graph file (from step 2), and the `--accuracy-only` flag.
+```
+$ cd /home//models/benchmarks
+
+$ python launch_benchmark.py \
+    --data-location /home//dataset/FullImageNetData_directory \
+    --in-graph resnet50v1_5_int8_pretrained_model.pb \
+    --model-name resnet50v1_5 \
+    --framework tensorflow \
+    --precision int8 \
+    --mode inference \
+    --batch-size=100 \
+    --accuracy-only \
+    --docker-image intelaipg/intel-optimized-tensorflow:1.14
+```
+The log file is saved to the value of `--output-dir`.
+
+The tail of the log output when the benchmarking completes should look
+something like this:
+```
+Processed 49700 images. (Top1 accuracy, Top5 accuracy) = (0.7622, 0.9296)
+Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7621, 0.9295)
+Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7622, 0.9296)
+Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7623, 0.9296)
+Ran inference with batch size 100
+Log location outside container: {--output-dir value}/benchmark_resnet50_inference_int8_{timestamp}.log
+```
+
+* Evaluate the model performance: if you are only evaluating performance with dummy data,
+the `--data-location` flag is not needed. Otherwise, the `--data-location` argument needs
+to be specified. To calculate the model throughput in `images/sec`, the required parameters
+to run the inference script include:
+the pre-trained `resnet50v1_5_int8_pretrained_model.pb` input graph file (from step
+2), and the `--benchmark-only` flag. It is
+optional to specify the number of `warmup_steps` and `steps` as extra
+args, as shown in the command below. If these values are not specified,
+the script will default to `warmup_steps=10` and `steps=50`.
+
+```
+$ cd /home//models/benchmarks
+
+$ python launch_benchmark.py \
+    --in-graph resnet50v1_5_int8_pretrained_model.pb \
+    --model-name resnet50v1_5 \
+    --framework tensorflow \
+    --precision int8 \
+    --mode inference \
+    --batch-size=128 \
+    --benchmark-only \
+    --docker-image intelaipg/intel-optimized-tensorflow:1.14 \
+    -- warmup_steps=50 steps=500
+```
+The tail of the log output when the benchmarking completes should look
+something like this:
+```
+...
+Iteration 490: 0.249899 sec
+Iteration 500: 0.249110 sec
+Average time: 0.251280 sec
+Batch size = 128
+Throughput: 509.392 images/sec
+Ran inference with batch size 128
+Log location outside container: {--output-dir value}/benchmark_resnet50_inference_int8_{timestamp}.log
+```
+
+Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands
+to get additional debug output or change the default output location.
+
+## FP32 Inference Instructions
+
+1. Download the pre-trained model.
+
+To get a pre-trained model for ResNet50v1.5, run:
+```
+$ wget https://zenodo.org/record/2535873/files/resnet50_v1.pb
+```
+
+2. Clone the [intelai/models](https://github.com/intelai/models) repository:
+```
+$ git clone https://github.com/IntelAI/models.git
+```
+
+3. If running ResNet50v1.5 for accuracy, the ImageNet dataset will be
+required (if running benchmarking for throughput/latency, then dummy
+data will be used).
+
+The TensorFlow models repo provides
+[scripts and instructions](https://github.com/tensorflow/models/tree/master/research/slim#an-automated-script-for-processing-imagenet-data)
+to download, process, and convert the ImageNet dataset to the TF records format.
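+
+As a rough sanity check after conversion, you can count the records in the validation
+files before kicking off an accuracy run. This is only a sketch, assuming TensorFlow 1.x
+and converted files named `validation-*` (the directory below is an example; adjust it
+to your setup):
+```
+import glob
+import os
+
+import tensorflow as tf
+
+# Example location only; point this at your converted ImageNet directory.
+data_dir = "/tmp/imagenet_tfrecords"
+
+count = 0
+for path in sorted(glob.glob(os.path.join(data_dir, "validation-*"))):
+    # tf.python_io.tf_record_iterator is the TF 1.x reader for TFRecord files.
+    for _ in tf.python_io.tf_record_iterator(path):
+        count += 1
+
+# The full ImageNet validation set should contain 50000 images.
+print("validation records found: {}".format(count))
+```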
+
+4. Run the inference script `launch_benchmark.py` with the appropriate parameters to evaluate the model performance.
+The optimized ResNet50v1.5 model files are included in the [intelai/models](https://github.com/intelai/models) repo and
+located at `models/models/image_recognition/tensorflow/resnet50v1_5/`.
+If benchmarking uses dummy data for inference, the `--data-location` flag is not required. Otherwise,
+`--data-location` needs to point to the ImageNet dataset location.
+
+* To measure the model latency, set `--batch-size=1` and run the benchmark script as shown:
+```
+$ cd /home//models/benchmarks
+
+$ python launch_benchmark.py \
+    --in-graph resnet50_v1.pb \
+    --model-name resnet50v1_5 \
+    --framework tensorflow \
+    --precision fp32 \
+    --mode inference \
+    --batch-size=1 \
+    --socket-id 0 \
+    --docker-image intelaipg/intel-optimized-tensorflow:1.14
+```
+
+The log file is saved to the value of `--output-dir`.
+
+The tail of the log output when the benchmarking completes should look
+something like this:
+```
+Inference with dummy data.
+Iteration 1: 2.761204 sec
+Iteration 2: 0.011155 sec
+Iteration 3: 0.009289 sec
+...
+Iteration 48: 0.009315 sec
+Iteration 49: 0.009343 sec
+Iteration 50: 0.009278 sec
+Average time: 0.009481 sec
+Batch size = 1
+Latency: 9.481 ms
+Throughput: 105.470 images/sec
+lscpu_path_cmd = command -v lscpu
+lscpu located here: /usr/bin/lscpu
+Ran inference with batch size 1
+Log location outside container: {--output-dir value}/benchmark_resnet50_inference_fp32_{timestamp}.log
+```
+
+* To measure the model throughput, set `--batch-size=128` and run the benchmark script as shown:
+```
+$ cd /home//models/benchmarks
+
+$ python launch_benchmark.py \
+    --in-graph resnet50_v1.pb \
+    --model-name resnet50v1_5 \
+    --framework tensorflow \
+    --precision fp32 \
+    --mode inference \
+    --batch-size=128 \
+    --socket-id 0 \
+    --docker-image intelaipg/intel-optimized-tensorflow:1.14
+```
+
+The log file is saved to the value of `--output-dir`.
+
+The tail of the log output when the benchmarking completes should look
+something like this:
+```
+Inference with dummy data.
+Iteration 1: 3.013918 sec
+Iteration 2: 0.543498 sec
+Iteration 3: 0.536187 sec
+Iteration 4: 0.532568 sec
+...
+Iteration 46: 0.532444 sec
+Iteration 47: 0.535652 sec
+Iteration 48: 0.532158 sec
+Iteration 49: 0.538117 sec
+Iteration 50: 0.532411 sec
+Average time: 0.534427 sec
+Batch size = 128
+Throughput: 239.509 images/sec
+Ran inference with batch size 128
+Log location outside container: {--output-dir value}/benchmark_resnet50_inference_fp32_{timestamp}.log
+```
+
+* To measure the model accuracy, use the `--accuracy-only` flag and pass
+the ImageNet dataset directory from step 3 as the `--data-location`:
+```
+$ cd /home//models/benchmarks
+
+$ python launch_benchmark.py \
+    --in-graph resnet50_v1.pb \
+    --model-name resnet50v1_5 \
+    --framework tensorflow \
+    --precision fp32 \
+    --mode inference \
+    --accuracy-only \
+    --batch-size 100 \
+    --socket-id 0 \
+    --data-location /home//dataset/ImageNetData_directory \
+    --docker-image intelaipg/intel-optimized-tensorflow:1.14
+```
+
+The log file is saved to the value of `--output-dir`.
+The tail of the log output when the accuracy run completes should look
+something like this:
+```
+...
+Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7651, 0.9307)
+lscpu_path_cmd = command -v lscpu
+lscpu located here: /usr/bin/lscpu
+Ran inference with batch size 100
+Log location outside container: {--output-dir value}/benchmark_resnet50_inference_fp32_{timestamp}.log
+```
+
+* The `--output-results` flag can be used along with the above benchmarking
+or accuracy test, in order to also output a file with the inference
+results (file name, actual label, and predicted label). The results
+output can only be used with real data.
+
+For example, the command below is the same as the accuracy test above,
+except with the `--output-results` flag added:
+```
+$ cd /home//models/benchmarks
+
+$ python launch_benchmark.py \
+    --in-graph resnet50_v1.pb \
+    --model-name resnet50v1_5 \
+    --framework tensorflow \
+    --precision fp32 \
+    --mode inference \
+    --accuracy-only \
+    --output-results \
+    --batch-size 100 \
+    --socket-id 0 \
+    --data-location /home//dataset/ImageNetData_directory \
+    --docker-image intelaipg/intel-optimized-tensorflow:1.14
+```
+The results file will be written to the
+`models/benchmarks/common/tensorflow/logs` directory, unless another
+output directory is specified by the `--output-dir` arg. Below is an
+example of what the inference results file will look like:
+```
+filename,actual,prediction
+ILSVRC2012_val_00033870.JPEG,592,592
+ILSVRC2012_val_00045598.JPEG,258,258
+ILSVRC2012_val_00047428.JPEG,736,736
+ILSVRC2012_val_00003341.JPEG,344,344
+ILSVRC2012_val_00037069.JPEG,192,192
+ILSVRC2012_val_00029701.JPEG,440,440
+ILSVRC2012_val_00016918.JPEG,286,737
+ILSVRC2012_val_00015545.JPEG,5,5
+ILSVRC2012_val_00016713.JPEG,274,274
+ILSVRC2012_val_00014735.JPEG,31,31
+...
+```
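+
+Because each line of the results file pairs the actual label with the predicted label,
+the Top-1 accuracy reported in the log can be recomputed directly from this file. A
+minimal sketch, assuming a results file in the format shown above (the file name below
+is an example; use the `*_results_*.txt` file from your output directory):
+```
+import csv
+
+with open("resnet50v1_5_fp32_inference_results_example.txt") as f:
+    rows = list(csv.DictReader(f))
+
+# An image counts as a Top-1 hit when the predicted label matches the actual label.
+correct = sum(1 for row in rows if row["actual"] == row["prediction"])
+print("Top-1 accuracy: {:.4f} ({}/{} images)".format(
+    float(correct) / len(rows), correct, len(rows)))
+```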
+
+Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands
+to get additional debug output or change the default output location.
diff --git a/benchmarks/image_recognition/tensorflow/resnet50v1_5/__init__.py b/benchmarks/image_recognition/tensorflow/resnet50v1_5/__init__.py
new file mode 100644
index 000000000..d9c4123de
--- /dev/null
+++ b/benchmarks/image_recognition/tensorflow/resnet50v1_5/__init__.py
@@ -0,0 +1,19 @@
+#
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: EPL-2.0
+#
diff --git a/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/__init__.py b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/__init__.py
new file mode 100644
index 000000000..d9c4123de
--- /dev/null
+++ b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/__init__.py
@@ -0,0 +1,19 @@
+#
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: EPL-2.0
+#
diff --git a/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/fp32/__init__.py b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/fp32/__init__.py
new file mode 100644
index 000000000..d9c4123de
--- /dev/null
+++ b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/fp32/__init__.py
@@ -0,0 +1,19 @@
+#
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: EPL-2.0
+#
diff --git a/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/fp32/config.json b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/fp32/config.json
new file mode 100644
index 000000000..273b45b40
--- /dev/null
+++ b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/fp32/config.json
@@ -0,0 +1,7 @@
+{
+  "optimization_parameters": {
+    "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0",
+    "KMP_BLOCKTIME": 1,
+    "KMP_SETTINGS": 1
+  }
+}
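For reference, the `optimization_parameters` in `config.json` above are exported as
environment variables by the model initializer before the benchmark runs, with
`KMP_BLOCKTIME` overridable via the `--kmp-blocktime` arg. The sketch below is a
hypothetical illustration of that behavior, not the actual `set_kmp_vars`
implementation:
```
import json
import os


def apply_kmp_vars(config_file_path, kmp_blocktime=None):
    """Export KMP settings from a config.json, respecting pre-set env vars."""
    with open(config_file_path) as f:
        params = json.load(f)["optimization_parameters"]
    # A --kmp-blocktime style arg replaces the config file's default value.
    if kmp_blocktime is not None:
        params["KMP_BLOCKTIME"] = kmp_blocktime
    for name, value in params.items():
        # Only set variables that the user has not already set themselves.
        os.environ.setdefault(name, str(value))


apply_kmp_vars("config.json", kmp_blocktime="1")
```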
diff --git a/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/fp32/model_init.py b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/fp32/model_init.py
new file mode 100644
index 000000000..7231243b8
--- /dev/null
+++ b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/fp32/model_init.py
@@ -0,0 +1,115 @@
+#
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# +# SPDX-License-Identifier: EPL-2.0 +# + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from common.base_model_init import BaseModelInitializer +from common.base_model_init import set_env_var + +import os +from argparse import ArgumentParser +import time + + +class ModelInitializer(BaseModelInitializer): + """initialize mode and run benchmark""" + + def __init__(self, args, custom_args=[], platform_util=None): + super(ModelInitializer, self).__init__(args, custom_args, platform_util) + + self.benchmark_command = "" + if not platform_util: + raise ValueError("Did not find any platform info.") + + # use default batch size if -1 + if self.args.batch_size == -1: + self.args.batch_size = 128 + + # set num_inter_threads and num_intra_threads + self.set_num_inter_intra_threads() + + arg_parser = ArgumentParser(description='Parse args') + + arg_parser.add_argument("--warmup-steps", dest='warmup_steps', + type=int, default=10, + help="number of warmup steps") + arg_parser.add_argument("--steps", dest='steps', + type=int, default=50, + help="number of steps") + arg_parser.add_argument( + '--kmp-blocktime', dest='kmp_blocktime', + help='number of kmp block time', + type=int, default=1) + + self.args = arg_parser.parse_args(self.custom_args, namespace=self.args) + + # Set KMP env vars, if they haven't already been set, but override the default KMP_BLOCKTIME value + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path, kmp_blocktime=str(self.args.kmp_blocktime)) + + set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) + + benchmark_script = os.path.join( + self.args.intelai_models, self.args.mode, + "eval_image_classifier_inference.py") + + self.benchmark_command = self.get_command_prefix(args.socket_id) + \ + self.python_exe + " " + benchmark_script + + num_cores = self.platform_util.num_cores_per_socket if self.args.num_cores == -1 \ + else self.args.num_cores + + self.benchmark_command = \ + self.benchmark_command + \ + " --input-graph=" + self.args.input_graph + \ + " --num-inter-threads=" + str(self.args.num_inter_threads) + \ + " --num-intra-threads=" + str(self.args.num_intra_threads) + \ + " --num-cores=" + str(num_cores) + \ + " --batch-size=" + str(self.args.batch_size) + \ + " --warmup-steps=" + str(self.args.warmup_steps) + \ + " --steps=" + str(self.args.steps) + + if self.args.data_num_inter_threads: + self.benchmark_command += " --data-num-inter-threads=" + str(self.args.data_num_inter_threads) + if self.args.data_num_intra_threads: + self.benchmark_command += " --data-num-intra-threads=" + str(self.args.data_num_intra_threads) + + # if the data location directory is not empty, then include the arg + if self.args.data_location and os.listdir(self.args.data_location): + self.benchmark_command += " --data-location=" + \ + self.args.data_location + if self.args.accuracy_only: + self.benchmark_command += " --accuracy-only" + + # if output results is enabled, generate a results file name and pass it to the inference script + if self.args.output_results: + self.results_filename = "{}_{}_{}_results_{}.txt".format( + self.args.model_name, self.args.precision, self.args.mode, + time.strftime("%Y%m%d_%H%M%S", time.gmtime())) + self.results_file_path = os.path.join(self.args.output_dir, self.results_filename) + self.benchmark_command += " --results-file-path {}".format(self.results_file_path) + + def run(self): + if self.benchmark_command: + 
self.run_command(self.benchmark_command) + + if self.args.output_results: + print("Inference results file in the output directory: {}".format(self.results_filename)) diff --git a/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/__init__.py b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/config.json b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/config.json new file mode 100644 index 000000000..273b45b40 --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/model_init.py b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/model_init.py new file mode 100644 index 000000000..03b523829 --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/model_init.py @@ -0,0 +1,123 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+# SPDX-License-Identifier: EPL-2.0
+#
+
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from common.base_model_init import BaseModelInitializer
+from common.base_model_init import set_env_var
+
+import argparse
+import os
+
+
+class ModelInitializer(BaseModelInitializer):
+    """Model initializer for ResNet50 v1.5 int8 inference"""
+
+    def __init__(self, args, custom_args=[], platform_util=None):
+        super(ModelInitializer, self).__init__(args, custom_args, platform_util)
+
+        # Set the num_inter_threads and num_intra_threads
+        self.set_num_inter_intra_threads()
+        # Set env vars, if they haven't already been set
+        set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads, overwrite_existing=True)
+
+    def parse_args(self):
+        parser = argparse.ArgumentParser()
+        parser.add_argument(
+            "--warmup-steps", dest="warmup_steps",
+            help="number of warmup steps",
+            type=int, default=10)
+        parser.add_argument(
+            "--steps", dest="steps",
+            help="number of steps",
+            type=int, default=50)
+        parser.add_argument(
+            '--kmp-blocktime', dest='kmp_blocktime',
+            help='KMP_BLOCKTIME value, in milliseconds',
+            type=int, default=1)
+        parser.add_argument(
+            "--calibration-only",
+            help="Calibrate the accuracy.",
+            dest="calibration_only", action="store_true")
+        parser.add_argument(
+            "--calibrate", dest="calibrate",
+            help="run accuracy with calibration data "
+                 "to generate min_max ranges, calibrate=[True/False]",
+            type=bool, default=False)
+
+        self.args = parser.parse_args(self.custom_args,
+                                      namespace=self.args)
+
+        # Set KMP env vars, if they haven't already been set, but override the default KMP_BLOCKTIME value
+        config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json")
+        self.set_kmp_vars(config_file_path, kmp_blocktime=str(self.args.kmp_blocktime))
+
+        set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads)
+
+    def run_benchmark_or_accuracy(self):
+        cmd = os.path.join(
+            self.args.intelai_models, self.args.mode,
+            "eval_image_classifier_inference.py")
+
+        cmd = self.get_command_prefix(self.args.socket_id) + self.python_exe + " " + cmd
+
+        cmd += " --input-graph=" + self.args.input_graph + \
+               " --num-inter-threads=" + str(self.args.num_inter_threads) + \
+               " --num-intra-threads=" + str(self.args.num_intra_threads) + \
+               " --batch-size=" + str(self.args.batch_size) + \
+               " --warmup-steps=" + str(self.args.warmup_steps) + \
+               " --steps=" + str(self.args.steps)
+
+        if self.args.calibrate:
+            cmd += " --calibrate=" + str(self.args.calibrate)
+        if self.args.data_num_inter_threads:
+            cmd += " --data-num-inter-threads=" + str(self.args.data_num_inter_threads)
+        if self.args.data_num_intra_threads:
+            cmd += " --data-num-intra-threads=" + str(self.args.data_num_intra_threads)
+
+        # if the data location directory is not empty, then include the arg
+        if self.args.data_location and os.listdir(self.args.data_location):
+            cmd += " --data-location=" + self.args.data_location
+        if self.args.accuracy_only:
+            cmd += " --accuracy-only"
+
+        self.run_command(cmd)
+
+    def run_calibration(self):
+        calibration_script = os.path.join(self.args.intelai_models,
+                                          self.args.precision,
+                                          "generate_calibration_data.py")
+        script_args_list = [
+            "input_graph", "data_location",
+            "batch_size",
+            "num_inter_threads", "num_intra_threads"]
+        cmd_prefix = self.get_command_prefix(self.args.socket_id) + \
+            self.python_exe + " " + calibration_script
+        cmd = self.add_args_to_command(cmd_prefix, script_args_list)
+        self.run_command(cmd)
+
+    def run(self):
+        #
Parse custom arguments and append to self.args + self.parse_args() + if self.args.accuracy_only and self.args.calibration_only: + self.run_calibration() + else: + self.run_benchmark_or_accuracy() diff --git a/models/image_recognition/tensorflow/resnet50v1_5/__init__.py b/models/image_recognition/tensorflow/resnet50v1_5/__init__.py new file mode 100644 index 000000000..159180624 --- /dev/null +++ b/models/image_recognition/tensorflow/resnet50v1_5/__init__.py @@ -0,0 +1,20 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + diff --git a/models/image_recognition/tensorflow/resnet50v1_5/inference/__init__.py b/models/image_recognition/tensorflow/resnet50v1_5/inference/__init__.py new file mode 100644 index 000000000..159180624 --- /dev/null +++ b/models/image_recognition/tensorflow/resnet50v1_5/inference/__init__.py @@ -0,0 +1,20 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + diff --git a/models/image_recognition/tensorflow/resnet50v1_5/inference/datasets.py b/models/image_recognition/tensorflow/resnet50v1_5/inference/datasets.py new file mode 100644 index 000000000..cb848e467 --- /dev/null +++ b/models/image_recognition/tensorflow/resnet50v1_5/inference/datasets.py @@ -0,0 +1,96 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Benchmark dataset utilities. +""" + +from abc import abstractmethod +import os + +import tensorflow as tf + +import preprocessing + +IMAGENET_NUM_TRAIN_IMAGES = 1281167 +IMAGENET_NUM_VAL_IMAGES = 50000 +IMAGENET_NUM_CLASSES = 1000 + +class Dataset(object): + """Abstract class for cnn benchmarks dataset.""" + + def __init__(self, name, data_dir=None): + self.name = name + if data_dir is None: + raise ValueError('Data directory not specified') + self.data_dir = data_dir + + def tf_record_pattern(self, subset): + return os.path.join(self.data_dir, '%s-*-of-*' % subset) + + def reader(self): + return tf.TFRecordReader() + + @abstractmethod + def num_classes(self): + pass + + @abstractmethod + def num_examples_per_epoch(self, subset): + pass + + def __str__(self): + return self.name + + +class ImagenetData(Dataset): + + def __init__(self, data_dir=None): + super(ImagenetData, self).__init__('ImageNet', data_dir) + + def num_classes(self): + return IMAGENET_NUM_CLASSES + + def num_examples_per_epoch(self, subset='train'): + if subset == 'train': + return IMAGENET_NUM_TRAIN_IMAGES + elif subset == 'validation': + return IMAGENET_NUM_VAL_IMAGES + elif subset == 'calibrate' or subset == 'calibration': + return 100 + else: + raise ValueError('Invalid data subset "%s"' % subset) + + def get_image_preprocessor(self): + return preprocessing.RecordInputImagePreprocessor diff --git a/models/image_recognition/tensorflow/resnet50v1_5/inference/eval_image_classifier_inference.py b/models/image_recognition/tensorflow/resnet50v1_5/inference/eval_image_classifier_inference.py new file mode 100644 index 000000000..c8fe46a11 --- /dev/null +++ b/models/image_recognition/tensorflow/resnet50v1_5/inference/eval_image_classifier_inference.py @@ -0,0 +1,268 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+# SPDX-License-Identifier: EPL-2.0
+#
+
+import time
+from argparse import ArgumentParser
+
+import tensorflow as tf
+import tensorflow.tools.graph_transforms as graph_transforms
+
+import datasets
+import numpy as np
+
+INPUTS = 'input_tensor:0'
+OUTPUTS = 'softmax_tensor:0'
+OPTIMIZATION = 'strip_unused_nodes remove_nodes(op=Identity, op=CheckNumerics) fold_constants(ignore_errors=true) fold_batch_norms fold_old_batch_norms'
+
+RESNET_IMAGE_SIZE = 224
+
+
+class eval_classifier_optimized_graph:
+    """Evaluate image classifier with optimized TensorFlow graph"""
+
+    def __init__(self):
+
+        arg_parser = ArgumentParser(description='Parse args')
+
+        arg_parser.add_argument('-b', "--batch-size",
+                                help="Specify the batch size. If this " \
+                                     "parameter is not specified or is -1, the " \
+                                     "largest ideal batch size for the model will " \
+                                     "be used.",
+                                dest="batch_size", type=int, default=-1)
+
+        arg_parser.add_argument('-e', "--num-inter-threads",
+                                help='The number of inter-op threads.',
+                                dest='num_inter_threads', type=int, default=0)
+
+        arg_parser.add_argument('-a', "--num-intra-threads",
+                                help='The number of intra-op threads.',
+                                dest='num_intra_threads', type=int, default=0)
+
+        arg_parser.add_argument('-m', "--model-name",
+                                help='Specify the model name to run benchmark for',
+                                dest='model_name')
+
+        arg_parser.add_argument('-g', "--input-graph",
+                                help='Specify the input graph for the transform tool',
+                                dest='input_graph')
+
+        arg_parser.add_argument('-d', "--data-location",
+                                help='Specify the location of the data. '
+                                     'If this parameter is not specified, '
+                                     'the benchmark will use random/dummy data.',
+                                dest="data_location", default=None)
+
+        arg_parser.add_argument('-r', "--accuracy-only",
+                                help='For accuracy measurement only.',
+                                dest='accuracy_only', action='store_true')
+        arg_parser.add_argument('--calibrate', dest='calibrate',
+                                help='Run accuracy with calibration data '
+                                     'to generate min_max ranges, calibrate=[True/False]',
+                                type=bool, default=False)
+        arg_parser.add_argument("--results-file-path",
+                                help="File path for the inference results",
+                                dest="results_file_path", default=None)
+        arg_parser.add_argument("--warmup-steps", type=int, default=10,
+                                help="number of warmup steps")
+        arg_parser.add_argument("--steps", type=int, default=50,
+                                help="number of steps")
+
+        arg_parser.add_argument(
+            '--data-num-inter-threads', dest='data_num_inter_threads',
+            help='number of inter-op threads for the data layer',
+            type=int, default=32)
+        arg_parser.add_argument(
+            '--data-num-intra-threads', dest='data_num_intra_threads',
+            help='number of intra-op threads for the data layer',
+            type=int, default=14)
+        arg_parser.add_argument(
+            '--num-cores', dest='num_cores',
+            help='number of cores',
+            type=int, default=28)
+
+        self.args = arg_parser.parse_args()
+        # validate the arguments
+        self.validate_args()
+
+    def write_results_output(self, predictions, filenames, labels):
+        # If a results_file_path is provided, write the predictions to the file
+        if self.args.results_file_path:
+            top_predictions = np.argmax(predictions, 1)
+            with open(self.args.results_file_path, "a") as fp:
+                for filename, expected_label, top_prediction in zip(filenames, labels, top_predictions):
+                    fp.write("{},{},{}\n".format(filename, expected_label, top_prediction))
+
+    def run(self):
+        """run benchmark with optimized graph"""
+
+        print("Run inference")
+
+        data_config = tf.ConfigProto()
+        data_config.intra_op_parallelism_threads = self.args.data_num_intra_threads
+        data_config.inter_op_parallelism_threads = self.args.data_num_inter_threads
+        data_config.use_per_session_threads = 1
+
+        infer_config = tf.ConfigProto()
+        infer_config.intra_op_parallelism_threads = self.args.num_intra_threads
+        infer_config.inter_op_parallelism_threads = self.args.num_inter_threads
+        infer_config.use_per_session_threads = 1
+
+        data_graph = tf.Graph()
+        with data_graph.as_default():
+            if (self.args.data_location):
+                print("Inference with real data.")
+                if self.args.calibrate:
+                    subset = 'calibration'
+                else:
+                    subset = 'validation'
+                dataset = datasets.ImagenetData(self.args.data_location)
+                preprocessor = dataset.get_image_preprocessor()(
+                    RESNET_IMAGE_SIZE, RESNET_IMAGE_SIZE, self.args.batch_size,
+                    num_cores=self.args.num_cores,
+                    resize_method='crop')
+
+                images, labels, filenames = preprocessor.minibatch(dataset, subset=subset)
+
+                # If a results file path is provided, then start the prediction output file
+                if self.args.results_file_path:
+                    with open(self.args.results_file_path, "w+") as fp:
+                        fp.write("filename,actual,prediction\n")
+            else:
+                print("Inference with dummy data.")
+                input_shape = [self.args.batch_size, RESNET_IMAGE_SIZE, RESNET_IMAGE_SIZE, 3]
+                images = tf.random.uniform(input_shape, 0.0, 255.0, dtype=tf.float32, name='synthetic_images')
+
+        infer_graph = tf.Graph()
+        with infer_graph.as_default():
+            graph_def = tf.GraphDef()
+            with tf.gfile.FastGFile(self.args.input_graph, 'rb') as input_file:
+                input_graph_content = input_file.read()
+                graph_def.ParseFromString(input_graph_content)
+
+            output_graph = graph_transforms.TransformGraph(graph_def,
+                                                           [INPUTS], [OUTPUTS], [OPTIMIZATION])
+            tf.import_graph_def(output_graph, name='')
+
+        # Define the input and output tensors of the inference graph
+        input_tensor = infer_graph.get_tensor_by_name('input_tensor:0')
+        output_tensor = infer_graph.get_tensor_by_name('softmax_tensor:0')
+
+        data_sess = tf.Session(graph=data_graph, config=data_config)
+        infer_sess = tf.Session(graph=infer_graph, config=infer_config)
+
+        num_processed_images = 0
+        num_remaining_images = dataset.num_examples_per_epoch(subset=subset) - num_processed_images \
+            if self.args.data_location else datasets.IMAGENET_NUM_VAL_IMAGES
+
+        if (not self.args.accuracy_only):
+            iteration = 0
+            warm_up_iteration = self.args.warmup_steps
+            total_run = self.args.steps
+            total_time = 0
+
+            while num_remaining_images >= self.args.batch_size and iteration < total_run:
+                iteration += 1
+                tf_filenames = None
+                np_labels = None
+                data_load_start = time.time()
+                if self.args.results_file_path:
+                    image_np, np_labels, tf_filenames = data_sess.run([images, labels, filenames])
+                else:
+                    image_np = data_sess.run(images)
+
+                data_load_time = time.time() - data_load_start
+
+                num_processed_images += self.args.batch_size
+                num_remaining_images -= self.args.batch_size
+
+                start_time = time.time()
+                predictions = infer_sess.run(output_tensor, feed_dict={input_tensor: image_np})
+                time_consume = time.time() - start_time
+
+                # Write out the file name, expected label, and top prediction
+                self.write_results_output(predictions, tf_filenames, np_labels)
+
+                # only add data loading time for real data, not for dummy data
+                if self.args.data_location:
+                    time_consume += data_load_time
+
+                print('Iteration %d: %.6f sec' % (iteration, time_consume))
+                if iteration > warm_up_iteration:
+                    total_time += time_consume
+
+            time_average = total_time / (iteration - warm_up_iteration)
+            print('Average time: %.6f sec' % (time_average))
+
+            print('Batch size = %d' % self.args.batch_size)
+            if (self.args.batch_size == 1):
+                print('Latency: %.3f ms' % (time_average * 1000))
+            # print throughput for every batch size, not just batch size 1
+            print('Throughput: %.3f images/sec' % (self.args.batch_size / time_average))
+
+        else: # accuracy check
+            total_accuracy1, total_accuracy5 = (0.0, 0.0)
+
+            while num_remaining_images >= self.args.batch_size:
+                # Read and preprocess the data
+                tf_filenames = None
+                if self.args.results_file_path:
+                    np_images, np_labels, tf_filenames = data_sess.run([images, labels, filenames])
+                else:
+                    np_images, np_labels = data_sess.run([images, labels])
+                num_processed_images += self.args.batch_size
+                num_remaining_images -= self.args.batch_size
+
+                # Compute inference on the preprocessed data
+                predictions = infer_sess.run(output_tensor,
+                                             {input_tensor: np_images})
+
+                # Write out the file name, expected label, and top prediction
+                self.write_results_output(predictions, tf_filenames, np_labels)
+
+                with tf.Graph().as_default() as accu_graph:
+                    accuracy1 = tf.reduce_sum(
+                        tf.cast(tf.nn.in_top_k(tf.constant(predictions),
+                                               tf.constant(np_labels), 1), tf.float32))
+
+                    accuracy5 = tf.reduce_sum(
+                        tf.cast(tf.nn.in_top_k(tf.constant(predictions),
+                                               tf.constant(np_labels), 5), tf.float32))
+                    with tf.Session() as accu_sess:
+                        np_accuracy1, np_accuracy5 = accu_sess.run([accuracy1, accuracy5])
+
+                    total_accuracy1 += np_accuracy1
+                    total_accuracy5 += np_accuracy5
+
+                print("Processed %d images. (Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \
+                      % (num_processed_images, total_accuracy1 / num_processed_images,
+                         total_accuracy5 / num_processed_images))
+
+    def validate_args(self):
+        """validate the arguments"""
+
+        if not self.args.data_location:
+            if self.args.accuracy_only:
+                raise ValueError("You must use real data for accuracy measurement.")
+
+
+if __name__ == "__main__":
+    evaluate_opt_graph = eval_classifier_optimized_graph()
+    evaluate_opt_graph.run()
diff --git a/models/image_recognition/tensorflow/resnet50v1_5/inference/preprocessing.py b/models/image_recognition/tensorflow/resnet50v1_5/inference/preprocessing.py
new file mode 100644
index 000000000..3c6361584
--- /dev/null
+++ b/models/image_recognition/tensorflow/resnet50v1_5/inference/preprocessing.py
@@ -0,0 +1,177 @@
+#
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: EPL-2.0
+#
+
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# ============================================================================== + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf +from tensorflow.data.experimental import parallel_interleave +from tensorflow.data.experimental import map_and_batch +from tensorflow.python.platform import gfile + + +def parse_example_proto(example_serialized): + """Parses an Example proto containing a training example of an image. + """ + # Dense features in Example proto. + feature_map = { + 'image/encoded': tf.FixedLenFeature([], dtype=tf.string, + default_value=''), + 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, + default_value=-1), + 'image/filename': tf.FixedLenFeature([], dtype=tf.string, + default_value="") + } + sparse_float32 = tf.VarLenFeature(dtype=tf.float32) + # Sparse features in Example proto. + feature_map.update( + {k: sparse_float32 for k in ['image/object/bbox/xmin', + 'image/object/bbox/ymin', + 'image/object/bbox/xmax', + 'image/object/bbox/ymax']}) + + features = tf.parse_single_example(example_serialized, feature_map) + label = tf.cast(features['image/class/label'], dtype=tf.int32) + filename = tf.cast(features['image/filename'], dtype=tf.string) + + return features['image/encoded'], label, filename + + +def eval_image(image, height, width, resize_method, + central_fraction=0.875, scope=None): + + with tf.name_scope('eval_image'): + if resize_method == 'crop': + shape = tf.shape(image) + image = tf.cond(tf.less(shape[0], shape[1]), + lambda: tf.image.resize_images(image, + tf.convert_to_tensor([256, 256 * shape[1] / shape[0]], + dtype=tf.int32)), + lambda: tf.image.resize_images(image, + tf.convert_to_tensor([256 * shape[0] / shape[1], 256], + dtype=tf.int32))) + + shape = tf.shape(image) + y0 = (shape[0] - height) // 2 + x0 = (shape[1] - width) // 2 + distorted_image = tf.image.crop_to_bounding_box(image, y0, x0, height, width) + distorted_image.set_shape([height, width, 3]) + means = tf.broadcast_to([123.68, 116.78, 103.94], tf.shape(distorted_image)) + return distorted_image - means + else: # bilinear + if image.dtype != tf.float32: + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + # Crop the central region of the image with an area containing 87.5% of + # the original image. + if central_fraction: + image = tf.image.central_crop(image, central_fraction=central_fraction) + + if height and width: + # Resize the image to the specified height and width. 
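+        # (resize_bilinear operates on 4-D batches, so a batch dimension is
+        # added before the resize and squeezed away afterwards; the
+        # subtract/multiply below then maps pixel values from [0, 1] to the
+        # [-1, 1] range used by this preprocessing path.)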
+ image = tf.expand_dims(image, 0) + image = tf.image.resize_bilinear(image, [height, width], + align_corners=False) + image = tf.squeeze(image, [0]) + image = tf.subtract(image, 0.5) + image = tf.multiply(image, 2.0) + return image + +class RecordInputImagePreprocessor(object): + """Preprocessor for images with RecordInput format.""" + + def __init__(self, + height, + width, + batch_size, + num_cores, + resize_method="bilinear"): + + self.height = height + self.width = width + self.batch_size = batch_size + self.num_cores = num_cores + self.resize_method = resize_method + + def parse_and_preprocess(self, value): + # parse + image_buffer, label_index, filename = parse_example_proto(value) + # preprocess + image = tf.image.decode_jpeg( + image_buffer, channels=3, fancy_upscaling=False, dct_method='INTEGER_FAST') + image = eval_image(image, self.height, self.width, self.resize_method) + return (image, label_index, filename) + + def minibatch(self, dataset, subset, cache_data=False): + + with tf.name_scope('batch_processing'): + + glob_pattern = dataset.tf_record_pattern(subset) + file_names = gfile.Glob(glob_pattern) + if not file_names: + raise ValueError('Found no files in --data_dir matching: {}' + .format(glob_pattern)) + ds = tf.data.TFRecordDataset.list_files(file_names) + + ds = ds.apply( + parallel_interleave( + tf.data.TFRecordDataset, cycle_length=self.num_cores, block_length=5, + sloppy=True, + buffer_output_elements=10000, prefetch_input_elements=10000)) + + if cache_data: + ds = ds.take(1).cache().repeat() + + ds = ds.prefetch(buffer_size=10000) + #ds = ds.prefetch(buffer_size=self.batch_size) + + # num of parallel batches not greater than 56 + max_num_parallel_batches = min(56, 2 * self.num_cores) + ds = ds.apply( + map_and_batch( + map_func=self.parse_and_preprocess, + batch_size=self.batch_size, + num_parallel_batches=max_num_parallel_batches, + num_parallel_calls=None)) + + ds = ds.prefetch(buffer_size=tf.contrib.data.AUTOTUNE) + + ds_iterator = ds.make_one_shot_iterator() + images, labels, filename = ds_iterator.get_next() + # reshape + labels = tf.reshape(labels, [self.batch_size]) + filename = tf.reshape(filename, [self.batch_size]) + + return images, labels, filename diff --git a/models/image_recognition/tensorflow/resnet50v1_5/int8/__init__.py b/models/image_recognition/tensorflow/resnet50v1_5/int8/__init__.py new file mode 100644 index 000000000..159180624 --- /dev/null +++ b/models/image_recognition/tensorflow/resnet50v1_5/int8/__init__.py @@ -0,0 +1,20 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+# SPDX-License-Identifier: EPL-2.0
+#
+
diff --git a/models/image_recognition/tensorflow/resnet50v1_5/int8/benchmark.py b/models/image_recognition/tensorflow/resnet50v1_5/int8/benchmark.py
new file mode 100644
index 000000000..c6d9a9e1f
--- /dev/null
+++ b/models/image_recognition/tensorflow/resnet50v1_5/int8/benchmark.py
@@ -0,0 +1,213 @@
+#
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: EPL-2.0
+#
+
+
+# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ==============================================================================

+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+
+import argparse
+import sys
+import time
+
+import datasets
+import tensorflow as tf
+
+if __name__ == "__main__":
+  parser = argparse.ArgumentParser()
+  parser.add_argument("--input_graph", default=None,
+                      help="graph/model to be executed")
+  parser.add_argument("--input_height", default=224,
+                      type=int, help="input height")
+  parser.add_argument("--input_width", default=224,
+                      type=int, help="input width")
+  parser.add_argument("--batch_size", default=32,
+                      type=int, help="batch size")
+  parser.add_argument("--data_location", default=None,
+                      help="dataset location")
+  parser.add_argument("--input_layer", default="input",
+                      help="name of input layer")
+  parser.add_argument("--output_layer", default="predict",
+                      help="name of output layer")
+  parser.add_argument("--num_cores", default=28,
+                      type=int, help="number of physical cores")
+  parser.add_argument(
+      '--num_inter_threads',
+      help='number of inter-op threads',
+      type=int, default=1)
+  parser.add_argument(
+      '--num_intra_threads',
+      help='number of intra-op threads',
+      type=int, default=1)
+  parser.add_argument(
+      '--data_num_inter_threads',
+      help='number of inter-op threads for the data layer',
+      type=int, default=16)
+  parser.add_argument(
+      '--data_num_intra_threads',
+      help='number of intra-op threads for the data layer',
+      type=int, default=14)
+  parser.add_argument("--warmup_steps", type=int, default=10,
+                      help="number of warmup steps")
+  parser.add_argument("--steps", type=int, default=50, help="number of steps")
+  args = parser.parse_args()
+
+  if args.input_graph:
+    model_file = args.input_graph
+  else:
+    sys.exit("Please provide a graph file.")
+  if args.input_height:
+    input_height = args.input_height
+  else:
+    input_height
= 224 + if args.input_width: + input_width = args.input_width + else: + input_width = 224 + batch_size = args.batch_size + input_layer = args.input_layer + output_layer = args.output_layer + warmup_steps = args.warmup_steps + steps = args.steps + assert steps > 10, "Benchmark steps should be at least 10." + num_inter_threads = args.num_inter_threads + num_intra_threads = args.num_intra_threads + + data_config = tf.ConfigProto() + data_config.intra_op_parallelism_threads = args.data_num_intra_threads + data_config.inter_op_parallelism_threads = args.data_num_inter_threads + data_config.use_per_session_threads = 1 + + infer_config = tf.ConfigProto() + infer_config.intra_op_parallelism_threads = num_intra_threads + infer_config.inter_op_parallelism_threads = num_inter_threads + infer_config.use_per_session_threads = 1 + + data_graph = tf.Graph() + with data_graph.as_default(): + if args.data_location: + print("inference with real data") + # get the images from dataset + dataset = datasets.ImagenetData(args.data_location) + preprocessor = dataset.get_image_preprocessor(benchmark=True)( + input_height, input_width, batch_size, + num_cores=args.num_cores, + resize_method='crop') + images = preprocessor.minibatch(dataset, subset='validation') + else: + # synthetic images + print("inference with dummy data") + input_shape = [batch_size, input_height, input_width, 3] + images = tf.random.uniform( + input_shape, 0.0, 255.0, dtype=tf.float32, name='synthetic_images') + + infer_graph = tf.Graph() + with infer_graph.as_default(): + graph_def = tf.GraphDef() + with open(model_file, "rb") as f: + graph_def.ParseFromString(f.read()) + tf.import_graph_def(graph_def, name='') + + input_tensor = infer_graph.get_tensor_by_name(input_layer + ":0") + output_tensor = infer_graph.get_tensor_by_name(output_layer + ":0") + tf.global_variables_initializer() + + data_sess = tf.Session(graph=data_graph, config=data_config) + infer_sess = tf.Session(graph=infer_graph, config=infer_config) + + print("[Running warmup steps...]") + step_total_time = 0 + step_total_images = 0 + + for t in range(warmup_steps): + data_start_time = time.time() + image_data = data_sess.run(images) + data_load_time = time.time() - data_start_time + + start_time = time.time() + infer_sess.run(output_tensor, {input_tensor: image_data}) + elapsed_time = time.time() - start_time + + # only count the data loading and processing time for real data + if args.data_location: + elapsed_time += data_load_time + + step_total_time += elapsed_time + step_total_images += batch_size + + if ((t + 1) % 10 == 0): + print("steps = {0}, {1} images/sec" + "".format(t + 1, step_total_images / step_total_time)) + step_total_time = 0 + step_total_images = 0 + + print("[Running benchmark steps...]") + total_time = 0 + total_images = 0 + + step_total_time = 0 + step_total_images = 0 + + for t in range(steps): + try: + data_start_time = time.time() + image_data = data_sess.run(images) + data_load_time = time.time() - data_start_time + + start_time = time.time() + infer_sess.run(output_tensor, {input_tensor: image_data}) + elapsed_time = time.time() - start_time + + # only count the data loading and processing time for real data + if args.data_location: + elapsed_time += data_load_time + + total_time += elapsed_time + total_images += batch_size + + step_total_time += elapsed_time + step_total_images += batch_size + + if ((t + 1) % 10 == 0): + print("steps = {0}, {1} images/sec" + "".format(t + 1, step_total_images / step_total_time)) + step_total_time = 0 + 
step_total_images = 0 + + except tf.errors.OutOfRangeError: + print("Running out of images from dataset.") + break + + print("Average throughput for batch size {0}: {1} images/sec".format(batch_size, total_images / total_time)) diff --git a/models/image_recognition/tensorflow/resnet50v1_5/int8/cnn_util.py b/models/image_recognition/tensorflow/resnet50v1_5/int8/cnn_util.py new file mode 100644 index 000000000..fb76f2971 --- /dev/null +++ b/models/image_recognition/tensorflow/resnet50v1_5/int8/cnn_util.py @@ -0,0 +1,51 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Utilities for CNN benchmarks.""" + +import tensorflow as tf + + +def tensorflow_version_tuple(): + v = tf.__version__ + major, minor, patch = v.split('.') + return (int(major), int(minor), patch) + + +def tensorflow_version(): + vt = tensorflow_version_tuple() + return vt[0] * 1000 + vt[1] + diff --git a/models/image_recognition/tensorflow/resnet50v1_5/int8/datasets.py b/models/image_recognition/tensorflow/resnet50v1_5/int8/datasets.py new file mode 100644 index 000000000..1a885cb66 --- /dev/null +++ b/models/image_recognition/tensorflow/resnet50v1_5/int8/datasets.py @@ -0,0 +1,114 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +import os +from abc import abstractmethod + +import tensorflow as tf + + +IMAGENET_NUM_TRAIN_IMAGES = 1281167 +IMAGENET_NUM_VAL_IMAGES = 50000 + +class Dataset(object): + """Abstract class for cnn benchmarks dataset.""" + + def __init__(self, name, height=None, width=None, depth=None, data_dir=None, + queue_runner_required=False, num_classes=1000): + self.name = name + self.height = height + self.width = width + self.depth = depth or 3 + + self.data_dir = data_dir + self._queue_runner_required = queue_runner_required + self._num_classes = num_classes + + def tf_record_pattern(self, subset): + return os.path.join(self.data_dir, '%s-*-of-*' % subset) + + def reader(self): + return tf.TFRecordReader() + + @property + def num_classes(self): + return self._num_classes + + @num_classes.setter + def num_classes(self, val): + self._num_classes = val + + @abstractmethod + def num_examples_per_epoch(self, subset): + pass + + def __str__(self): + return self.name + + def get_image_preprocessor(self): + return None + + def queue_runner_required(self): + return self._queue_runner_required + + def use_synthetic_gpu_images(self): + return not self.data_dir + + +class ImagenetData(Dataset): + """Configuration for Imagenet dataset.""" + + def __init__(self, data_dir=None): + super(ImagenetData, self).__init__('imagenet', 300, 300, data_dir=data_dir) + + def num_examples_per_epoch(self, subset='train'): + if subset == 'train': + return IMAGENET_NUM_TRAIN_IMAGES + elif subset == 'validation': + return IMAGENET_NUM_VAL_IMAGES + elif subset == 'calibrate' or subset == 'calibration': + return 100 + else: + raise ValueError('Invalid data subset "%s"' % subset) + + def get_image_preprocessor(self, benchmark=False): + if benchmark: + import preprocessing_benchmark + return preprocessing_benchmark.RecordInputImagePreprocessor + else: + import preprocessing + return preprocessing.RecordInputImagePreprocessor + diff --git a/models/image_recognition/tensorflow/resnet50v1_5/int8/generate_calibration_data.py b/models/image_recognition/tensorflow/resnet50v1_5/int8/generate_calibration_data.py new file mode 100644 index 000000000..abf62345b --- /dev/null +++ b/models/image_recognition/tensorflow/resnet50v1_5/int8/generate_calibration_data.py @@ -0,0 +1,183 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import sys +import os +import time +import numpy as np +from collections import namedtuple +from operator import attrgetter + +from google.protobuf import text_format +import tensorflow as tf +import preprocessing +import datasets + +NUM_TEST_IMAGES = 50000 + +def load_graph(model_file): + graph = tf.Graph() + graph_def = tf.GraphDef() + + import os + file_ext = os.path.splitext(model_file)[1] + + with open(model_file, "rb") as f: + if file_ext == '.pbtxt': + text_format.Merge(f.read(), graph_def) + else: + graph_def.ParseFromString(f.read()) + with graph.as_default(): + tf.import_graph_def(graph_def, name='') + + return graph + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--input_graph", default=None, + help="graph/model to be executed") + parser.add_argument("--data_location", default=None, + help="full path to the validation data") + parser.add_argument("--input_height", default=224, + type=int, help="input height") + parser.add_argument("--input_width", default=224, + type=int, help="input width") + parser.add_argument("--batch_size", default=32, + type=int, help="batch size") + parser.add_argument("--input_layer", default="input", + help="name of input layer") + parser.add_argument("--output_layer", default="predict", + help="name of output layer") + parser.add_argument( + '--num_inter_threads', + help='number threads across operators', + type=int, default=1) + parser.add_argument( + '--num_intra_threads', + help='number threads for an operator', + type=int, default=1) + args = parser.parse_args() + + if args.input_graph: + model_file = args.input_graph + else: + sys.exit("Please provide a graph file.") + if args.input_height: + input_height = args.input_height + else: + input_height = 224 + if args.input_width: + input_width = args.input_width + else: + input_width = 224 + batch_size = args.batch_size + input_layer = args.input_layer + output_layer = args.output_layer + num_inter_threads = args.num_inter_threads + num_intra_threads = args.num_intra_threads + data_location = args.data_location + dataset = datasets.ImagenetData(data_location) + preprocessor = preprocessing.ImagePreprocessor( + input_height, input_width, batch_size, + 1, # device count + tf.float32, # data_type for input fed to the graph + train=False, # doing inference + resize_method='crop') + images, labels, tf_records = preprocessor.minibatch(dataset, subset='train') + graph = load_graph(model_file) + input_tensor = graph.get_tensor_by_name(input_layer + ":0") + output_tensor = graph.get_tensor_by_name(output_layer + ":0") + + config = tf.ConfigProto() + config.inter_op_parallelism_threads = num_inter_threads + config.intra_op_parallelism_threads = num_intra_threads + + total_accuracy1, total_accuracy5 = (0.0, 0.0) + 
num_processed_images = 0 + num_remaining_images = dataset.num_examples_per_epoch(subset='train') \ + - num_processed_images + + CALIBRATION_POOL_SIZE = 1000 + CALIBRATION_SET_SIZE = 100 + calibration_pool = [] + ImageWithConfidence = namedtuple('ImageWithConfidence', + ['tf_record', 'confidence']) + current_pool_size = 0 + with tf.Session() as sess: + sess_graph = tf.Session(graph=graph, config=config) + while num_remaining_images >= batch_size: + # Reads and preprocess data + np_images, np_labels, serialized_images = sess.run( + [images[0], labels[0], tf_records]) + num_processed_images += batch_size + num_remaining_images -= batch_size + # Compute inference on the preprocessed data + predictions = sess_graph.run(output_tensor, + {input_tensor: np_images}) + selected_img_indices = np.where( + predictions.argmax(axis=1) == np_labels)[0].tolist() + current_pool_size += len(selected_img_indices) + for indx in selected_img_indices: + calibration_pool.append(ImageWithConfidence( + serialized_images[indx], predictions[indx].max())) + + accuracy1 = tf.reduce_sum( + tf.cast(tf.nn.in_top_k(tf.constant(predictions), + tf.constant(np_labels), 1), tf.float32)) + + accuracy5 = tf.reduce_sum( + tf.cast(tf.nn.in_top_k(tf.constant(predictions), + tf.constant(np_labels), 5), tf.float32)) + np_accuracy1, np_accuracy5 = sess.run([accuracy1, accuracy5]) + total_accuracy1 += np_accuracy1 + total_accuracy5 += np_accuracy5 + print("Processed %d images. (Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \ + % (num_processed_images, total_accuracy1/num_processed_images, + total_accuracy5/num_processed_images)) + if current_pool_size >= CALIBRATION_POOL_SIZE: + break + + writer = tf.python_io.TFRecordWriter('calibration-1-of-1') + calibration_pool = sorted(calibration_pool, + key=attrgetter('confidence'), reverse=True) + for i in range(CALIBRATION_SET_SIZE): + writer.write(calibration_pool[i].tf_record) + writer.close() diff --git a/models/image_recognition/tensorflow/resnet50v1_5/int8/preprocessing.py b/models/image_recognition/tensorflow/resnet50v1_5/int8/preprocessing.py new file mode 100644 index 000000000..c4e0a95ce --- /dev/null +++ b/models/image_recognition/tensorflow/resnet50v1_5/int8/preprocessing.py @@ -0,0 +1,419 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Image pre-processing utilities. +""" +from six.moves import xrange # pylint: disable=redefined-builtin +import tensorflow as tf +from random import randint + +from tensorflow.python.ops import data_flow_ops +import cnn_util + +def parse_example_proto(example_serialized): + """Parses an Example proto containing a training example of an image. + + The output of the build_image_data.py image preprocessing script is a dataset + containing serialized Example protocol buffers. Each Example proto contains + the following fields: + + image/height: 462 + image/width: 581 + image/colorspace: 'RGB' + image/channels: 3 + image/class/label: 615 + image/class/synset: 'n03623198' + image/class/text: 'knee pad' + image/object/bbox/xmin: 0.1 + image/object/bbox/xmax: 0.9 + image/object/bbox/ymin: 0.2 + image/object/bbox/ymax: 0.6 + image/object/bbox/label: 615 + image/format: 'JPEG' + image/filename: 'ILSVRC2012_val_00041207.JPEG' + image/encoded: + + Args: + example_serialized: scalar Tensor tf.string containing a serialized + Example protocol buffer. + + Returns: + image_buffer: Tensor tf.string containing the contents of a JPEG file. + label: Tensor tf.int32 containing the label. + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged as + [ymin, xmin, ymax, xmax]. + text: Tensor tf.string containing the human-readable label. + """ + # Dense features in Example proto. + feature_map = { + 'image/encoded': tf.FixedLenFeature([], dtype=tf.string, + default_value=''), + 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, + default_value=-1), + 'image/class/text': tf.FixedLenFeature([], dtype=tf.string, + default_value=''), + } + sparse_float32 = tf.VarLenFeature(dtype=tf.float32) + # Sparse features in Example proto. + feature_map.update( + {k: sparse_float32 for k in ['image/object/bbox/xmin', + 'image/object/bbox/ymin', + 'image/object/bbox/xmax', + 'image/object/bbox/ymax']}) + + features = tf.parse_single_example(example_serialized, feature_map) + label = tf.cast(features['image/class/label'], dtype=tf.int32) + + xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0) + ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0) + xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0) + ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0) + + # Note that we impose an ordering of (y, x) just to make life difficult. + bbox = tf.concat([ymin, xmin, ymax, xmax], 0) + + # Force the variable number of bounding boxes into the shape + # [1, num_boxes, coords]. + bbox = tf.expand_dims(bbox, 0) + bbox = tf.transpose(bbox, [0, 2, 1]) + + return features['image/encoded'], label, bbox, features['image/class/text'] + + +def decode_jpeg(image_buffer, scope=None): # , dtype=tf.float32): + """Decode a JPEG string into one 3-D float image Tensor. + + Args: + image_buffer: scalar string Tensor. + scope: Optional scope for op_scope. + Returns: + 3-D float Tensor with values ranging from [0, 1). + """ + # with tf.op_scope([image_buffer], scope, 'decode_jpeg'): + # with tf.name_scope(scope, 'decode_jpeg', [image_buffer]): + with tf.name_scope(scope or 'decode_jpeg'): + # Decode the string as an RGB JPEG. + # Note that the resulting image contains an unknown height and width + # that is set dynamically by decode_jpeg. In other words, the height + # and width of image is unknown at compile-time. 
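+    # (fancy_upscaling=False and dct_method='INTEGER_FAST' select the
+    # faster, slightly lower-fidelity JPEG decode paths, which suits
+    # benchmarking where decode speed matters more than exactness.)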
+    image = tf.image.decode_jpeg(image_buffer, channels=3,
+                                 fancy_upscaling=False,
+                                 dct_method='INTEGER_FAST')
+
+    # image = tf.Print(image, [tf.shape(image)], 'Image shape: ')
+
+    return image
+
+
+def eval_image(image, height, width, bbox, thread_id, resize):
+  """Get the image for model evaluation."""
+  with tf.name_scope('eval_image'):
+    if not thread_id:
+      tf.summary.image(
+          'original_image', tf.expand_dims(image, 0))
+
+    if resize == 'crop':
+      # Note: This is much slower than crop_to_bounding_box
+      #       It seems that the redundant pad step has huge overhead
+      # distorted_image = tf.image.resize_image_with_crop_or_pad(image,
+      #                                                          height, width)
+      shape = tf.shape(image)
+      image = tf.cond(tf.less(shape[0], shape[1]),
+                      lambda: tf.image.resize_images(image, tf.convert_to_tensor([256, 256*shape[1]/shape[0]], dtype=tf.int32)),
+                      lambda: tf.image.resize_images(image, tf.convert_to_tensor([256*shape[0]/shape[1], 256], dtype=tf.int32)))
+      shape = tf.shape(image)
+
+      y0 = (shape[0] - height) // 2
+      x0 = (shape[1] - width) // 2
+      #y0=tf.random_uniform([],minval=0,maxval=(shape[0] - height + 1), dtype=tf.int32)
+      #x0=tf.random_uniform([],minval=0,maxval=(shape[1] - width + 1), dtype=tf.int32)
+      ## distorted_image = tf.slice(image, [y0,x0,0], [height,width,3])
+      distorted_image = tf.image.crop_to_bounding_box(image, y0, x0, height,
+                                                      width)
+    else:
+      sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box(
+          tf.shape(image),
+          bounding_boxes=bbox,
+          min_object_covered=0.5,
+          aspect_ratio_range=[0.90, 1.10],
+          area_range=[0.10, 1.0],
+          max_attempts=100,
+          use_image_if_no_bounding_boxes=True)
+      bbox_begin, bbox_size, _ = sample_distorted_bounding_box
+      # Crop the image to the specified bounding box.
+      distorted_image = tf.slice(image, bbox_begin, bbox_size)
+      resize_method = {
+          'nearest': tf.image.ResizeMethod.NEAREST_NEIGHBOR,
+          'bilinear': tf.image.ResizeMethod.BILINEAR,
+          'bicubic': tf.image.ResizeMethod.BICUBIC,
+          'area': tf.image.ResizeMethod.AREA
+      }[resize]
+      # This resizing operation may distort the images because the aspect
+      # ratio is not respected.
+      if cnn_util.tensorflow_version() >= 11:
+        distorted_image = tf.image.resize_images(
+            distorted_image, [height, width],
+            resize_method,
+            align_corners=False)
+      else:
+        distorted_image = tf.image.resize_images(
+            distorted_image, height, width, resize_method, align_corners=False)
+      distorted_image.set_shape([height, width, 3])
+      if not thread_id:
+        tf.summary.image(
+            'cropped_resized_image', tf.expand_dims(distorted_image, 0))
+      image = distorted_image
+  return image
+
+
+def distort_image(image, height, width, bbox, thread_id=0, scope=None):
+  """Distort one image for training a network.
+
+  Distorting images provides a useful technique for augmenting the data
+  set during training in order to make the network invariant to aspects
+  of the image that do not affect the label.
+
+  Args:
+    image: 3-D float Tensor of image
+    height: integer
+    width: integer
+    bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords]
+      where each coordinate is [0, 1) and the coordinates are arranged
+      as [ymin, xmin, ymax, xmax].
+    thread_id: integer indicating the preprocessing thread.
+    scope: Optional scope for op_scope.
+  Returns:
+    3-D float Tensor of distorted image used for training.
+ """ + # with tf.op_scope([image, height, width, bbox], scope, 'distort_image'): + # with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]): + with tf.name_scope(scope or 'distort_image'): + # Each bounding box has shape [1, num_boxes, box coords] and + # the coordinates are ordered [ymin, xmin, ymax, xmax]. + + # After this point, all image pixels reside in [0,1) + # until the very end, when they're rescaled to (-1, 1). The various + # adjust_* ops all require this range for dtype float. + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + + # Display the bounding box in the first thread only. + if not thread_id: + image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), + bbox) + tf.summary.image( + 'image_with_bounding_boxes', image_with_box) + + # A large fraction of image datasets contain a human-annotated bounding + # box delineating the region of the image containing the object of interest. + # We choose to create a new bounding box for the object which is a randomly + # distorted version of the human-annotated bounding box that obeys an allowed + # range of aspect ratios, sizes and overlap with the human-annotated + # bounding box. If no box is supplied, then we assume the bounding box is + # the entire image. + sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( + tf.shape(image), + bounding_boxes=bbox, + min_object_covered=0.1, + aspect_ratio_range=[0.99, 1.01], + area_range=[0.05, 1.0], + max_attempts=100, + use_image_if_no_bounding_boxes=True) + bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box + if not thread_id: + image_with_distorted_box = tf.image.draw_bounding_boxes( + tf.expand_dims(image, 0), distort_bbox) + tf.summary.image( + 'images_with_distorted_bounding_box', + image_with_distorted_box) + + # Crop the image to the specified bounding box. + distorted_image = tf.slice(image, bbox_begin, bbox_size) + + # This resizing operation may distort the images because the aspect + # ratio is not respected. We select a resize method in a round robin + # fashion based on the thread number. + # Note that ResizeMethod contains 4 enumerated resizing methods. + resize_method = thread_id % 4 + if cnn_util.tensorflow_version() >= 11: + distorted_image = tf.image.resize_images( + distorted_image, [height, width], resize_method, align_corners=False) + else: + distorted_image = tf.image.resize_images( + distorted_image, height, width, resize_method, align_corners=False) + # Restore the shape since the dynamic slice based upon the bbox_size loses + # the third dimension. + distorted_image.set_shape([height, width, 3]) + if not thread_id: + tf.summary.image( + 'cropped_resized_image', + tf.expand_dims(distorted_image, 0)) + + # Randomly flip the image horizontally. + distorted_image = tf.image.random_flip_left_right(distorted_image) + + # Randomly distort the colors. + distorted_image = distort_color(distorted_image, thread_id) + + # Note: This ensures the scaling matches the output of eval_image + distorted_image *= 256 + + if not thread_id: + tf.summary.image( + 'final_distorted_image', + tf.expand_dims(distorted_image, 0)) + return distorted_image + + +def distort_color(image, thread_id=0, scope=None): + """Distort the color of the image. + + Each color distortion is non-commutative and thus ordering of the color ops + matters. Ideally we would randomly permute the ordering of the color ops. 
+  Rather than adding that level of complication, we select a distinct ordering
+  of color ops for each preprocessing thread.
+
+  Args:
+    image: Tensor containing single image.
+    thread_id: preprocessing thread ID.
+    scope: Optional scope for op_scope.
+  Returns:
+    color-distorted image
+  """
+  # with tf.op_scope([image], scope, 'distort_color'):
+  # with tf.name_scope(scope, 'distort_color', [image]):
+  with tf.name_scope(scope or 'distort_color'):
+    color_ordering = thread_id % 2
+
+    if color_ordering == 0:
+      image = tf.image.random_brightness(image, max_delta=32. / 255.)
+      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
+      image = tf.image.random_hue(image, max_delta=0.2)
+      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
+    elif color_ordering == 1:
+      image = tf.image.random_brightness(image, max_delta=32. / 255.)
+      image = tf.image.random_contrast(image, lower=0.5, upper=1.5)
+      image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
+      image = tf.image.random_hue(image, max_delta=0.2)
+
+    # The random_* ops do not necessarily clamp.
+    image = tf.clip_by_value(image, 0.0, 1.0)
+    return image
+
+
+class ImagePreprocessor(object):
+  """Preprocessor for input images."""
+
+  def __init__(self,
+               height,
+               width,
+               batch_size,
+               device_count,
+               dtype=tf.float32,
+               train=True,
+               distortions=None,
+               resize_method=None):
+    self.height = height
+    self.width = width
+    self.batch_size = batch_size
+    self.device_count = device_count
+    self.dtype = dtype
+    self.train = train
+    self.resize_method = resize_method
+    if distortions is None:
+      distortions = False
+    self.distortions = distortions
+    if self.batch_size % self.device_count != 0:
+      raise ValueError(
+          ('batch_size must be a multiple of device_count: '
+           'batch_size %d, device_count: %d') %
+          (self.batch_size, self.device_count))
+    self.batch_size_per_device = self.batch_size // self.device_count
+
+  def preprocess(self, image_buffer, bbox, thread_id):
+    """Preprocess image_buffer using thread_id."""
+    # Note: Width and height of the image are known only at runtime.
+ image = tf.image.decode_jpeg(image_buffer, channels=3, + dct_method='INTEGER_FAST') + if self.train and self.distortions: + image = distort_image(image, self.height, self.width, bbox, thread_id) + else: + image = eval_image(image, self.height, self.width, bbox, thread_id, + self.resize_method) + # Note: image is now float32 [height,width,3] with range [0, 255] + + # image = tf.cast(image, tf.uint8) # HACK TESTING + + return image + + def minibatch(self, dataset, subset): + with tf.name_scope('batch_processing'): + images = [[] for i in range(self.device_count)] + labels = [[] for i in range(self.device_count)] + record_input = data_flow_ops.RecordInput( + file_pattern=dataset.tf_record_pattern(subset), + seed=randint(0, 9000), + parallelism=64, + buffer_size=10000, + batch_size=self.batch_size, + name='record_input') + records = record_input.get_yield_op() + records = tf.split(records, self.batch_size, 0) + records = [tf.reshape(record, []) for record in records] + for i in xrange(self.batch_size): + value = records[i] + image_buffer, label_index, bbox, _ = parse_example_proto(value) + image = self.preprocess(image_buffer, bbox, i % 4) + device_index = i % self.device_count + images[device_index].append(image) + labels[device_index].append(label_index) + label_index_batch = [None] * self.device_count + for device_index in xrange(self.device_count): + images[device_index] = tf.parallel_stack(images[device_index]) + label_index_batch[device_index] = tf.concat(labels[device_index], 0) + + # dynamic_pad=True) # HACK TESTING dynamic_pad=True + images[device_index] = tf.cast(images[device_index], self.dtype) + depth = 3 + images[device_index] = tf.reshape( + images[device_index], + shape=[self.batch_size_per_device, self.height, self.width, depth]) + label_index_batch[device_index] = tf.reshape( + label_index_batch[device_index], [self.batch_size_per_device]) + # Display the training images in the visualizer. + # tf.summary.image('images', images) + + return images, label_index_batch, records diff --git a/models/image_recognition/tensorflow/resnet50v1_5/int8/preprocessing_benchmark.py b/models/image_recognition/tensorflow/resnet50v1_5/int8/preprocessing_benchmark.py new file mode 100644 index 000000000..8e3556556 --- /dev/null +++ b/models/image_recognition/tensorflow/resnet50v1_5/int8/preprocessing_benchmark.py @@ -0,0 +1,173 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf +from tensorflow.data.experimental import parallel_interleave +from tensorflow.data.experimental import map_and_batch +from tensorflow.python.platform import gfile + + +def parse_example_proto(example_serialized): + """Parses an Example proto containing a training example of an image. + """ + # Dense features in Example proto. + feature_map = { + 'image/encoded': tf.FixedLenFeature([], dtype=tf.string, + default_value=''), + 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, + default_value=-1), + } + sparse_float32 = tf.VarLenFeature(dtype=tf.float32) + # Sparse features in Example proto. + feature_map.update( + {k: sparse_float32 for k in ['image/object/bbox/xmin', + 'image/object/bbox/ymin', + 'image/object/bbox/xmax', + 'image/object/bbox/ymax']}) + + features = tf.parse_single_example(example_serialized, feature_map) + label = tf.cast(features['image/class/label'], dtype=tf.int32) + + return features['image/encoded'], label + + +def eval_image(image, height, width, resize_method, + central_fraction=0.875, scope=None): + with tf.name_scope('eval_image'): + if resize_method == 'crop': + shape = tf.shape(image) + image = tf.cond(tf.less(shape[0], shape[1]), + lambda: tf.image.resize_images(image, + tf.convert_to_tensor([256, 256 * shape[1] / shape[0]], + dtype=tf.int32)), + lambda: tf.image.resize_images(image, + tf.convert_to_tensor([256 * shape[0] / shape[1], 256], + dtype=tf.int32))) + shape = tf.shape(image) + y0 = (shape[0] - height) // 2 + x0 = (shape[1] - width) // 2 + distorted_image = tf.image.crop_to_bounding_box(image, y0, x0, height, width) + distorted_image.set_shape([height, width, 3]) + means = tf.broadcast_to([123.68, 116.78, 103.94], tf.shape(distorted_image)) + return distorted_image - means + else: # bilinear + if image.dtype != tf.float32: + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + # Crop the central region of the image with an area containing 87.5% of + # the original image. + if central_fraction: + image = tf.image.central_crop(image, central_fraction=central_fraction) + + if height and width: + # Resize the image to the specified height and width. 
+ image = tf.expand_dims(image, 0) + image = tf.image.resize_bilinear(image, [height, width], + align_corners=False) + image = tf.squeeze(image, [0]) + image = tf.subtract(image, 0.5) + image = tf.multiply(image, 2.0) + return image + + +class RecordInputImagePreprocessor(object): + """Preprocessor for images with RecordInput format.""" + + def __init__(self, + height, + width, + batch_size, + num_cores, + resize_method): + + self.height = height + self.width = width + self.batch_size = batch_size + self.num_cores = num_cores + self.resize_method = resize_method + + def parse_and_preprocess(self, value): + # parse + image_buffer, label_index = parse_example_proto(value) + # preprocess + image = tf.image.decode_jpeg( + image_buffer, channels=3, fancy_upscaling=False, dct_method='INTEGER_FAST') + image = eval_image(image, self.height, self.width, self.resize_method) + + return (image, label_index) + + def minibatch(self, dataset, subset, cache_data=False): + + with tf.name_scope('batch_processing'): + + glob_pattern = dataset.tf_record_pattern(subset) + file_names = gfile.Glob(glob_pattern) + if not file_names: + raise ValueError('Found no files in --data_dir matching: {}' + .format(glob_pattern)) + ds = tf.data.TFRecordDataset.list_files(file_names) + + ds = ds.apply( + parallel_interleave( + tf.data.TFRecordDataset, cycle_length=self.num_cores, block_length=5, + sloppy=True, + buffer_output_elements=10000, prefetch_input_elements=10000)) + + if cache_data: + ds = ds.take(1).cache().repeat() + + ds = ds.prefetch(buffer_size=10000) + # ds = ds.prefetch(buffer_size=self.batch_size) + + # num of parallel batches not greater than 56 + max_num_parallel_batches = min(56, 2*self.num_cores) + ds = ds.apply( + map_and_batch( + map_func=self.parse_and_preprocess, + batch_size=self.batch_size, + num_parallel_batches=max_num_parallel_batches, + num_parallel_calls=None)) # this number should be tuned + + ds = ds.prefetch(buffer_size=tf.contrib.data.AUTOTUNE) # this number can be tuned + + ds_iterator = ds.make_one_shot_iterator() + images, _ = ds_iterator.get_next() + + return images diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_resnet50v1_5_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_resnet50v1_5_args.json new file mode 100644 index 000000000..271813ed7 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_resnet50v1_5_args.json @@ -0,0 +1,40 @@ +[ + { "_comment": "FP32 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50v1_5 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --batch-size 100 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/freezed_resnet50v1_5.pb --accuracy-only --data-location=/dataset", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/freezed_resnet50v1_5.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=100 --data-location=/dataset --accuracy-only --num-cores=28 --warmup-steps=10 --steps=50"}, + + { "_comment": "FP32 command for latency benchmark with default --num-inter-threads, --num-intra-threads.", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50v1_5 --batch-size 128 --in-graph /freezed_resnet50v1_5.pb --intelai-models . 
--socket-id 0 --verbose", + "output": "numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50v1_5.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28"}, + + { "_comment": "FP32 command for latency benchmark with --num-inter-threads 4 --num-intra-threads 16", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50v1_5 --batch-size 1 --in-graph /freezed_resnet50v1_5.pb --intelai-models . --socket-id 0 --verbose --num-inter-threads 4 --num-intra-threads 16", + "output": "numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50v1_5.pb --num-inter-threads=4 --num-intra-threads=16 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28"}, + + { "_comment": "FP32 command for throughput benchmark with --num-inter-threads=1 --num-intra-threads=28", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50v1_5 --batch-size 128 --in-graph /freezed_resnet50v1_5.pb --intelai-models . --socket-id 0 --verbose", + "output": "numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50v1_5.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28"}, + + { "_comment": "Int8 command for throughput benchmark with --output-dir enabled.", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50v1_5 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/resnet50v1_5_int8_pretrained_model.pb --steps=200 --warmup-steps=20", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/resnet50v1_5_int8_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=20 --steps=200"}, + + { "_comment": "Int8 command for data calibration with --calibration-only", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50v1_5 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/resnet50v1_5_int8_pretrained_model.pb --data-location=/dataset --calibration-only", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/int8/generate_calibration_data.py --num_intra_threads=56 --num_inter_threads=2 --batch_size=100 --input_graph=/in_graph/resnet50v1_5_int8_pretrained_model.pb --data_location=/dataset"}, + + { "_comment": "Fp32 command for throughput benchmark with --output-results enabled.", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50v1_5 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks 
--intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --output-results --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/resnet50v1_5_fp32_pretrained_model.pb --data-location=/dataset",
    "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/resnet50v1_5_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --num-cores=28 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --results-file-path /workspace/benchmarks/common/tensorflow/logs/resnet50v1_5_fp32_inference_results*.txt"},

  { "_comment": "Int8 accuracy command",
    "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name resnet50v1_5 --batch-size 100 --data-location /dataset --in-graph /final_int8_resnet50v1_5.pb --intelai-models . --accuracy-only --verbose",
    "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python ./inference/eval_image_classifier_inference.py --input-graph=/final_int8_resnet50v1_5.pb --num-inter-threads=2 --num-intra-threads=56 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only"},

  { "_comment": "Int8 command for throughput benchmark with --steps=200 --warmup-steps=20",
    "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50v1_5 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/resnet50v1_5_int8_pretrained_model.pb --steps=200 --warmup-steps=20",
    "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/resnet50v1_5_int8_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=20 --steps=200"
  }
]

From 3db66e13f5ebc325ca020dc9afe00482b0e7a11f Mon Sep 17 00:00:00 2001
From: Dina Suehiro Jones
Date: Thu, 23 May 2019 13:47:20 -0700
Subject: [PATCH 38/62] Add link to download the MobileNet v1 Int8 pretrained model (#313)

---
 .../tensorflow/mobilenet_v1/README.md | 47 +++++++++----------
 1 file changed, 22 insertions(+), 25 deletions(-)

diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md
index af5947e20..3c8c0d947 100644
--- a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md
+++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md
@@ -40,7 +40,10 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc.
    -rw-r--r--. 1 user 52508270 Jun 20 15:09 validation-00126-of-00128
    -rw-r--r--. 1 user 55292089 Jun 20 15:09 validation-00127-of-00128
    ```
-2. A link to download the pre-trained model is coming soon.
+2. Download the pre-trained model.
+```
+$ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/mobilenetv1_int8_pretrained_model.pb
+```
 3.
Clone the [intelai/models](https://github.com/intelai/models) repo and then run the benchmarking scripts for either benchmarking throughput, @@ -113,45 +116,39 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. Example log tail when benchmarking for throughput: ``` - OMP: Info #250: KMP_AFFINITY: pid 682 tid 885 thread 55 bound to OS proc set 83 - OMP: Info #250: KMP_AFFINITY: pid 682 tid 886 thread 56 bound to OS proc set 0 - OMP: Info #250: KMP_AFFINITY: pid 682 tid 884 thread 54 bound to OS proc set 82 [Running warmup steps...] - steps = 10, 1830.24507317 images/sec + steps = 10, 1865.30956528 images/sec [Running benchmark steps...] - steps = 10, 1841.47811007 images/sec - steps = 20, 1848.84108679 images/sec - steps = 30, 1847.84668478 images/sec - steps = 40, 1849.15354305 images/sec - steps = 50, 1840.95611001 images/sec + steps = 10, 1872.92398031 images/sec + steps = 20, 1862.64499512 images/sec + steps = 30, 1857.97283454 images/sec + steps = 40, 1864.70142784 images/sec + steps = 50, 1854.23896906 images/sec Ran inference with batch size 240 - Log location outside container: {--output-dir value}/benchmark_mobilenet_v1_inference_int8_20190409_222536.log + Log location outside container: {--output-dir value}/benchmark_mobilenet_v1_inference_int8_20190523_164626.log ``` Example log tail when benchmarking for latency: ``` - OMP: Info #250: KMP_AFFINITY: pid 681 tid 882 thread 53 bound to OS proc set 81 - OMP: Info #250: KMP_AFFINITY: pid 681 tid 884 thread 55 bound to OS proc set 83 - OMP: Info #250: KMP_AFFINITY: pid 681 tid 885 thread 56 bound to OS proc set 0 [Running warmup steps...] - steps = 10, 139.81945463 images/sec + steps = 10, 197.082229114 images/sec [Running benchmark steps...] - steps = 10, 140.212074614 images/sec - steps = 20, 135.230332731 images/sec - steps = 30, 133.508530685 images/sec - steps = 40, 135.724816361 images/sec - steps = 50, 132.714339957 images/sec + steps = 10, 195.201936054 images/sec + steps = 20, 195.693743293 images/sec + steps = 30, 198.999098543 images/sec + steps = 40, 189.256565292 images/sec + steps = 50, 201.252531069 images/sec Ran inference with batch size 1 - Log location outside container: {--output-dir value}/benchmark_mobilenet_v1_inference_int8_20190409_223122.log + Log location outside container: {--output-dir value}/benchmark_mobilenet_v1_inference_int8_20190523_164348.log ``` Example log tail when running for accuracy: ``` - Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7009, 0.8933) - Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7011, 0.8933) - Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7013, 0.8933) + Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7014, 0.8935) + Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7014, 0.8934) + Processed 50000 images. 
(Top1 accuracy, Top5 accuracy) = (0.7014, 0.8934) Ran inference with batch size 100 - Log location outside container: {--output-dir value}/benchmark_mobilenet_v1_inference_int8_20190409_223621.log + Log location outside container: {--output-dir value}/benchmark_mobilenet_v1_inference_int8_20190523_164955.log ``` ## FP32 Inference Instructions From 59dbbda8697b93849e7b5936b5e1267769e415b7 Mon Sep 17 00:00:00 2001 From: Dina Suehiro Jones Date: Fri, 24 May 2019 09:55:33 -0700 Subject: [PATCH 39/62] Trivial update to benchmark README (#315) --- benchmarks/README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/README.md b/benchmarks/README.md index 414e344e5..03f59a89d 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -37,7 +37,7 @@ dependencies to be installed: | Object Detection | TensorFlow | [R-FCN](https://arxiv.org/pdf/1605.06409.pdf) | Inference | [Int8](object_detection/tensorflow/rfcn/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/rfcn/README.md#fp32-inference-instructions) | | Object Detection | TensorFlow | [Faster R-CNN](https://arxiv.org/pdf/1506.01497.pdf) | Inference | [Int8](object_detection/tensorflow/faster_rcnn/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/faster_rcnn/README.md#fp32-inference-instructions) | | Object Detection | TensorFlow | [SSD-MobileNet](https://arxiv.org/pdf/1704.04861.pdf) | Inference | [Int8](object_detection/tensorflow/ssd-mobilenet/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/ssd-mobilenet/README.md#fp32-inference-instructions) | -| Object Detection | TensorFlow | [SSD-ResNet34](https://arxiv.org/pdf/1512.02325.pdf) | Inference | [INT8](object_detection/tensorflow/ssd-resnet34/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/ssd-resnet34/README.md#fp32-inference-instructions) | +| Object Detection | TensorFlow | [SSD-ResNet34](https://arxiv.org/pdf/1512.02325.pdf) | Inference | [Int8](object_detection/tensorflow/ssd-resnet34/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/ssd-resnet34/README.md#fp32-inference-instructions) | | Object Detection | TensorFlow | [SSD-VGG16](https://arxiv.org/pdf/1512.02325.pdf) | Inference | [Int8](object_detection/tensorflow/ssd_vgg16/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/ssd_vgg16/README.md#fp32-inference-instructions) | | Recommendation | TensorFlow | [NCF](https://arxiv.org/pdf/1708.05031.pdf) | Inference | [FP32](recommendation/tensorflow/ncf/README.md#fp32-inference-instructions) | | Recommendation | TensorFlow | [Wide & Deep Large Dataset](https://arxiv.org/pdf/1606.07792.pdf) | Inference | [Int8](recommendation/tensorflow/wide_deep_large_ds/README.md#int8-inference-instructions) [FP32](recommendation/tensorflow/wide_deep_large_ds/README.md#fp32-inference-instructions) | From 59563bec9491647ad67269526255c82effd91c87 Mon Sep 17 00:00:00 2001 From: Dina Suehiro Jones Date: Tue, 28 May 2019 09:58:53 -0700 Subject: [PATCH 40/62] Add link to download the DenseNet 169 pretrained model (#318) --- .../image_recognition/tensorflow/densenet169/README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/benchmarks/image_recognition/tensorflow/densenet169/README.md b/benchmarks/image_recognition/tensorflow/densenet169/README.md index fa02b7a80..f75146e6d 100644 --- a/benchmarks/image_recognition/tensorflow/densenet169/README.md +++ 
b/benchmarks/image_recognition/tensorflow/densenet169/README.md @@ -31,7 +31,10 @@ following modes/precisions: -rw-r--r--. 1 user 55292089 Jun 20 15:09 validation-00127-of-00128 ``` -2. A link to download the pre-trained model is coming soon. +2. Download the pretrained model: + ``` + $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/densenet169_fp32_pretrained_model.pb + ``` 3. Clone the [intelai/models](https://github.com/intelai/models) repo and then run the benchmarking scripts for either benchmarking throughput, From 4adab615b0c0ae5e81e2fd116f18482c8f30c2d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=C5=81ukasz=20Wencel?= Date: Thu, 30 May 2019 18:30:58 +0200 Subject: [PATCH 41/62] Add iteration time to accuracy scripts (#317) --- .../tensorflow/densenet169/README.md | 4 ++++ .../tensorflow/inceptionv3/README.md | 6 +++++- .../tensorflow/inceptionv4/README.md | 11 ++++++++++- .../tensorflow/mobilenet_v1/README.md | 8 +++++++- .../image_recognition/tensorflow/resnet50/README.md | 6 ++++++ .../tensorflow/resnet50v1_5/README.md | 5 +++++ .../tensorflow/densenet169/inference/fp32/accuracy.py | 3 +++ .../eval_image_classifier_accuracy.py | 3 +++ .../fp32/eval_image_classifier_inference.py | 3 +++ .../tensorflow/inceptionv3/int8/accuracy.py | 3 +++ .../tensorflow/inceptionv4/inference/accuracy.py | 3 +++ .../mobilenet_v1/inference/fp32/accuracy.py | 3 +++ .../mobilenet_v1/inference/int8/accuracy.py | 3 +++ .../inference/eval_image_classifier_inference.py | 3 +++ .../inference/eval_image_classifier_inference.py | 3 +++ .../inference/eval_image_classifier_inference.py | 3 +++ 16 files changed, 67 insertions(+), 3 deletions(-) diff --git a/benchmarks/image_recognition/tensorflow/densenet169/README.md b/benchmarks/image_recognition/tensorflow/densenet169/README.md index f75146e6d..f38be702a 100644 --- a/benchmarks/image_recognition/tensorflow/densenet169/README.md +++ b/benchmarks/image_recognition/tensorflow/densenet169/README.md @@ -131,9 +131,13 @@ following modes/precisions: Example log tail when running for accuracy: ``` + Iteration time: 581.6446 ms 0.757505030181 + Iteration time: 581.5755 ms 0.757489959839 + Iteration time: 581.5709 ms 0.75749498998 + Iteration time: 581.1705 ms 0.75748 Ran inference with batch size 100 Log location outside container: {--output-dir value}/benchmark_densenet169_inference_fp32_20190412_021545.log diff --git a/benchmarks/image_recognition/tensorflow/inceptionv3/README.md b/benchmarks/image_recognition/tensorflow/inceptionv3/README.md index e02c73331..f40cdfebd 100644 --- a/benchmarks/image_recognition/tensorflow/inceptionv3/README.md +++ b/benchmarks/image_recognition/tensorflow/inceptionv3/README.md @@ -187,6 +187,7 @@ different configs. Example log tail when running for accuracy: ``` +Iteration time: 357.3781 ms Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7666, 0.9333) Executing command: python /workspace/intelai_models/int8/accuracy.py --input_height=299 --input_width=299 --num_intra_threads=56 --num_inter_threads=2 --batch_size=100 --input_graph=/in_graph/inceptionv3_int8_pretrained_model.pb --data_location=/dataset Ran inference with batch size 100 @@ -329,12 +330,15 @@ python launch_benchmark.py \ ``` Example log tail when benchmarking for accuracy: ``` +Iteration time: 756.7571 ms Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7673, 0.9341) +Iteration time: 757.3781 ms Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7674, 0.9341) +Iteration time: 760.3024 ms Processed 50000 images. 
(Top1 accuracy, Top5 accuracy) = (0.7675, 0.9342) Ran inference with batch size 100 Log location outside container: {--output-dir value}/benchmark_inceptionv3_inference_fp32_20190104_023816.log ``` Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands -to get additional debug output or change the default output location.. \ No newline at end of file +to get additional debug output or change the default output location.. diff --git a/benchmarks/image_recognition/tensorflow/inceptionv4/README.md b/benchmarks/image_recognition/tensorflow/inceptionv4/README.md index e89d13dee..75c6fa102 100644 --- a/benchmarks/image_recognition/tensorflow/inceptionv4/README.md +++ b/benchmarks/image_recognition/tensorflow/inceptionv4/README.md @@ -101,9 +101,13 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. Example log tail when running for accuracy: ``` ... + Iteration time: 685.1976 ms Processed 49700 images. (Top1 accuracy, Top5 accuracy) = (0.7985, 0.9504) + Iteration time: 686.3845 ms Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7983, 0.9504) + Iteration time: 686.7021 ms Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7984, 0.9504) + Iteration time: 685.8914 ms Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7984, 0.9504) Ran inference with batch size 100 Log location outside container: /benchmark_inceptionv4_inference_int8_20190306_221608.log @@ -226,10 +230,15 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. Example log tail when running for accuracy: ``` ... + Iteration time: 1337.8728 ms Processed 49600 images. (Top1 accuracy, Top5 accuracy) = (0.8015, 0.9517) + Iteration time: 1331.8253 ms Processed 49700 images. (Top1 accuracy, Top5 accuracy) = (0.8017, 0.9518) + Iteration time: 1339.1553 ms Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.8017, 0.9518) + Iteration time: 1334.5991 ms Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.8018, 0.9519) + Iteration time: 1336.1905 ms Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.8018, 0.9519) Ran inference with batch size 100 Log location outside container: /benchmark_inceptionv4_inference_fp32_20190308_182729.log @@ -262,4 +271,4 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. Latency: 63.534 ms Ran inference with batch size 1 Log location outside container: /benchmark_inceptionv4_inference_fp32_20190307_221954.log - ``` \ No newline at end of file + ``` diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md index 3c8c0d947..93a0d9025 100644 --- a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md +++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md @@ -144,8 +144,11 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/mobilene Example log tail when running for accuracy: ``` + Iteration time: 66.8541 ms Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7014, 0.8935) + Iteration time: 66.7909 ms Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7014, 0.8934) + Iteration time: 66.7001 ms Processed 50000 images. 
(Top1 accuracy, Top5 accuracy) = (0.7014, 0.8934)
    Ran inference with batch size 100
    Log location outside container: {--output-dir value}/benchmark_mobilenet_v1_inference_int8_20190523_164955.log
@@ -287,9 +290,12 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/mobilene
    ```
 * Below is a sample log file snippet when testing accuracy:
    ```
+   Iteration time: 119.1134 ms
    Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7104, 0.8999)
+   Iteration time: 118.8375 ms
    Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7103, 0.8999)
+   Iteration time: 119.9311 ms
    Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7102, 0.8999)
    Ran inference with batch size 100
    Log location outside container: {--output-dir value}/benchmark_mobilenet_v1_inference_fp32_20190110_211648.log
-   ```
\ No newline at end of file
+   ```
diff --git a/benchmarks/image_recognition/tensorflow/resnet50/README.md b/benchmarks/image_recognition/tensorflow/resnet50/README.md
index 31c06609a..fa2fb6e65 100644
--- a/benchmarks/image_recognition/tensorflow/resnet50/README.md
+++ b/benchmarks/image_recognition/tensorflow/resnet50/README.md
@@ -65,10 +65,15 @@ The log file is saved to the value of `--output-dir`.
 The tail of the log output when the benchmarking completes should look
 something like this:
 ```
+Iteration time: 233.495 ms
 Processed 49600 images. (Top1 accuracy, Top5 accuracy) = (0.7361, 0.9155)
+Iteration time: 233.231 ms
 Processed 49700 images. (Top1 accuracy, Top5 accuracy) = (0.7361, 0.9155)
+Iteration time: 234.541 ms
 Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7360, 0.9154)
+Iteration time: 233.033 ms
 Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7361, 0.9155)
+Iteration time: 233.013 ms
 Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7360, 0.9154)
 Ran inference with batch size 100
 Log location outside container: {--output-dir value}/benchmark_resnet50_inference_int8_20190104_212224.log
@@ -236,6 +241,7 @@ The tail of the log output when the accuracy run completes
 should look something like this:
 ```
 ...
+Iteration time: 649.252 ms
 Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7430, 0.9188)
 Ran inference with batch size 100
 Log location outside container: {--output-dir value}/benchmark_resnet50_inference_fp32_20190104_213452.log
diff --git a/benchmarks/image_recognition/tensorflow/resnet50v1_5/README.md b/benchmarks/image_recognition/tensorflow/resnet50v1_5/README.md
index cc1f255a6..610eb7cc0 100644
--- a/benchmarks/image_recognition/tensorflow/resnet50v1_5/README.md
+++ b/benchmarks/image_recognition/tensorflow/resnet50v1_5/README.md
@@ -68,9 +68,13 @@ The log file is saved to the value of `--output-dir`.
 The tail of the log output when the benchmarking completes should look
 something like this:
 ```
+Iteration time: 239.899 ms
 Processed 49700 images. (Top1 accuracy, Top5 accuracy) = (0.7622, 0.9296)
+Iteration time: 239.110 ms
 Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7621, 0.9295)
+Iteration time: 239.512 ms
 Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7622, 0.9296)
+Iteration time: 239.989 ms
 Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7623, 0.9296)
 Ran inference with batch size 100
 Log location outside container: {--output-dir value}/benchmark_resnet50_inference_int8_{timestamp}.log
@@ -242,6 +246,7 @@ The tail of the log output when the accuracy run completes
 should look something like this:
 ```
 ...
+Iteration time: 514.427 ms
 Processed 50000 images.
(Top1 accuracy, Top5 accuracy) = (0.7651, 0.9307) lscpu_path_cmd = command -v lscpu lscpu located here: /usr/bin/lscpu diff --git a/models/image_recognition/tensorflow/densenet169/inference/fp32/accuracy.py b/models/image_recognition/tensorflow/densenet169/inference/fp32/accuracy.py index 35d598a48..0335ce423 100644 --- a/models/image_recognition/tensorflow/densenet169/inference/fp32/accuracy.py +++ b/models/image_recognition/tensorflow/densenet169/inference/fp32/accuracy.py @@ -121,9 +121,11 @@ def load_graph(model_file): #print(np_labels.shape) num_processed_images += batch_size num_remaining_images -= batch_size + start_time = time.time() # Compute inference on the preprocessed data predictions1 = sess_graph.run(output_tensor, {input_tensor: np_images}) + elapsed_time = time.time() - start_time if(batch_size !=1): predictions1 = sess.run(tf.squeeze(predictions1)) else : @@ -131,4 +133,5 @@ def load_graph(model_file): predictions2 = tf.argmax(predictions1, axis=1) predictions = sess.run(predictions2) top1 += batch_size - (np.count_nonzero(predictions - np_labels)) + print("Iteration time: %0.4f ms" % elapsed_time) print(top1/num_processed_images) diff --git a/models/image_recognition/tensorflow/inception_resnet_v2/eval_image_classifier_accuracy.py b/models/image_recognition/tensorflow/inception_resnet_v2/eval_image_classifier_accuracy.py index 5671f2287..595b252a4 100644 --- a/models/image_recognition/tensorflow/inception_resnet_v2/eval_image_classifier_accuracy.py +++ b/models/image_recognition/tensorflow/inception_resnet_v2/eval_image_classifier_accuracy.py @@ -147,9 +147,11 @@ def load_graph(model_file): np_images, np_labels = sess.run([images[0], labels[0]]) num_processed_images += batch_size num_remaining_images -= batch_size + start_time = time.time() # Compute inference on the preprocessed data predictions = sess_graph.run(output_tensor, {input_tensor: np_images}) + elapsed_time = time.time() - start_time accuracy1 = tf.reduce_sum( tf.cast(tf.nn.in_top_k(tf.constant(predictions), tf.constant(np_labels), 1), tf.float32)) @@ -160,6 +162,7 @@ def load_graph(model_file): np_accuracy1, np_accuracy5 = sess.run([accuracy1, accuracy5]) total_accuracy1 += np_accuracy1 total_accuracy5 += np_accuracy5 + print("Iteration time: %0.4f ms" % elapsed_time) print("Processed %d images. (Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \ % (num_processed_images, total_accuracy1 / num_processed_images, total_accuracy5 / num_processed_images)) diff --git a/models/image_recognition/tensorflow/inceptionv3/fp32/eval_image_classifier_inference.py b/models/image_recognition/tensorflow/inceptionv3/fp32/eval_image_classifier_inference.py index 98b826ac9..b84d28ae3 100644 --- a/models/image_recognition/tensorflow/inceptionv3/fp32/eval_image_classifier_inference.py +++ b/models/image_recognition/tensorflow/inceptionv3/fp32/eval_image_classifier_inference.py @@ -189,9 +189,11 @@ def run(self): num_processed_images += self.args.batch_size num_remaining_images -= self.args.batch_size + start_time = time.time() # Compute inference on the preprocessed data predictions = infer_sess.run(output_tensor, {input_tensor: np_images}) + elapsed_time = time.time() - start_time with tf.Graph().as_default() as accu_graph: accuracy1 = tf.reduce_sum( @@ -207,6 +209,7 @@ def run(self): total_accuracy1 += np_accuracy1 total_accuracy5 += np_accuracy5 + print("Iteration time: %0.4f ms" % elapsed_time) print("Processed %d images. 
(Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \ % (num_processed_images, total_accuracy1 / num_processed_images, total_accuracy5 / num_processed_images)) diff --git a/models/image_recognition/tensorflow/inceptionv3/int8/accuracy.py b/models/image_recognition/tensorflow/inceptionv3/int8/accuracy.py index 7d79593aa..8062bd6be 100644 --- a/models/image_recognition/tensorflow/inceptionv3/int8/accuracy.py +++ b/models/image_recognition/tensorflow/inceptionv3/int8/accuracy.py @@ -120,9 +120,11 @@ def load_graph(model_file): np_images, np_labels = sess.run([images[0], labels[0]]) num_processed_images += batch_size num_remaining_images -= batch_size + start_time = time.time() # Compute inference on the preprocessed data predictions = sess_graph.run(output_tensor, {input_tensor: np_images}) + elapsed_time = time.time() - start_time accuracy1 = tf.reduce_sum( tf.cast(tf.nn.in_top_k(tf.constant(predictions), tf.constant(np_labels), 1), tf.float32)) @@ -133,6 +135,7 @@ def load_graph(model_file): np_accuracy1, np_accuracy5 = sess.run([accuracy1, accuracy5]) total_accuracy1 += np_accuracy1 total_accuracy5 += np_accuracy5 + print("Iteration time: %0.4f ms" % elapsed_time) print("Processed %d images. (Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \ % (num_processed_images, total_accuracy1/num_processed_images, total_accuracy5/num_processed_images)) diff --git a/models/image_recognition/tensorflow/inceptionv4/inference/accuracy.py b/models/image_recognition/tensorflow/inceptionv4/inference/accuracy.py index 3dc0b90f9..a3bdf7c58 100644 --- a/models/image_recognition/tensorflow/inceptionv4/inference/accuracy.py +++ b/models/image_recognition/tensorflow/inceptionv4/inference/accuracy.py @@ -144,9 +144,11 @@ def load_graph(model_file): np_images, np_labels = sess.run([images[0], labels[0]]) num_processed_images += batch_size num_remaining_images -= batch_size + start_time = time.time() # Compute inference on the preprocessed data predictions = sess_graph.run(output_tensor, {input_tensor: np_images}) + elapsed_time = time.time() - start_time accuracy1 = tf.reduce_sum( tf.cast(tf.nn.in_top_k(tf.constant(predictions), tf.constant(np_labels), 1), tf.float32)) @@ -157,6 +159,7 @@ def load_graph(model_file): np_accuracy1, np_accuracy5 = sess.run([accuracy1, accuracy5]) total_accuracy1 += np_accuracy1 total_accuracy5 += np_accuracy5 + print("Iteration time: %0.4f ms" % elapsed_time) print( "Processed %d images. 
(Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \
+        % (
diff --git a/models/image_recognition/tensorflow/mobilenet_v1/inference/fp32/accuracy.py b/models/image_recognition/tensorflow/mobilenet_v1/inference/fp32/accuracy.py
index 7d6a37abc..f5d45fb9f 100644
--- a/models/image_recognition/tensorflow/mobilenet_v1/inference/fp32/accuracy.py
+++ b/models/image_recognition/tensorflow/mobilenet_v1/inference/fp32/accuracy.py
@@ -143,9 +143,11 @@ def load_graph(model_file):
       np_images, np_labels = sess.run([images[0], labels[0]])
       num_processed_images += batch_size
       num_remaining_images -= batch_size
+      start_time = time.time()
       # Compute inference on the preprocessed data
       predictions = sess_graph.run(output_tensor,
                                    {input_tensor: np_images})
+      elapsed_time = time.time() - start_time
       accuracy1 = tf.reduce_sum(
           tf.cast(tf.nn.in_top_k(tf.constant(predictions),
                                  tf.constant(np_labels), 1), tf.float32))
@@ -156,6 +158,7 @@ def load_graph(model_file):
       np_accuracy1, np_accuracy5 = sess.run([accuracy1, accuracy5])
       total_accuracy1 += np_accuracy1
       total_accuracy5 += np_accuracy5
+      print("Iteration time: %0.4f ms" % elapsed_time)
       print(
           "Processed %d images. (Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \
           % (
diff --git a/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/accuracy.py b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/accuracy.py
index 347c39989..6d7acaf50 100644
--- a/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/accuracy.py
+++ b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/accuracy.py
@@ -114,9 +114,11 @@ def load_graph(model_file):
       np_images, np_labels = sess.run([images[0], labels[0]])
       num_processed_images += batch_size
       num_remaining_images -= batch_size
+      start_time = time.time()
       # Compute inference on the preprocessed data
       predictions = sess_graph.run(output_tensor,
                                    {input_tensor: np_images})
+      elapsed_time = time.time() - start_time
       accuracy1 = tf.reduce_sum(
           tf.cast(tf.nn.in_top_k(tf.constant(predictions),
                                  tf.constant(np_labels), 1), tf.float32))
@@ -127,6 +129,7 @@ def load_graph(model_file):
       np_accuracy1, np_accuracy5 = sess.run([accuracy1, accuracy5])
       total_accuracy1 += np_accuracy1
       total_accuracy5 += np_accuracy5
+      print("Iteration time: %0.4f ms" % elapsed_time)
       print("Processed %d images. (Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \
             % (num_processed_images, total_accuracy1/num_processed_images,
                total_accuracy5/num_processed_images))
diff --git a/models/image_recognition/tensorflow/resnet101/inference/eval_image_classifier_inference.py b/models/image_recognition/tensorflow/resnet101/inference/eval_image_classifier_inference.py
index a65a54b08..e62b40b3d 100644
--- a/models/image_recognition/tensorflow/resnet101/inference/eval_image_classifier_inference.py
+++ b/models/image_recognition/tensorflow/resnet101/inference/eval_image_classifier_inference.py
@@ -200,9 +200,11 @@ def run(self):
         num_processed_images += self.args.batch_size
         num_remaining_images -= self.args.batch_size
+        start_time = time.time()
         # Compute inference on the preprocessed data
         predictions = infer_sess.run(output_tensor,
                                      {input_tensor: np_images})
+        elapsed_time = time.time() - start_time
         with tf.Graph().as_default() as accu_graph:
           # Putting all code within this makes things faster.
accuracy1 = tf.reduce_sum( @@ -216,6 +218,7 @@ def run(self): np_accuracy1, np_accuracy5 = accu_sess.run([accuracy1, accuracy5]) total_accuracy1 += np_accuracy1 total_accuracy5 += np_accuracy5 + print("Iteration time: %0.4f ms" % elapsed_time) print("Processed %d images. (Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \ % (num_processed_images, total_accuracy1 / num_processed_images, total_accuracy5 / num_processed_images)) diff --git a/models/image_recognition/tensorflow/resnet50/inference/eval_image_classifier_inference.py b/models/image_recognition/tensorflow/resnet50/inference/eval_image_classifier_inference.py index 791c1b761..21a1b465e 100644 --- a/models/image_recognition/tensorflow/resnet50/inference/eval_image_classifier_inference.py +++ b/models/image_recognition/tensorflow/resnet50/inference/eval_image_classifier_inference.py @@ -230,9 +230,11 @@ def run(self): num_processed_images += self.args.batch_size num_remaining_images -= self.args.batch_size + start_time = time.time() # Compute inference on the preprocessed data predictions = infer_sess.run(output_tensor, {input_tensor: np_images}) + elapsed_time = time.time() - start_time # Write out the file name, expected label, and top prediction self.write_results_output(predictions, tf_filenames, np_labels) @@ -251,6 +253,7 @@ def run(self): total_accuracy1 += np_accuracy1 total_accuracy5 += np_accuracy5 + print("Iteration time: %0.4f ms" % elapsed_time) print("Processed %d images. (Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \ % (num_processed_images, total_accuracy1 / num_processed_images, total_accuracy5 / num_processed_images)) diff --git a/models/image_recognition/tensorflow/resnet50v1_5/inference/eval_image_classifier_inference.py b/models/image_recognition/tensorflow/resnet50v1_5/inference/eval_image_classifier_inference.py index c8fe46a11..e1e6133e1 100644 --- a/models/image_recognition/tensorflow/resnet50v1_5/inference/eval_image_classifier_inference.py +++ b/models/image_recognition/tensorflow/resnet50v1_5/inference/eval_image_classifier_inference.py @@ -230,9 +230,11 @@ def run(self): num_processed_images += self.args.batch_size num_remaining_images -= self.args.batch_size + start_time = time.time() # Compute inference on the preprocessed data predictions = infer_sess.run(output_tensor, {input_tensor: np_images}) + elapsed_time = time.time() - start_time # Write out the file name, expected label, and top prediction self.write_results_output(predictions, tf_filenames, np_labels) @@ -251,6 +253,7 @@ def run(self): total_accuracy1 += np_accuracy1 total_accuracy5 += np_accuracy5 + print("Iteration time: %0.4f ms" % elapsed_time) print("Processed %d images. 
(Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \
           % (num_processed_images, total_accuracy1 / num_processed_images,
              total_accuracy5 / num_processed_images))

From a2b26ee4dbb8df799ec1e90e91543a91219271d3 Mon Sep 17 00:00:00 2001
From: Melanie Buehler
Date: Thu, 30 May 2019 10:41:43 -0700
Subject: [PATCH 42/62] Adds TF Serving Transformer-LT Tutorial (#302)

* Initial draft of tutorial and model export script
* Added benchmarking script
* Improved settings, benchmarking, and other sections
* Added PYTHONPATH update for needed utils
* Updated for reviewer suggestions
* Added * to bold heading
* Replaced '&' with '-d' for detached mode
---
 docs/README.md                           |   1 +
 .../tensorflow_serving/Tutorial.md       | 211 ++++++++++++++++++
 .../transformer_benchmark.py             | 181 +++++++++++++++
 .../transformer_graph_to_saved_model.py  |  87 ++++++++
 4 files changed, 480 insertions(+)
 create mode 100644 docs/language_translation/tensorflow_serving/Tutorial.md
 create mode 100644 docs/language_translation/tensorflow_serving/transformer_benchmark.py
 create mode 100644 docs/language_translation/tensorflow_serving/transformer_graph_to_saved_model.py

diff --git a/docs/README.md b/docs/README.md
index 11e99bf97..3fd8677db 100644
--- a/docs/README.md
+++ b/docs/README.md
@@ -18,6 +18,7 @@
 * Inference with Intel® Optimization of Tensorflow Serving:
   * [Image Recognition](/docs/image_recognition/tensorflow_serving/Tutorial.md) (ResNet50 and InceptionV3)
   * [Object Detection](/docs/object_detection/tensorflow_serving/Tutorial.md) (R-FCN)
+  * [Language Translation](/docs/language_translation/tensorflow_serving/Tutorial.md) (Transformer-LT)
 * Model Quantization and Optimization
   * [Image Recognition](/docs/image_recognition/quantization/Tutorial.md) (ResNet50)
diff --git a/docs/language_translation/tensorflow_serving/Tutorial.md b/docs/language_translation/tensorflow_serving/Tutorial.md
new file mode 100644
index 000000000..c0a690e3e
--- /dev/null
+++ b/docs/language_translation/tensorflow_serving/Tutorial.md
@@ -0,0 +1,211 @@
+
+# Language Translation with TensorFlow Serving on CPU using Transformer-LT
+
+## Goal
+
+This tutorial will introduce you to the CPU performance considerations for language translation and how to use [Intel® Optimizations for TensorFlow Serving](https://www.tensorflow.org/serving/) to improve inference time on CPUs.
+This tutorial uses a pre-trained [Transformer-LT](https://arxiv.org/pdf/1706.03762.pdf) model for translating English to German and a sample of English news excerpts.
+We provide sample code that you can use to get your optimized TensorFlow model server and GRPC client up and running quickly.
+In this tutorial, you will use Transformer-LT to measure inference performance in two situations:
+* **Online inference**, where batch_size=1. Here, lower latency per request means better runtime performance.
+* **Batch inference**, where batch_size>1. Here, higher throughput means better runtime performance.
+
+**NOTE about GRPC vs. REST**: It [has been suggested](https://medium.com/@avidaneran/tensorflow-serving-rest-vs-grpc-e8cef9d4ff62) that GRPC has faster client-side serialization and de-serialization than REST, especially if you are optimizing for batch inference.
+Please note, however, that this tutorial is focused on optimizing the model server, not the client that sends requests.
+We use GRPC in this tutorial for illustration, not as a best practice, and offer another [tutorial](/docs/object_detection/tensorflow_serving/Tutorial.md) that illustrates the use of the REST API with TensorFlow Serving, if you are interested in that protocol.
+
+## Prerequisites
+
+This tutorial assumes you have already:
+* [Installed TensorFlow Serving](/docs/general/tensorflow_serving/InstallationGuide.md)
+* Read and understood the [General Best Practices](/docs/general/tensorflow_serving/GeneralBestPractices.md),
+  especially these sections:
+   * [Performance Metrics](/docs/general/tensorflow_serving/GeneralBestPractices.md#performance-metrics)
+   * [TensorFlow Serving Configuration Settings](/docs/general/tensorflow_serving/GeneralBestPractices.md#tensorflow-serving-configuration-settings)
+* Ran an example end-to-end using a GRPC client, such as the [one in the Installation Guide](/docs/general/tensorflow_serving/InstallationGuide.md#option-2-query-using-grpc)
+
+## Background
+
+The Transformer-LT model is a popular solution for language translation.
+It is based on an encoder-decoder architecture with an added attention mechanism.
+The encoder is used to encode the original sentence into a meaningful fixed-length vector, and the decoder is responsible for extracting the context data from the vector.
+The encoder and decoder process the inputs and outputs, which are in the form of a time sequence.
+
+In a traditional encoder/decoder model, each element in the context vector is treated equally, but this is typically not the ideal solution.
+For instance, when you translate the phrase "I travel by train" from English into Chinese, the word "I" has a greater influence than other words when producing its counterpart in Chinese.
+Thus, the attention mechanism was introduced to differentiate contributions of each element in the source sequence to their counterpart in the destination sequence, through the use of a hidden matrix.
+This matrix contains weights of each element in the source sequence when producing elements in the destination sequence.
+
+[Intel® Math Kernel Library for Deep Neural Networks (Intel® MKL-DNN)](https://github.com/intel/mkl-dnn) offers significant performance improvements for many neural network operations.
+Tuning TensorFlow Serving to take full advantage of your hardware for language translation inference involves:
+1. Running a TensorFlow Serving docker container configured for performance given your hardware resources
+2. Running a GRPC client to verify prediction accuracy and measure online and batch inference performance
+3. Experimenting with the TensorFlow Serving settings on your own to further optimize for your model and use case
+
+## Hands-on Tutorial with pre-trained Transformer-LT (Official) model
+
+1. **Clone this repository**: Clone the [intelai/models](https://github.com/intelai/models) repository into your home directory.
+
+   ```
+   cd ~
+   git clone https://github.com/IntelAI/models.git
+   ```
+
+2. **Clone the tensorflow/models repository**: Tokenization of the input data requires utility functions in a specific commit of the tensorflow/models repository.
+
+   ```
+   cd ~
+   mkdir tensorflow-models
+   cd tensorflow-models
+   git clone https://github.com/tensorflow/models.git
+   cd models
+   git checkout 8367cf6dabe11adf7628541706b660821f397dce
+   ```
+
+   Now add the required directory to the `PYTHONPATH` variable:
+
+   ```
+   export PYTHONPATH=$PYTHONPATH:$(pwd)/official/transformer
+   ```
+
+3.
**Set up the client environment**: We need to create a virtual environment for this tutorial.
+
+   - We will use a virtual environment to install the required packages. If you do not have pip or virtualenv, you will need to get them first:
+
+     ```
+     sudo apt-get install -y python python-pip virtualenv
+     ```
+
+   - Create and activate the python virtual environment in your home directory and install the `grpcio`, `intel-tensorflow`, `pandas`, and `tensorflow-serving-api` packages.
+
+     ```
+     cd ~
+     virtualenv lt_venv
+     source lt_venv/bin/activate
+     pip install grpcio intel-tensorflow pandas tensorflow-serving-api
+     ```
+
+4. **Download the pre-trained model and test data**: Download and extract the packaged pre-trained model and dataset ```transformer_lt_official_fp32_pretrained_model.tar.gz```
+   (refer to the [model README](/benchmarks/language_translation/tensorflow/transformer_lt_official) to get the latest location of this archive).
+
+   ```
+   wget https://storage.googleapis.com/intel-optimized-tensorflow/models/transformer_lt_official_fp32_pretrained_model.tar.gz
+   tar -xzvf transformer_lt_official_fp32_pretrained_model.tar.gz
+   ```
+
+   After extraction, you should see the following folders and files in the `transformer_lt_official_fp32_pretrained_model` directory:
+
+   ```
+   $ ls -l transformer_lt_official_fp32_pretrained_model/*
+
+   transformer_lt_official_fp32_pretrained_model/data:
+   total 1064
+   -rw-r--r--. 1 359898 Feb 20 16:05 newstest2014.en
+   -rw-r--r--. 1 399406 Feb 20 16:05 newstest2014.de
+   -rw-r--r--. 1 324025 Mar 15 17:31 vocab.txt
+
+   transformer_lt_official_fp32_pretrained_model/graph:
+   total 241540
+   -rwx------. 1 247333269 Mar 15 17:29 fp32_graphdef.pb
+   ```
+
+   - `newstest2014.en`: Input file with English text
+   - `newstest2014.de`: German translation of the input file for measuring accuracy
+   - `vocab.txt`: Dictionary of vocabulary
+   - `fp32_graphdef.pb`: Pre-trained model
+
+5. **Create a SavedModel**: Using the conversion script `transformer_graph_to_saved_model.py`, convert the pre-trained model graph to a SavedModel.
+
+   ```
+   cd ~/models/docs/language_translation/tensorflow_serving
+   python transformer_graph_to_saved_model.py --import_path ~/transformer_lt_official_fp32_pretrained_model/graph/fp32_graphdef.pb
+   ```
+
+   This will create a `/tmp/1/` directory with a `saved_model.pb` file in it. This is the file we will serve from TensorFlow Serving.
+   The [`transformer_graph_to_saved_model.py`](transformer_graph_to_saved_model.py) script attaches a signature definition to the model in order to make it compatible with TensorFlow Serving (a simplified sketch of this conversion pattern appears after the Conclusion below).
+   You can take a look at the script, its flags/options, and these resources for more information:
+   * [SavedModel](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/python/saved_model)
+   * [SignatureDefs](https://www.tensorflow.org/serving/signature_defs)
+
+6. **Discover the number of physical cores**: Compute *num_physical_cores* by executing the `lscpu` command and multiplying `Core(s) per socket` by `Socket(s)`.
+   For example, for a machine with `Core(s) per socket: 28` and `Socket(s): 2`, `num_physical_cores = 28 * 2 = 56`.
+   To compute *num_physical_cores* with bash commands:
+   ```
+   cores_per_socket=`lscpu | grep "Core(s) per socket" | cut -d':' -f2 | xargs`
+   num_sockets=`lscpu | grep "Socket(s)" | cut -d':' -f2 | xargs`
+   num_physical_cores=$((cores_per_socket * num_sockets))
+   echo $num_physical_cores
+   ```
+
+7.
**Recommended Settings**: To optimize overall performance, start with the following settings from the [General Best Practices](/docs/general/tensorflow_serving/GeneralBestPractices.md).
   Tuning these settings can improve performance even further, so you should experiment with your own hardware and model if you have strict performance requirements.

   | Options  | Recommendations|
   | ------------- | ------------- |
   |TENSORFLOW_INTER_OP_PARALLELISM | 2 |
   |TENSORFLOW_INTRA_OP_PARALLELISM| Number of physical cores |
   |OMP_NUM_THREADS |Number of physical cores|
   | Batch Size | 64 |

8. **Start the server**: We can now start up the TensorFlow model server. Using `-d` (for "detached") runs the container as a background process.

   ```
   cd ~
   docker run \
        --name=tfserving \
        -d \
        -p 8500:8500 \
        -v "/tmp:/models/transformer" \
        -e MODEL_NAME=transformer \
        -e OMP_NUM_THREADS=$num_physical_cores \
        -e TENSORFLOW_INTER_OP_PARALLELISM=2 \
        -e TENSORFLOW_INTRA_OP_PARALLELISM=$num_physical_cores \
        tensorflow/serving:mkl
   ```

   You can make sure the container is running using the `docker ps` command.

9. **Online and batch performance**: Run the [`transformer_benchmark.py`](/docs/language_translation/tensorflow_serving/transformer_benchmark.py) script, which can measure both online and batch inference performance.

   If you are not already there, go to the tutorial directory:
   ```
   cd ~/models/docs/language_translation/tensorflow_serving
   ```

   **Online Inference** (batch_size=1):
   ```
   python transformer_benchmark.py \
       -d ~/transformer_lt_official_fp32_pretrained_model/data/newstest2014.en \
       -v ~/transformer_lt_official_fp32_pretrained_model/data/vocab.txt \
       -b 1
   ```

   **Batch Inference** (batch_size=64):
   ```
   python transformer_benchmark.py \
       -d ~/transformer_lt_official_fp32_pretrained_model/data/newstest2014.en \
       -v ~/transformer_lt_official_fp32_pretrained_model/data/vocab.txt \
       -b 64
   ```

   Note: If you want an output file of translated sentences, set the `-o` flag to an output file name of your choice.
   If this option is set, the script will take significantly longer to finish.

10. **Clean up**:
    * After you are finished sending requests to the server, you can stop the container running in the background. Before you can restart a container with the same name, you must stop and remove the old one. To view your running containers, run `docker ps`.

      ```
      docker rm -f tfserving
      ```

    * Deactivate your virtual environment with `deactivate`.


## Conclusion
You have now seen an end-to-end example of serving a language translation model for inference using TensorFlow Serving, and learned:
1. How to create a SavedModel from a Transformer-LT TensorFlow model graph
2. How to choose good values for the performance-related runtime parameters exposed by the `docker run` command
3. How to test online and batch inference metrics using a GRPC client

With this knowledge and the example code provided, you should be able to get started serving your own custom language translation model with good performance.
If desired, you should also be able to investigate a variety of different settings combinations to see if further performance improvements are possible.
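
Before reading the full client script in the diff below, it may help to see the request path it is built around. The following is a minimal sketch, assuming the `transformer` model name and `serving_default` signature configured in the tutorial above; the token ids are placeholder values rather than real subtoken encodings:

```
import grpc
import numpy as np
import tensorflow as tf
from tensorflow_serving.apis import predict_pb2, prediction_service_pb2_grpc

# Connect to the model server started in step 8
channel = grpc.insecure_channel('localhost:8500')
stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)

request = predict_pb2.PredictRequest()
request.model_spec.name = 'transformer'
request.model_spec.signature_name = 'serving_default'

# One encoded sentence per row, zero-padded to a common length
# (placeholder ids; the real client encodes text with the Subtokenizer)
token_ids = np.array([[57, 8, 23, 1, 0, 0]], dtype=np.int64)
request.inputs['input'].CopyFrom(tf.contrib.util.make_tensor_proto(token_ids))

# The translated token ids come back under the 'output' key
result = stub.Predict(request)
```

The full script adds input sorting, batching, warm-up handling, timing, and decoding on top of this single call.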
diff --git a/docs/language_translation/tensorflow_serving/transformer_benchmark.py b/docs/language_translation/tensorflow_serving/transformer_benchmark.py
new file mode 100644
index 000000000..a5cf43654
--- /dev/null
+++ b/docs/language_translation/tensorflow_serving/transformer_benchmark.py
@@ -0,0 +1,181 @@
+# Copyright (c) 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+
+from __future__ import print_function
+
+import os
+import sys
+import time
+import argparse
+import grpc
+import numpy as np
+import pandas as pd
+import tensorflow as tf
+
+from tensorflow_serving.apis import predict_pb2
+from tensorflow_serving.apis import prediction_service_pb2_grpc
+
+from utils import tokenizer
+from utils.tokenizer import Subtokenizer
+
+def check_for_link(value):
+    """
+    Throws an error if the specified path is a link. os.islink returns
+    True for sym links. For files, we also look at the number of links in
+    os.stat() to determine if it's a hard link.
+    """
+    if os.path.islink(value) or \
+            (os.path.isfile(value) and os.stat(value).st_nlink > 1):
+        raise argparse.ArgumentTypeError("{} cannot be a link.".format(value))
+
+def check_valid_file_or_folder(value):
+    """Verifies that the path exists, is a file or folder, and isn't a link."""
+    if value is not None:
+        if not os.path.isfile(value) and not os.path.isdir(value):
+            raise argparse.ArgumentTypeError("{} does not exist or is not a file/folder.".
+                                             format(value))
+        check_for_link(value)
+    return value
+
+def input_generator_ts(file_path, vocab_file):
+    """Read lines from the file and sort them by decreasing token count.
+
+    Args:
+        file_path: String path of file to read
+        vocab_file: String path of vocab file
+    Returns:
+        List of encoded inputs sorted by decreasing token count, and a list
+        mapping original index->sorted index of each element.
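+
+    Example (illustrative, using the tutorial's data files):
+        batch, sorted_keys = input_generator_ts('newstest2014.en', 'vocab.txt')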
+    """
+    with tf.gfile.Open(file_path) as f:
+        records = f.read().split("\n")
+        inputs = [record.strip() for record in records]
+        if not inputs[-1]:
+            inputs.pop()
+
+    subtokenizer = Subtokenizer(vocab_file)
+
+    batch = []
+    token_lens = []
+    for i, line in enumerate(inputs):
+        enc = subtokenizer.encode(line, add_eos=True)
+        token_lens.append((i, len(enc)))
+
+    sorted_by_token_input_lens = sorted(token_lens, key=lambda x: x[1], reverse=True)
+    sorted_inputs = [None] * len(sorted_by_token_input_lens)
+    sorted_keys = [0] * len(sorted_by_token_input_lens)
+
+    for i, (index, _) in enumerate(sorted_by_token_input_lens):
+        sorted_inputs[i] = inputs[index]
+        sorted_keys[index] = i
+        enc = subtokenizer.encode(sorted_inputs[i], add_eos=True)
+        batch.append(enc)
+
+    return batch, sorted_keys
+
+def _trim_and_decode(ids, vocab_file):
+    """Trim EOS and PAD tokens from ids, and decode to return a string."""
+    subtokenizer = Subtokenizer(vocab_file)
+    try:
+        index = list(ids).index(tokenizer.EOS_ID)
+        return subtokenizer.decode(ids[:index])
+    except ValueError:  # No EOS found in sequence
+        return subtokenizer.decode(ids)
+
+def benchmark(batch_size=1, num_iteration=20, warm_up_iteration=10):
+    channel = grpc.insecure_channel(SERVER_URL)
+    stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
+    request = predict_pb2.PredictRequest()
+    request.model_spec.name = 'transformer'
+    request.model_spec.signature_name = 'serving_default'
+
+    batches, sorted_keys = input_generator_ts(DATA_FILE, VOCAB_FILE)
+
+    translations = []
+    batch = []
+    inference_time = 0.0
+    sentences_to_translate = min(batch_size * num_iteration, len(batches))
+    sentences_after_warmup = 0
+
+    for i, line in enumerate(batches[0:sentences_to_translate]):
+        batch.append(line)
+        if (i + 1) % batch_size == 0 or i == sentences_to_translate - 1:
+            batch_num = (i // batch_size) + 1
+            request.inputs['input'].CopyFrom(
+                tf.contrib.util.make_tensor_proto(pd.DataFrame(batch).fillna(0).values.astype(np.int64)))
+            start_time = time.time()
+            result = stub.Predict(request)
+            duration = time.time() - start_time
+            shape = [int(dim.size) for dim in result.outputs['output'].tensor_shape.dim]
+            translations += np.reshape(result.outputs['output'].int_val, shape).tolist()
+            print('Iteration %d: %.3f sec' % (batch_num, duration))
+            if batch_num > warm_up_iteration:
+                inference_time += duration
+                sentences_after_warmup += len(batch)
+            batch = []
+
+    average_time = inference_time / sentences_after_warmup
+    print('Inference time: %s sec' % (inference_time))
+    print('Batch size = %d' % batch_size)
+    if batch_size == 1:
+        print('Latency: %.3f ms' % (average_time * 1000))
+    print('Throughput: %.3f sentences/sec' % (sentences_after_warmup / inference_time))
+
+    if OUT_FILE:
+        print('Decoding and saving translations to {}...'.format(OUT_FILE))
+        decoded_translations = []
+        for i, tr in enumerate(translations):
+            decoded_translations.append(_trim_and_decode(tr, VOCAB_FILE))
+
+        with tf.gfile.Open(OUT_FILE, "w") as f:
+            for i in sorted_keys:
+                if i < len(decoded_translations):
+                    f.write("%s\n" % decoded_translations[i])
+        print('Done!')
+
+if __name__ == '__main__':
+    ap = argparse.ArgumentParser()
+    ap.add_argument("-d", "--data_file", type=check_valid_file_or_folder, required=True,
+                    help="Path to English language input file")
+    ap.add_argument("-v", "--vocab_file", type=check_valid_file_or_folder, required=True,
+                    help="Path to vocabulary file")
+    ap.add_argument("-o", "--out_file", type=str, required=False, default='',
+                    help="Path to output file (optional)")
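+    # Iteration flags below: with the defaults, 20 batches are sent and the
+    # first 10 are treated as warm-up and excluded from the timing statistics
+    # (see the batch_num > warm_up_iteration check in benchmark() above).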
+    ap.add_argument("-b", "--batch_size", required=False, type=int, default=1,
+                    help="Batch size to use")
+    ap.add_argument("-n", "--num_iteration", required=False, type=int, default=20,
+                    help="Number of benchmark iterations to run")
+    ap.add_argument("-w", "--warm_up_iteration", required=False, type=int, default=10,
+                    help="Number of initial iterations to ignore in benchmarking")
+
+    args = vars(ap.parse_args())
+
+    SERVER_URL = 'localhost:8500'
+    DATA_FILE = args['data_file']
+    VOCAB_FILE = args['vocab_file']
+    OUT_FILE = args['out_file']
+    BATCH_SIZE = args['batch_size']
+    NUM_ITERATION = args['num_iteration']
+    WARM_UP_ITERATION = args['warm_up_iteration']
+
+    tf.logging.set_verbosity(tf.logging.WARN)
+
+    print('\n SERVER_URL: {} \n DATA_FILE: {}'.format(SERVER_URL, DATA_FILE))
+
+    print('\nStarting Transformer-LT (Official) model benchmarking with batch_size={}, num_iteration={}, warm_up_iteration={}'.format(BATCH_SIZE, NUM_ITERATION, WARM_UP_ITERATION))
+    benchmark(batch_size=BATCH_SIZE, num_iteration=NUM_ITERATION, warm_up_iteration=WARM_UP_ITERATION)
+
diff --git a/docs/language_translation/tensorflow_serving/transformer_graph_to_saved_model.py b/docs/language_translation/tensorflow_serving/transformer_graph_to_saved_model.py
new file mode 100644
index 000000000..c5cc250ce
--- /dev/null
+++ b/docs/language_translation/tensorflow_serving/transformer_graph_to_saved_model.py
@@ -0,0 +1,87 @@
+#
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: EPL-2.0
+#
+
+"""Import a Transformer-LT model graph and export a SavedModel.
+
+Usage: transformer_graph_to_saved_model.py --import_path <graph.pb> [--export_dir <dir>] [--model_version <n>]
+"""
+
+from __future__ import print_function
+
+import sys
+import tensorflow as tf
+
+tf.app.flags.DEFINE_integer('model_version', 1, 'Version number of the model.')
+tf.app.flags.DEFINE_string('import_path', '', 'Model import path.')
+tf.app.flags.DEFINE_string('export_dir', '/tmp', 'Export directory.')
+FLAGS = tf.app.flags.FLAGS
+
+
+def main(_):
+    if len(sys.argv) < 2 or sys.argv[-1].startswith('-'):
+        print('Usage: transformer_graph_to_saved_model.py --import_path <graph.pb> [--export_dir <dir>] [--model_version <n>]')
+        sys.exit(-1)
+    if FLAGS.import_path == '':
+        print('Please specify the path to the model graph you want to convert to SavedModel format.')
+        sys.exit(-1)
+    if FLAGS.model_version <= 0:
+        print('Please specify a positive value for version number.')
+        sys.exit(-1)
+
+    # Import model graph
+    with tf.Session() as sess:
+        graph_def = tf.GraphDef()
+        with tf.gfile.GFile(FLAGS.import_path, 'rb') as input_file:
+            input_graph_content = input_file.read()
+            graph_def.ParseFromString(input_graph_content)
+
+        sess.graph.as_default()
+        tf.import_graph_def(graph_def, name='')
+        sess.run(tf.global_variables_initializer())
+
+        # Build the signature_def_map.
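+        # The frozen Transformer-LT graph feeds encoded token ids through the
+        # 'input_tensor' placeholder and emits decoded token ids from the final
+        # strided slice op; wrapping both in TensorInfo protos tells TensorFlow
+        # Serving which tensors a Predict request's inputs and outputs map to.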
+ in_data = sess.graph.get_tensor_by_name('input_tensor:0') + inputs = {'input': tf.saved_model.utils.build_tensor_info(in_data)} + + out_data = sess.graph.get_tensor_by_name('model/Transformer/strided_slice_19:0') + outputs = {'output': tf.saved_model.utils.build_tensor_info(out_data)} + + signature = tf.saved_model.signature_def_utils.build_signature_def( + inputs=inputs, + outputs=outputs, + method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME + ) + + # Save out the SavedModel + print('Exporting trained model to', FLAGS.export_dir + '/' + str(FLAGS.model_version)) + builder = tf.saved_model.builder.SavedModelBuilder(FLAGS.export_dir + '/' + str(FLAGS.model_version)) + builder.add_meta_graph_and_variables( + sess, [tf.saved_model.tag_constants.SERVING], + signature_def_map={ + tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature + } + ) + builder.save() + + print('Done!') + + +if __name__ == '__main__': + tf.app.run() From e4a7f4ff31e1d8aebe1e9f92c74246db0bad26d8 Mon Sep 17 00:00:00 2001 From: Melanie Buehler Date: Mon, 3 Jun 2019 09:21:51 -0700 Subject: [PATCH 43/62] Update verbiage in new READMEs, precisions, tutorials, etc. (#324) --- Contribute.md | 12 +++---- README.md | 3 +- benchmarks/README.md | 4 +-- .../tensorflow/densenet169/README.md | 13 ++++--- .../tensorflow/mobilenet_v1/README.md | 19 +++++------ .../tensorflow/resnet50v1_5/README.md | 16 ++++----- .../tensorflow_serving/inceptionv3/README.md | 13 ++++--- .../tensorflow/lm-1b/README.md | 17 +++++----- .../tensorflow/faster_rcnn/README.md | 11 +++--- .../tensorflow/ssd-resnet34/README.md | 9 +++-- .../tensorflow/ssd_vgg16/README.md | 34 +++++++++---------- .../tensorflow/wavenet/README.md | 2 +- .../tensorflow/Tutorial.md | 6 ++-- .../tensorflow_serving/Tutorial.md | 2 +- 14 files changed, 77 insertions(+), 84 deletions(-) diff --git a/Contribute.md b/Contribute.md index 73c58e8af..4a869c931 100644 --- a/Contribute.md +++ b/Contribute.md @@ -1,6 +1,6 @@ # Contributing to the Model Zoo for IntelĀ® Architecture -## Adding benchmarking scripts for a new TensorFlow model +## Adding scripts for a new TensorFlow model ### Code updates @@ -14,7 +14,7 @@ required: Note that you will need to add `__init__.py` files in each new directory that you add, in order for python to find the code. - ![Benchmarks Directory Structure](benchmarks_directory_structure.png) + ![Directory Structure](benchmarks_directory_structure.png) 2. Next, in the leaf folder that was created in the previous step, you will need to create `config.json` and `model_init.py` files: @@ -149,16 +149,16 @@ developing new scripts: This README file should describe all of the steps necessary to run the model, including downloading and preprocessing the dataset, downloading the pretrained model, cloning repositories, and running - the benchmarking script with the appropriate arguments. Most models - have best known settings for throughput and latency performance + the model script with the appropriate arguments. Most models + have best known settings for batch and online inference performance testing as well as testing accuracy. The README file should specify how to set these configs using the `launch_benchmark.py` script. -2. Update the table in the [benchmarks README](/benchmarks/README.md) +2. Update the table in the [main `benchmarks` README](/benchmarks/README.md) with a link to the model that you are adding. Note that the models in this table are ordered alphabetically by use case, framework, and model name. 
The model name should link to the original paper for the - model. The benchmarking instructions column should link to the README + model. The instructions column should link to the README file that you created in the previous step. ### Testing diff --git a/README.md b/README.md index 5e3c45394..eb326584b 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,8 @@ This repository contains **links to pre-trained models, sample scripts, best pra - Show how to efficiently execute, train, and deploy Intel-optimized models - Make it easy to get started running Intel-optimized models on Intel hardware in the cloud or on bare metal -***DISCLAIMER: These scripts are not intended for benchmarking Intel platforms. For any performance and/or benchmarking information on specific Intel platforms, visit [https://www.intel.ai/blog](https://www.intel.ai/blog).*** +***DISCLAIMER: These scripts are not intended for benchmarking Intel platforms. +For any performance and/or benchmarking information on specific Intel platforms, visit [https://www.intel.ai/blog](https://www.intel.ai/blog).*** ## How to Use the Model Zoo diff --git a/benchmarks/README.md b/benchmarks/README.md index 3c5675fbd..a1bac907b 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -34,7 +34,7 @@ dependencies to be installed: | Language Translation | TensorFlow | [GNMT](https://arxiv.org/pdf/1609.08144.pdf) | Inference | [FP32](language_translation/tensorflow/gnmt/README.md#fp32-inference-instructions) | | Language Translation | TensorFlow | [Transformer Language](https://arxiv.org/pdf/1706.03762.pdf)| Inference | [FP32](language_translation/tensorflow/transformer_language/README.md#fp32-inference-instructions) | | Language Translation | TensorFlow | [Transformer_LT_Official ](https://arxiv.org/pdf/1706.03762.pdf)| Inference | [FP32](language_translation/tensorflow/transformer_lt_official/README.md#fp32-inference-instructions) | -| Object Detection | TensorFlow | [R-FCN](https://arxiv.org/pdf/1605.06409.pdf) | Inference | [FP32](object_detection/tensorflow/rfcn/README.md#fp32-inference-instructions) | +| Object Detection | TensorFlow | [R-FCN](https://arxiv.org/pdf/1605.06409.pdf) | Inference | [Int8](object_detection/tensorflow/rfcn/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/rfcn/README.md#fp32-inference-instructions) | | Object Detection | TensorFlow | [Faster R-CNN](https://arxiv.org/pdf/1506.01497.pdf) | Inference | [Int8](object_detection/tensorflow/faster_rcnn/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/faster_rcnn/README.md#fp32-inference-instructions) | | Object Detection | TensorFlow | [SSD-MobileNet](https://arxiv.org/pdf/1704.04861.pdf) | Inference | [Int8](object_detection/tensorflow/ssd-mobilenet/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/ssd-mobilenet/README.md#fp32-inference-instructions) | | Object Detection | TensorFlow | [SSD-ResNet34](https://arxiv.org/pdf/1512.02325.pdf) | Inference | [Int8](object_detection/tensorflow/ssd-resnet34/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/ssd-resnet34/README.md#fp32-inference-instructions) | @@ -48,7 +48,7 @@ dependencies to be installed: ## TensorFlow Serving Use Cases -| Use Case | Framework | Model | Mode | Benchmarking Instructions | +| Use Case | Framework | Model | Mode | Instructions | | -----------------------| --------------| ------------------- | --------- |------------------------------| | Image Recognition | TensorFlow Serving | [Inception 
V3](https://arxiv.org/pdf/1512.00567.pdf) | Inference | [FP32](image_recognition/tensorflow_serving/inceptionv3/README.md#fp32-inference-instructions) | diff --git a/benchmarks/image_recognition/tensorflow/densenet169/README.md b/benchmarks/image_recognition/tensorflow/densenet169/README.md index f38be702a..b1ecd8832 100644 --- a/benchmarks/image_recognition/tensorflow/densenet169/README.md +++ b/benchmarks/image_recognition/tensorflow/densenet169/README.md @@ -8,7 +8,7 @@ following modes/precisions: 1. Download ImageNet dataset. - This step is required only for running accuracy, for running benchmark we do not need to provide dataset. + This step is required only for running accuracy, for running the model for performance we do not need to provide dataset. Register and download the ImageNet dataset. Once you have the raw ImageNet dataset downloaded, we need to convert it to the TFRecord format. The TensorFlow models repo provides @@ -37,10 +37,9 @@ following modes/precisions: ``` 3. Clone the [intelai/models](https://github.com/intelai/models) repo - and then run the benchmarking scripts for either benchmarking throughput, - latency or accuracy. For --dataset-location in accuracy run, please use the ImageNet validation data path from step 1. - Each benchmark run has user configurable arguments separated from regular arguments by '--' at the end of the command. - Unless configured, these arguments will run with default values. Below are the example codes for each benchmark case: + and then run the model scripts for either online or batch inference or accuracy. For --dataset-location in accuracy run, please use the ImageNet validation data path from step 1. + Each model run has user configurable arguments separated from regular arguments by '--' at the end of the command. + Unless configured, these arguments will run with default values. Below are the example codes for each use case: ``` $ git clone https://github.com/IntelAI/models.git @@ -105,7 +104,7 @@ following modes/precisions: or the directory specified by the `--output-dir` arg. Below are examples of what the tail of your log file should look like for the different configs. - Example log tail when benchmarking for throughput: + Example log tail when running for batch inference: ``` steps = 80, 159.83471377 images/sec Latency: 625.646317005 ms @@ -117,7 +116,7 @@ following modes/precisions: Log location outside container: {--output-dir value}/benchmark_densenet169_inference_fp32_20190412_023940.log ``` - Example log tail when benchmarking for latency: + Example log tail when running for online inference: ``` steps = 80, 34.9948442873 images/sec Latency: 28.5756379366 ms diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md index e0af6b190..694a3f575 100644 --- a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md +++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md @@ -46,10 +46,9 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/mobilene ``` 3. Clone the [intelai/models](https://github.com/intelai/models) repo - and then run the benchmarking scripts for either benchmarking throughput, - latency or accuracy. For --dataset-location in accuracy run, please use the ImageNet validation data path from step 1. - Each benchmark run has user configurable arguments separated from regular arguments by '--' at the end of the command. - Unless configured, these arguments will run with default values. 
Below are the example codes for each benchmark case: + and then run the model scripts for either online or batch inference or accuracy. For --dataset-location in accuracy run, please use the ImageNet validation data path from step 1. + Each model run has user configurable arguments separated from regular arguments by '--' at the end of the command. + Unless configured, these arguments will run with default values. Below are the example codes for each use case: ``` $ git clone https://github.com/IntelAI/models.git @@ -57,7 +56,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/mobilene $ cd benchmarks ``` - For throughput (using `--benchmark-only`, `--socket-id 0` and `--batch-size 240`): + For batch inference (using `--benchmark-only`, `--socket-id 0` and `--batch-size 240`): ``` python launch_benchmark.py \ --model-name mobilenet_v1 \ @@ -73,7 +72,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/mobilene input_layer="input" output_layer="MobilenetV1/Predictions/Reshape_1" ``` - For latency (using `--benchmark-only`, `--socket-id 0` and `--batch-size 1`) + For online inference (using `--benchmark-only`, `--socket-id 0` and `--batch-size 1`) ``` python launch_benchmark.py \ --model-name mobilenet_v1 \ @@ -114,7 +113,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/mobilene or the directory specified by the `--output-dir` arg. Below are examples of what the tail of your log file should look like for the different configs. - Example log tail when benchmarking for throughput: + Example log tail when running for batch inference: ``` [Running warmup steps...] steps = 10, 1865.30956528 images/sec @@ -128,7 +127,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/mobilene Log location outside container: {--output-dir value}/benchmark_mobilenet_v1_inference_int8_20190523_164626.log ``` - Example log tail when benchmarking for latency: + Example log tail when running for online inference: ``` [Running warmup steps...] steps = 10, 197.082229114 images/sec @@ -157,8 +156,8 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/mobilene ## FP32 Inference Instructions 1. The ImageNet dataset is required for testing accuracy and can also be - used when running benchmarking. If no datset is provided when running - benchmarking, synthetic data will be used. + used when running online or batch inference. If no dataset is provided when running + online or batch inference, synthetic data will be used. Download the ImageNet dataset and convert it to the TF records format using the instructions diff --git a/benchmarks/image_recognition/tensorflow/resnet50v1_5/README.md b/benchmarks/image_recognition/tensorflow/resnet50v1_5/README.md index 610eb7cc0..2a13913d9 100644 --- a/benchmarks/image_recognition/tensorflow/resnet50v1_5/README.md +++ b/benchmarks/image_recognition/tensorflow/resnet50v1_5/README.md @@ -6,7 +6,7 @@ following precisions: * [FP32 inference](#fp32-inference-instructions) Original ResNet model has multiple versions which have shown better accuracy -and/or throughput performance. As mentioned in TensorFlow's [official ResNet +and/or batch inference performance. As mentioned in TensorFlow's [official ResNet model page](https://github.com/tensorflow/models/tree/master/official/resnet), 3 different versions of the original ResNet model exists - ResNet50v1, ResNet50v1.5, and ResNet50v2. 
As a side note, ResNet50v1.5 is also in MLPerf's [cloud inference benchmark for @@ -82,7 +82,7 @@ Log location outside container: {--output-dir value}/benchmark_resnet50_inferenc * Evaluate the model performance: If just evaluate performance for dummy data, the `--data-location` is not needed. Otherwise `--data-location` argument needs to be specified: -Calculate the model throughput `images/sec`, the required parameters to run the inference script would include: +Calculate the batch inference performance `images/sec`, the required parameters to run the inference script would include: the pre-trained `resnet50v1_5_int8_pretrained_model.pb` input graph file (from step 2), and the `--benchmark-only` flag. It is optional to specify the number of `warmup_steps` and `steps` as extra @@ -134,7 +134,7 @@ $ git clone https://github.com/IntelAI/models.git ``` 3. If running resnet50 for accuracy, the ImageNet dataset will be -required (if running benchmarking for throughput/latency, then dummy +required (if running the model for batch or online inference, then dummy data will be used). The TensorFlow models repo provides @@ -147,7 +147,7 @@ located at `models/models/image_recognition/tensorflow/resnet50v1_5/`. If benchmarking uses dummy data for inference, `--data-location` flag is not required. Otherwise, `--data-location` needs to point to point to ImageNet dataset location. -* To measure the model latency, set `--batch-size=1` and run the benchmark script as shown: +* To measure online inference, set `--batch-size=1` and run the model script as shown: ``` $ cd /home//models/benchmarks @@ -164,7 +164,7 @@ $ python launch_benchmark.py \ The log file is saved to the value of `--output-dir`. -The tail of the log output when the benchmarking completes should look +The tail of the log output when the script completes should look something like this: ``` Inference with dummy data. @@ -185,7 +185,7 @@ Ran inference with batch size 1 Log location outside container: {--output-dir value}/benchmark_resnet50_inference_fp32_{timestamp}.log ``` -* To measure the model Throughput, set `--batch-size=128` and run the benchmark script as shown: +* To measure batch inference, set `--batch-size=128` and run the model script as shown: ``` $ cd /home//models/benchmarks @@ -202,7 +202,7 @@ $ python launch_benchmark.py \ The log file is saved to the value of `--output-dir`. -The tail of the log output when the benchmarking completes should look +The tail of the log output when the script completes should look something like this: ``` Inference with dummy data. @@ -254,7 +254,7 @@ Ran inference with batch size 100 Log location outside container: {--output-dir value}/benchmark_resnet50_inference_fp32_{timestamp}.log ``` -* The `--output-results` flag can be used along with above benchmarking +* The `--output-results` flag can be used along with above performance or accuracy test, in order to also output a file with the inference results (file name, actual label, and the predicted label). The results output can only be used with real data. diff --git a/benchmarks/image_recognition/tensorflow_serving/inceptionv3/README.md b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/README.md index 1ddb7bb14..bef280f1d 100644 --- a/benchmarks/image_recognition/tensorflow_serving/inceptionv3/README.md +++ b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/README.md @@ -21,17 +21,16 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/inceptio 3. 
Navigate to the `benchmarks` directory in your local clone of the [intelai/models](https://github.com/IntelAI/models) repo from step 1. The `launch_benchmark.py` script in the `benchmarks` directory is -used for starting a tensorflow serving benchmarking using optimized TensorFlow Serving docker +used for starting a tensorflow serving run using optimized TensorFlow Serving docker container. It has arguments to specify which model, framework, mode, precision, and input graph. Substitute in your own `--in-graph` pretrained model file path (from step 2). -4. Inception V3 can be run for `latency` benchmarking and `throughput` -benchmarking. Use one of the following examples below, +4. Inception V3 can be run for measuring batch or online inference performance. Use one of the following examples below, depending on your use case. -* For latency with dummy data (using `--batch-size 1`): +* For online inference with dummy data (using `--batch-size 1`): ``` python launch_benchmark.py \ @@ -43,7 +42,7 @@ python launch_benchmark.py \ --batch-size=1 \ --benchmark-only ``` -Example log tail when benchmarking for latency: +Example log tail when running for online inference: ``` Iteration 35: 0.019 sec Iteration 36: 0.020 sec @@ -59,7 +58,7 @@ tfserving_3784 Log output location: {--output-dir value}/benchmark_inceptionv3_inference_fp32_20190516_103531.log ``` -* For throughput with dummy data (using `--batch-size 128`): +* For batch inference with dummy data (using `--batch-size 128`): ``` python launch_benchmark.py \ @@ -71,7 +70,7 @@ python launch_benchmark.py \ --batch-size=128 \ --benchmark-only ``` -Example log tail when benchmarking for throughput: +Example log tail when running for batch inference: ``` Iteration 34: 0.779 sec Iteration 35: 0.916 sec diff --git a/benchmarks/language_modeling/tensorflow/lm-1b/README.md b/benchmarks/language_modeling/tensorflow/lm-1b/README.md index 82b42cac9..ec4bdcf47 100644 --- a/benchmarks/language_modeling/tensorflow/lm-1b/README.md +++ b/benchmarks/language_modeling/tensorflow/lm-1b/README.md @@ -1,10 +1,10 @@ # LM-1B -This document has instructions for how to run LM-1B benchmark for the +This document has instructions for how to run LM-1B for the following modes/platforms: * [FP32 inference](#fp32-inference-instructions) -Benchmarking instructions and scripts for model training and inference for +Instructions and scripts for model training and inference for other platforms are coming later. ## FP32 Inference Instructions @@ -32,19 +32,18 @@ git clone https://github.com/IntelAI/models.git 3. Next, navigate to the `benchmarks` directory in your local clone of the [intelai/models](https://github.com/IntelAI/models) repo (from step 2). The `launch_benchmark.py` script in the `benchmarks` directory is -used for starting a benchmarking run in a optimized TensorFlow docker +used for starting a model run in a optimized TensorFlow docker container. It has arguments to specify which model, framework, mode, precision, and docker image to use, and the checkpoint directory. Substitute the `--model-source-dir` to `/inference/cloud/language_modeling`. -Before benchmarking, ensure that you have run the script to prepare checkpoint files and the dataset +Before running, ensure that you have run the script to prepare checkpoint files and the dataset from Step 1. -LM-1B can run for latency or throughput -benchmarking. Use one of the following examples below, depending on +LM-1B can run for online or batch inference. 
Use one of the following examples below, depending on your use case. -For latency (using `--socket-id 0` and `--batch-size 1`): +For online inference (using `--socket-id 0` and `--batch-size 1`): ``` python launch_benchmark.py \ @@ -59,7 +58,7 @@ python launch_benchmark.py \ ``` -For throughput (using `--socket-id 0` and `--batch-size 1024`): +For batch inference (using `--socket-id 0` and `--batch-size 1024`): ``` python launch_benchmark.py \ @@ -81,7 +80,7 @@ to get additional debug output. `models/benchmarks/common/tensorflow/logs` directory. The user can specify a different directory using `--output-dir`. -Example log tail when benchmarking for latency or throughput: +Example log tail when running for online or batch inference: ``` Running warmup... Running benchmark... diff --git a/benchmarks/object_detection/tensorflow/faster_rcnn/README.md b/benchmarks/object_detection/tensorflow/faster_rcnn/README.md index fe553f411..9528f4808 100644 --- a/benchmarks/object_detection/tensorflow/faster_rcnn/README.md +++ b/benchmarks/object_detection/tensorflow/faster_rcnn/README.md @@ -254,13 +254,10 @@ with the appropriate parameters. To run on single socket use `--socket_id` switc by default it will be using all available sockets. Optional parameter `number_of_steps` (default value = 5000) can be added at the end of command after `--` as shown below: -<<<<<<< HEAD -Run benchmarking for throughput and latency using the following command. -The `--data-location` is the path to the directory that contains the -raw coco dataset validation images which you downloaded and unzipped: -======= -Run for batch and online inference: ->>>>>>> 869ed7aa20949bb5346e10887d92933dff7bc894 +Run batch and online inference using the following command. +The `--data-location` is the path to the directory that contains the raw coco dataset +validation images which you downloaded and unzipped: + ``` $ cd /home//models/benchmarks diff --git a/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md b/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md index 97791788f..3f2623389 100644 --- a/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md +++ b/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md @@ -281,7 +281,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/ssd_resn ``` 6. Clone the [intelai/models](https://github.com/intelai/models) repo. -This repo has the launch script for running benchmarking, which we will +This repo has the launch script for running the model, which we will use in the next step. ``` @@ -290,11 +290,10 @@ $ git clone https://github.com/IntelAI/models.git 7. Next, navigate to the `benchmarks` directory of the [intelai/models](https://github.com/intelai/models) repo that was just -cloned in the previous step. SSD-ResNet34 can be run for benchmarking -throughput and latency, or testing accuracy. Note that we are running +cloned in the previous step. SSD-ResNet34 can be run for testing batch or online inference, or testing accuracy. Note that we are running SSD-ResNet34 with a TensorFlow 1.14 docker image. -To benchmarking for throughput and latency, use the following command, +To run for batch and online inference, use the following command, the path to the frozen graph that you downloaded in step 5 as the `--in-graph`, and use the `--benchmark-only` flag: @@ -337,7 +336,7 @@ $ python launch_benchmark.py \ 8. The log file is saved to the value of `--output-dir`. 
-Below is a sample log file tail when running benchmarking: +Below is a sample log file tail when testing performance: ``` Batchsize: 1 diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md b/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md index 9d2cb7b3c..5f101b835 100644 --- a/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md +++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md @@ -5,7 +5,7 @@ following modes/precisions: * [Int8 inference](#int8-inference-instructions) * [FP32 inference](#fp32-inference-instructions) -Benchmarking instructions and scripts for model training and inference +Instructions and scripts for model training and inference other precisions are coming later. ## Int8 Inference Instructions @@ -23,12 +23,12 @@ $ git checkout 2d8b0cb9b2e70281bf9dce438ff17ffa5e59075c ``` 2. Clone the [intelai/models](https://github.com/intelai/models) repository. -It will be used to run the SSD-VGG16 model accuracy and benchmark tests. +It will be used to run the SSD-VGG16 model accuracy and inference performance tests. 3. Download the 2017 validation images file: [COCO dataset](http://cocodataset.org/#home) and annotations: This is required if you would like to run the accuracy test, -or the throughput and latency benchmark with real data. +or batch/online inference with real data. ``` $ wget http://images.cocodataset.org/zips/val2017.zip @@ -87,16 +87,16 @@ total 792084 $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/ssdvgg16_int8_pretrained_model.pb ``` -6. Navigate to the `benchmarks` directory (step 2), and run the benchmarking scripts for either benchmarking throughput -and latency or accuracy. +6. Navigate to the `benchmarks` directory (step 2), and run the model scripts for either batch or online +inference or accuracy. ``` $ cd models/benchmarks ``` -* Run benchmarking for throughput and latency where the `--model-source-dir` is the model source directory from step 1, -and the `--in-graph` is the pretrained model graph from step 5, -if you specify the `--data-location` which is the path to the tf record file that you generated in step 4, -the benchmark will run with real data, otherwise dummy data will be used: +* Run the model for batch or online inference where the `--model-source-dir` is the model source directory from step 1, +and the `--in-graph` is the pretrained model graph from step 5. +If you specify the `--data-location` which is the path to the tf record file that you generated in step 4, +the model will run with real data, otherwise dummy data will be used: ``` python launch_benchmark.py \ --model-name ssd_vgg16 \ @@ -142,7 +142,7 @@ python launch_benchmark.py \ ``` >Notes: ->* For the throughput and latency benchmark, we recommend the provided values for the arguments: `--num-inter-threads=11`, `--num-intra-threads=21`, `--data-num-inter-threads=21`, +>* For batch and online inference, we recommend the provided values for the arguments: `--num-inter-threads=11`, `--num-intra-threads=21`, `--data-num-inter-threads=21`, `--data-num-intra-threads=28` for optimized performance on `28-cores Cascade Lake (CLX)` machine. >* SSD-VGG16 model accuracy test works only with the `Python3` based docker images. @@ -152,8 +152,8 @@ to get additional debug output or change the default output location. 6. The log file is saved to the value of `--output-dir`. 
-Below is a sample log file tail when running benchmarking for throughput -and latency, the following results are based on CLX 28-cores with hyper-threading enabled: +Below is a sample log file tail when running the model for batch +and online inference, the following results are based on CLX 28-cores with hyper-threading enabled: ``` Batch size = 1 @@ -189,13 +189,13 @@ Use the steps 1, 2,3 and 4 as above. $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/ssdvgg16_fp32_pretrained_model.pb ``` -6. Navigate to the `benchmarks` directory (step 2), and run the benchmarking scripts for either benchmarking throughput -and latency or accuracy. +6. Navigate to the `benchmarks` directory (step 2), and run the model scripts for either batch +and online inference or accuracy. ``` $ cd models/benchmarks ``` -* Run benchmarking for throughput and latency where the `--model-source-dir` is the model source directory from step 1, +* Run the model for batch and online inference where the `--model-source-dir` is the model source directory from step 1, and the `--in-graph` is the pretrained model graph from step 5, if you specify the `--data-location` which is the path to the tf record file that you generated in step 4, the benchmark will run with real data, otherwise dummy data will be used: @@ -246,7 +246,7 @@ python launch_benchmark.py \ ``` >Notes: ->* For the throughput and latency benchmark, we recommend the provided values for the arguments: `--num-inter-threads=11`, `--num-intra-threads=21`, `--data-num-inter-threads=21`, +>* For batch and online inference, we recommend the provided values for the arguments: `--num-inter-threads=11`, `--num-intra-threads=21`, `--data-num-inter-threads=21`, `--data-num-intra-threads=28` for optimized performance on `28-cores Cascade Lake (CLX)` machine. >* SSD-VGG16 model accuracy test works only with the `Python3` based docker images. @@ -256,7 +256,7 @@ to get additional debug output or change the default output location. 6. The log file is saved to the value of `--output-dir`. -Below is a sample log file tail when running throughput and latency benchmarking, +Below is a sample log file tail when running batch and online inference, the following results are based on CLX 28-cores with hyper-threading enabled: ``` diff --git a/benchmarks/text_to_speech/tensorflow/wavenet/README.md b/benchmarks/text_to_speech/tensorflow/wavenet/README.md index 1c88cbae2..512cabd95 100644 --- a/benchmarks/text_to_speech/tensorflow/wavenet/README.md +++ b/benchmarks/text_to_speech/tensorflow/wavenet/README.md @@ -41,7 +41,7 @@ $ pwd 2. Clone this [intelai/models](https://github.com/intelai/models) repo. This repo has the launch script for running the model, as well as checkpoint files for a pre-trained model. After cloning the repo, -navigate to the benchmarks directory, which is where the launch script +navigate to the `benchmarks` directory, which is where the launch script is located. ``` diff --git a/docs/language_translation/tensorflow/Tutorial.md b/docs/language_translation/tensorflow/Tutorial.md index 13f827a50..aee385c63 100644 --- a/docs/language_translation/tensorflow/Tutorial.md +++ b/docs/language_translation/tensorflow/Tutorial.md @@ -158,7 +158,7 @@ Substitute the `--model-source-dir` for the location where you cloned the ``` ~/tensorflow-models/models ``` -3.1. *Real Time inference* (using `--socket-id 0` and `--batch-size 1` for latency) +3.1. 
*Online inference* (using `--socket-id 0` and `--batch-size 1`) If you wish to calculate the [BLEU](https://en.wikipedia.org/wiki/BLEU) metric to find out the machine-translation quality, pass the file as `reference` flag. `newstest2014.en` file must have only one sentence per line @@ -185,7 +185,7 @@ python launch_benchmark.py \ The translated German text will be in the file `translation.txt` located at `~/models/benchmarks/common/tensorflow/logs` -3.2. *Max Throughput inference* (using `--socket-id 0` and `--batch-size 64` for throughput) +3.2. *Batch inference* (using `--socket-id 0` and `--batch-size 64`) ```bash python launch_benchmark.py \ @@ -222,7 +222,7 @@ Log location outside container: /~/models/benchmarks/common/tensorflow/logs/benc The logs are captured in a directory outside of the container.
4. If you want to run the ```launch_benchmark.py``` interactively from within the docker container, add flag ```--debug```. This will launch a docker container based on the ```--docker_image```, -performs necessary installs, runs the ```launch_benchmark.py``` script and does not terminate the container process. As an example, this step will demonstrate real-time inference (--batch-size 1), but you can implement the same strategy for max throughput (--batch-size 64)." +performs necessary installs, runs the ```launch_benchmark.py``` script and does not terminate the container process. As an example, this step will demonstrate online inference (--batch-size 1), but you can implement the same strategy for batch inference (--batch-size 64)." console in: ```bash diff --git a/docs/language_translation/tensorflow_serving/Tutorial.md b/docs/language_translation/tensorflow_serving/Tutorial.md index c0a690e3e..1d8ebff71 100644 --- a/docs/language_translation/tensorflow_serving/Tutorial.md +++ b/docs/language_translation/tensorflow_serving/Tutorial.md @@ -37,7 +37,7 @@ Thus, the attention mechanism was introduced to differentiate contributions of e This matrix contains weights of each element in the source sequence when producing elements in the destination sequence. [IntelĀ® Math Kernel Library for Deep Neural Networks (IntelĀ® MKL-DNN)](https://github.com/intel/mkl-dnn) offers significant performance improvements for many neural network operations. -Tuning TensorFlow Serving to take full advantage of your hardware for recommendation systems inference involves: +Tuning TensorFlow Serving to take full advantage of your hardware for language translation inference involves: 1. Running a TensorFlow Serving docker container configured for performance given your hardware resources 2. Running a GRPC client to verify prediction accuracy and measure online and batch inference performance 3. Experimenting with the TensorFlow Serving settings on your own to further optimize for your model and use case From 634d8dffa5e037bd3c58889083399ff7bd5a5a5c Mon Sep 17 00:00:00 2001 From: Wafaa Taie Date: Thu, 6 Jun 2019 17:09:27 -0700 Subject: [PATCH 44/62] fix one of the data location references in readme (#325) --- .../object_detection/tensorflow/ssd_vgg16/README.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md b/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md index 5f101b835..8036419ba 100644 --- a/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md +++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md @@ -125,7 +125,7 @@ the model directory `SSD.TensorFlow` from step 1. ``` * The `--data-location` is required, which is the path to the tf record file that you generated in step 4. - * Copy the annotation file `instances_val2017.json` (from step 3) to the dataset directory `/home//coco/output`. + * Copy the annotation file `instances_val2017.json` (from step 3) to the dataset directory `/home//tf_records/`. * Use the `--accuracy-only` flag: ``` python launch_benchmark.py \ @@ -150,7 +150,7 @@ python launch_benchmark.py \ >* The `--verbose` or `--output-dir` flag can be added to any of the above commands to get additional debug output or change the default output location. -6. The log file is saved to the value of `--output-dir`. +7. The log file is saved to the value of `--output-dir`. 
Below is a sample log file tail when running the model for batch and online inference, the following results are based on CLX 28-cores with hyper-threading enabled: @@ -229,7 +229,7 @@ the model directory `SSD.TensorFlow` from step 1. ``` * The `--data-location` is required, which is the path to the tf record file that you generated in step 3. - * Copy the annotation file `instances_val2017.json` (from step 3) to the dataset directory `/home//coco/output`. + * Copy the annotation file `instances_val2017.json` (from step 3) to the dataset directory `/home//tf_records/`. * Use the `--accuracy-only` flag: ``` python launch_benchmark.py \ @@ -254,7 +254,7 @@ python launch_benchmark.py \ >* The `--verbose` or `--output-dir` flag can be added to any of the above commands to get additional debug output or change the default output location. -6. The log file is saved to the value of `--output-dir`. +7. The log file is saved to the value of `--output-dir`. Below is a sample log file tail when running batch and online inference, the following results are based on CLX 28-cores with hyper-threading enabled: From 058f0bf711a2b052ad024d283da27f34c80c83c8 Mon Sep 17 00:00:00 2001 From: Wafaa Taie Date: Tue, 11 Jun 2019 09:22:52 -0700 Subject: [PATCH 45/62] Add ResNet50 int8 TF Serving Tutorial (#314) * add tf serving resnet50 int8 tutorial * fix typo. * combine both int8 and fp32 tutorials. * formatting changes based on code review comments. * remove inceptionv3 int8. * update the tutorial to remove inceptionv3 int8. --- .../tensorflow_serving/Tutorial.md | 30 ++++++++++++------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/docs/image_recognition/tensorflow_serving/Tutorial.md b/docs/image_recognition/tensorflow_serving/Tutorial.md index a5f832193..71f94f76c 100644 --- a/docs/image_recognition/tensorflow_serving/Tutorial.md +++ b/docs/image_recognition/tensorflow_serving/Tutorial.md @@ -1,10 +1,12 @@ # Image Recognition with TensorFlow Serving on CPU + ### Online and Batch Inference -Model: InceptionV3 and ResNet50 +Model and Precision: InceptionV3 FP32, ResNet50 FP32, and ResNet50 Int8 ## Goal -This tutorial will introduce you to the CPU performance considerations for image recognition deep learning models and how to use IntelĀ® Optimizations for [TensorFlow Serving](https://www.tensorflow.org/serving/) to improve inference time on CPUs. +This tutorial will introduce you to the CPU performance considerations for image recognition deep learning models with different precisions and +how to use IntelĀ® Optimizations for [TensorFlow Serving](https://www.tensorflow.org/serving/) to improve inference time on CPUs. It also provides sample code that you can use to get your optimized TensorFlow model server and GRPC client up and running quickly. ## Prerequisites @@ -22,20 +24,28 @@ This tutorial assumes you have already: Convolutional neural networks (CNNs) for image recognition are computationally expensive. The IntelĀ® Math Kernel Library for Deep Neural Networks (IntelĀ® MKL-DNN) offers significant performance improvements for convolution, pooling, normalization, activation, and other operations via efficient vectorization and multi-threading. Tuning TensorFlow Serving to take full advantage of your hardware for image recognition deep learning inference involves: -1. Working through this tutorial to set up servable versions of the well-known [ResNet50](https://arxiv.org/pdf/1512.03385.pdf) and [InceptionV3](https://arxiv.org/pdf/1512.00567v1.pdf) CNN models +1. 
Working through this tutorial to set up servable versions of the well-known [ResNet50](https://arxiv.org/pdf/1512.03385.pdf) and [InceptionV3](https://arxiv.org/pdf/1512.00567v1.pdf) CNN models with different precisions. 2. Running a TensorFlow Serving docker container configured for performance given your hardware resources 3. Running a client script to measure online and batch inference performance 4. Experimenting with the TensorFlow Serving settings on your own to further optimize for your model and use case ## Hands-on Tutorial - InceptionV3 and Resnet50 -For steps 1 and 2, refer to the Intel Model Zoo FP32 READMEs: -* [InceptionV3 README](/benchmarks/image_recognition/tensorflow/inceptionv3#fp32-inference-instructions) -* [ResNet50 README](/benchmarks/image_recognition/tensorflow/resnet50#fp32-inference-instructions) +This section shows a step-by-step example for how to serve one of the following Image Recognition models +`(ResNet50 FP32, ResNet50 Int8, and InceptionV3 FP32)` using TensorFlow Serving. +It also explains the possible ways to manage the available CPU resources and tune it for the optimal performance. + +For steps 1 and 2, refer to the Intel Model Zoo READMEs: +* **FP32 precision:** use the Intel Model Zoo `FP32` README sections, + * [InceptionV3 FP32 README](/benchmarks/image_recognition/tensorflow/inceptionv3#fp32-inference-instructions), and + * [ResNet50 FP32 README](/benchmarks/image_recognition/tensorflow/resnet50#fp32-inference-instructions) + +* **Int8 precision:** use the Intel Model Zoo `Int8` README sections, + * [ResNet50 Int8 README](/benchmarks/image_recognition/tensorflow/resnet50#int8-inference-instructions) -NOTE: The below example shows InceptionV3. The same code snippets will work for ResNet50 by replacing the model name to `resnet50`. +>NOTE: The below example shows InceptionV3 (FP32). The same code snippets will work for ResNet50 (FP32 and Int8) by replacing the model name to `resnet50`. -1. **Download the Model**: Download and extract the InceptionV3 pre-trained model (FP32), using the instructions in above README. +1. **Download the Model**: Download and extract the InceptionV3 pre-trained model, using the instructions in above README. 2. **(Optional) Download Data**: If you are interested only in testing performance, not accuracy, you can skip this step and use synthetic data. If you want to verify prediction accuracy by testing on real data, follow the instructions in one of the READMEs above to download the ImageNet dataset. @@ -62,7 +72,7 @@ NOTE: The below example shows InceptionV3. The same code snippets will work for (venv)$ pip install tensorflow-serving-api ``` 5. **Create a SavedModel**: Using the conversion script `model_graph_to_saved_model.py`, convert the pre-trained model graph to a SavedModel. - (For ResNet50, substitute the name of the ResNet50 FP32 pre-trained model.) + (For ResNet50, substitute the name of the ResNet50 FP32 or the ResNet50 Int8 pre-trained model.) Example: ``` @@ -174,7 +184,7 @@ NOTE: The below example shows InceptionV3. The same code snippets will work for ## Conclusion -You have now seen two end-to-end examples of serving an image recognition model for inference using TensorFlow Serving, and learned: +You have now seen three end-to-end examples of serving an image recognition model for inference using TensorFlow Serving, and learned: 1. How to create a SavedModel from a TensorFlow model graph 2. How to choose good values for the performance-related runtime parameters exposed by the `docker run` command 3. 
How to verify that the served model can correctly classify an image using a GRPC client From d01c39b19fdb63bf859b0a5bf400dab9e2715dcb Mon Sep 17 00:00:00 2001 From: Dina Suehiro Jones Date: Tue, 11 Jun 2019 09:56:41 -0700 Subject: [PATCH 46/62] Make the launch script executable (#326) --- benchmarks/launch_benchmark.py | 0 .../tensorflow/faster_rcnn/inference/int8/coco_int8.sh | 0 2 files changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 benchmarks/launch_benchmark.py mode change 100644 => 100755 models/object_detection/tensorflow/faster_rcnn/inference/int8/coco_int8.sh diff --git a/benchmarks/launch_benchmark.py b/benchmarks/launch_benchmark.py old mode 100644 new mode 100755 diff --git a/models/object_detection/tensorflow/faster_rcnn/inference/int8/coco_int8.sh b/models/object_detection/tensorflow/faster_rcnn/inference/int8/coco_int8.sh old mode 100644 new mode 100755 From a37f48f0a74dc8ee9f8230a4ac6333d8b567e9ff Mon Sep 17 00:00:00 2001 From: Clayne Robison Date: Wed, 12 Jun 2019 11:17:51 -0700 Subject: [PATCH 47/62] Ubuntu 18 tzdata fix (#310) * Fix tzdata installation hang with Ubuntu 18.04 * Update to use 'DEBIAN_FRONTEND=noninteractive' based on feedback from Ebi --- benchmarks/common/tensorflow/start.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/benchmarks/common/tensorflow/start.sh b/benchmarks/common/tensorflow/start.sh index 5884cfac9..7c37309d3 100755 --- a/benchmarks/common/tensorflow/start.sh +++ b/benchmarks/common/tensorflow/start.sh @@ -60,6 +60,8 @@ if [[ ${NOINSTALL} != "True" ]]; then ## install common dependencies apt update apt full-upgrade -y + # Set env var before installs so that user interaction is not required + export DEBIAN_FRONTEND=noninteractive apt-get install python-tk numactl -y apt install -y libsm6 libxext6 pip install --upgrade pip From 17a5cccccc8980a572a038c84c41c985a7a21fb8 Mon Sep 17 00:00:00 2001 From: "Xiaoming (Jason) Cui" Date: Thu, 13 Jun 2019 09:32:29 -0700 Subject: [PATCH 48/62] Update Transformer LT Official to support num_inter and num_intra threads (#308) * Added the support to change num_inter_threads and num_intra_threads to the model * Update unit test args to include num_inter and num_intra --- .../transformer_lt_official/inference/fp32/model_init.py | 4 +++- .../tf_model_args/tf_transformer_lt_official_args.json | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/model_init.py b/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/model_init.py index 85dae1e68..a8b0b9432 100644 --- a/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/model_init.py +++ b/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/model_init.py @@ -92,7 +92,9 @@ def __init__(self, args, custom_args, platform_util=None): if self.args.batch_size != -1 else "1") + \ " --file=" + self.args.decode_from_file + \ " --file_out=" + translate_file + \ - " --vocab_file=" + self.args.vocab_file + " --vocab_file=" + self.args.vocab_file +\ + " --num_inter=" + str(self.args.num_inter_threads) +\ + " --num_intra=" + str(self.args.num_intra_threads) self.bleu_params += " --translation=" + translate_file + \ " --reference=" + self.args.reference diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_transformer_lt_official_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_transformer_lt_official_args.json index 1ccbf4bc4..079f99abd 100644 --- 
a/tests/unit/common/tensorflow/tf_model_args/tf_transformer_lt_official_args.json +++ b/tests/unit/common/tensorflow/tf_model_args/tf_transformer_lt_official_args.json @@ -1,9 +1,9 @@ [ { "_comment": "FP32 latency benchmark", "input": "run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_lt_official --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --file=/dataset/newstest2014.en --reference=/dataset/newstest2014.de --vocab_file=/dataset/vocab.txt --in_graph=/in_graph/fp32_graphdef.pb", - "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=1 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt"}, + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=1 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt --num_inter=1 --num_intra=28"}, { "_comment": "FP32 throughput benchmark", "input": "run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_lt_official --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=64 --socket-id=0 --benchmark-only --verbose --file=/dataset/newstest2014.en --reference=/dataset/newstest2014.de --vocab_file=/dataset/vocab.txt --in_graph=/in_graph/fp32_graphdef.pb", - "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=64 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt"} + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=64 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt --num_inter=1 --num_intra=28"} ] From 194e0119a20a4539dabbffdfe6eb9dd62a159f30 Mon Sep 17 00:00:00 2001 From: Melanie Buehler Date: Thu, 13 Jun 2019 09:40:49 -0700 Subject: [PATCH 49/62] TFServing SSD-MobileNet Tutorial (#311) * Added SSD-MobileNet to existing tutorial * Added tutorial script and notebook * Improvements and fixes from testing * More small fixes * Adds SSD-MobileNet to main docs README * Updated benchmarking verbiage * Simplify package installs, docker ports, and detached mode * Created requirements.txt --- docs/README.md | 2 +- .../tensorflow_serving/ObjectDetection.ipynb | 322 ++++++++++++++++++ .../tensorflow_serving/RFCN.ipynb | 207 ----------- .../tensorflow_serving/Tutorial.md | 304 ++++++++++------- ...hmark.py => object_detection_benchmark.py} | 68 +++- .../tensorflow_serving/requirements.txt | 16 + 6 files changed, 571 insertions(+), 348 deletions(-) create mode 100644 docs/object_detection/tensorflow_serving/ObjectDetection.ipynb delete mode 100644 docs/object_detection/tensorflow_serving/RFCN.ipynb 
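For context on the Transformer LT change in the patch above: the new `--num_inter` and `--num_intra` flags passed to `infer_ab.py` correspond to TensorFlow's inter-op and intra-op thread pool sizes. The following is a minimal sketch of how such flags are typically wired into a TF 1.x session config; it is illustrative only, and `make_session_config` is not a helper from this repository:

```python
import tensorflow as tf

def make_session_config(num_inter, num_intra):
    # inter_op threads run independent graph ops in parallel;
    # intra_op threads parallelize the work inside a single op.
    return tf.ConfigProto(inter_op_parallelism_threads=num_inter,
                          intra_op_parallelism_threads=num_intra)

# Matches the unit-test expectation above: --num_inter=1 --num_intra=28
config = make_session_config(num_inter=1, num_intra=28)
sess = tf.Session(config=config)  # session used to run the inference graph
```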
rename docs/object_detection/tensorflow_serving/{rfcn-benchmark.py => object_detection_benchmark.py} (54%) create mode 100644 docs/object_detection/tensorflow_serving/requirements.txt diff --git a/docs/README.md b/docs/README.md index 3fd8677db..c5933030c 100644 --- a/docs/README.md +++ b/docs/README.md @@ -17,7 +17,7 @@ * [Recommendation Systems](/docs/recommendation/tensorflow/Tutorial.md) (Wide and Deep) * Inference with Intel® Optimization of Tensorflow Serving: * [Image Recognition](/docs/image_recognition/tensorflow_serving/Tutorial.md) (ResNet50 and InceptionV3) - * [Object Detection](/docs/object_detection/tensorflow_serving/Tutorial.md) (R-FCN) + * [Object Detection](/docs/object_detection/tensorflow_serving/Tutorial.md) (R-FCN and SSD-MobileNet) * [Language Translation](/docs/language_translation/tensorflow_serving/Tutorial.md) (Transformer-LT) * Model Quantization and Optimization * [Image Recognition](/docs/image_recognition/quantization/Tutorial.md) (ResNet50) diff --git a/docs/object_detection/tensorflow_serving/ObjectDetection.ipynb b/docs/object_detection/tensorflow_serving/ObjectDetection.ipynb new file mode 100644 index 000000000..5e975ae0c --- /dev/null +++ b/docs/object_detection/tensorflow_serving/ObjectDetection.ipynb @@ -0,0 +1,322 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Object Detection: R-FCN and SSD-MobileNet" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from __future__ import print_function\n", + "\n", + "import os\n", + "import time\n", + "import random\n", + "import numpy as np\n", + "import tensorflow as tf\n", + "from PIL import Image\n", + "\n", + "from object_detection.utils.visualization_utils import visualize_boxes_and_labels_on_image_array\n", + "\n", + "%matplotlib inline\n", + "import matplotlib\n", + "from matplotlib import pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "MODEL = 'rfcn' # Use 'rfcn' for R-FCN or 'ssdmobilenet' for SSD-MobileNet\n", + "PROTOCOL = 'grpc' # Use 'grpc' for GRPC or 'rest' for REST\n", + "IMAGES_PATH = '/home/<user>/coco/val/val2017' # Edit this to your COCO validation directory" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "if PROTOCOL == 'grpc':\n", + " import grpc\n", + " import tensorflow as tf\n", + " from tensorflow_serving.apis import predict_pb2\n", + " from tensorflow_serving.apis import prediction_service_pb2_grpc\n", + " SERVER_URL = 'localhost:8500'\n", + "elif PROTOCOL == 'rest':\n", + " import requests\n", + " SERVER_URL = 'http://localhost:8501/v1/models/{}:predict'.format(MODEL)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "def get_random_image(image_dir):\n", + " image_path = os.path.join(image_dir, random.choice(os.listdir(image_dir)))\n", + " image = Image.open(image_path)\n", + " (im_width, im_height) = image.size\n", + " return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)\n", + "\n", + "def visualize(output_dict, image_np):\n", + " new_dict = {}\n", + " if PROTOCOL == 'grpc':\n", + " new_dict['num_detections'] = int(output_dict['num_detections'].float_val[0])\n", + " new_dict['detection_classes'] = np.array(output_dict['detection_classes'].float_val).astype(np.uint8)\n", + " new_dict['detection_boxes'] = 
np.array(output_dict['detection_boxes'].float_val).reshape((-1,4))\n", + " new_dict['detection_scores'] = np.array(output_dict['detection_scores'].float_val)\n", + " new_dict['instance_masks'] = np.array(output_dict['instance_masks'].float_val)\n", + " elif PROTOCOL == 'rest':\n", + " new_dict['num_detections'] = int(output_dict['num_detections'])\n", + " new_dict['detection_classes'] = np.array(output_dict['detection_classes']).astype(np.uint8)\n", + " new_dict['detection_boxes'] = np.array(output_dict['detection_boxes'])\n", + " new_dict['detection_scores'] = np.array(output_dict['detection_scores'])\n", + "\n", + " # Visualize the results of a detection\n", + " visualize_boxes_and_labels_on_image_array(\n", + " image_np,\n", + " new_dict['detection_boxes'],\n", + " new_dict['detection_classes'],\n", + " new_dict['detection_scores'],\n", + " {1: {'id': 1, 'name': 'object'}}, # Empty category index\n", + " instance_masks=None,\n", + " use_normalized_coordinates=True,\n", + " line_thickness=8)\n", + " plt.figure()\n", + " plt.imshow(image_np)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Test Object Detection" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "batch_size = 1\n", + "np_image = get_random_image(IMAGES_PATH)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "WARNING: The TensorFlow contrib module will not be included in TensorFlow 2.0.\n", + "For more information, please see:\n", + " * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n", + " * https://github.com/tensorflow/addons\n", + "If you depend on functionality not listed there, please file an issue.\n", + "\n" + ] + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXEAAAD8CAYAAACB3pQWAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi40LCBodHRwOi8vbWF0cGxvdGxpYi5vcmcv7US4rQAAIABJREFUeJzsnHl8ZVWV7797n3PunJt5TqpS80DNFAVVzKKCiCD2U5DWBrUFRZyfA90tTT9xaMQBxQnFRgRUkGYSUOa5KGqghlRqTlKZ59yb3Pmcs/f749ybSlIptN/n+Wn9dNbncz83N/ucPZ19fnut31p7Ca01szIrszIrs/K3KfK/uwOzMiuzMiuz8v8usyA+K7MyK7PyNyyzID4rszIrs/I3LLMgPiuzMiuz8jcssyA+K7MyK7PyNyyzID4rszIrs/I3LH8xEBdCXCCEOCCEOCyE+PJfqp1ZmZVZmZX/ySL+EnHiQggDOAi8DegCtgLv11q3/H9vbFZmZVZm5X+w/KU08Q3AYa11q9Y6B/wGuOQv1NaszMqszMr/WDH/QvXWA52TfncBp57o4tLyCl1XVU1vTzcOGYoraxkc6sXJ5MCVmKbEdVwa586j2B+hf2yE+OggCpfqokp8GAw4KYpFgJLaKrpHBxnp66I0WEraSWAJiRUwse0sWgjsjMKVIDHQOPgMC6U0jnYwMJG45LRCaYFEInAIBn0oQ2JoSTqdAmEhtQuYmNIl62q0MBDaQRoWmZyN0OAzfQjtgJBorfH7BVlHEzADhKJhYiOjCO0SDlikchrHcXCVi9/nx1UKaZgoN4vAj0EO6TPQ0sAUBol0CgMTn0+ihcZQBtqAdDpLwOfDxsUyLaTSIF1y2SxCWPh8PlwXNArXsRGGhe3kkNrAMsHSkJXgOAIhFD6hwTLxC0E6qwgUmWghqK9bjOEKlHDJZbJoQxDyBXFsF4VGokAIEAZaaaRwAR9COijXQEiVXwECAWgBovAfIdBae7+FmGHVHC9CgGdZFq7XE3WBVybepK6p905tdsJiFaC1RAoNaASSjs5DYLpUVS/CJ6XXrAaNRiNBawxZ6M2b1I3Ij+HE13nzMmPvJ10/8xgnz8N/pexY+zPV+ee3o5U3J1K++fOcPB8q/+fUW6Y/x2PXT25S5zv9Zs98UqNo4bWntcjP4AnmQkz5QqApLOBCm96z1CAEYuqyOr7aE3TvjTd2DGmtK/9U1/9SIP4nRQhxNXA1gOEzaS0Z5oFr7+bLh69n5ZEFbMm209dYjN1tMn8xhMZsOsNZIiMua983j67Xklx1zef5jwf+wNHeI9x01sdY87n38b2Hv4zxoz1E6qPMr4+i+iM0VlfzxOs7qS2qpDMxxNsvPI19bUeJDyapLCnCHUsxt9yg+uTzuOv+31ASjlAXSuOqBlLxEc48dRE9yQHKfGW8urWFupI6Kiotxrr7GSgpZc68Ojr3tFAVCTGYylFh+hmWNgCheAqzsprGogDDoSwNqWJOXtzE84d3IMoWkk0MY8SGaE+MUzN3Lundh4hHq6mzNf6iEEkSVFaXosazXPf+D7Kv4wi72w6yIlDC0dEcc5Yt5MlXd6Pig6Srilm7ooG2ln4aIgs4Ot6N3dzDsrPmUp4JsSXRQWZAUFLiY8jNctG61WzvP8yenV2sK1lM2u4gMa+BkvEMi8uj7GrtpG00wKqGGi47+0x++dRvUEUBVtbMZXd/H41RxVnvuIKh7q0YfW2saHwvD776Pb7w4834ksOISANRDKIBH1m/JjAyhFtcjZWRZM00QlqAQko5AcCGYXhAJcB7B8TEJ79u0FofBxBC6knAbyCkBhw8Zm/KuptST0EKdTo2mJZESgmoKcCvtcbSBtoSCFdhWD5SMs233n8VsfmattYu7rvnFXLJLEILpGmQ1BLh5ghIgTSO9UEIPandfP1KTLTn9X/SZiYEoBDCu77wf6UUUvjQuAihJ+bK6//UMc807kIf/hSIK6XI5RwcW2FYJpZlYJkmQnh9mGmjnPycMmkHKSWWz0ApZ0r/JvexcI/WgpwL8WSOYCBEwHCQUiBQ+XkQM9wncNypa2RiPqc/b5VfU0LlfxukHIWtDBylERIsofAbXv1STN4MvHuU9uqU+WcppUQpBUiEEBjC9cap9IzrTYipz6ZQXuhvpChwdMaHMU3+Upz4RuBGrfX5+d/X5zv5jZmuX7f+ZP3ilu188oJLubPtIeZmSnBrSrno5A1owjz72gOsaZjL7v5hikcV6UgKpyrEaWddxMjrD2EmitjWNUj9KfMxxmEsMczOH7Zx6g1vR+w6zKF4J6tWreJg3wBSWmRNH4t9YcatNG5MMkaMIkY4d+NV/OLun+MPlxENO/T1x9m0cR52Ikf52pMoiSvSEYf/fOEVVkYtxu0QOFniAyaL60sZVaMMdmeJ1kUZGk5TZAYoWl7NIjmHR5/+PSvOOolaUnTGRgkV15EaGccyEwRkDd29RzjnsstZm6tml9XH5vseQ/mziJCJGShmfsik7cA4RbVhEsU+GMyx6awlbNu6i0QgzDJRzd7De1mwpomTfAkS8122vJTFdW0C/gjoJH43ylhKY5VAoifOvIYI80+qprW9FT2iORRzKS0xCIUM/NVlnF++gFv/sI+T1tQw3n6Y0YzLNe/bxDOvdtNQV0ePHGSx45A2BE5kEcmBFj771TtxVQ3rl5bQMZzAzDhYpo9E2OCHt/+EKilYEhjkkvffRDyXo/BCCgFS5DUZ6YFQAcTza2jy+vJeHimPLfzJIC70lGun/30iwPG+xQSAA7iuO7GxKKXIKYGWGQI6iOE3+MKt3yRq91LUUck//ODzlLohspkMhrTQAjIKcukcAb+FKZz8hiUm6vfazQOalvmxuBPADnnAkToP0npKf71+yYl5LNT/p8Y9Wf48C4U8WHs7kdeOBinQrppy7WTAKnxnMy4+v4m3sYqJjWh6OzoPjLajyTpgK03Q58cU9owgXpg7VymUAuUKtBRTxjrTeAtSWEO2bZB0QUgTUyiEdPEZgoB0yQ9+Upvet60Vri5sCAaGVFM2DaMA/HlzQjH92R3rkyho7JOkKBrcrrVef8IHM2UG/v/LVmCREGKeEMIHXA48cqKLk+kkH35HGYNWO1X+pdz78xd4+M7n6U6N8NiD9xNLCEazgr5ECjMUIlA0h9beDPsffYKWfcXs6SpmfqgMgY+O/v1ktObCG87h0LbNbNGdNNSH6dyzF5VIo4eGSQy1kfENkOjqIRfposxvkC1q5Jf3/Yyy0grMgCI2FqdpeRQRydE5HGP/71s4sHeE1jdaWF5ezMlzTqE/NoQVDKHCaU47eyVRsxrIEIsPUVUSoLuvnxXBRpqz7Xznh7eyZn4ZL/WPMSe8kFRuHHu0CxUPMpw4ynsuPIPh1+7nns0/p3h4iMGhEbKJJGosTHVpmN7OGEWLfYz0jpDrHmJuNMXOp3bTFRsjmskxcGQ7DQslpnJ4rM9m78N+ltcsZPmS04jv7eJn1/2GnrYeigKDrC5LsKZO8Mq+Tg4dSRKUId
RYkqqKesoyYUKVVaj+UTbTxdvOqyfXOwapEYZMk+/f0cwF5yymPpLj6MFetvYbZI0yWlo3Y5c7JHf0sXFpFa0v7sSXDSOkxatbnuT7N36C+p4nObM0yvYxg5TPf9w6cNHHNHA98ws4XXOZDFjHfk8F8sn3zQRWBfDzPkzcO12jBfBnFQFXIIXFDd/6AmW+l2neuZ21Jzcx+IttoASe8pXDtm1wXMAgkwFHH6ur0J5hGJgmGIZAGiqvgXtANVmT9MYn0NrIA7xECMMDD+N40JpJOTvR/98MwAtzYBgGhmEgJZimB+CuVriue1z9M4GmaQE4E5rq9P4UANx1NK6jyTngIohYBkGyJ+idRLl4HwVKaWzl1aOUQik1pf7COCc2oIJGrRQ5V+E4kLEVAoeAYROQDgLXW1MGHqcjPStRkbeg8lq2lJM3B4XW7gnnZMIyUHLio9zC9nTs8+fKX0QTz3f0QuB7gAH8Qmv9tRNdu2LFGn39927ikjkXcd3mb6PvPsKDXb/joqJVBFc0sH/ZCO23vYThOmRVBggRqSgmFetHa8kZ9dCSEaSNUkLCZCiRwjI0oZAJ6QwpR6KVSSw+TmVJFDUwgFMUQYkQrqlZsSzM9pY+wpbFSCqDTFmE/RaxdIZGK8CAP4nMCErCFr0ixxIZxq2WVNuaUr+PDlGJHHY5PNxKTWUF3cqm3lboMk2sbZSNazeQ1D6SRzsZDseJZV2WN9ZQUlJC2lZYmR62dic5ZUEjDeUluLlxlA2itJEnHnuG8rIyjsa7qSyOUhwoojWZozgbwLETlEcDZNwo/oifsXSMhfOraN98hHu/ez8f/e6lhP31pJ0cScNkWU2UHTtaqS6L0hvrY/3y82jr3U5kQS3ZrYOc/s7z2b99C7sPDLP07FIscox0pzlp7UKC/aXUn3s2j9z5S4zgEI4OsLA4QDxXDyWtLFv3Ds6b+y5uvu9KlkU28S9f/h6hhipe3L+TbOdmYjsFD3a8ym03fpaF5avoyzoY2tPKkAKfFcB1cvm1wzFKpUAPMAmEhacVGcbx4HMi0J8O9JOvMwxjosxUkqTIEjFMskqBE0AGYliqhJxKYRMircZIp5P09To8+YsvM/c9V/O/Np5NYjBOaagYW4+T0g4+oxThJrz2LAOZc9CGiWmaFF7TgiY4FcymjkFpIw9++X5LiZBgoEF7JrurDAQuhlSeDyJvqUgpcV13ipbu1XVsfqZbJDPRV4Xfk+89EXbMBOKF+44BKYDp1a09LdpFo/II5ih3whqTUiINd2KDLvhKtPZh5+vWAhxHYQgz305+Pl0bUxoImW8LlfdyuRPjdF2XhGshtMJvGN66lAVqjzwgyynPSCmFKwyU61EopsVE3yQCU0omk99aaxTGlLlR6AllRSh9zErLUzwlxaH/Vk0crfXjWuvFWusFbwbgAAc6D7A03sTHfnEd2/9wK6XZNCsCFTwee5b3Lj+f1js341OCUFM5q9esorjKRyqTIBgMsnBRI6+IMJFcGdlEjp6OTnTWpiQUJYnECpQyphx8aZsVdXVIQ1G8cTn+iET6ByGrkXYl/3zFVygtaaIyUkumVBMKGBTVhClriFLl87Fq1SLC9RWsX74GVVbM6vR6Xux2eH1/nCojiKX2U9nQSOPCjaQyDoGIIhnXXPv3/8SrrS10HO6DucX8/RkfpaRUIpwUpcF+GqMOrT1jvHtpBa9taaZ5Xx/DQzHsuEPz1j1c8L73MyIU8+uWoZWPpUsXo/oHCZcImhbNQ4b84E+SVmnqamrY27KfquWlfOYrl+C3olQGl9Ax1EG9E6Is7pCUGc582xI2zqmhM32IJlFGbGucBetqeeLxh0ngQwR6KXPmM9xRxNK6Zex5rQ1HZMgefo7c6CHGxhU+bWNkwgzk9tO4cjm5PS20x1t54jcdfO6zN1NeV88j9z/AC7f/gBd3Pcst993AurPfxne/9A2eP7wZVMRzfWqBbdtkc+nj1sV07XIysMzE7f6J9Tilzsnc8eSycTNHWFtkbPAbAXw+cOJ+tJNF5VKEUfzspzfw24d+xDN3/StDiX52PvN/+OnXP8aLse0kZJLX9u/FMB0EcfCZ2K4DSqH9Vv5Fd/OaooPr2m8K4F7HHDzQdzEM6TnKXSc/JwapLGRyCtvROK4HShNA47pT5muy5jx9nqfP+fT5KwDxTJvOTBz09Hon/1aqAI4KhddfR0PWVjgYWJaFaZpYlonlk5PGkN9MMEnbNlnbwXYV6awL2kRpjRAuEhdTanw+A9MSGCZIw3OqKtQERVbom9QultAYUmFIME1z0hqbajl4CoSR/+1tNp4/QyCliRAGjgJXT/2c6DkLIcCQx63NP1f+Ypr4f0WK68r0rc/9gc+dex6P/eO9zHnXmdSvLuEp2cwFlSup3LCQ8niWkYOduIEQkZoIQyNZfALQOcqNGkasMYzcOI4S+BvLEPt6Ka4uY9gnKHL9xJJxfMUhUqNx5hdXcWh0lLJwFSNHO6isK+eKBe8i9OFVHHy8he7mbVj+EI9ve4mFi+qROERdm1aVoc6JIizN/vEYV//Dlfzom7ezYpnBxrOvoKujk1cef57zTq/Hra2n49AAO1va+elPbuM7D36TDaqSe7e8QW19hNMXL+Do0BijXT3Mra6ju88mIi0ikR7GlcCsqCKlfMwzKhkeP4BKSXIyigoEWH9yE82799B6eIBofSnLVy8gfXSIjqM95IrKKVtcQ26wGydrctM1H+PiW/6de676Hnc9dieZpiSDL7cRWOVnqdvEWJfD0XgrYxWVnH/mQgb3D3K0p4uUEeBji2u4dUcLp69ax+YdHaxYF6K6yEdCmvS0xclmHIYHBrFCfj59xZV89FM3MTwwQm9I8ounbmX8988wEkyxqmoTpf5OXnjhAMOJLA+9vIOjnRAVWUrCRfgsg1QmjZn3/E04fOSkl4xjC1tO+CrVcXTHidbzZCA7EV8shMDQLklhYbqatC/Jbx9/kEs3nUOgZA4vvPoiW37zfQZkmgtWfYZHuq+iszVEuevyv7/4KBuWLSZrCN571RoevrOZ9IiLGXJwUIR8QWzXngKiM/WnoEFPHdMxjtzTpD2nmqM8zdVxJUpo/Ab4pMepT9emZxrz5L+nb3IzlU/nuU8kMznrZr7P06BdV+PYAheTrGPjNy38JoAHqh49MZlS0thK4LoCN7+xGIaF4zhYpsS0ND45jYoSeStCS5SaagFprXHyNJgh8NaaIScomfyopsyF4zg45Ok2LT0rwNQTfg2QuHlt35Qzc97TwdwotJHXxIv/mznx/5LML59Lx+92MNIxSvEnF3H/3h9xsC/NF77wOaqMYiJH+xjIDlNbVkdxNEQgYRKxDYxEhrAOk1R9DA/EsftM6mqbcPb2smLDWlR5hFT3MLG+fux0klwuh2EF6RtMsHbBAnQmzaZ3v5V5J51C9Yp6nvvJw9zwpX/hqV3b+fVLL4INUhj0xseQTQ2EQkUcTA+zNlDGHFnMM3c/zpqzTuHkDVeQiQ0Srhnmin+7hF7/GMX1lRRXudzx9a9x9ceuoyFWyfO+D
hbWluLTPrYd7CI2CivqFxAbdxgQaWRwnHo5l8aqOYynsownRglbSUb7BXVzRimLDJAa6aT5tRaCRaW4xWGqSiup0UUI0sxZUEtO5eje18XY0QQ6JPnRD+/l2tMu5adP/pqXXn6W2LYhPnf1v1B2YIg/dBymvmkR42GDRFecbU+20Dc4QElxkLllcX55aABfWrFv3zY2Lc4y1pdBp03mldZw+qoFiPEBRjNJdrzeydzAqVx56Wnc9cdfUfnkDi4ObmBO+ZnMzyznod88DOHzoFGzdtPJ9D25lcd+/WXKqstIpBMkk2nPETiNP5yu1Z0oCmKyTL5+pvKZNMfJZTlTYqYydA4d4atfv5rDjz9AVWQO0ayDb6CbT3z/Z6xespBfv3wl59V/mnef8Rly5QF+e++ZPLllCzfffQe/vbOZa7/8IVyjD6E1OhAkqx2kKjhjPa1NuRLlTrUECtTO1DEZeWD3AEgDWVeTsgUpx0QI8JkKn6kxTXdiEzgRFzuT/Kn5KlwznW75UzLZ0Tr5byEECoHtGqQzkHIEaTuv5ZoKhJ23QJgC4EoplAsSjYHGb0gClsAUNn4DAqbAb4BATXKM5+dYS1y8zc9xnCmWhVJeW64uAL4zaR16Y5nMswshkOKYti6PaRY4SpBzXBwtcDU4iin8/OQ5B689Yfy/Q/FfBYi3HGrmf3/xajoHsnz36AHUiy6XfOcaKl+NEa5JYwcMLDuIryxCtNKgqcwi6YwSj9hkHReVi/D377wYozRE62sH+NLdd3C4tY/OwQyma5KxFEsWNrC0pIgFJSEsK82wOUaTkry2bSvPPPsHVp61iY/ffBM//N//zqc/fz3FQcGZn3oPdjLNki6D3a/uZrArxfJVa7js8i/zL888gIomaVgeojd1lD4yWKkMrz/dwtLik7jnkccwiheyff8DfOSjZzFHJgkcEOTGRjnSN0p9OErncIxy3ceQM8Q7VxfTFYNnYoO0NR/klMY4NVYdA4E64tKhpdeHJUKETJNkyTh7WroJYNB2oJUntzbTY0eIlBaxoiYJzhjCDOPEJQtPO4WUo+hJdHLFB04lYaZ49olvM2aEqKSUbbnDXHfBRaw5fQFjqoNQFNoPdmEFyxkfGOXkt6xk2IbxbD2tiSF2dLRzsKONB198gVMuuJiTljUwsH0PA3WSeLSUy6+8hvAlb2Hzs0/gVsDp7z6f1Zs28ON7fkR99RlsO9TBF2/9JpedexVPP3gPlRVRtGmRzdq4wlvQQD7OXExxck4FZ49imAmIZzLtgeM2Aa01PtuHKSzGGUG4DrabIBWL0XVkmM7+ZirqanBN+PpdP+LJo//JrV+5mNQI1NYVsavjNUZGH2OJWUO44nS+8+138pWNV3P9F04BdYRgcS2WX/DOd78Nv6vA8OHoBGg/0hRkMmJivAX+evLfBYDQrkJKcFyXnGt6YGJLclmF5TMI+WyCJvlY9GObQmG80zXvN6NDJgPbTOXHomuOt26mfwrXFsY1tS+eVuxmFU7Wwc3/z2d6TlS06ZVrhRb55wwgDFR+jiZoEik8mkkqtNDYDrhKIgwT1xE4tspvlgJlu97GqiQ6v7YkJqbp8+Y+z1O79rH15yiFoyRZLUlrQVZLlPAjcEC5GBIM6Xobi1T4DAef5RJA4rqW93EktgZX52kwJbxoFTmJ6pLgCI2DwP4vECR/FXTKmrUn6+e2bMfvwulnrYfT5nJO43z+8JN7qHnHSqpbEgyGMljFmkT7KHtGBqiJVlGJy6GhToKEEfPnsy6RJGObYOTYPjpANJdhPFpKbkDSUC5omlNJ2jWpW1dKYn8Xw8UGQRVhoP8gpw7N42XZxpxT3sHvH3qY2mLB4uoNlOcilHzqQpa2ai5asokr7/8KJfE0zz39Iqlcire9bR2H2vdy5op1vNrRS12RRVurzYp5xdRWWXQG51CqD+J3MuzeWUxZVS+nrb+G79zxdT6wUTCW1OwaLcUxsqysP51tW17n0rU1WDLO7zs182rKSeRibCgqZVgdYUFpJU9u7WV7v8PCBfW42QzDhiDgakr8gne+/XSe37af9GiCcLWBHvaRtLuI1DThVz0MDEU4f5nFhnd8nHXnvIuVi0/iutPOY/T8M+l94pe4/jFSsoRYQqDUGLVVDVREE7y4a5TFdQ1kB4eJxYYpb5xLaciPP6zZ3m2zcX4DuVAFDectpqx9lBee2kxQJQlEQlxz7bVse3Unl155JamxcZ7uH+Aza9+KGbWwlSSZcTDRaKkxhMAQnjlbcGpNBl7DLCx6j0/1IjRmNt/heNN+OuDYahwhDEKJEt535bu48O/exVOv/YxLL/4KT77yVYRvIatXnsO1b/0QH/zn04lWRjm57nxe2/U0/qRBeWMTw2YLpQYYJVG6dh4mW6fxDdWRSoe59eeP0jHQxpOPPsTnPvpZLJ3FzppknTEMGUaaWQz8U0F7mqMRpUFqkqkc0gzis1wcW5BzFKYpsfL0yUxjngl0J8/TTO//ZGD+U/gwEw0zEyUz3SHqut6BMLQ5sVEJ4TkDNTbS8HvPNx86KrTCcQW2rRB4PPf0Zzq5fi9ix5igrwobiZsPh9ST5sHrmso7IwE0pqHJ2RoXCxeRr8fbFC1DYAmP9vPalvkN8xj37WqwlUHK9jZXvyERWmFKPRFzrgXHP+tJY6gs+W92bP5XZNfBHTx33W94vvslihYkeHt4Lvf+7i4Wvfd01pfNY192lNf27+CPj79BafU81lbVIUgQ82tK8VMRCjL6fAvzyqvQKJq7uqmxShkwysmmNCkjR1WjprW5h67WFp595HXahzUNdSuxezRVgSb2zk/x4TPOINu+hesuWssV776Uwx27aW59ggMPfp3Pvvciat69kq33P87WnmYu3HAuLz69nXjWZNOqjfQM2XSOjNKoa/nwJz7KI3/cx/MHE9x85RdImyEefnoI0XiYrlgxdz//Hfbe2EhxQmMryVUf/BS9Q2mCTpKFS+P88UAvbW6cxdkkY8MC1ad4aOdRhlOLMMtL2BnTXPX560hJTbSoBHt4DFP6GR53uOeBFzlyuIPB+AA9LaMctdOEiqsxkjmSw/XE4jYPbbb5t6/fQpNZw1e//x0OZJvpePEVDJ/NonlNBEsdirI2UaOGuWRBVHL22g3Y8TESQhMqKWW4Z4iD4xnajsR5/xmLCVU0cP0nr6HvgedoPbKXJSv9yKCPYFjw8x/eRnpM8f5zLuakuvUkdu/CZ0ni4zkyOQVa4/cfH3J4IseY1gXQOn75TtcGp2uU0zXxsCrH5y/mfd94D5d/4uM0lUSpCqzi0edvo7VH0rxrF5FEisu+dDG1gTXUOgZky7jtqw/S1neUyob1jB1uIzYY4rmnXqG0vo6+lgSDsUNc+9lrqLThjus/TyTXimac5LhAGFl8RhRDSCwZPCHQQh58pEe/BII+/BYIrbBMTTQkCOTpk8na+4k04OlzMlPZnwL6E4H+icoK2n3hezLPLITw+Hvp4uY1UyUlwgwihXfKdcIay2/WUsp8dA9T6nQcZ1pbYgqAa30MwEGj8s5lR2kcNKac
PHaB7TrYwiLjGuRcb1PwGSY+KbCERs4wP67Go4i0iYNBMiNwbIEhfaSyIHWehxczO4en+zH+XPmrAPF5c5fyd4nruLDxTPY9NcLZiZW88f3tmLqSgzsfId59gMWBCq5/xyd47uA+jvSnefsZZ7A8XIpVU8NQNESkvoxvv/QaPckR+kMB4hKW6CKKBuOErDBtB3OYQZdE0mbxsgoSJQ4HX9nGeLafI+1J/IaPLz7+BEMZTXc6yc4Dr3HGGctpWjSX0V2DNJy3km/c9FlWXvYOYiPDXPTZD/PAt27hQIXBezd9kJd27WGFKOLl0W7uvO/H3PfcY9zz9Ct89Jtf5EDzfi5+70aGDkaoaPBx9wc38M37Rihd2sDcy24hsS/JS7e/zJ5tr7KrI0AoO0Y0UsmhmlLGBo5w1A1i1BTT132Up5rHcatLee13TyDHcowmR2hYUIuZs4ncewtmAAAgAElEQVT4wxj+EKFQCF+gjIzlx3CGMIPVdA51cfkV/8hI/yDlc6pZOn859ecsYv+ObQSqy4gNj9B+KM5AfJzIODg+m+pwljfGB9nf/AojYz0MJccYdXIMJMYRPoMPrbgA0zR4quUoR994hvvuvJPmPfvJHB6nL+Zy1lmVOMY4ZbV15Eb62PR359NljLE02kiqooiscrAsgd9vkclkppjhk0GpIJMP4cwE4ifSGmcCoUK9mRB84e6bqAzatL38BL95+ot0GgeIjkuCiXE+ddP32XLwGf75vf9IX+tTrJnzeWqWhxhO5qhdWcPru2+mcdE6RnLN1EZXcWB4BKt+KYHaSn5y55f4zC1vITjHzz98+Abuuf9+zKiPpzc/jhkwcJWNdo9/Yac7NiFPjWjQKjcBaBqJRh63CfypuZg8t5NBf/I1hciW6fHW04F+Jn78RL6NgnhtmmglcGzIZhTKEEhLYJkKqR0Kp08LY3ccldewJRpnQpufDH6Tx2y7GkcVNOJjMdvkj+ybQnqat/KsPy+EVee1aI2jfWRygkzOxZLgkxqfofHlwd6VhagTiaPAUZKcMsg6gowryLomAZ+gOCzBThPxC4T0LAutdT7O/Ph5ejML6UTyV0GnlFeVa/8cRbwnQW04SrldSY9vkNKmYpz0MLnhLAFl0V4uOXn5atJ7uuiMtRF0iqheUMWG1Wv49WOvkh5yKC1RqNgwVeWSNfObMCvC9LUP80qnwqiSpPv7CCRqKCsXhMMGw8ND1M+ZQ7vTidubwl9SxOp1p/PG3pfw90J5QxHJAMwZSOMULaV+6BAXfvDT7Ojq4fLLruJnv7qdG759M1d/7Eq27dzO0FCKz3/gCr533x2sbqpnJNtL33iAjVVB+tJh/BU2EWeck5ctoLqskq/ff4Du/YdYOCdE47JzEAebGRCjXFUV5mudgyxYHcZoDpEiSWh+JUODMWqDIeJZE5EaY1TZnhNGQDRYhJOwSWIi0goddikqNsjGbP7xsk/wvR9/mxtuuw1fcye/fPoBKooFZrkPUzqkMwkqassZbnUI1wRJdY2QKSpBpWxy8QCBmiz+eIbBnji+ihALqup4+MmXWHL2KQwNd7N2rIgll5/NLf92G6//egcf/9a5lJaWsGTJOnS6j7OXncqm8z/GkfgY69auYSzrJ2Q5eTNUg1C4AgwhMaXAMswpL/HEkWbhIKWJk8viD1iY+FDCi3JAasjYZEUKy+dD54oxLNvT0oTHKyNcAkYRyVSOnJkjGJDsev0ldmy9jx0tWyiONDKWTBCeW8JAbwuRcICTqt7ByWXnc/e+u7jxsn9iy5Yt3LX1OoLBBagRwZKVizBGu2jvCZEuClMVGGcwmeDyTV+kNBcltLQYq2oRO3fdx8ubWzh/xblcdMk7MYpNksLG7xzLfnEsdG2S5aA0SnugL+WxwyreaVI/hlYoYeMohRQBMspBuDl8poUWEkPI44C3UHdBWz0RiGutsSzrTemSP49ykVM2J8dxcOz84S6BF9o3KezPcb01McHVu8ccv55TcurmoV03H80kcJQC6c2jIaRnuempJ1q1FAjlUVUTkSgCdD5G3dE+L9rFMPFJiUtuCuWllIOjTCzh4AqPL3cdgcBEaAef6TlelZAo7fl4DOliFiwL5ZVN3nwMBEocc6DWVEb+LDrlrwLEDb+pF69qxMlmCDaajB3tZ/H6RvZsHiQi/Ihxk5KaLNZoBS+2HuT2O37A127+KievPx1lD9C3tRunVtOfGCBglGIkTYINQezBUcqqy2Gsm5qVSxnqGCeVtCkOjtI6bJBMpkilMmgkK2pqaRsbpq6ynJ7uQZrmFNNyZJT5jTX0DHVQUVxO3ZwSWtuy/ODLt3PRey5AW/Cd625gu46RenYzr4T7WRWo5tm2bUQNQbimmkwiQ31VmFCFgT8W46TipTzY3ksyOYibcjn35At428ZTaXm1n3lXr2P0qS388eGHWDQ3Tkfax2CyjrnzFG3tYwyNplhUVUws7ZAdGcMsL8GJ5aibU8bO/ftZ2DSXw7E+VtY00jfcz8joOFHLR9m8udSm/Xz/S9/j47/6NmNjvdjj/SwprmCk2MXULmXaR9XqMnrbeuk+mMbxKbLahVFNJqSoIszChSY7ehwGOhJEwjV88opN7E4M4rdtHtvcxsEv3ssTyQfYsWeMskiAbbueYNSCC9ddxNBAM48+3cXpp5Zw8/95FFfYKJ+BVpPoDelpiYYAUxpI45iTrRASJoQgEAiQSCQwTYlh+bDtLIbwHGJZaZDJZLBUGMMYwpRFCBTaNhD4EEKTcZII6bJ98y7Ofvsq9h7Jccstl+Mr2Yrft4LObkWJz0Q7mn0jr3Hn9c/x1HP3E6xZwn888SnKjJUsX3Ianfs2c9ppF9KTPkrL3kOUpBOMRqs42P4G3/qn53niPz5N44q1fO5D/8qO9mbufPgbfORzP+CksiWYGRcpNVoKpHKPA9iCFPhkxwZXexy4d8AHPEtEogEXF1cZ5LIgDYew30IrLxmbYKqDc3Js/GRn72ROfrrFMpN5/2ba/onGUyhX+WPyFKwBoae0L/LUhFIKpTVaH+OrC+thCoYI6dExSuMohcqDpWHkfSxi6hiU8IBUu14yPAOZT6ugvWgS7dFYUihMvM1g+riVFqBdHG2SVSZjWY1pGvilImCC4R1fmji0Zk6aVykltjvVMnQKfct/zy33/+1w4hIY6B5jPAOqPcuyJevY80YrtXOgcgHEwuOUlLnEug7x6JYdPPLU8zj+OO3NT5KI9zHgH6Q0HGZFw2KcTJzSuQ5Lm4IsXV1PfQgyxS4DPS6uMUwuCwdjWaTMYoog9TVzqa2qpCuVIpgTWI5NadCHkfBDwE9tTSNB4aO+OszeA4c5dVGQ2x68iiXrSli9sYFdbg8fWnMGL421IxNxSldWURIs5tyLL0Gkk6womsvgSIKq2FziyVKe7n2d05oWwFiYNWXLCUQUv3jgcZ48+jTXN13Ktx6+AyPVR3NPjv6RKJbdwd4jhymfH6UsapEyEpSE/TQsL0cmh0jLFMnUMGevWoblZKkUQSLjApUzWFk7j/JIBSKVY2eyk0s++24WLZv
D1Z/5FOW18xhUo7hpm41LzmbD8nfy4K/ewC+iLKiNMjSQJmAX448ECBWVYWZHef3AAIGgn42LKrj2LU0c7T/E0Tf2IRhiWWaclR9/K394YhvDLTt4bs8jHO6WFAcshg/tYkv7ES48q463X341512wgmxVFNCe42oCvAWSyQdxvJN9hiHyPLgmnU6TyWQIRcIIw0Qol6DPj5HnSn/+i+9y8y0fIRgcQ2fLOdTZQ04IYrkYjpnG8afxF0dIOTlWn7qcG2++laaAxaKa0ykqXcrRdkk5WWoa1pI0Epw9fwP/8cL9bG318cwjX+T3N2ZYUr6e2EAr85evxupO0L+tg9JABlEdoH5xgOJMHcSHcEPzOfOiK/nG3Y9z+10f4hPrP4m5e5yRvYNoyyArBaaTw3XdCeqioP0WRGuN46h8hIQg4yi0Mjim2SqEtjExcO081YSF6x47pFKoZ7KpPpmfLrQ9GVCmhwNOl+lgP1P55O/j7pHeiVtDesmlpm8cExq4lrjaRCFRSByl0ZMAtZASYLr1IhFTAHxy+ZSP8DIX2q7GdgVCS0xhEgIiEsKGIGRITAmG0BgU6D7To0YwcZVEOy4hSxGULgHDc4e6WnlpU7RAINFa4mqJrQUpx8VWXpIvWwlsJcA10Y4BrkSo40NNTyTGjTfe+Gdf/JeSr33tphtLgwbBaJKmqmJ2bW3j5E0XMTTicGS3w3gIitpLEaeW8MPbbic7HkM4Jv3Dkpydpn9M48YyaGmzZN08sqF6Nj+/jYQTpr8/TWDUYDTnMJSRZM04I4N+KotKGYp1YwrJaL/N6GCMnCFwAwYjySyB0ihGNs5Q3xhJ28+8U+cjRiTPbeumOJhlZDyDDGfp0jYPP/cQ8UN9nHXG33Fgx25yhgOd43z+M1/n0c2/oipYSV29QpomMlNDKh3DqjI4vK8d3TvG6adv4umfPcC/3fU1gkUpuslSH6xjQVGcxfXlOP41mLER2g53U12zgM6BXhaV1ZALWSSHMkRD1fR09aAjUd570rnct+0FGksqOOSmmBMM0ZsYJZwzCNXVsNyqpLy5i7bcAMlMipraRs7fdCGPbt3Fpredz3D/fvrHFcFAjiHXpdyyiEbTjGWiFNctoMiXJBcs50jMpq23m5IAKCdIcJUmXF/K/PEgr4QGmJsMMjQ4gp9qhnWG9rYxyoMZdrfvYv3Z5/Lqww+y6Yy3T4CFYXgUiiELB+xBGgIpjXwMrsA0LYygn3QqgWVYCCVBCGwkKa147rXXefa5n/KWM9/PssWncLBrHz/94Tc4c+NbqSgrx1UZpIjQ17mX5196gRu+83m+dPVX0HKYLXs6ObSvl42Lqml1XMp8Cdq6exhURxnp6cEfSqDlAp7ecR8HY9tZ0FTF1s1PYwRq6U310y/LKTVTzM+tZtlb3s95G9dhJyDq5Eh1PcIH/+Fu/v7md/KsE2P7/U/w+r7dtI90s7K6CeErHBU/pilP5bYFSpg4CBztHbcXhvYOugjIZiVZ1yDramwhMVEe4BiA0AhmzgQ5WUueiS6ZXj4dkCdHnLyZaK2xHYXrKjQC11UoTxXOP+tjWSwndQJXeaF2tvIiO0BhmBLDADGR0tbjubWXlQCvxKNoLMOAfKgq0+iiQtiqqyWOFt5hH2FgGB7/bfkBofKnMTUIT0PWIr+xCHDyB49MUxD0gWWAYSgMKbyEWVIghEQLOXGtzqfwEkIitUQKiSGll/xNC4SRz/0jFd+9+abeG2+88fY/Nb9/FXSK5TO0v1ixqGwRcdlFVkvKQwYqnUboYuI5zdBQlsYl1aR6e3CLItjZGE01TbQc6WZBbQlx4qhkiIsWL+eB4SM4nV0Ea8qIiCL8wmIs3suwsqmpqUcPjDI0EOPUtYvZ0X+QcMkcPvAPn+SX3/pnEskgJSGJo6MknH6i5TYrfJUcPNDHvAs3MdQ/REVIM9I3RgQfkYpihuJDuIZJT18KnQYnKKmMhvEHQ8S6j9JnpmhqrKIh52P7qE2DL0NUl9E1mkTXlHN022HcdI6XOvfxgSvPYUnCRpetYcuBLYS0n5q5YSIRk7IKP3taunnbW9YS7k4QKxe4YwmqS0/ltp/dTeP8alafeTEvP3k/Rdph0xnnsfXAQWrKDJr3d6FMxYa5pbyyZ5AVEcVnv/pTuof7ePLe32KnE1x0xQfYPdbJY4/ey5lnLCcxvJ/Q8GJ2HW5h+YoqSmUQNxxixPITDBfz8vPPsaR6Lo52WHnmPJ5/ppmVdQ0kdYqQU4TjugynFE3REEdSCSyh+LdrbuLpI0/zteu/y+iojRBe2KBSLj7LmKJNGZbEsiyy6Uz+CLbFhz94GT+76w5sEcEiCXaOpx5/ml/u/g4L3MV09x/CSGf59lde4hv3voNrr/wZXZ0xVq1fSgiTu3/xCO943ya++++38vDhe7nnzmf54+1PEg100XJwO6PEqBhKMThPERifQ3ffCD+75UVu+e41lJZHyYV68MerWTn/JA4rm9efexDHjhP0FVFTU8PRAwcQlaO4EipKziFQsp1ofBlvHHJoWlfBDR/5DiG/nwN7mzll4wZc4Xq2PSd2SuZyuQnao5CIynE841spxXjOxM4pDBN8fgjJY87L6VkNJ4P39IidQvl0OmSyTERiuO6Mdc9EnWityWQktoRw0EI4GRAGhvQO5Xi+jmN8uOfElYzlDNK2TdDvwyKHd6zdOz5vMDVsUUpzIlug40IKiSk0Po6lvS34VYQQXrI15cWTOxpMAabhuYpRkBUCv2GCcvL0jGfZuLpwYhZQXmIsaeTn0s0f3JESoQU5pdHaQLk6r5AoDK0QhURvSmArg4wCjYHrgs+wMUzPYbqwwvzboVNCIR/zNpUyYh6ltmkVFbVzOdA1xoCOcDiTplcrSot8dLQewQkaZHMGq2oW8MaRQyyuizA21kONW028KMXOsR78RoZ3XfE5ZCqEkBbtPV0ox48vF6TvSD9JN8u89YtoVwmWVDWgApq5VQ2Eq8rw+10SJHjLkjOwujQpu46nOkbpFyGee/pVRuMp+g/ncIsiDIeguXeQfSlNSoRwlEKHFPMqi0mpLO19/cS1oiw0j6hSjBuNBEokKVHEG47B2991MqfMWU24KkBopeD2r1/LqvEg0XmLEDWHmbd+ITnLJho0cYIJursGWHPSMtoPDbJvZIzmA0lCbilH23fwlRvfQ7UM8e3bbmH9xe9i1fq38OSO7Zx20YU8v3MPb/lf72E8GedQa5y5JVG+9bt7+M0zD/Hb3z3Av9/2Ay7ZGOQ/X/gdB15/gbmLNF0jGWI9jdQsb+fKy8/DkQ20RyPsPNBKsneAtt27KS9dxOhIgvGRcXJjJmHLYm/XKEVFxew43Enb+H58ZpphJ4XrC+MrCnL9jz/CQNcYKAnCRuNgWZJwxD8lMkUIgaG9NKeBQMDLmAfUrKkhISLAOIZrcuk/Xcby9Wezzr6MuEpy8Gg/hr+MO37zGQbsKDff/TX+sPMl/DrKpz5zLWe86wz+8457GEoN8oG3fI5XfvlDegb+yGOv3UvLYYGVLaVbVtLXbJHW/VSUFPOTX3
yXLXub6R5XZPYN8MbYbl5qe5kjzU8gtEsqlcAIaoazwyxYuhZDVlMaiRKJDGD217Cj+wXOOmcTuZEc/f19xOOjbNh0Gul0GtfVkzIEHg+8UkoCgQDBYJBAIIDP58tHaXj5RXw+H+URSV15kNqyABURi2AwiM/nO5abfRoXO32eTxRtMhONUuDoJ5dPp2Nmusfv1wQscLNZJAYU8sfkTzsVcnIr7UXd5FyNqxyCPhMrzxYLUQDDqYeipJT5SBSFqzW2ziccZDLIywkHueu6aMedlgjMIOe4pF1NBrC0jZNLk3McL3TQlTjKOzBEPo2ui+FRW65E6EkbmtYo18HQYOLiM/IHgJQCbZLSFmOOybjrI6mM/MlbL/OhwiSVM0hm/3xo/qsA8cR4lki2GJtS+gZaaD/QSk1xDbn+NCU5aFQ+iuuLWLS0CTfnclLZSrZtPUJtaTUpn4+jQ/DSniNkhnLYozZrnDDPPf5zvvnPX+PowcNU15TTNzxCWTDEioXzqJpTyqHt3cxbsILO8TinOII9z/6B5EiKf/3HTxPvETx85Hm+8qlP8PEVF2MrlyUrlnHZ299LWaSKXGqIr37ph8RiCt9YijNPOgmRGWXlyiaCpUF64zF8YU3YzJJIaXyJHpZGqzHkKG8PmwTdJBVBg21P7mRX207Koz4+ufpD7GjeR3oFDAdd5o4vJr1/gF/99NNUhW3MeA0yZdK8ewdVc6qpW1yDGXY51NtNqsSmf3sfp6+uYuOiauxXdmONDGCVRhh69jXOWryKkZa9lFu1pKwIiWSaD334OipsxVVrFnLJx89nf/atlEfqWVW/kNMWvY3OvV0oZ4RDbQvZ/NobXHrhQipyaULFBtq1CUWLCUbibDhvKfWLqnj18RdZlgvxkcs2IhyHlesaWOIPI3Ip+tQYxSMu0aTJ8KhD2UkLGBptw++TRIuCaOWQy6S9lzTPg0uZTxGayU7EAN9z7z3s7exg+69/zFB3mm7Dzy+/9QSfv+Z0hiMdqEN72NBwKZ3tcR575Q+I7hYS7d2UWZKWRC/Xfvzj/OKum/ngF79ALt3HaevP4D+efYDDyVYao4s4yWpleeNq3LIwZiZFR3cvd3/vOVpaf8QPv/sUsaFDdHX1MM9YRu2on337h4hrhfYFyKbHiA8OEghLYj1hdLyGQ7u30Dc2xrL6IMXVIX787V+xYsVyyisr0ELjDwaOCy+b4HRPEAZYAKIpoG8aCCN/XDyfKe9EoZrTAb2Q6GmmNgsyU/z49NDP6Y7Tyal2pZRYlsAnNabhhYZOaUt6vLLtKGzb9dLCKkXA9PJ5W0JjGV5IoNQeOAu8/POFj2Hm84K7rhf1oZV3+nJS+KEQAsdxpkapaA/8s7ZDzvWiTFIuJF2TlPi/7L1ldBxXuv39K2hUt5hlgS3LtmTG2LHj2AE7DjNNOA7z5IYmyUxgMklmAjOhCZMnzJwxxbFjZpJRlmzJYmw1Fp33Q3XLkuLMzbq07l3v/6zVS93VVaequlTPeWqf/eztocNw0Wk46dQddBsyYUMhYilETQVFNUExsCRbyMw0FXRDYFoSSInB8lCmHpUVIqaFGZOQo4pNmzQtVCw8SnwwMiWimiBm/B/jiTvdDuq21hJuCdCxXyIjL58DLY2EFY2ILONOhqARJtZh0dIQoS62m6POm0ZuYTqDk5ORhJvC3HTSM5KJDUlnZziXoSllPPvyU4w8eix6p8WgYQMxiREJdRENxhgwIg1HcwPJ6Vk0DUlDi7QxujSLZ7/6lDknj8ZobOXvLRs5mJHG3DvvYN3+rVS37WfXtp1MKj+CqROPZtqUk2jxedD31TC0aCxtzZ2khQyS0/1kmmk48eFtj5FSWERjtJPOpjaaR59AqpJNsuIjfUQxF5x7PIU5gnVswIpGyBODkDvb2b13K8npDv7y1FI2Nqp40oJErCCFOWPYsWgFka4GWvY20NzpZsPq3UQHS2gZYc47YyILarYz9oJrGFxaTFDSqItquNx+Zg8po72zDu8gGTpNzr72NOpjyxhQ6OWbrc9T6BB88t03rFmxgBPGTEW3UshOjeIryuLrxTvojsWYOed4Ai4YWz4CRzSNHatrCHbrDJw8mAMpu/jy03cI6QY1He005Q5A1b1MKhhCzGOx88AB7rnvZX43YQ4zxs3G4/FgGAZOpxO329uH2mZZlj355VCJxWK4HE4uvvA3DOlu572NC7j2zjMpCAT55p23efiNn1hb+RFkpVIVXk6bp5bc0VOwtFH8/ra/sHHL+yz666Ns+fR7Opp/4PyTp1JWMY53f/wWWWtj9pAT6Yzup6NwMFsrqyhJHUGaN5tl77cy5eTRiC4LuWUXeZleho85j7m/u5XF1e0oMRiQYqF3HWBE6QmccOJVrNnSREDdRkckyomn3cIdv32Nh++t5vrTb0SJWcSiYdxeF0gSMV3Doah9i1/6BdvDsUV6U/HsCWEZCYEkJNtR6GeY+qHWP4D1Ls3/pVf/7ROBuT+LJtF372WJv/ZkpI1rWxIg7Alc0zTRdJOobqEZdn9OWcLlUHGqEopsw22HON4JFUGpz6CvYuJ0SLgcMlgakjBtMSup74Ruz+Ai2RPplhTP0mUZSXZi6DKmoWCaClFdJhq1TTeccXExVbFwqSZuh2lTPy0J3XKgCRdRS0JDxRAKpiUTsyRiwuaMR3QFQ7cxfksR4BQochwPl1VbjdEQ6JqFKoFT+fWK4v8rMHGnqogx56aRnzGSzWurMGPd1O/pJD0VwgEZFIuCjAF0RE3yipNo2r2fQNhFZrqPIEFGHzmeJR+volPvINWfxMSxRWza00FekkVbWCIkaeRlJeHrjlJcMZiD9TVkFY6gq3svkWaFTI/FARFiYFY2EdOkqrWR3139KO88+jijH78J8fJOnnjnSW6/9jzWN9dz9/k3sVhrZPlnC3jkmLO58o1HSC1IY0BaDtV7N+JT89CsNiIuwVHDjufU8Ufy/uJ3iYYl9jQeIMebTMP2Ot7+8lMWv/s5j/7wGkdPLGFzVROOoImc5qOrPkxFQSYiL5XdO7Zx1ugL2NteSbi7hVajkzHleWTkFrF6yRqKinNZt74bQ2/m5muO5vnP1zPAn865Z9/CvnU/Ut1USce+Zna2BJg9ayY7G5fy7DUf8sm8P7KhYT+Z5SWs2t3IzWefw7efvUNle5RJQ0rQukzMATL+QIikvFQkvYVQnRs5JYtwrBFNCeNyDmBP/X5K0lPpNkKcPaiYRfuqaW2DS+64kS8Wv4W0s5N7r3uc3QebuO3uezhu9mwuuuhC5syZg8fjwev19uC8CScd+4YDPQZOp4olxVBVF5IuUzJW4pE73qBly3ZWa18gd5USCHVSb1aRmzocSa+lICsHy1HCxvUfcMuVT1DTsJFps/7Iww9P5YgpcyC6nkhLFp6scrZs+pAxQ8exbOdm0oKZFE8swmzo5EC7SiBpNeXKDIpnDqS7aiff7VzGuLQBRPRkHnj4c067ZDgnzLyeqpqvaWmt58pzn8JV30Fu0TAaanfyQ802phw9kvV76tnYWMuT19zH1PJCzC4/q
CEsNYmQ3kmaz4dlGSiWG6GIeE23iWQoWIrVJ3uGvloyhwvUh2OPHA4eSfT5iyySXt/3hlIOt86hY0ms83MpV8O0A6mt8KcQ0+xgrsoKTtX2YZWE3INBS5KEsCRMcWi/TqfTXq/PucuH2D1WfDCREvMGNt6NZGe/pgWSbBIVDoK6A92UEWYMj0vFpdj8biEkDN3eBlkCoWMh9ZTrG5aJsBSM+HlKAmLItoytZeECTNkO8gnZTdOCmABhgmJCWAGXAFXYFaE2J1LCaUFEgeFZ0v8dTNzjBsfmEHU/LcEvmrFCBi+++ylGCNxpmXgHFjCrdCYZXp3js85gasGJkG4wI3UoppxOcsRLQ2sbd150I0XDcjlQ140pAoweOQ1/isyRg/LIKcxALsuhORZmXOk4mtr3okUVXN4YRfnZGJLF3sYmXEqU9JRiVq+bR3e2zKL776fVtZsbzirl3OsfILovzIU330hsTwe7F66m4LJzGD96GuNzhlGSV0qur4i9ehMWLsxmnb1Nm7ntT3fy0+YdRNu6aXLpeAwH9RGNzkCMXT4HV116I/tbZcxmC5GSTCQm+PKHVYw++gQCDSEG+Sv4ZMF7OJJjRMJB0joVOva2sWXVTrIyiqjc00hOboySYeV8taCZY0cPxpI8PP3Hexg15Tg2VbbTHXbyyhOvM7Uom73bYdLlZ3L+888zIyOLI4pm4w91s3r9DjLcqUhYDPYU0JJmkA1XutQAACAASURBVKaFyR08hGSziSk5KQwshNZoFZmZqZQXDEeVI0wqzidiqESCDrYaGoUjB1JUmseRmbN47qp5vPfhDhzDhzD3qquIRSMsWDCfSy+9lJycHJKTk/tkfYksD+ISpVYMWRXIqofWgMZ3333L6BkX8PLKd9nq24THGEiJz0GWp5S7bnqWb176gQtO+i1NbUG8mUkMH3cGu5ta+e3c5/nokyuZNXYiA4w01n/fTYteRVdwDWFvEhnpDu646VUyRh9DUnMqe+sPMn38Ubx7yTo21NWwZ9EHdHYOYmLG0XQ7j0RvbeLuu08kO5wLbS0E6trQZInlP63GyvLx7dIPea/673TyPfs7lzDCqTAxR+XVF6/l2OtGIqltqC4QrhBppKIFLRTJjamGeoJtTI8i+rng9qcKHu7VH5/uX5HZP9P+Rfpdr/56V24m/vbeR6L03c6sbd0R3bD1TgzTDt6mZXO/NcNE01RiUQunpKAiUB2JJwQlDpYk/ERNTEvvc+y9zz/xVGKaOsR1wlWHbBd+ybYGj6woaJaLkAZRQyEmHHRoPsJRBy5LkKkaFHgh06mTJEVxCw1FaCiShcBEt3QiQiVqyWiWbcAhLJWoqRDVbWglYig9EsEmCjFkdKGgoxAzIBwzCWoGMd3CjLNd/JKGz2nhclrIihln7kBIN4mGtV8dP/9XBHFTkbHc6aRHc9ECafidKo/++S5mT76CWFcz4b0HKTx3ChN9M2nL72ZN9gY8ipOFTWvI83TRYTSSN9DHV9FKpg45noyBA7Fws71uIxlFJThbOvG1WchmDLROIl6d9rpOSgYUEoxquD1Rygfl4mtLx+Eawiilm8otjeQ4k1CiaRx12dU0ygUoJY0I0cln775F9dL5FEweyDEnTGDDlmXMueQyXHWNnPqXR5C8EE71M3HSUbQYQSYOKqe7JUxzWjIvnXc/7y5aSd6QXOQMhblzL8O3pxPnfli3vJJidy7HDTuC5x67hwdPvo07TjmNP07KwVfko6q6llZDw6oooKkLArKXSEhChBW621Ixm9uJOBto7pJJSgkyZHwOr73xDLplEsrLZvdP39DVmsSggkKOypP489k3M+6+vyOnO6mtEVx03S20mBJpHi8tbTp3XPIYQSmdlu59CHcOP9T7OagHuOakqWiyoEFvZWC6n5DwEOsO4EsWbDwQZOu2AKrTzarPf0uraVJcmMXxE6di+b0EDY1YJIKI6Wia/Y+qKEqfysGeACFUfH43O3ZuxxRw3c2X0NFUzeTUEoZ6nXQGKgmZdWyrraQutoGdi9dwzfXTaTc7seRkDu7bjyXV89XSvzDhwmIqt7ewprGFYZMnYShOavb7aWoOMNhXyp7qbt78+M9ILVtYun8l951+BzUt65k772zKjozQqqczsjCNls5NmPoSSieNpyVqkVU8gG3Vy7F0g+WftFJf/xEPvHEVuUYAf6vCnbPf4eTie9gZDDLnyAt59YmlfP18DQv31tCuJuE2dDQfCLfTprSZSZhxQSbd0KAX9e5wAbp3cO0fWHszSfqX0f+r1+ECdO9g3h/e6c9uQVERsmJXK9qlRpimhW7auuCGJVBkA7dT4HZb+LwSqmwh4gVA/eGa3vBR4nh0TaDrJtFoFF3XDzPo2KXwMV0Q0U17HdMeHiTTwKNo+BwGTqeFpQhChkzIUOk2XQRIIoaLkOSg25IJGyph3a4iFYYZ9xSV8cgWHlXgksAd13kxDIuYoWMIiOqCqGagmybICj6XQrJLItkNXrdAdTrRLZmwKdNtSOiGhTAFUVXBieNXx8//FJwiSVIN0A2YgCGEmCBJUjrwAVAC1ADnCiE6/lU/mckpwp0RJHdwFnX7m8gqSEK2VK698EXGlCQz9YSTKJ4+gZQ6uPaqy7ju/luQHCYXHn88H3+5gPXbdzPzyCF0RCDT7UdP9+ENt/Dis19w57NXki0JGjrBkywRa2tm6rCj+GLJj5SPHEvMGyMpXMvI/BGEGwv4Z8vXVAzKItidhdzexW4RwmnG8Ce7KU8t49Xvf2TPypUMnzIFj+TErGljZdsBbr7pPKaPm8TK9WuZccKJvP/Zi5Rk53HqlTdy+2U3gSbzzKIF3DT9GF559EH+seVHlm9YR8f8nayo281px88k1GbgG5bPa488zJP3XcN5DzzJ7Dln84cji/nrs2/x50f/zNKmBhS/l6buZjJwYvhkUmWJg11dJOXlUpZRhDPHwU8/rmD2kOEEk1W62yRemLeQK489lZ9qlvPP176nNkfgrA5w7kWnU+JPZtyZR3Hvn15ifmHpf/j/4b+y3RkO4XR4icQ68CelMeeU4ykcYnLNFQ8RJJm/vXQu7i4dD2U0WLvxCCfl48bSrbtorNxCiiuLjNJcDlS2MrDUT/GoU9ny01paG3aRWdxFMNzAjj1Bhpaksm3PQYZXjMEdSKXbW0eWUcCVtz3Ku68/S1PDTtLy4GBbjFarC6PBpCglk11tlaj4ueXc+3n1k78hp0hccOVNLJ0/D19SGi1Ne8keMIjPPvuRld/v4I6nz2XywLO48Zy7uPKGY3BNHsTT17/K/a9djxrai0dK5r6bPkZVLd545z30SIDCAXnMnHFiHxu6BK7bH/Puz+3+JTjlcJDK4TD53n33z/77M1l+HvDjx2b145Yn1hUyLtcheOYQVAO20QeAimn0KhiS+xto2BxxRY2vYx7anxCgCxkhWZjC5psbkkCNTzZiCQQGLkXG0G2bNSEkkO3jMYWCplvo2FREtyyjqBJu2Yrz1cGSbWhGi9MUEXb2byIhC3DIxCVz6SnBNE3DHtAsMIWEZlkoAhRF
xRJxezpJwhWDgCo4qkj+H4NTZgohxvTa2d3AIiFEGbAo/vlftq5omKVfNbFlSxMnTJxNoTICV2OYN757khOuOQ9JcjHck0nOCfmcO+sS/vHcB8zMH8c/fvcJRYNGMmbaEAIxGHvKWTS0BtDDAVKTB/L7R/+NQJcGA/0YsQhFsp+p04+h1qVw0syLCHS1U1vbwqXn/JVjB53LSws+Qpcy6NRjtHorcQ5MRgsFGJ6dj97RxVf7trPti/mMmDiF4085kcpn3qVk2mh01aTT5eSDFR8wtqSCrT/9k8G56bRX7eLzV99hUOlAyjIknjrvYu6Zeyufr1pB88qtDMaHNzWN5x56EqdH5s/vvUZFxQhuvu8ONh7w8cFfX2Dphx8zvHA8RqmHancAf4ofd0YKg73FNLdH8Fs+rKhMbkUJImzQYbpZ8s0qjho5irpgN62BJiKhDl5/5U1qXFVkSWlMOHsKads6aN6+C1fYyRl/fICuJQ08dce9/wX/Dv91zTAMXA43hm7y6ssvUbV3EytWb2bJwjdIkwoYUnAk46efiUODYRXHU79hG6W+YpwZUDqmgqqdTYTTD6B7HLzx91tp0OYTc+9iyYa1ZKS5GZdxNA31KRw3YRaiK0qHZwvN9RYranfxynMP89iDr6L7AtS3KQzJcDGqazAZcowGN5w97DcEQxG+Wf4lA8oGkORJ58NnHiYa0+morab9YCNXHXMfvz/1MY6YPhBvRGXpT6/RlBTDneGnc/0PvPju/fg2xHBq+XQ0d6JIENJrKR8xnLy8PI6ZORP4Of/6l973XnY4GKT/sn+Vxf+a9kuTp4dWsPqwVNT4e9VhywxbFiBUhKX2kR0WmBiJ6lNJjr9+zldPGESYpolEwif10LHZy2xaYMyAiAkR3SJmQsxSMYSKEBIqEioWTswebrnLoeJSFRwSOGSBLGwvUEWYCEvHEoYtQCYrmJJd/GMPMnGpWctWmzR1A8vQbSldIaNbMpoFUUscYhgJgWQaSJZANSDoAJfOr27/HXDKacBb8fdvAaf/exsMKxmKf6gDrV7wj+VLue2666kMwrwHPmfkCZMZO66CpCOG88MHX/PBd09x7fVnc8pjNyBNTWbK3JkYUcjKz6Vl3RrSS/JJtbxs3XeAz17+FK8jnWdOe5ODDbBq3x42bd1G5drVvLH8Q5IsPyk56fz+r3fRMjyNTdtX4GkJUO7K4Id5HbQ3RDhn+oWMuP4udu+W2Pf+ItzNQZ6893Q+e+srKl6/hzMunMVlt9+A3NGCW/jxFueSE5Xp0NwccGex48BmJo0uJ+IzOf+O2/m+eQOjpx/PH/7xDn+64xHGTB/KU/c/zKWX3sLzv7uL0tHlaIEI+YNUWgIBXv3kOZ5esYHAimWcPmQaWbk63XX72WeEyM0pxhEWxFKGsX15M1qzRKS5jtHDRvLD1lrcHpVISxjdDauX/hk3WfgK3GSkpHDCdafz7rLPGDFnJGdkF7NOrsRthvtclwf7JW39Px+u/dp1Euv1f59osizj8kDIkpFUnW4pmUWftFO7/1u279lMW3sLTXoVKza8hp7qYeXWjwgqYV7+6hE8SjHD0kYT1LaRmzOE9p2bKCgbguKTqNq/j+nH/oagnEd2eQpdsVaWb/+BpIwiUnzH4rcKSYl24LAU7nrpclrrduDJ9JLKSMJGDVaem/RYgOe/eoetC7cwLWcCJwy+iKOLclDcLvIyx9PeFsCXUcolf7uU5qytzJw+hGtu+Z5QLMy1t00jGAxw7yWPM0Yfz4yrbqGxeS/tRg6SgD/c8zgfPP9Hftq5CdOVgpCCfeCE3kH3kJembRJsGAa6rh+CTuKmA7Yxgh00TfPnPptCSOgGaMJBZ0QQjZnENKtn3d7Zfm86Ye/A31syIIGBW8KWl7UlA0wiuk5EN+iOGYR1gSUElqSjOhLmGPFzs+wKXbt/CyH6PinYTyO9JHcl1VYFlEAXoAmZsG6iCUHUFERNCcNU0XQ7y/c6JJIdFgo6DqcEirDlcBGYyMSEApb9SbW91XDIFjIKFgqK7ERBQhEWpmEXDSmS/Wwg2z4RhIVlW8ihEMNByJAxbHksHMi4HAqKED0Ttqpizw0ITFyWicv76ymG/1k4pRrowK59fUkI8bIkSZ1CiNT49xLQkfjcb9urgasBHD7v+NFyCksb69m9ehO1qzZy0V9uYOZlp7Lr+48onVDBsoX78BVmc3BTDYXFGZgxB93EiIS7mD75KKo3VVFd20bt6rVcdeUtpFsR1qgmq97/iMzjyvh0zu+4rvoTsvxJJBteVqz7iSlFpaR0Rgjk+Fm3bRcp6Sazz7ySz7Yu5tsRV3P56vfp6D7AoFQnCz/aTnFhEWkXjuSspFIKOj/jrj0FuNta8akaLhUiuothRWkcfcJcHr7/Po4bN5w9NbuR3FGqtxlcctTpPPnKUxSPGUZZSgqVXa0cNXU4WsSNqmrsrN2POyWHSQOTWVPbiBYwMKJdnHneb+hsMFj87Wf4Kpw45SQ8nW52Ne+ioiwV2V/EUG8ya6p3EY4aZOZ6CUQPkuwvINmXzM6N7Rw5KZmlwX2kNOURjnZTv74Twwdr5y+mqqmBnVsqcUoa/PkJHpTgD+JQUO3/Hn75u/7rHa79mm1/p9miVoamIVkhfly1iaTUAJ9+uIyiAX7Wrt+MlbQOj28wu3bVk59rku8bzZb2TSQrI+lsXU9KVhae8EEs50BuOOsesjLzeP2beXR0f4sSm0wgtpvTL7ycb958jW7ZJHVQOpGDJtNnzGbH1teo2quSVjGU6tXbsdo7mDxmNg0Ne9jdFOC2P/yJU086h7KyMgqKmtn0bTdzrjwRImuJmkPJzeygqctgxtQpEBjO8FEZvPbpQxw77VoGZeQQbvHy+UdP0paykblzXyA/t4ArH7mQDZ918u6Lf2JixZFUblvF6df+FndcGSlRsNI/iCfs2xKwix3oJDTdxDBM5PikoWkKLFPgdR1ii1iWhWY6icZ0271dVkhRbGaQqhA34Ti8KFbvAJ5goUiShCUSeHacEigLLFMGJMz4oONUJVvgzLK36zEcTvRp9s4vJUQ/adqEgURCAVOYIMkqhiUR0e0JTkuAZknEDIkU1URVFSTZzqSJM2csElxxm79uKw7atEHDsqVjXYqEW4mPgpaEJNtVnpohEREqmogLbAlQBEiyzS4SJiDZTwtCyDbbSrJwSOBWZDTLfoKInwIxAboFlhAYwmJS0f9MxeY0IcQ4YA5wgyRJ03t/KRJX8jBNCPGyEGKCEGICUpjoKCd5XomZc8ZxxGVncv5t9/Lmk+/TWWlhdrmYdcppRGprKRqaSUbIQb3TYFzhIAorJrJ42z7kdA+Zw7KZdNxIuvPXss5cS8eBA9R/voTSkmLOeuMR9n65ij2bdjI7kk+kMUpVfQtZvoFcdfpcVCPGtZfeyBEuL+75e8i9+2xad25lYn4xb7y2jFnTxtL22Jc0f1fJyIGzeWfvEcwpHom1t4szJ13N2NxjKHK6GZw5iUVvv8XqDetZuGIz5TkZyAKGDk9jbfALvMEIT3/1Aadcfj1fLPm
JkKqws6WaVQ17iMgKGYrFFZffRiwYpbOtCW9xNkuW/sCH/3yLNtFNZ6ALvc1JbXc9px+ZhsNRTGvLLg4aDpKdWUhOmYaqFrzyYJQOHSMskV2g4c8aQpE+HkmB8oqhDBgnceLsGeTm5nLJeRfwb7+/l1NPOusXA3jvoHy4bLv39/8qgP+a78GezNIcUTpjBgFXKhY6T7x/A9Ut1bQba2mKbCIYLGf/Pp0xZSmUFowgori4cMINaPUNyKleZC2JOquAOcf9lnGzT+T6v57OvtBKVqxtY2vLB5Al8f0/PyTY3UXzwVZWfbaSM866l/nzXyPiG8rpVz1G807BjWc+w8sfL+WD1QvJHT2FvLI8ajYt4LoLp/O7e65h9Q/dXHzFqQzRFEKeJAYV6STnpjAs28ve3ZUs2fQ2323+lDytlNqWDWzetBhnajXOiSO5+Jp/8NwzN7Crfgt/u3weO+f/yLW/uYexU2Yw+/RrMGOHpHj7MzMcDgculwtVVX9WoQm24qHb7cKlOmxDA0XGoUBC/VCWVRTFgS4sXG6JTJ+gwBdDcWgoDg1J1rEs7WeZeG+aY/+/umZimgZCGCQo48KydcJVxcCpGLgdBkLYE4RCCLAOqVT24POy2SNFm2hC9PbGNOOccQUJB6rDZftaGgYOycIh67iUGF4lSrJTR5FNJCuKpGs92Losy7YiuxwXuELgkARuWZDs0ElRNVIUgyQ5hojvT1XluNuQiVMBr2zilXSc6CSpAo/DwiVbqELH47DwOgQ+t0SKC9yKiVOycMi20JsDhZhl0anrdAmTmGHDW05ZItn96wWw/st44pIkPQAEgauAGUKIBkmS8oAlQoih/2pbt9Mrrr7737jisis54dyRXHX7wzx9/+2IWpPRR48kfDDG5r27KRqSgcCJEhEoyV6aY83M1MYgpeewaP+3JLmTCMkGZw2/EJw7cfiG8fq81zC7g0gFmbgiAUpLs5k2aCov/v0N0uYM59OHXmX6ybNYs2kDn3/8FjkBHze8+Cceve1qFnV3M6vTSctBNw+ufB7RHuGaa86mo7KGVqeLvVu2stuMMdk3gGkleXxSu4Xyo0ag72lix/4Wbr7iVuZ//xF1ukWez+DKY6/l9VfeYk1oO3XzVzHhlovJl00K00r4ccNaMrPyyUrWGFZyPLqyFd1lMbVkMo//7VMoz6IgxUXdtnqceRAJRsh0JdFd1cqsc06nRG3nuw3riHmz6IiGcDgtirJLiHS2s62qhrtnXUfZtAqueOQOyqUkXlmyhfMnDGHFpv0YBLnvrRcZVJCNctVvDxvIoW8w//ey9MMF/0T7Ndv+/6nlv/M+y9Y+zSuPL0d2Gtxw762cNudi0AyOmjQKj8vbAyUkGCa9P8tygod4aLmiKJjCwDAMFMmJEDY0YGuuqL0mE22dEgGYks3HdioGiuJAkWzjhoQkcO/Kx97HkYB0EHH8WrXhGtOwg7KqOnvkcIUQCNOIa3/bEAXIcb13uQ8PHeS4jriMxSHJXFmWsYQGwlYGFMIutRdC4FQVWwnTRpKwLANZyIQkCQwTh2ILUMnYaog27CShxDHthJKmKdva4sgKhjCwhBKncQsMYaA6HRCLHysqIGEIE8kSmPHiJkW2KZJYEpgCochYpkrMkgkbBgYSTlnBqwIaWCq4VZBNDUUxyc74dfZs/+EgLklSEiALIbrj7xcADwHHAm1CiMckSbobSBdC3Pmv+krPUESmSGbK1Tey/h+PUTruCJKHDUbXdaLfrqB1wnAamvch7WqkKd2Jtj3AjCvO59GLr+f+B37LmedfxEWXX01mrpOYCuWZ+Tw27zXyHQWs/GYZn2xbyedvvU7KID8OU+bsS37LPx5/ABEQDM3MwTdpJP+47n7m3nczJ04ayrOrfiI3OYvsgmS21FRx6zFn0Obz8d7G1eQHoowbNZJPfvqKOx9/itoflvPcOy9QUT6RMe48xpx+DE88+ScK0waQkpPDkvUrefT2+3jmxVdRAwHeWfg1FQUlTLnxYoxFK2hCkGqE+ctN1/PmmpXEnK0cWzaQhqoDLNi4m9NOO5muumYyTzqRJ+58ghRiIEt0C4UsVyqKabJyQyUTpucz99o7ePOdN2hr6iLdLYjiJzsphdbGBj5bPp8n73mIPbV1dDTXU5SVj+noJL+4lGEzL2X+4w/TbsW4aNfBw16jXwOT/L/2H2s/nDGDDE8nf//LV5x43lSW/7SXhR99w7rtP/HA3X9BV4yegGuaZh8ru0R22jtbVhSlz/f9YZD+2ieWkNB1HUlSsUxwOMw+OHx/xkpi+0T23NsGrWdg6AW59GWV0NNn/+WHq1btWSYBQsayeolqWTIydtm6LCtx2zY7g7fLQu2qX9sk2dYat+L969KhoGzr1ycKmg6doykAZJvfLtlGHGDrkNtYvNljdG0X/NjrGonfCYEl2/oqlimjmXrcx9OyrfkUGVnS47qLtilE72uVnf7r9MT/M0F8EPBZ/KMKvCuEeESSpAzgQ6AI2I9NMWz/V31l5SSLuZOP5bHvvsPQwhSMTyevq4hYnk5T034yvS5kw02b2Um75uVPSRcy6ssbuH7SONKLBuFTvZx9zS3cdMtFDCweQGbAoIQk2o6dRJFDoW5bNSt31/D1Rx+zZ/kSHnr+D5hZWezZXIcbG++59d4HuPOam9lltPLe3c/wxlfPM3XECFa37ya9U8KRk8XBziAP3nYXv7/9bp666k4W1a5j1/59mChEk2LETIlvH3mJKdecR+WXP/LWwq/Y+t0/+eeyldSHopwzqpzdwQCPP/UM77zwGrp6kILcDBRFwuPq5ECnzsBhg9ny4zbwmrhTsgm1tDBz4lCqqqpZXxnCcqfQ3t6OS4+gO50obgUpLGgzTFI8grJkH9VyM3pXLilSkE5VoySpkGDEQUv9LoYU5zB84gwWbV5BV307UU8Ow4xuTq8+fPD+f+1/rh249gIispurZ53BUaefghUMEvL5SDbNnwXSROsdNHvT9Xr7a/anCvavwDRNvQdXtyx6glVinf77PlxF6OHoi/1pif2P5XB0x/4QTc9yEnh5AsqxJzFlRE8Ato/fhj1sQV4LS7IwLTvA2gJTEoYl23K+su2taWfhid8z8demCwJYpm3zhpCxJNku1pElDhnkHXp6SGDrpiWISCpa1E7pXaqJyyFQFRlV2AwW0zQRCWMPSUWYOgYSmpAxgLL/bhVDIcQ+IcTo+Gu4EOKR+PI2IcSxQogyIcRx/14AB0jyZ7NU3sWfX3uS3PF5ZBgWujuAnJ5K5n4vM2ZdS7rTTXHnAJJFgGF3HsGc4eVkl/uQI/uprN/Kuqq1lKWmonWFWas1ct59j/L6ubdSGetm2vgTCLZUce3MKYwuG0ltbYx51z3F8LJi3n/xbdRuuOPqGzj1gWup+vALWtt3UDi2glV7djA7qZABZeWEOqMMMKP885WXcBcOJGd8GXWbD9Ae64ZYIwX1UbSaJj5ZuoANn//AtGlTmJUzlMVrN3Dy7deS53cTHDWc8oFZvPTJQ6R6WlB9EX7cvoa9sRY6ohaRUIx96yppik
gcrINoexS35iEsZSB50jntpFKGZDhIdZqITD9utxujowuvW2J68RCum3M1O9u6cNX4STViVNcEmJgzikCsCcm5j/HHDWLDrjCLF3+OWROgqz1IVixGyhDj37tE/6/9D7TaYCVjCitYsPIHJHScPi+psVifdXpnqP3FrWyhKcdhRax6t94FQ7Is43Q6AZvSaWuSHF43pXeATQwcCfpg/37hUGBPUOn67/eXxLkSAblvVan9SvQL4JBA6YFz7KcUwzCxzRfs4iLdsM0eIqYN+ciW7Vqf8MtMwCi6acbFs0DIEpYkg2khDNNmqpiGTUa3NBC2cJUkLOR4cIcELzx+PsJC1gVehyAjGfzeGE5FoMafFCzLQlLs4G1YEpqho5sSphnXWOl72f9l+1+hnTJ8xAjx8Icf8LdHb6a8SiNUlcz8sc042prx1DVT1Rhl1I0ncZKZTZG7jMc/+R35k0ZSrOps3GeiyG3E6mU61BAh2cfkoSP54Z+Lufm0uTyz/FWOcx/J1xsWoiZ76LYg1RHh5b1r+OG2N3nvD6/wft233PnlK7w0+yLOOP983BUFpAsJZ6dGU66BK5TCUGcSbQNykDdtYmdXG/c99BzPvfooZncAX9RBe7rKvdPPYuH2Dayu38fI0aWUbNFImnoED5x2AS2Dinhp3l3UrV5DJQFGuJJp9yi4HOmEogcYnqKyulEjxVAoGFrKjr0HiUU7SckoIqBHmZ06hvnbvmfM7PF8+fl63IqFqsmUlhawu3E/WUKlORZmQMpAQtIBrjvzLh55/R9MnDycvDQXy+q3U6gbNARb8fp16jYXMXacjwWbtuKO6tzV0ZeY+mopnH3yDA7WtVLTXs+4I0/h+6+/5JTJoxkwNpWWvRGWb6gm5FbpqmxmwMBJjJk4FKm9mYPRRmZPn8n9z/2VHCMNtxcMt8apk2awpnEvVV1B9n69Cd2w95m4wRM3u6ZpOJ1OLnv0Rt685xkWzf+O9VuruPPWG7n02lNw+sa6zAAAIABJREFU5JeRlZfLyvWVjBxYxwWnPcGSld+x+LsvINBAamk+iuygPdxB48Eoed4U6toOMLLoJNLSulixZQUOl4zPr1M2aCgH6w4gyU58jqGYbgkjVE2ynIvm97Np83zOPfUGag58xO6WEKlGDt2RejKzBqIaRdx064M8/cEf+fSBjxkyTeK3lz7IF6veo7TAgxFr5+QTXuf9D+9B9XVT16Jz1OgJpDCO4O9/jjB6732QDS1f8c7La9HRcEcdhF0mnjgH2jCMHsGw/pOL/WGJRGDrn/H2DsS9JxN7C1f1/q5/5p3ovzcu3/t9Yls4BNsk3vdll0h99tH72Hs/VfQcE2qf8xECu3Kyl0t9QnPH3oeJkON63TELWVXwKhYO2cRWI7efPgQylhV3+LEsJDk+mFggxfelSPagIHoGm/hgIkzbcShh+KCYNuVQSMhCxu0wMCSBZCmokoqOhdGjHW5imPbApCgSiiqQsK+FgoIsq2Sn/rpin/8VQTxjQLa4/PwzeXLeW+T6BF9/u4wJJ0zi2PRpXHXppTy7agmF7UsIxvx8/dNO/vnxWjZ+dS8ZecfT5AnQtrKdr3e9yR7Nyb5tG5kz61hOnXouf3nqUVwWXDDvjyyYex+7twcovXgcA7tTWPDKBzgnldJeXU3+bWeQvLUOVzCKSBa4oh6cBT7aQyGsjjaKRgymprqFspQs0pAYNX0mTz//Ev50L5bipCCSRMZAmY0Ha5kxqYTU1FzcnokEzCoqty+jvUFj5NBxjJlUzvtvvcXFF53Goq+XctSUI1hbvwfqgzj8YTKyB7BrRyu+Qdls2rGFbN8A2gKtOGUvE6fMJs3p5b1P36Eg1YeEi5AWxolFKBxEzUpCNEUpHDKRppYahNHI83//lCuuuoJQcxspOQ5efuEznnv+fjLcYVZX7+epO97kiVdfxms1csyyyj7X5IMhmeCOUTy4mLp9QZLyhpHuqGJSeSlKOEL9gXo2iGSstkaksEbxmNEE21OZMn4cN8y9iT07tvCbWy7m5Bknsvib+Sz+8GOuf/weKspH8NBDj/HB+x9y1jln9rmhwS7ccDrimaEEp5x7CmefehojsorQQjpHzJmD0ykR6gqT5EsCYPv3K+gMHOSyZ/+N5154nttvv5XU5DD5jnE0ybtxmQZFBaMIdjWSbElsam7Ep1jILgnV4UGXuylUBuPI6SZHGcHewJd4rGRiQYvCiccw/4MfmT55GjuqNiP7unF4PBTlutBFhCuO+px3Pn+M1ZtW8+rrH3PehSOZesyJKLF6arUoPqWe/MxjmFhxMgWFw7j3z2eQX/Ebhj/515/dByevW82EinFY7sMXtxwOvjgc1gx9A2jvTDcxKdm7394Tir8U8HsH8v7+lr0DeR8cu1+A7r/N4QL4oQHEtPHvBNwgbIceUwhipq3LrcgGDkWO0w37npP9G4Bh2RWSLpX4E4PN3JET+4+v19vvEmx8WlIlJBlUIWEJe14CSenBy22dcQAZS7LZLcAhv1ApITMQnzuwEr9N/HwUB6pl2Zm5JOyqTUu1DSskg5L/bkz8v7JJHod4Yd4LlBx9LGdNGEPLH7bzUclGLr/qNPJqXexauIPT593JjsUfUz5WYevOIbz/2jySRxYyMSuX8uGpFDROYZexjLF5E1i44Ucm3nUt4ww3Xy3+hlNmz2Hhig9Jza5g9ccLmTzrCO4pPI7ZT/wRyQ/lJ52E0rKfcLEP/+5adultFKcVoctBXKbGEcOHUrX9AG2qhsutYshuZhaO5eMVC3C78ujYsot/e/5p3nnlaXIz02hWXZjRRvJyVZZ/Xsu1D/6GNd9vIMkbZGBuGmvX7abdqzNx0DCiIY0De+uJhHMJp9cyMreQvPwsdq7bz/5gFz5DZebxs5n3+ruENcHgyeNpPlCFSxZILhceh8t2fnEAmobH78SV5MMjuYhYUULBKIVphQRDMUyPzmXnXELV8i0MnjOTu6+8ipLBaTR2h7izsa/gzubjK2hy6VT4nbSpCvv3tdFxMIAhZTJhZIzyYWmsrk4hWNOC319Lm5aEw28xefwUPv5gIari5OgTz2blirX84dKbOPXcs1i3YiPHnng8eszgu/n/5MSTZwO99ahFXO9ZIRaLocYknB4nCEHQMIk5HLjMLhyOVKJmFI0gfj0JTdNoj2i4C7PJicLtp57HRvc6QsQQQS95Lj9aso6SmkqkrY6SinLW/rCMKdNHUBNqpsQq5UD7Jkqyh1LduJ7CsnNo69hGqGEvcoqX5EgFEX0R6flT6Wy2sNxteNQwioB2ZJ75/QZ+WvY+6UnlfPr1jWzet4/zjjyObbs3MCL7DDa2zMefW8HqbSt49fHFRPCybPzgn90Ho758kTPnnIspJ/dklYfDtKFv4UvvdXruqV6Tkv2375+9959EPJztWu9++k94JoJ4oq/+PPLDxZj+uPrPnyT6BnEAVbEDckw3sEwVh1NCjTNAJEnp2ZcQose5R1VVJGFPaEpy4twsVEm1/TqFjXdL4tA8gSzLSJaJFJcvkUVfZpAZX0+PG0HYk6Myk
mzDK4nz1YWEZYJlSVjIyA67itMhxZ3thQHC5pybloVuOWyWjywQsvjVRsn/K4J43tBSMXWATFf7Xj7fIDijbDaZU2LcOuchvhgc4uO556MX5jNAb+bx8z5l6JVH09JsseTlB9mSlMyB7xdy/gU3UFiRy9EnHUHNonqe//x51i3+iI07avjm7fnc8tCZzJp1CWuCQfZ98i73XXIvG7xu1rz/FJGCYtJ8GXRv28XGhr2MSU+ho6ONUJLE3MtPZd+3O6nMieAKmKiKizsv/i23v/0M+obdnHb6LXhjjfztqw9JTvUzbUgFga49pI+cRHdUZ++ebcwYlcX2jRE65A6y0iW84SRiBIlFDQy/H7lVo1XVGJaWyfr2CP5oAD0k40p14ZJ97K+rJXlIETmOHJb8uIzBGW6UNA+qN4lYMIqJicfUsdwuXLjpioTI9PtI8XvwpiazdPEm/vbCi7z5+qMoScl0bGvjqS//wSU3Xs4wyUuT3sYlm5r7XJOvjywm0KDjT05iUB7sbk1H1w6S6XKxJ9aC3OriiOkq69d3kTMgl8IcP5Gok/11tQwpH0NDRwMFKQMIBKOYXSbfLFiEIgwcTieGqRGKRvC5knpYEJZlEdM1VFUlHIoiZIWoLKNGYqiyjCkbOGUJU3NgKTEcqoeujhiqJKPEYuyNtON1eXGleUjRVZydOjPOGsnASSOJNm5C9SYhFCc1tbs545hL2VD1NWEzhdROk7JR49nXWk271YAjmolPdOFMTqKmq4OnbviYFz+8nGAojcb6HRQUOkEZRWvrNkqLS5gyai7z3ruTG69+kk07llFsFbC6oxk5ppCdq7Ns6UYGDE+hsbaJFOdocioER+aOofqG3/3sPrhyh0ZhqQNLPbwDPRx+kvCXgnj/7Xpv2zuL7w+79KX5Heqv999ES8Axqqr2GC70/u5w7/tv37/f3rRFO4jbxTQJ+VnT6vU7yGY8S+8nGCVJmGaiKMrqqfDse0xx30wh/QzisRkkcfaPJTB6+rdQJRvLd8h6T0ZvW8ypmEL0HLthl/L0/NYmpg3RCMvWMpclDBJ4v8Ah+v4G6SnO/ztStMHWWty54xhzxnOs+HwR21z7eO+9H/n96+fw8S1nEklz8Kdjb2VjVTslyYUMcEkMq1CYdMXl7L3jbd7/8jum5w+hvSNMtMUgf1QGrzzxMAPSSigbMYTbHzibrLRUyj1ORvo3kZIEry1/n/Ttz+GbkMyWR5ewfd1GmqlhWHYG3snjyB4+gPsuOIO1K7ezQ2pnlnsoXfXNVEwcwtvfvsYlFQo5w6ErtIp5G7/hzb9/zKgp42kJxlgmxyjNc+GSApQWJaNlZtGeYpBXlIRXNqntDlMnW3R0htBCJpbTxVA5m73tXeRGY7gz0xkz42giXd0EZIkB5UOJddTQ0lnH28+/QltIR49ANGQR7NJwqR6SPX4iukmSJxNLuNDCJjtqD7Lv4H7GjhrIfbffSKS2jY5AA5ozyu03zqVIkVFcTsYOGfSza5IjRUgtyGBn7R5qO7rJy42R6jfQlVRSUj3oOQYr10fwFaeguVOp3N+BWxKokRhWSwdHDixn04YfGZDqJKMkGQ9xowchUCSVFLe/l4yo/Sgc6Api6BCKaMQ0E6cRRTYsXA4PHR0GNVubqKyqoa1NIRRQMKNRuiOdBDwKvtR0fB4vrd0mIa9CQIWNlTXUrFpD0cDxFA0bi9Or8fgLP7J6ZxV1lS5cIYXsvJF8s/oL2lqDSFqIpFSLTt1PS2eMiqyR/PW1c+nuLCMtz0fYdDK68A7Csb2kpZXSFa1n/oq3+ejdJkaNms3m7QE6kg0GV5RRNMTDuvXLKBvpwaV2I+uCCdOOYmjBJJ6d98Rh74OcUgmcPw/gvYNu/6If6BsI+08I9saLVVXtmXfojZv33i7RT+8g3ztj7p9pJwJ+7wD+azRZ+g8qhxuE+g4cFnJcl0SRE9m/hGU6QDh7zCHsAiABltZjCCEkJW4CYb9kJFRFQpVlnPFCH9UhIckWltBBMpHiqoSSJCEpKl7ZIkmx8KkSHsXELTRMJGIWRCyZqHDYE5OWjGYpaJZkPx0ocnygidmORJKEQEVDwdIVhAayKeEWKrokbHaKCeHY/zFTCJdLEqllMiW+JLa0dlM8dASeUICpxRlsbbZoN6upKBzD1LKzGZBfyPwDC7BiXp5++C9keF3c//lb+BWZ30w8meDBJrrbYwyaOpCD765l4hMXc23ZUL7cv478rHy6FEFqR5SIU8JXWEvLvlLKlH18vSPCiILhyCkpJI/ooGnxQfw5fgpySunW28hMHUB7Vwv5MYMlDoXj0wUuZx5vLfwBDJOsojyyggIr3cJUQzTtd+IwFCaOG8jy7Vuw/KmUlw2jast6QpLgsvGlLFzbSlSLoKRqRAMSUclBINJNfko2bd1doHkozJDB6aR6TysOjxc5GkZ1KnaptC7jFWECJKPr7SRnZ6NVN5Jank3MCKCYgmSvBxNBalYOekc3LR3N5Lt9FI71UbnWJC2tjTNmTKXrke/7XJOvj81BHPQx4bLjCW3fwscrV3Dx+Kn8VLuXNLmYfZEQIrifrLQsMrL9EHUQCbSCCzL9ObTUN5KVorBtd5Ajp5Xx9tvLMFwy3oiJ6dBRJBliKqgWOGWa2gK4ZActloFXKKimiYSJplt4/T4O7KsmNyUVl8eP5ISYrmFa4HY4MUwJ1elAyAKn5EDXDUKxKDhk8tJ8zJw1Dn9qDpPHTSDY3UHjwXXgT0LVYmzdWcPQCUUEWoLsqtmFI6kUy2hkymQPNdtNGgwHnu46XnhwDc9+/xBpaQPYWrmGjq56SosqsJrChNRqFMmPYgUoGXcy3cEduLsUGqsPYCoZpGZ2kaSOpCtWhQj58RGk6MOtP7sP7kdgaAEcqq9nWf9gerjWH45IbJdo/Xnbvb/vD7cklvUXzYK+xg+Hw70PBWabCmgvl/scg230odnX6xeeKvrCRPH3GLYWS5ylokgysnIowPf/PYRkoBkqwpJRZdGTxff8Joi4b2Zi8vLnv3f/8zvck0vfdQ9l9nalkRnfV/wcxGGeZHrl0TasEj8eWSI7xfF/JxO3FMgIyrTLCmU+N/KGvWS7fCyr3Mi+qj2EGmOsXb+bjz97hq9b1rFn6xcs2PAC044oIGeMlwM/rvr/qHvvMLuqsv3/s9vpdXrvM+k9JEAICQECoUtTRAFBEKVYUKogghTFirwq8EqVJh2UEkIISQjpvUySKZneT++7ff84M8lkMijv+yuX33Vd+zp71tl7n7X3mnWvZ9/ree4H5dFd1OcWUVlVSl3tRIxYlF82rWTD9Y9xzysf8M+738fjqKd11QG6UgZiwsWhtgksKXCypttgau1E+tMBepvXM916Ia3hNAdbZUKmj3Xv7KFxzzY+eGs3jpkL8Pf0s2tfkHAgzKyKCZR6i7FrVgoqp6C4jodkHsHOQewOFwNxE0vcRrVmQR9qo6Z2AtXVBcTTCu5yL0ZOHEMqpMznJV/yMrtyFom+EH7ZjsU0kSUnOX4vRRMsmM4hZJcV3XBSZlFJSEFidpliZxTR
q2PRgiS8KmokjiY5QJfxe4uw2XJoaekmaESZUF+HWpJDZ6/KlNI0NSfO4GB/7Jg+qfHmccbxxWzfspNo7x6uPnspu9IxzirxYC2TqcsY1E+uJdIzQLLfQEv3gthLZUUebpsL0RAQjXzmTpYQ7XZefO7PfP+276DZTXTZRltYZ3c8SmOsma7BGPGoSThjkhMGIZZCkgUURcHhtBGPRKmsrMSQBAyLgCGCKEtYbQqGaGCYKdKpGGoyQSQdIaVlkCQRLZ3iUMcA732ymb5wI819H5JOxmk48VzkDvCUurj5J38k3R1A9uaR4yqg1JZmSpWdYI+XpGgwo8rJ3OOv5P2dn7Jv1ypWf/gmE8tLOHvBJVTleLG6QiS1InyinX++0IetzUZg5S4SisjC4y/FV2Tg85fT0rMGxeIlbg3zrZuPXdQESCUCWOWjk2SMgOho4PyixcfxLN/xMvGMPX+sBT5SxrPMv2iyOJp6OcJPj7RdlCQM08TUDRwOx7jW/3jXPLxIaUoIojz8RjES+i4wkq5t5LdGNgwFQ9WGjxHBlA9vhi5iGjKGkYVQST7a3XEsjZR9WzQOb1l659g8orIoZlPCYSAaOvowPy5jYBHBqmRlD2TRzG4YyIKOgJYN6zf0bJo9hiM9v2T5jwBxRZAJiArGoEhXwkp7rsaewQCZJh+SYpCO2RgI9BK1wLonH+Nga4Z0q8yQlObE4nlsD6zEel49AbWHoJxEt5ukEXjox7ezeZKLB+++nbv+9hjb2/fiqylj19qDSCU2ChWFT6MxKoUcbHUmj101j6A/jydeeICTltXjy4/hTQ7ApBIOxFUSnRHeff4Dmg7GORSOs7q7kx3tnXT196LoSVL+BP09bczJW8y0xdNwF7rY9Pku7P4yXIVFeL1lBNvDODK5rF2zi2muakoqZuHQk2xrG2J7SytN3QcRfSYZNU1JXT6tXR10dYbxdxlcNWE2Dp+Jt95GT3kpubKdRTlumlSosOVjsTnIKXGTX+8hV48wYVIZfYF2PD4JQQhDxkEqaTK10o8g2djttXFyMkZHuv+YPqmaW0r+hCRLJtv4qDVMOhnimnMqmFLsoliw8/s7/sDmDS1MPuV0Hvr1A9irK8gpmYiqyAxkuiif6SNlFWgOJNmz9xCNLXu55cofccLEhfRqKueeN5W3Hv0Fv7rhNixpE0mWUVSV/jKFnz/7KAISkpIdVA6njVQmiWyzYKgZMokUFklCNrIco81mRZIENH3E8jHRDB1FlvE4bETCAmK0AEOz0t67ix/d8ANi4i5ahyT+8thN/Oa3uwlt30mv2YmrOEXz7jyGujLMr/8qldoJTPLNJNHWzaIJMzjpuEWEgglWbn6SzkQbWlpEiQ1ic5Yxb8kkvvvTu3EWzibHqGTF9ueJqTGCiTg5xbVYvIV0xwT+8vebxx0HDqsPRG3cxAyjAXE8rnps+aLzR383HliNPnZsJOZ41x+vXSNJGbKZ4UVMIxsRKkkSGgYaxmHVwZFt7O8cVigcBkoNgUwm62KZlbg1MIxsJqGxqowjGe0VRUISDAR0DFNDN1RM9Kyol2miyAKiaKKlM0fyusJR3jcjk8II9Tce1XRkwdM4Yo0LAookHkVfjRx3+NkJoBkGpjDsNy+YKALIAoj/A2T+jwBxHXA4FTzlDqySRn1eHjPcdkrPE8gUWlk0qYSCnHJC4QFkR4SacISTz2xgxpwCJi45jlurf8oV13yVq7/7EG2iwndu+yY+Vy75FgtrhvazfNVK3ti2nNk1PpyRDp6/9Q6mT5zL6k83U6YmCPoSWAMJ7v9oN15nP/OOy6N1o0FT5xAbdn1Ohd/F2bllXPGzqwgaPVRXFGMkMnAoRL2zmPLaCgI9nRAVOW1yPjs/fR5HZ5SDn29j4ZKFZJJDbFz7OS0bmplXm4tNjNGdibNi31ZyU7vIlSQk0UG+Pw/FEDESVoaCIgcOteDKd2B1KlgcOsv3bWT21GlE+kKIXW14fDqJqhRP3VhDPDlAfqkfp2Sh9cAgoX4LLS39pFIyA/0haivqyMuTGUx1kkkmsClwgc3KQ01p5JDtmD5pbN/Hf73VxRQqWXzSTKrLv8EPH/qM3iGVeHKQW+65ggvPPZPtH63ixltvR2gXUONuVM1BMKKik4sYayIR9+OyWPh4x0qu+P65uKcEuPXrs6nPL2Zb3xqmLlpMc38nhwYPcfLNl7Igp4iuf65FxkIikUDTVFQ1mwVIxcj+x8oiSCIZKZsJJZ1Ok0gl0UwwM8bhgW1qJuEUhEIpVq5czubG/Sj+GVz27amk1SrC+/YSyNi4/tqT2BwaonW5iZk6medfeZbKsuns3f8W+5XP2Bl5lbUHX+fDnS0E+gZIRRopUmcjdwp0qlEqfDWklTCfr97O1RcsYEDYgu7pRLS7EdIJaoqmEU/spS+wjUXlZYQHneOOA0EUMZHHtYBH+02PpR7GWsgjAHxEqvboMPmxAD5abna0pOzoqM/R1x4v3dtIyYLasROIYGT/tjuc7Nq39xg/cFEUD3P2o61hTdOy2XsyGrLVgqIoh4Wzxlrho5+L3WpDkUUkOZu/UpIEZDmrJGiaetYvW8gCutVqPcq3fUTGYCzdM3pyG837H342ZL1QDAQMU0JCP6zRYgoigiSDKGEOZzxSTXM4/6Z4OC+oLGTBHOHLc+L/ESAuGjLTZ1UQsIrYSzwMhRU6NYOdm+I4gzodqTgLTp1LzBDp7Vewzangk/cPsHdthp/f8gC/2vkHnIKNVz56lLMKnMjdISLAtx74Hmsf/wN1xWEuWTCdTzbtIBg1eGTnCvoHN3PK4gbCuX5KrZP55swSZvudVBZOxS3lUjrF4IzqKnSbQjoeY0VHL5MsBcxffDqhcJKKqjJCso+m1B7On7aU3LQHLEOsbupmsLKIqFDAxZf/iI5QM7rPzZKF06g50c+bq/cwuaaImaX15Lg0us0avHll+J12chVIp1Ts6TR5eR7K3CU4VPD53WyPRBgQa4gaBpVVLjx2D7rHR9MOkz3RZRT6y0nHDYY0k7q6KuqKvTicFnwOgwkNfppjzQwFVRafcyZh0cEFU6fwXvMB5N5mdgSOpVPadtmp8BVy/4tvEI00ce3VNyC1BNgWyKW/PcTqYC+LZi1myoyZpAZ7caoaQ73dVOXMYuHcWt56ayWFuVdw9ynlzClScAgenHEHFSWT8VeeiJmwMsExnelVE/CLdpS0zu1FZxEIRvjt228QSkVJWk3SoonF7UBy2QinY2iahjwcEGRqAmk1jShZEAUZqyKQ0TU0NQ2GiaobGGoMq5BEUbx4YvkEe7cihHx843s/pjlkxWcrRLd3cP9tf6BsSQ4K7bz77mraWneRwuRAexK3zcWZM5ewbvl+AttaCe4s5MrrHmTv9i3Md55GILIPQXdx8bcWUDrZwd03rGbHWxspV+vwJCbyyfoVuLwuquRSDnbvwV04OP5AEFSSpLOWp5HVA88KVnEMuMHRYD4aYEZbsGO/H33u6LoRUBpL5YxdUB19zfFomBHwP0zTCFk5V020oaphYn0JHv/r3VjsCraMiZZ
RsWnGURbtaHCUZRGrVcFll5HRERixikcSPhxNBx3Wb2H4OQzz0YaelRTI3mNW18REzuqaDNeRTd6WtZ4FE1EysuJfkng48EdAwhCzlJ4hMKxBnt2yP6Blc4YKOvpwkJZgZqM7TT2rsqjqxuGFz8MToJB9O9HF4QlXH/8Na9x/m/+EhU2bQzI9Xi+CJ0UqBN58J4lYkmRCxOmykMqEcbpcVDgLCMQGiMYMBItMLBUmz1OAOykTNNL88vG/kC97uO2nNxIXQ/SqAS4sO4v3m9aTY+lBd1rJK6mgNp1kz2CUlGJQ6ZhAKN1J2rBj85jk624s6RB7BzRmLZjN+oEt5DWb1J4wj03LN3H22Wfyu/9+htqqPNA0aiy5rA4NkRcPMWvhSXy+Yz1XX7qUvzz7HhMKaolJLSyYVMuBTCf2wXq6+3dx0ikXs2b1Ok4/dzYvPP0+RfWV5BdL7Nw4hMPrQMYkk9KQFJnKwhxkm53tn29HLHFRZJgIfh/EDWLWGA5dpLN7ALfLg8WikM6EKXG5sVcUc6h7ELE/hE0QyCgeCr0SRf4ydGcIvyeftct7ySszSQ6G+EpL6Kg++VO5i/POqifcMUCPJNPVFOUrZ17Oo79/lNpTanAHYry7upn2Pev4fPlmXnz7Fe548EHKHB4uuvJcKrxV+K+aT+36ITyWMN2DKfYRYnrNVHo/+Ziv3v4YP7v7Bzx77d0YsybhamggLQg4BA2bLqFYJBLRfvT+OO/u3Ux0VwvTTprP0jOXkcqksViU4XRaGpKgoJsG4VgE0VAwh93JbIqFuKah6WBmNDZsXMGKd25jX6uEL9+B3TbAUDyHTCTJ5Lmncs1VF7H+k0b6ujbz9/de48dXP8bq/R/RvukzJF+GvqBOWYmPqM0gz4hRWXMJz/zuL8y8YAIl+TamTpxAZnAbNy57hbolkyktdbPsrAtZOu9sXvrHw8h+H47CPpJ9XiY/c+zC5k8yGtaYyPaWLRSXllBSXMLooTzWOhxbP9pSHwuyYznx0eA32vVuvEXS8TxHjomoHEOnQNZHe2SisCGRtsk88+df096/njOuuJWTK+fzxLt/5epzrslqfI+61oif/BdRP2MDjsZbCxhtXR9ZbB32aTfF7AQjHNFLH3uPIzSPgYhkiFngN0Ezs28qpnH0BDZaAsA0s+JXgmkM66sImIKALhxxYTycxm647ZKUXbQ1hienQp/t/x4/cYtHMBV7DiVOC4JVIZSARKYPn82CKimk4wlKS/JoyxgQCWLFgaansDtkMkmgPLWdAAAgAElEQVQHSdIoioIzbNAfCFK4oJyG6dUc55zAoQ3NNHp6uHnRDVx+2qU8d+gjXn/+Hg61xTEkGTUepshqkOt1MmtqA39buY3vLpzM/i0JVjTt5HfPv4MNCzfccjlpU0VyuXnw/sf4xT3X4ZZctCQCzHALKFV1hPZ0IBf58Ti7SSUKEDUJf56EkTZwJ3NZE1nHpUUnsD1ygJBgQ00pFHk9bD3QyJxJU6lumM2bL75ExqqT4/WQNCRcThuRSIR0wspVE+M8vzPEhLpaevpClE8oIxZOUVtRx/LtmxETGnMqJ7KucTPFIjgLChDNHGonO2kPWlG1FkxDJD6QoDAnj8aBNuSEB3epxNdXdxzVJ++enE9cFZh/2mLSXb0cinWw9bN+Tlt6PieUT+Dtf77PdT/8Hm+99TKffvYeZ371m3zw6vOUFFdR5sth/uKFxPfEufKO7/LyE69w8rwK7vrjI5wxbTHpggwbP27iz/c9RBQRf00dyYyG1y1hNTxIYhRDUTBVDVM3cfg82AwBl8tOMBUnk8wgIrBp02Zq66vJ8+QiyzIDwSEUq4WkqiKLCh67m4iaQRQUFMVKXoGdX9xzMy2H9tKndyLlKoT3qNTMPpnTphfQ0yYxbVo9v3n+l+S60hw6EGHuxDLy60/mYGgdN134ED+55ypmT5bZsioHpyfCKZfdwMSqOl5+6xFiuwME7d0UFPqwKA4K/DZCsTinzPk+u7b/lokTTqW/J8KcUxaz59xrjxkHV3fvpbykilQS1ny2loaGBqoqKo+lJsYB8dGfMH4gzsgxI8eN520ywt2Ovt5YS3tcwBsDuJIkHU5gbLVakQSJ6793BQ2zHXQc6uF3Dz7NdTfdxqO//i2m6kKxHbuQN/Zex755jDdxfdFEB6N4eo68MYzQK9n6ES+e7HPR9Cw1ZAggjwLfwx43IwJZI7z5mEXa0YSIaZoYpoSWjTtFMjUESRxzf9noTk030A2R8rwv550i/7sD/v8oJYV1hOVBIgkbuhbGk6tiU3MJNKUori/ELiUYCA1iC6qYTgPNDCLruZAUKcoXaY9o+CwuYokQRplOZcpkCRUMpCwc9Bxi944kVd+fxvfef4TdB/aye1uMGrdGMBpg6rwGkn39ZJwCH6zdRUGRnTXhHjrENmpmlLD3Hxtxz1tA54Egjz5wOy+8+Vfuv/1qhFSY/GlFpAL9HOiwU1lzCHdZDpJVRrA5SPbqZEI9ZIpyKSmbgc8WxPZJOb1FaUzdQ1lBGZ98sJd2owPJEGk70Ep/tB27YGBz+bGm40hWL2o8iagZWMQ0y7vzEc0QOe5cEHUwIugk6ek5wDy7DSnfj7eymimBPvpox7QAzkG27elCNeK4vWUkUjHcVh9tnR1MzcvlkJah3pIHHA3iFU4vgtNByz/+wfcvPY6HP7RRN7GYVHcjn7Q1Uz2xmusu/SaP/+ph5s++iE8+XskT973NT5++AWfMx0d7P0LvjWDfWEF5oYvZs89h4wuXce6PvsJAT5BCh5U+p4Bd9NPc0kNFXQm6ZiAZaYpLioi6FJbvWs2Lb7xGZ3MrbTt2YXb1YfHmcPU3r+HBn93HgvnHZy04RSHlkJm/9CycWoZFpy3j21d+m/zyMsTuATA1otEUxaVOooJE3dxFbHjqL7Tv6KGy3E0y9iztHUW4B/rY2zMZMdCB6amkfIqHdFU1n679kNr8MHf+4FIWzLiMDV1ree69fzCtIp8f3PID3l7/Kld966dY3F4Ort2DnJtg+57n6NvvYUrVPGor3Kza5mXl9t2441EOtO1j+rnHjoPtezrwF9Rx560/4tE//hFN14b1wo/WSRm7P1K+CNjHA8LxLPLRADw6DH+EohlN6YwHpCPtFEWJdDqDIEjY7ZZsQmEBpFQnyfTxJM1m7rrt26T9YLXaUU0Nc5T73dg2jG3v2AXakXaO1I+24ke+M00JUTySEUkcloQ1jGx7BUEcBnMdEDAME10HTCkrCgZIkjkc3CNg6AK6ceRaiqIgcjQvj56lTjQkELOa4xnEbN5OKTtpHGW5I2OYArqRjSb9suU/whJ3+STzzNOmsWbvAUx7PrbeKEKxl341jiekITlEYrEEDosTwRCwi1bMUJSwT0BQdWwZiZShYc3xoKOTDvdhLSvC0w0JX5wqwcm0k5Jsb60iHh+gpryYsB6mSNEZigocGlCprfeT7jjEpMWX8NGH71PvkZG99YSTEudceS6nTFjCybeczxKXiE9ys2rrfkoqS4iGNWID3didBhX1k0n3H0T0NJ
g2b6Yh3uyJ03eSSc65kYGUFuuHQv6iMj994lVimnbZYgpy8CXw3/33OueQEpg4/hOmnXvCbPulKxHnro/f5evYXrFu5lHseu5sBlaUcddIJ1DQ04Al4GD3qUEzDYMyo4dz9pzvBtHG73TiAYRi8+f77rFr/CxddPIOfV6/hlAsvpv+ASgb178dztz9KSM5myaJVHHXYdLq2tXHY2BOJJ3XySvKYMHUIG9dvoKmhHkl0aG1pQbAdYtFO2toidHYlqOw/mEi0A1O38AezKK4sJdaeoiCvgEQyxuYt61i1/Ds2rV9CV7SW1vbdKC4Zjy9A9e46EGSKC0oZP34iZX0rcXsCpDsjRCMddHR04FIURMGFbIuIqgtZCTKodADFFRUEKzJMGjaOPU2NqG6V6dPORTMk/nz783z9z228O+dHTp4yjb88/Rcef+h2WgyLTJPE5uotxMS5Bx0HhitMn/5F/Pmmy7n3iivxbttK0GPw7Vc/0dEe5bTTp3HfXXcwZMwxfL9kHbYtM2zSFBSzCMPblztvuYd3vviBqp3VXHHCEWxZuZjTz7mI40f0Y9XKpQwYWUAqbzANWWFqOqop8PWn1VvEwDyFY8gjOxRlY20HO1MNJLYGufrif/CPRc9TIhisWDqfgV6BQXlXoColTBh7CUPzz8bvhzUrF1PeT8Dl1Whu6iArlCCc1cJzrz3EsVNOIhETKM6X0Luq2bmmGqVzMD5vBDOQoTNaR74k4RVkilWFfpKLwXGFsHMoWRNHMPvvb/DT502kZJlQto8X3nmEitFHcfXMy/H6uiHBHo+7F/mxgCgKSDhIOIiOvXcDWRCRBRGJ/QOVgiAgCgKS2H1OEWHF13PYumsL4SwVWfLx5w8eQ9CiyMVB1s1dxZkzb8RpaGB98y4MU0c0Dj5Z9JYD0/97AqOO8ytL0MGu/bf68z++8n9QHMfpVpC6iWk73UX6FRVVVpBFBVGUcBywJRGP349m66guGVkzEJMZZFnB0a19TKWGpmObFk4PVZNgIogOiiqgqvJepS6iqiq2/Wtd4t5sIb07ufcxi+4Z0zDMfZsgiCCJpDPd95IlgaSjkbJ0fI5E0g0ZRyLjU/GaoDkZ0orFjefOQEtZLF+9gbePu4zb3/2Axt3VNDS1U1payOfL5nPZLdfRd/hgUqaO6vVgGRaKI3DXJ28xvGIQaUHDK7poTaeoq6vjn59+SltnhJDh4pqr76CjdheP/Pl+ZElloKeAP975By498RgWb4hyz7Mvc9S5Z1O/u+Y3fZJIJCjPLWTS+HGcf9kFWCGVR157ml0bN2F1Jmmu2cPsT2ZzyOAR6Ckdx7ARRRscAxsTRRXZsqWan1f/whVXXcm4MYfw+nOvMHz4cAyfw5+ffYCzL7iWeT9/wsSjzuTcCy9kd+MWPFkZahs68WXl0ae0FGwHLZNB1zPE43ECXh+ZTIb2tgihnFy0RIrGZCcakG7v5KzTL0BLZmhvbsRx0mDKCJZEZ1uc1oZ2WlpaaG5uRlZcKC4PiBKSS8URRWxR6FbcDsh7lUIqlUIUZNwuD7ppYnkFvvvyCy485nJOOnMcl11xHoeOn8imX2qYMmUKyGnirXG8Wav4YfsKFn30FkpoA3lylFynEMctosd/WzcF4LW/vMSIyRfQqbqpGD0arWQSTV2byC7LwVPoo8+gceiKm7KhA6lr3czXny5g4JBRTDjuaKYePpmla1dw83UXEs4vZE1VGjGQR3trNXmFQ1jy7T84PJRDTdVuyholhkhhgkY96ab1aB0Cg/LyEOIyud4cBFc7Bfnw1dwn2bDiS4SCcnSrgcOPfYijpw6nvj1FjquJb76fhWQM5cGbv2Plj9tJxFzgaadPSR9k201puZ8ff3qbYFYJkZSIKYco6peHlVVLWzRNsk4l1xMi7WlBE9Ko+T4SIYd4noMeqkKJNKF4deIu+ON7z7Js7tfkqj6OP3QIkiSRTqcxDINUKt1dYdBxsO3usgI9gGtxr4L+V8iPnn2XrCDRvfpOxjMs/vwr7rz0Kj5btYgcU0Voa+D71YsYd8pRHHfBefRxhfn0y08ZWTkQDZsujH26bB/MsRcdm42DYXUnJ/Uc/++6Tw6Uf6vEBUF4WxCEVkEQNvU6dr8gCA2CIKzfu03vde5PgiDsEgRhuyAIJ/wnD+E4YNK9WY6NblvotoWDjSSDR1FwuVRcto0LB2QF07ARJJmMYGPaOrYMpmRiODaiLCHKEooi4ZYlsLs5B7EdcCzcLgXVJSDJvwLze+oTA7+6WHplbe2brW3wyC5UVUGWJVwuFVEUEPbiTRNJjc6YjmMr2JqAKcq4TAGPZaFYFrZo48KF25Jo2l3Fy688x/GnncWI62fw088r+PucbxhZOYZjzj6XwfnDWP7Ldi4952xUx0G0IeD24w16mFDQl5tu/QOSJaGLDk2btjJu8GA2r9vAC089QU1bDbPefZvZ//ic86+dQW1NDTMvuYzjp57J6j1VfP3+G/yycC233/hHTj3//N/0ya1XX03fsgp+Wb+RXTW7sKIGR005lguPm8yan1YwcsyhDBs0kJ3bqrjgwhlMPeE4lixdgW2JSHsdXOX5IW6/6Soef+g+Xnj9NbwemVUr1tK0o44n7nucj9+bzVUXXMOVl53MhZdey676GjxaOYcPOZ3q6tX4/V7stg6UrhRZnhC7tm6nM5XA7fdSV7ubmm27OOuU06nfvAMhnUGLZSEGPCjBLCZNPJzyEWMZOmocQ4YPo//AvsiygNulkkxFSKejRFqbSBhJookuCgLZVOQXg6gihbNwSSKObhNNNpM2U1RV76B+925GV4xhxJQjmXn3jZx1/r0cfexZaIbMtGlHUbN7J6t+aeTGm2/ir88vp6+iUj71UJLtJUS7bBJ6NYVZRQS99QcdB5deeR4rlz7H0499xvZv3+cvd1/DpCEjePaxV9iwcjnvv/ZXBuY28tXs1ykpHsyGlQuY/ckXXH7x2YwcM5JJEyby6ew5DB1YSb9SmUxnFDsBL736PNOufIjGYJpLTjyBxvwEHVmtWOWHsqeuGl92KbscCUnykU63IhhJTCPJz+s+RnXnMCDo4+Tpl/L3937Phz++yVEjhvPTgoUMGzeRYX0KefSdGRT36cvNf3iIM4dPo2F3DWmnFb0NmmPtiJkOzESETCqJpssIcZmcLI3cQos6Yz1aPIoeb8OJJxnVvx9bN2ynoqQcw0pTnD+JmVcNpuXnF1GCufiLdjF/9zrStonX60cSRFR5L0ih25ZCwEQUu7lGe2B/smMiYWEKBi5sNCODT+yGGLs94BGS2KJAJArP3/sgA44fwvpfdnPG2BP48yMXYtc2cdixR1Dm8fHNJy9y42VTyZYTKBmFJlXDI9gYZnd8TBZkECxcLgtVdmFbCm7BTSLThaMo+DBx4QHTQJQFLFskk0lhOey3/afyn3BsTgESwHuO4wzfe+x+IOE4zlMHXDsUmAVMAIqB+cBA59dyXweVMWPHOkuXrtw3G/UQh1rO3iiyYe4NLO6N7toOgiSC2O3TlqTucpA9fiVrL0SwJzDZU0j+wP96
MGxm7+ysg0WX9ydP/XXm/dUXJuxdWegAKLK4D2/eO3lAcCBtpBEsgY8+/Zz33vmYxf/8J8dddAbfzf6cZ955g5Flg9AViXv+dDtuE7r0KMHcLPRohkI1xN8XzkWxRAzb4pWP3mNUWSVb2uqp37GLUF4O7bV7ePapZ3AEG1PXEQUZSZG7/8bepmiPpZn15Ww6LtnfpXLk9wtoaG7i3HPPZcbll/H6G2+wcPlP3HnL5Rx9+GTmfreYjdvq+GbB98z/5hs6ol24ZYl33nmnG8JlmiiKBI6ImTH45z//ybhDx6GqKuFwmFQqxZbtOzj9vKM47dSLWbTkCzwuC5fj5bkXZvHHB89nyvgrefWll1FlhXA4jI1DSUkRBX2LyA9m4bVBzIZx4w7lqac+oL2jCSEQZPphU9neuJt0V5w+hRXYloGlW/gCQVrb6rFsHU0ziHXFcblV0hmNYCCLzkiE7ECAuKXjmBa7du1izMBhBLKykbMCxE2NbEvhr2+8zhVXnkRescqyX9bz0dtfMLh8FB1t7eRkZ/Pgg4/x2ZezGTt5CNkFpVRtn4Vb8DG2/GRiRoKm9BrK/vrzb8bBKVs30pH0c+2FR/PpnHk8+9ifWPzlMqpa63ngsYeZOHkas9+8j7QIo8ZM4eSTTmdA/0pOnHYM77/9IW9+9CG33nwbnZEGUvEEpeUV7Fi3mcHjxvD2Ew8ghL1U71xGju4QUDPM3t5CKtuiXMhjZ/MmCvNHUFo2gFjdGjq1NIahI6oizZEEBbKbfkccj6dDZsH6l6koO4LBk6ay7atF1Hg2MEbIxxlQhNUisKXlZ4L+vjiWSV64gNqaRsr6FNPRspuEGaektJDSwjwa6+M0dUQZ1n8kKa0OPSOT5c3QrHXhE7y0pxTywy7ocigoLWZT+0aKrSMpKMzi6jvuISSrCGmdYChMWkvvN5YPLCrlUl0kkgl02yLk8WHpAhklipAJogkJZK9KqhWSLfVUa9t58rO3ubjyVPqfcBjr//4jF15zIRcePY5+V57D1ORgxp51CB8s+JqyvDFcMu0oOkwHbANRB2/ARzKWwPT6yQd0wWTttm24MxkGThhHe6SdbCkA2MQzSbK8QTQnjSy493sfQlmu/3ccm4IgVAD//A+U+J8AHMd5bO/+XOB+x3GW/1f3Hz1mrLNoydJ9jS7v5Zg7MG21Zy5QVZlExsB2TLI8HtJpA8syep5hP+vZNM3uGboXS3bvTj5YplXv478e+5Xx+sAU3F+TA3r52fZGwHt49nr/LoBtGUiqguRIdMa6cAUCfPzue1w04yJwLGRJBQsShkZVpJUrfncalmzi8blpqm5h+dr1/DR3IUeccCyqKBFCpTUgkKhpRDMyFKle5q9byVlnnoVhmyiijGbq3fh4UcCyLXAsOuNpAoEgjx1QhP7Pto1oG2ALZHSNRCqJGRP5fMknVK3bQml5BZ3NbRwzbRpZHh8PP/IgYw+dwMyZMwkEArjdbhAcbAdEQeT9D97nogsu5OGHH+bee+/loosu4tG/3Mu87xfx5dez2LmziuWLt7Nh7VKimTr+9sYS5BDU7KpCEMHUDWKxGLJtc/bZZ5KIRcgOZuH3uWhtjbF02WqGDB1Lc8Nuokaa46ccyU+rVxJpqicY8OH3Z9HRFcPr9uByuXC5XPh8PjIdCWKmRkYwyeg6ssdFV3UtCdtgWN9KEpkkTZE2yvLLyM3L58STf4c/mE9H21Ze/OuD3HjNY0w+9DD6l/fBpdgYlsRf/vIEl884l7+++Co7tr5AJJ2DXBTH1VBO+TiJsP9o5Puf+804SM28hz6jfbgbBf72/SzOu/gKFn/1Gng0Sjx9qRhzIhNPmMxHL33KzGsvJZ5pp7g0nx/+/gVfzZ/L51/OY8WmteSH8iivqGTl6lUsmz+PyROO5IVX7+D4oSeyYtMykkYLJS6HkmIXqpTLG0tXUtH3MAJ2K3Kwk7gepLWrjaAUJJwVoqs2iqtCRLIcQkOGsHrOB/z54R/YtWUFi76djePew8ijb+eG02cy46xx1HfaZIVswjkBmprqEAnSf4iM7ChEUnFM3UU4rxDdbCS/cBBV27cgO2FsS0fJ01ANC3dWBfHWbWQHywkWeBGkMM/c/w++m/M5Ha0WgyePYFh5X8hk8AWySGeM/dAhknwAzluV0RMpPIqK7nVRs7uZQ/LzeebvL5PlCbN+6Ub84QhxMUmeaxznnnEUt193BeKIHI7peyE3XHQZ89vWcczA0Xy2cDErHruOY576hDF9cvD4A7gybsyuGOk8N2FVYO4n8zjp9Omsr6vnu/f/zthjJ9DsgUOa3RROG0kg7sIdAANw25DAgvSvHgCArID7f1yJzwBiwGrgVsdxooIgvASscBzng73XvQV86zjOpwe559XA1QBlZX3Gbtq+c98f6HFdiM7+xet7lHhcN9iwaSN9ykrpam5jwIABeDyevWzR3VaxaZr7YIKObe5jyuidmdmjjA8sQwnsZ7n3nOt+jp6aDL82uL23lvGv97H3ImUEnF6JHb1T+0XbQZclRNtCtiEp2gi2gxcVRwYdkEwT03JYsmkTD902k85IFx5XAD2t89bbH/D+nC949q77QOr2FIkOkDHI+BQUQE+nESwbW5KRJQlVEcERME0bWZXQbQfVFsCB+5X9fYXXJiMolhdMAxuHjKFRkJtLJt2K1u5m1cYVPPPCY3w7byGZeAqv34Ou62zfvp0RI0aQSqXwerzdvrK9NN+m2R0odrlcACTT7VTvrOeNNx/lhaf+wdrt6xg+uJKJh51C375FdHTGSespkskkiiKjpTP4/B4mDBlGXVMjfYcPxG0pdHVpyC6RVEJADbtprq0nZqUZWzmEqh1VtLc1oes6mmUiSBncLi+apqG6ZNxqENGlEEvEyMvJpXbHDhrrG8jtW4wgCBQVFKNJIvlqiOxgDoKlcNufbsUnyyTTbWzYtoG2thZuuO56BGTi6Qyi46a1oQpbg0VrfqSsYjzba2uo3/Qls+bMY9u6PTwl/5a5pf76G9jUtp2zjj2dT9+Zw3U3z+Dlxx4m7GS45LYHeeGRC5i7WUeKRXjltVe49Y4/0tjYzocfvM3tN1zPNXfew6VnnsmTjz/Eh/+YTUdXhE/e+xt7Ni8nkxJQC6sxokPxGrvo7NIZObqSf65czYhRA1gRayKry0+4wEVHfQuuXBFL95FIinj9EK/LsLhuN/dddB3rty5h6ZxNtCSjvHzXWyzQf2RQe4A5DfOZdtgZNDSuJRCUqW3eRbSliIK+KZyoTMZK4w46ROriBAvyKcoN0xFrJxFrwE6GQbEJyDaRtEpFZZDKvidTEPRTtWk9eSPGc92YGVSXpnnhzSd56Kq7CbhdeNwukokEqkc6IBFH3m98JlVQ4zrfff4lO10Zyh2beDTDno4FOKlcauM78QgKzUmLCleQlBKgOrqOC4+6iZVr3yaTKSKspEnGo+zRMoTDYQRTYOadb9CybhWTph+HvCFCW5HER68+yVFHn0l9w2ZW1/3E9uW/8Pt7/4K9q4qtyVZWffk15z74BNK8rfS7/my+vOtlHv3
LoxjC/gHRQNZ/psT/u4HNV4D+wCigCfgtXurfiOM4rzuOM85xnHHhnBwsTUe0HdyygmQ56IaJoyo4joUkSGysraK+PkHS40FwuYhvXMvaTev5Zec63nrreVyCTNw2EWSVVCKNIDoIjogjZLobRpAwrW43jKSIuFQXkqqSFlLYmoJh2pimSSqtkTF/9f7YorAvmt3j0ulx6/QobkVR9uHJu/+bgG06+ynwHlx5T/DUEkCyLEQEbFHAg4RblLFFGxwHl2Uj2yKGCvGudqQMeGQfrUaMrOww9e0tPHXffTgy2LaB41jYgo3jlVFtG8fsRt6oXk838keRsPeW7xTl7olREcCRHGzptxmEuZ4gAZ9KIOjD53MT9PuIdaVIpVV8RR6mHHskL//1dU4/5RRcHpV4PE46naZfv35omoYsyximgWlb+1KeJUnaG0zuVuZuNURTSyNXzrwTR4VBQwawfPVuJh45imQ6hmAbiKaFSxDJcnsRbYd4pJOWdAflxQV0RGJolowugdsTJOh3YXRFCPtdjK4cSCwVp7C8iMHDh+CWRdweBVHqJtpFUUjb3RO1YQv4fD6WLvyOhmgDReUlFARy8CDR1dWB1NZFV6qT5s4GNEHHVg2yi1Sef+Fptq1bD2Ynr7/yJrYF0eYOHnniYb75bi6zv/2MKRMnE2nZjtZez9IVu1k8+weGjxx00DFx961/5OFbH+WHL9fwzPP38s5bnzPhkELClYW8/crtFBVWIOgmCxcuZNeOneA4vPrqSwwdPJ61WzdxzVWn8uCjj3HdQ0+y6MuvKAkFGXf0aAYdchjtsUa06FDa27cT8BSwQ4tRYLgJDx6O2pqmbyIbtTNJIt1JcUUubiGPwiyJgNvLsH5HogRinH7sCNavXUfA4+V3Fx/G5dddwZwFr7F9zkJcFTn0CShsXP8t8c46Nq5dTbpTI6+wi3hbAn/Qj2UKpOp1Qnl56GYzu6ubUPKzcBSJQEEMtytAl50iKwTplM6aua/w7N9uoTXksHPPIr5J1PDLl19y7rQZeD0uVEUgkoyBW+lW2o7cjRG3RAzD6i61YXcjurJ0jfVbqhkybjxda9awetscttYtoKa9Gkf2EfTmMaBsChNPPomI2Eo430uoq4AxU6dy6tjbaW9qp0DtQ+HI0/jdBVdw0xXPI7sGMTHUBzx+3nj6WVa1VfPy068iKDob6jfx5oef4haGMaxwGG89dCY7Oxtp27WN407/HVXzFrGeZl56+S2OHzsZI+qiLcuFIwtIHhd+2fUf69L/liX+r879d90pI0aPceb/tBIjnSbg85HWNLwuGVME3YAtzc18+sk9+CMOHek0gWCAo8+ZyeKnbkbO7U9HugFHF7EMk5OPvYbjzj4X1W1gmCqKksSxZURHRKQb0qMZGRQZanWTTMygfu23HHHUuQiqhaHryLiRJQFZFrElB9G09xGl9qwMehd+P5C4tbe75WAY0QOzsw6EHgHYpoUsKWi2zptffMLHzz6PJasknDTJqgi7mmq6XUKiSEbL4Fb37/TfuoMOTsrac/5BcX/L8F57/6Vd9yTkoCgSiUQCn89DMpnG7/fv424E9pUy6AkOy7K83316vTfo2GS6uggGs9EFkTPOuRyfz6atoQtb76IzkyHg9dHc3EwgEMDUTfqUFGJZBqVDh1EWKGBH3W58Pg+CoWNmdAS3SKwzQlNrEwgSHncWpigyoHQA6bRGY/tuPKqKJHYXXdPTJhnT5JeN6whl+/G6VERRJZPJEM4LY+gOXpeXpGbg9WdRWd6fY6dPZeXiebz4/Is4lsy7H7xNeXl/Jk4ej5Y08IeDbN6wkZ+WLGTUqAoWzF9HQUmAp555mkOPPonHH32eN4Pe34wD8ZnXmHbcsYwc0o/v5i3g5adfYPiUEaxatpiMriHFJG64/iZGDK9g0Y9fs3TlzwwZNp5Vq+dSkNOPN9/6mO/nfUKspZ7XZv2d775bzvXXXcvydV8w6bDJOPU1pEU3LWaCMp+HVH0Kl787kaFBiyCIfSj0irSkNSJ6K1ka+ALQHktQVBIgkshl09pVeAvzmZQ1gZaiBNUbGlm5bAuj8oKcNPN4flm7BtOUsDSdgEtB9QZoSyaRNA1Z9hMMudGdLvxykOwyg0hNAo9UgOlxI0teEno1iYRMbiCItzhFos2mqyrJkCOPZE9sNyOtMTzyl6eRszQsUycli1gOeOwDSRj2ukTpfh9/+bmWFVu/o615D232NjximGjTRlSllPZMPX4tF3egE0OZyNDyAfy87HnOu/YffPfP57ng4hsYOm4MH7zzDufNOB8hplEkh/jk3ddZ17mSkaNGsHb2Foyydlo7I3j0JvpOuJGKeJj5y96nK1vlpPMfZNXHDxKwLjAAACAASURBVDDxlCup/XoVSU8nSzd8yykn/J62uq2ccuNdHBYoQHQrzG3cxthgEYV52f9zlrggCL2pSU4HepArXwHnCYLgEgShLzAA+G0E58CHEAV0S0f1erEkCcsw6NItxE6blAKLPnqay4+5m9XtbQitW9i6dRsf33MtdSmT5lgMqVVAM1sIqlV8u+hFbvnDFTx+59Wk2tqwdQlDE9E1G9O0MUwTxePlpdmzuf/q6Tx0y3F8tWAut91+BSt/WEhQ9iM6Nl1dUQQHdM3E2cto3VMwq8c107sofG/pUdIHZmr1rpd+oAunt7LvsfoRQDYddrY2oHjc4JJAN8j3BQEbUQTTMJBF6V/ep/fnf5XS+6+O9V5tdEv388uyjCBI+P1+4Nd4g2F0Z8H21L7paZsDkyx6JhQZgWB2GASRJcuWkhOATJdGQX42uD14RANLj+FWHETBoivWTkXlALJC+bTuqmPhsh/RYq1s2vAzK9cuZfW21axfv57m5kYkBGRRobaxiq1b1zH3uy/o7GpizOAx5GTlEfblYCVMREVk4y/rCAR8OJKIbgtolo3H7UPUbNyyQtLKkJuTTZav+/9G2tp59OHHQBB54+1XOfvsc/H73bz88tO0ddQy74dvGTHyEK66+vekU/n88OM8GvYYbNvawMRRR/Lhm68ddBzcfMXFDBvWD0mOMfrw8RT2H8lPc38kIMkMCHtIdNaSytSxY9s2rr/xFvbsqube++7igvNu5YzTz+Wqq8/hmGPP5tY/3scXX85j5aJl3HDWxdx45PkUZ5Ik5DK0jhRiRiGzu5MhE8JMKSpEd6vk5+djUUeD3kqyo5mQJmCHbDbWdJBXMIwt62NozV0cNmUqBYWtnHP1dcR2ZvCmY7z60l1sqIqx9NMNyH6ZQFgEVxJfWCaWbMfWDZIZBxsJW3WB7UII6zQ3ZFAH9KFT7iIlQlzvIpxVRn6Wn6rq7bTtMTANSGUHqVo1j5PHnUzlqUNY2bQMUXGhuP3IgozH5d5XC6lnXLk9Iooi4GDg87uoblmIV7YZPW40I8pOpWF3FfmeqbhEP0X+gUS0FkoGn0Gu8zM/rLiLYUPPZ/Xaj7jr+vc4euR4vG0J+g8qp3rBIgr9IWJeqG+32bp5ER11NiOml9Ba30yW4CPaVcjOdXNp6C8xcODxnD76QtZ9cz
+ZWoummnp8ZRJ1jVupCFRwwtGTKZ8ylUNLiugKKcz/egHTywbjUX3/Rmv2Gqv/ATplFjAVyAVagPv27o+iG+OwG5jpOE7T3uvvAi6nGzF4s+M43/67h8gJBZ0TDjucvv0H4tgSoyeNYMCI8fgG9eUfn7xF1U+LiEY3o6pZ9C2dSEPbTmL1bRTkWXTFNUwnRloPUJCdzY5ohHyfi+FZIAWGIOcO4aJrbgNbw7B12jrbefujD9m5cT55Ti5dyR2UlAaRbD/XzXiAYVOnMm/zz9QvXcOl518MigpOtwXak+GpKMp+yuhAi7c3quX/hnppv+t6AqKGyVmP/4nWBT8R0dPkBvzkJb3MWvItiiRjWcbeSeXXTLHe1nNvK/xgDNs98oCw/3x+r21149/3PVs39LJ3neYexe04Dqqq7rfC6Pm9nhVKz/GedGTbtruBiB4vt911F031DSQsC8lQScXqsKw4huVCNy0S6RT1jS1c9fuZtO5ppLCgmF0b1mGpJmkcGmvqcYkqlmniKBKZVIyAN5ui4nJaki2072nA0TXU7ADh7BwKy0qId3bhVhVWrl1NlseH7hioHi993LmkbQtXwEcyFcMvqXhLcpBNAb8vhGFCdq6fwX1KGTr8EN7/4C3ef/cTZAXA5uzzjuOK627mmCOm8c6b7zFy3AjqaxsYNGgIC5cu4/LLZ7Dux23MO3LIb/o/74N3aKruonJEGXm6n7fnzOFvb97PucdeT0tyHgX9BvP1x4t45IGHuPbKq3niuadYsnYFLz7xEM11VSSSAYaPGM3jj9zI9bfcyTdzFuFyS2xuXU4iliaQaSNkm+xuyFAsa6jZQY7IL+G5nZsIeXPJyXdYtS7C4X1k2gPFoJusXbuDISOPoLQwjy8+e49oo0NLXSvnnTyWoqmVWFGFq298iAVf/wO91WH+4reRs8ooLiukvnknjmWQ7tTIDuXimDppRydbcFMQkDHCCfKKs9m8vYmAGMDlCrFr1y6ywwEk2U+O10VhgUCro1Ia9HPl7d/Q0b6W3EQBUp6PYX1LSBkakiBgGeZ+77QoyuiaSTKZor6+kVnff0C2oLOjOU6By6E2Uk80soui0iF4va2kElEEz2B8nQIz7nydvorIzLuvIt5RRdGIAdxy8pN8+/1rbEltJNedy4b1PzCi77HIgd3s6fRR19hEv7wcFGx2pBsYFTgVSV9OdcLFhefcwOo1c9nSvA2zfSNecRhiaRqP5RAlg5wwGTbkNFbNWcj7X33Ly2+9ydSzpnFiv0P+3wU2/6elLNfvzDj9CPY0Seh+mTKvTMKIY6CRVAU21+9iRLA/Hsek1WwknD+EpoZd+H0G8RaRrAHg6VTpiMQIFBSRHdRpbrHxqmEC2R4aIi1otkhxQSWRjk7CRoxtkSiTysvZ1bqLbE8OnU6KwuIh1Oysp/8wH1lKH64451oqhw1CMtxY7gyaoKOlbLJUdzdHHt1WqW3bKLIHsNEdC9mR0MRuhebRHbosjYDLtc8NI0nKv2Qa32c5IyBaGoguJs84A3FPIwlLxyO7GJ3Xn5c+fu+/nBAOJgdD5/TIwZX4wVOEDyYHvke9YZf7viuKCE736kGzNFrbO3n06eeQHInG3VVojky0tQq37CcYDtLW2U4iEiOT1Ln+hj+wefsOFFUgo6fJyysgGcuQiXTQFGnBckF7tB3RgWhrO16vF103GTJ4OIZl0li1g5b2JiyXm0BWDoGgj3XrV1ESKsMVUAhIMqYk4ZUksMDr8QMicsBLRtMI5eaguFzoaQtFErj7zj8y4pDB+/HCGoaB263SEWnjsccf55Zb7iAaSTBnwWdU76qjamctV115Aa3NcdqunfmbNvyz7VC1p5Wa6jruve0hHv3oEV546BkKCnOp3bKMPS2NPP3Mq3xw111Muvh8XEGRgtwhnDF1HDPOOoP6zmpG9R/HgnULmXL8xQwbUsyGBRuQkp3UBOIENBO/EieuV6JrCUqLNJbsbODo/oNZlazFV6fhKysjmYqSSrRjxzPM/ucS/vTwwwgJL4MmDeK9D94gJzubin6T6GxezOP3vMUf7ryDYUNH8cOKWST0DNlSLhktRTjsJqHFkD0u0h0Zgp4cMpZAQd98iuUyqhp2ktajSJaHiJmk1Cfgz/FRJ7cTNofidet0mL/gVgZgGFGyswspkcq559l3mX7CGcyf+zmCY2KaOp5ezDqCICDpDoJXoH5PO4889SiHlJdTNu4YWpbOY/KVJ/L8A88wZuLhhPoXUL1kNfXpdlqqf0ZWkjRLnYTkYv5w3nv8/cubOPTQY/n6y7kcetQkVi1ZRGu8mXC2n6BfQbG8xBAIyQbJhImBzow7PqTjixU8sfwlrplxK9r2dppamgmNKKd14RI2ddWQ30/E6JIQFAfb0DEUE3/FYYxRcskuHICvPMRZR574v0eJh0Kqc96hoxFzZfw+L7sa2igscyNpKWIRN7m2QDDLZp0eQeny4i0T2b2tg8p++dTU7KFP/1xa0yrVjQmy9RCqtw5fYCDR9gaEYJiIXk+J3Q8pXUdOUQG19S3IoTIamnYzrKyUgmApc9atx+UpoCDHS3l2HJcSprhoAKdceRmt1XWcOP102g2DdStWYEVTHHXcVFweFwjgCJCOd2LakO0PYtmgmxaansGvykguBRFhnxWv6xlkWf1NOxzITIJtgKBQdvR4+hgCGcHG7/IydeBY7n7+ib1ujX+vyA/mjz5Q7j+ASeQ+5+DlUg/2/X/la+9NPdVtwVukNR3J5SaRSPHIg4/RVN9EfUsD2X4vSF5sI0VrSxOdsSjxrgTTTjmN3PxCEnENt+xi+45NlFaUYZo2tmEielS8koqg2ZjJNF1WDD2ToTMWpS3SQSgrRF5RIe3NLWza+gvhrBB+rwdsnf79y6hrilDdVE+xPw8ry4tgdPv5o7E4xSVlJKNxZEVBdrnICobAlijIzSeTjvH2Gy91ryhkeV/QWpVdGLaF7ZjIokAilWZn1R62bKqmqFAlHCpk7NgxPHCQPri8o5Oq6kaCJSWQjjCkqJRzL7iB0aPC/PDDNxw6aQJePEw9+VxuvvQUXnlnFk888gCHX3AaVR++y7GX30l7w8+s25ngpuuuY+Ahgznl6OnEjT0EbImigkHEhFrcAZVUpIuSgmLSu2MMqEyxPJZNKt6AX8xldXU7fcr8uIUoQ8dfzMYvZ1MwaRQet8mlv7uDex85DU/FaLbOW01GDXHB9DPZUPUCfQqnEddqiXZ2dAeM0zbR9iSWKVBWUorlpKit2U1xiR+X5COWUvCGOxDNfrhCCrXbtuCWPFQcMpAtm2rpWyHxy/ZaDhs3CLc7F7+YYnPCRSjby6znF+CywXB0bMNEEOV97xx0o79UW6LLp/LRe+9hOVms+eFDHnj5ryz/YD51WToLF3yGHauntCBAslXEzkuTkUUuPv4Wtu9sZsPa13BnS7TWOJhGE3phAWHbTcDroq2rgaLCXLoiUTKCTVFRiLoN9fQdPoo1P8/n/AteYF39ehqW7Wbc1H60b6ulJWWjsYvyfsMxIxvAl08ot4zq7bsYOepkU
lozVQ1bqBx8HCdOPYLjxhz/v0eJ54Y8zrHTB1FgWERlm6C3FE3pwNA7qdkToU73MTbHC6ZDUHDY1giiINMVN1HzE6i+XCxEcn0ymh4jEnEYUZJHXDSor2lEy3YxNCRQtdPEL0Bp2SGsj+7EiscQgioRR+KkwYexeMkWJo8ZysqOtYQNN1mhPOz2NKWVhezu1MmWsghnQ0aziCY7yA7n0NYeobKykuceeZH6jihloRAxTUeWFWQARSAWTZCb7ce0bGzbRFVlTHN/JXkg7FFwRASxW4kXTR5JiSOhyZDl8XPWxOO5/p4/7lsF/Fe+7t7ukx7pbSH3/PaBlvj9HPy9ONDHfrBzPWLzqwtHFERsTachGeOOe+/FaY9jIqAYJt6CML+sX4utGQwYMJiOSDOK7CK/uBSPx4coyPg8fnRdJ+BW6OiMktIy2KZGMplEVl04gojX48ey0vhUN5Ii09LegoFNQ3MLTsYgocfx+8NEO1oZ2K+MfhVllBdnES4s5YfZi1i/czc+j4AvK0g8mSYrJwcxrZHRDdweH4Ik4/EriIKHcCif226/kfLifPx+/7421fQEsuJGFGQcG7bvqWXdhjV0djZy1u8uJpTtpqm5k7dLCn7TtqdvrcXn99LZ2MCn877i0ouuw2N08tMPK/jy2w8pruzLU4++yLqfFvP0O0+Q78g4YpTtu7YxbsJ5qHYnRxxyFu8ufoMH7nmbZx+8kk4BjHSM3Jz+BPNEWnZHMJVm1IibXbEGJo3sz7bdUQ4rMnl3TSu5hQrJCBhGFyW5AeysMMUBNw1NCVbsbOXwYUdww3nXs271J9z3wnMcecFZmNF6ivMHsX3zT/glP1nFNrKs0ljfgSQoeN0+XN40dVUNJBMJvEoJvoEBLCdBXbVJbiGMqSynOdNF+64OKgrLOfXMN1i3ZxmP3XwtJ59xJh2pNRSU9sdOJHn5yc9564uXOfKwY5k8YgyCI+5FHf0qLrebp154nWsvvpQmM82zz9zCiSdfwebv57GiuYrfH3suZr7Jpy+/RVppw624cecMoKZqBfgUrGiUon4jsNpM5JBDU+MeFMeFV5TZY9VzVGUpu9tdyB4dvaONpJKL7JXJyUkz/ug/ImsCVRtWctHhp/J18xZqvpvHFTc+xJO3XoNVJmNbrWAUgq+dXH8fIp0tDCyvYEdnhFAmTvGE6Tx+3X3/e5R4VkBxbrxgCPN31lKa3x8n1okmiTg2uAIh3J216KpJW1uA6tYE4w8bQNWmLjxenbr2FgJBNx7HQ5Eo4CsOEdf9RLpq0OqbUUoqyFb8VFRkkUrprK+OovoSlLiz8OZ7EWNROgyHPdEMRovEySefjKTptGZSJKMmbakqxhV3EbX6ULWtkx1124llFE4741Rsy0LTNLKzszAEh217dlPgckNAwWuJFOSEqWlt5JRjTmXr+u08//zzSLJDKpVAOuCl67Ho9lVOdEQcNETJQ/6hQ+kvudGlbiV+8VGncsntN6Cq6j4LsAcF0lsOpsR7TxY9+wdDp9zn2Ae1rHvu2/Pd3vu9EToAiAIC3YlFmUyGO555BrOlk1hzG1LQRzIVR4wk2JOM4PF4EPUEO3fU4wuqTJ4wBTU3j1RrBI/UDZFMmTrJZJJoPEFJnyI62puRUhki6UQ3O5EsUburmtKCEspLy0mlUiQSCQzLpqu9FUcwiWcSFOSFKSstpLSkmPz8UgTZpji/kNzcXKwui5dffY2EpuEJ+HFkm7a2DixBZkDlIHJ8ISxHQrfSFPcNcdvvb0GSJMLhcPek6nTHDxwsZn/+IR+8+ya63UBJ6HAsYQslfUYzfuxJrD7ttN/019WdEXKyQ2xdtY6Fq1eR78uh37h+eDSbJ+65krHjJ1KUM4gnP3+GY4dOo7Z+Ky6fzaDSQjbt2EaOEiY4fABd9RIFUgoxnMP7H77NoSdNZu3PKxnaZyB21EL0uGmwO5lUYNLYrmOKrQSDYzm8xMvz8zbicgtk0iaK7DB4ZAXLvt/GnY8/zcQxU7joqmu58bRj8A6eSHlWKQ89fhmGlM3mbesYMjxI9cYOBBKUlJRgouPzy7S31uHPH0JZnkM8mcCdDwMHDGHJgtV01Mv06x8k1mbjK3URbYtiaGky6e6g86xPdjDzotH4cv20ter4/X5uffItvvrbh0y76DxOHTiMZCaN1x3Y907ato2KSPOeOGK/XJZ/9wMlA/qhJevZuXoHy+e/xJZognETJ6F3hnBLXZRNOIKfP3sRpV8hbTUbOPSIm2hvXMuuup24kx04RQH8XSJduk7EL1KSsGm3M+RmBxENEVv2gdlKXt7pDB2Vz5Zv3yDoHk3xiZMxlXyWPvkGrlGtKO5SqK9m0im38947D1Dcv5B4RzPZwTBOVKZfsUZb9gSitbXMnjX/P1Li0v333//vrvkfl3vvf+D+xmaNpoiEXxrIloxOvF4j0piiKyXSYMQwUjm0JSOUh8vpjEYR3GmCbgeXkoPsZPBJIGZ7iMWTZLQMwZw8WpIqjhmjbGghe1qjrNlS1008kVDQdQvB0YlbxUian/ETjkK2ZRqaO9lZvRkllGbrtiX4fG6qdIuCpMXIiYezrTFBbtpDvL0D0QeOJeHYFtVVUXK8All2mITRSrsmEvR5iWsyieYqcp0I89bM4bPP/kZXcz1zF//E9roduLw+8orC1Na1k5ObTUaXiHZlMDFJqxaKLfPWi8/hlgVkl4xHVOlTOYjDJx2BZqRAdrG7I84zf32W7xcsZNiIUbh83r3YeAHNzCA7IprRXYtGQMJxusld4VeI4yIe2K9PjnDuwbQsBBs0XUeSZQTBQhQcND2NoSskVYvv5i9naP9ytLSApIjMWfw18756jkUrl/HNt++zcesOhg4YwYzf30yuoHSjPfQUhp6hPRolLdiEs8O0tDaR0jRy87L5P+y9eZhlV1n2/dvzmec6p+aqrq7qeUyn052EdAaSkDAFhKAEwaAfInwq+OHIKyKCiiCI+qKR4RVQhGAgA4EAIdKZOul0utPVc1fXPJ2qM4/77Hm/f1TQpBMGfb9X5bq4/9l7rb3WvtZZ66z7es5znvXcmzbsJBxNUqtV8C0DRxNo1eo4koCoSIj4BGJRHNNHEkUcT8So6wRDIcx6m6XlRSrVMrrRQUSk0aph2AaGZeO6Fu1Gm1yuC8Nq0q7XkONBfNfDb3ewXIfLrrmUN7/p1Tx68Aj5fJVYLI3vekiui6DK6I5NJhHHNWQmJ57mzvu/zqtufg2moaP6DngOjz52mDs+/i68UI6+vl7ylSWaDZvzJ0+zWjpL/LVzL9gHY7PvxGi3CXb1ErBL5JeOM37wOGomx9fuu4uKbvDQfffx+KPT3PCyW5g4P05X7zr6N1xGUBzh19/3Ae6/90EuHx2lqLU5+fD3eMWv/x6lqQq+so7dV/wCb3nNG/j203cSkeLIQYeJmktwqI9OucVkfppQRkMPRajpEpF0nEbJIdyVoLpU4vd/8/cILJd5auYZhKU2r7z+cvSFIudqz9BqLpCJJhgbjJHrC7OxO0mtXiZkjOGFphDNLEhVenK9+LUEZ06N
o0kaOzcPMl6Y5KY9N1OaXiQa66UrbjAyCL29WR777gPcdvsf89Dj9xKNRfjcB+9j8fB50vEAbbvDvg27CRDAk9bOZVimhaQqeL7J0/NV/up3foXojnUUm21CLZGvPfo9PvL5e7i8dysnJma49S1vZflEHi1gcWLiSZpWGTEUQBebTDw5TfeQh9eR0TsyOhZdPTlayzXkSABV03jp8BWUNYdmQSTa1cNy/ikKtSlK0wVWYg6rs2fpVObopJdoGAInz4wTGoowM/EMfjxIRg4QRsEMyFRbNhUBjGoR0ykydaac/8M//MNP/Sj+/G9hiY+ODvsf/qN309LrnDk1w9zSLK7gsVQr0CiWkZs251bq7N/Yy2ytTDwUpCE1kEoqvSNp8ksSWwZkxGSbYGaIJ48sk9ASqFKNdC6FXy2Q6+tnZm6RTK6bmYVF7I5DIDZMIukQESQmZ5fYvPdy5LrLTHmJmCtwZnWe3pEkouHSo8Y5vdIhlenGO32BpiQQ617HukgYLeVgpQyEapSGFmJ5RScaEQmIOjlFY0daZ1Zy0MJBWkWdih3EpoDlWoyk+6nOani5FOXSef7io19AEGOEQzGcgM1dn/8Un/3ip0kpIWwE4sEooVAIp89EdaIMpjx6klFadhgVEb2pY0kaWjBGsWCxf+8u3voLb8WRg/zDP36RM+Mn+fhHPoxn20iSgu2uuXdezCcuIGBbFqIKZ86f5YGHvsDsuMBf/c2f86nP/w0zD38GNbedameavK4QDEhc0tVLoTrLlq3bmDl1nNHLb+PI0ys0m03qlTyNdotms0ksFiEVT9Os1hFcF1HxWZjNs+fKvbhSkE6nQzqdJuaLFN0Wkm5T82y8TguzZeBrAWQkRMVBNywEy6FltrF1i0qlhG3bBLUQrryW2Q7fRVVlMqkoL732AI88/DgXJmbpH+5hJNuNFtUIdiVJhxPEkgniIZmQlmDdui38zzv+jqm5eXp6+lBEFd+0sWUI5Qb5pTf8HD39WcZG+pGA8aOH+PBn/ydL1SaBUgXdL7F9JE6j1qTTMJAyKbwK7PjmsRfsgz/wfM5MXODg177DNbdcR7mwyiXXXsPxhx7ig+/+TYJbM9z+nnczKoa452v3sm33y5ibeZgz43cjmKArDsFEhu6Ox6lyi1woSdnTyckiQthiph5naXaRpBpiz04QjADHFvNosQSL+RIoJoM9A4gdl+XVClJYxLA0tnelODUxx+///FvpSvUwEInx2x/8I+6drPCLv/0OnrzrH4lmfartJl3xHrxZlaf9Klelu5nxZ0mltpCKrtCopskNaMzOjrN/92WMLy/RXqoRdGTkmLR26EqxkeMputUUclzixPICw+uCVI83GNhzG7e97a1cPboRz7LQXYO4GgPXwcXB9H1aHRtVlEmEQhiKwx/+zjtYl+vixrf9Lv9yx524VyV4zcZb+eQf/xVHja+zR9xJaMflWOOP8ODp+zDjQQbSUeqlKmHXoyqq4Lv0dgcxO3UUScXUBRB9lIiMKuQwhSyb1g/z1JEHiYQUpIZMyfERtSKJSBfl6iyZ3ADF8ytoahxP9nA6oEQsemIpCjTRNYNQNYQnV4kqKQIxl3/69OmfHHdKKKL5/aPD9ASzBOISSjxIbyRJLBqlJ9tFsnuUrlyIsJQinVLRZHBDUVTTZGm5QF3I0yrDxPELIDUYn83j2os0yjU6Todjx1bRZBHf8TAMWLe+h/6Bdczml3jZget46HsPsn79GAvFEvmpKW6+8VWMH32aLdsHaYeCLC+eRfAyRFSV+eJZfvHV17NU7rB3KMX9Z08jNfrZ0dXDweJ5VucqRKMK9YrPS3dmCUVmkFs2NTPAasunZUNPb4LV2SJKJogQFNGiNktzDjuG0yhuhLpoIdZn8aIJwmqSw+OriI6N6wmEFY1YMsDWLRvRqybppMjUzCl6121G8MBxNcRAgNLCIvFYEE30aJkCdSnM7i2XYHba1Ks1PvShP1nza3s+tu3ysXD0eWvyPnctZMsRbD7+iT/h3PlxVMkmX7V56bX7mD6zgLs8TTveT0WfYc/Gl1A8Nc6yu4oa6EfoOOzecxmnl9tU2jp9jkRL6dBut7FNh5WlZQzBZ+PYGJ12nUK5xIG91yPJLqYlIGoKtuUhWxbLzRL9iQzFTpugbVCv1qnpLRKxOHIkQF1vIHswu7hEOhJhdmEWVQ2gd0y2bh1lcW4WUQBVVggGQ4wM5ch1JekfGiQQjPFr7/ot+vpyDPT2ke0JEIoOkwiFyPZF0DyBQCTB9u17+bs7PsfTc+fY0r0B3bfZu3s/W7Zs4wufvYOPfOJP6M1k+P23v5lIehopfjUx8z7Gm930xMIsLVs4SgzDarJvSxzxQ4+8YB/8f3qLb957D63SE8wst3j77X/A+IVx8nNlHn7kHuzaCvsuv5HhSw5w/Y038dcf+W0KDYfps98h7Fk4IY1CUyaW0CgsNKh3bPZvG+HU5AIRIUL/QBPHrhOIDrK01CQ+pFKvFMhGIrhigPxyE1UOYBpNNFGjO5XFsus8enSBvZv2UbswwZtfu5/CksFLXrKfe+/+CuL6PhZ0i3LzApptsiHcT9BzqElVglGfaUvFKLcpd1R0p4GqiiQiXXh+FcUSCfX0Y5VNtLREqVKiUTXookHhSwAAIABJREFUykWQmuBFdSaXkly5LsCCZrFe9Smlt/Gl37sTSW6iaRoSKq7vIMky9VqNe+6/h8uu3McHP/VJNnVHUanwmrd/kvu+/vdo8wF6XrmX8a/ezRZ1gLw0wyf+7kPc9pa3YyyUODM9R741T5+aY8opsqUnTkE3yGgSTUXF7dSxTYtELIMveISRCWfSiEurWCkdoRGmSoyOVKQ800LGpLs3iCRmKZfqxOIhaiWdaDaHY7RRUypyvYWmipSdNiE1jaIYWA0Qgg4P3jPxk0PiXYmIf821I6RDISZbbaSlVV5z86s5MzdFSzDoCC00QaJT91nX3ceJ2WlsSUewc7TaHVShhVG3UHM9iH6ApOITTw2QzoWIR5L05Ubp7cuRysSRtAC+otCdyKCqBovnyyzrbQrFZVamZ/j2o/exLb2J88WT1ObqSN0jOMYq3YlevjV+mIFoEtPQ2XPJZTx2+AxRVefqA1dw98NHycQ8xrZuZttIisuv2cyp8aOcP1UnIOqoei/d2QBPVM8gmiKhjs90sUo6k2VxvsG+/b3MNpoUGxbrohE6jQodUeSy7Vs49NQCim8iSSpxRcMVGyTCEt25dTiSTTCh4jYMfEnDEpL4Toes38aTDXyrjSHGKSgxwo6PEgrgCTL5lQobxrYiiyrv/MV38Nm+geetye8aHVBl/vBP/gAtYLC8eIZSoU1SiqLES0zOV+gZ2IBc1YkGVM4XFskMpikW6/SHd6CGhlAUm1ajjKg3KPgitmXgOj6xYARJEDBaOquFZdSARLNjcMnVLyejqWB41J0OjmeCY+GLApbeIRgKITgCkXCQqeV5RG/taHUgHmBlIQ+Khq836dgGS4UCL7vp5bRbFSrlEqv5JUKBIILsEtWiBDSFbLaLRDjEwOgYO7f
sYXUuz5nJp7jjM19CkUWuvm4fQ73rQBbRAhJaQKY/u46DDz5DJSgS0SQ8T+aTH/5Lmo7O6vwU9971/3LDy/+GR+7/JQ5NVunvlbDzDRqlAFo8wmLNY6DfYfM/Tb9gH2z/6hc4/dghjp94lA0DW4iM5MhPTRAyT/PIvEB/WKBv0xW0lw6BGOdP//4kX/nUB/n4//prrt22kYbjUjdTDGVaLC0WmMxb/Nq7f5W//ruPYHRcrtgzRiOvU4sus79/AxMzC6zYOhuHxyjNLWM0PAzZIpBIYNaaDHbFEGhxaTDCvWWFnRWF9etHmJbb7M92YfgZJu2TRNs5js4dItafICyItN0QA3aVes1C65KY7zSZmDVIpDTCYagWV7jpNa/ikS8/QHp4jCXdZtvGNKJoc3R6gT7RYkUJ8qbr3kO67PBbj32W3kaDSHoDhh/n4bvvBBeQBBxfQhUEPEAyTP78Yx9ivpHnhutu5YnD99E0TQKaTLXq897rX88dT3wDTThPGY1bXvbrlE6e444vfxojoqB4DfSmSndOQm/WUEwDqSeMZMq0vA6JoI9rmcTCMVA8RLMbJ2wht8sUpB6SQgPVaVCTw1R0g6F4gkrFoac3wTPHzpDsimC2bOp6h3Q6SFiIEIuL+J5Aqe0hqRqyAIIp4CoS3/3qsZ8cEo/ENf/6/evpqBqG0yZYlLh0706+deZxct0pPCmI1jDxJZGx1BBPVs8zoCnojkeg5aPLHV617RoOzp5AjMkkhAiqIGG6dfqG9lMunMW2bQiC4cGX7n6IV157BV65zXJbx7XB92RUWaS7J8m7fvZdVAMxvvX4Jzj0vWnGEl1cODuP0htGiptI4hbMI0e5/De6uTZ5CxtufCtD4UF0rUMmEsSeKfJk+xnMmkC5soI+VeR07SGqcxU67RYnl1awrTo92QFOTE6TTnQjqwK1SpUrD+xlabWMXEsT7XIZHPU5dmgVx1dBlpA6JsFEGFSV4R4Vy1WoGvMMSiF6uzP4rsdy2aSgt8jXfCKBHNlUmwPbutE8HashMrOkIEcjOIJC34BGPr9K4GP3P29NCr92FdW8hJyWWV3W6RuM4lY7mJqL5vvgBLAcm2CsAfUIuqxi+7Ay3+aqy1+BLPmcnLmA69kkAyFky6YjeJiOiyTLeJaJ7bkUFpcBkUv2XU61bBCLhOh02iiyhC+uRZ8IgkBTb68lNfN9okoQLRiipreoVUuEA0GKxSKhQJBQV5yFpUXGevoIBYKE42F0vcXc/PRa/LAgI4oSrm+jqjKRSIR4LMJAfx+9uW4CCETjMSLdWQ498Ah3PfB1wmGNA9ccwDAMcl19yKrJ5k3beeLQLF5mHb22T75+iJDeIZGtsVByWKmucvPP/Rb/62++yM50BcJFKk6Io+dN1g/HufzO8Rfsg/P/z0bWBccoFWtE1CBlSgwTZs6RODFdYf3wOnSxRI/jkU5naevLdNBIpnbx0MMH6RvrJRMY5Mz0d8BIs2v/pVRqTS679go++t73c/VtP8toPcZjq0cwKqdxg2kikkfSM5lzO0QskeRwD1HDReuJ0snnqZs6GaFJesN19BdERpQspw2XV73jF7nmxpeQVhO0uhO889a9jD8+zlhao9Ru0rIyzJshNvatsj3j8JVvzRHbuJmF1bW8RqpUR42asBInuCVEe7nClvWb0M0GK7Uq/ZuDVI4VyFy2k/nHalx/y008+L2HeM/7P8DP7H85juBQmJgh2dcLoo8uBekSJHwFJk8dY15vs29gP2fmjvPtb95D99Yc933l7wl2KeRyuzGnl5ByOQ4e/Drr1w+hmwbxQITF5WVUycEJm8hkGBwaINsdZG5igXRygKXJ05iWiKR2CMfD2LaNLCTBEwnlTKodHUl30VWRAbWXp4+P4wpRYrFu0gEDW7MJyiHyyxV64lH0tECP5bEiG9QnPDzfJNDdRWe5xPgz+Z8cEk/GNX/vgX4UIwRah44c5EDvGN9bPE3a8bBEiZgcxRcMNiXGOLo6Q0xwaGsSCTVM229x6dBOTp45RSQWxLMLtBSFQEdgdPfrmZz9BvWGRzqRJBlP8KXPfY1X/NxtqLZPtTWLHayh6D6X776Rh88uIbh1rMYK7/y1O/jMPR9GLYSpNhbYvWWM+fYEXX19rBxd4U///Mu8+u1vYnVmmrBX5tW3voKZhdOEdZe8p+LXPYRIN7VaATUqkQh0MTq8nki6j93rLkMbjDAQjRKOBVk/NoLTgXptlWplhcXWDLUVmxPHT3Hq/DQz589j6DqNUgnP97HUEpfvT+EJKnOzHbIjWURXZPtIlkplGsHpZaGkku4z2RbROFtusrgUZvK4Qc9IHEHSyXYl0VJ5AorK2Geeft6aTPzsOjK5CAIZwgIUdYFE22F4fYKILNAqt3nKrLM61WT7ngGeOFbAtRRuuOYWFucK+E6DUCaNaRiY7Rai62NioygK5VKVRqNBTA2T6M5CIICIgmPX0RSVaDhIs90GF2RNxTRNVosFfFGgsVrCVkQ0SUbs2NiqSDKeYmlpAU2R6PgGq6ureLU2IyMjhJNxooEAxXIZU3TQ+Deha0ECy/WQEIjHo6RTKaIRhWQ0Rl+qC623m2w0iaaG+fSn/47F5Qu87JU3027YrB8eoVTLM5YY4lh5lZDpg3uSUsOEmMbuy24jf+YO8vk8lXaQlgBhu8lgdpTzU8vse+CFlnjptt1MdlrsiMQ5763QrfSQqlWYtCWWDIGRwQS06mh+BFuxyCVGqNUMSm2PWrtKpdVgz871jE8exyvbKOkspUqH/kiIeL/ENdtv5zuPf4Atw9uZqzfQy8sMjYzRXqozoTcQpBBz5xeRtDCqYDHalSDUFWd5qsDLr34Xi+YQajDDvFNhQB7BOvan/N6DdyA2HIbCOf72N36Jx49+Bat3HW59Cj+4noC0RNyL0dUTY6FSYGU1yrSxxOjADtrNKSRtANFbJpgIoXQEfFWmYrpYq13oUpD68hRS1OLaXTdx4tRhchuCFJY6ZFSVT3/5Lg49fJi+LRuQ2hZ7913B9+7/F9aP9fJnn/5bzh37FplsGk2V8NQiXlvFsQUikRgXLozTkcZIhSt0iKOqGprYoFryGBqWINKNXm4gCtZa+txmADXgInohPKFOSI3iywJC3aBtGVjBDl4gyHAkhmH5qNEuGqvLCIpKsSxTrbUJBmw0y0GJBVBCMr2qRitgopst1vcPcu70ClY7iBSIEdRqPHbw/E8OiWcyIf/lN27BsiQCAQPaCv19gxyZnSCtqKSFNiVLJBC0WGflmFQt4gLUJPDbHYJagG39m3nwxCGkriDJtogpeujNIvtufBvnv/dPNHUfw7OQVRlBCZLesJ3q7HFW2w5aIwF9Id5y4BV84/h9WLpFy+kQN0Re88aP8Mlv/CqaGQSjydax/RjHH+b2jx/k93/zbZTrDfoUmdXaLC9/5atY6EwjNV3USBSh2mDr3pdRWHwM7B7IVAnF1rO8dJ5kQsV3WnTsCN2pfiaXDlOvu0iiRjyc5eipcURBRfNDSJ5MsCfJYLaf7uFR0qksm0e3IDgxtu/dRi
KkINrQMXQqjSUMW2a1WqG5ssjkyZPo+mnOrTzD6oqKIC7SbGnElByJGFh2B01z2XDn1PPW5N7LtxJRU3SHA5S8BqO5JNO1CmNdAep6lZgmsTuU5kRzmUenOtx4/euxWiFanQKi5FCvONitGrKiEE7EKOtNgo6Hba6FCbZ1iy2btpHrH8ARBETXRceiU2uCt6buJHQMBEnE9X0QBabn1pJXdWo1XNmlJXm4LRNFDrK4MMOVL7mMTDrHPV+9B79t0O50UEMq8XgcTVOQNRVZezZtgCfg+D4aAoZh/Gs6AU1JEI5IZLJR+vq66ckkiIST9PT0k4wnOPzUMe65+5+56sAV9PT2UwnY5EyFPde8gS996naSIQ1TKlLVTWYX12GXC2y7LEy7ZWF2Gvi+TyK0g4EvvFBn88xN2zhZmsRIZXh9LIPZLTKxVKdP0tD9TbRbi/RHGvghC9dKs1gtMtupIHVkfFcj1uXg1jXEuE3aT1PTPVarC9gpjUathTsvsvPy9RQby0RjARQlRX42j9sTZLuQ4vzcIq4aZqDPo0dJ0RKj2Eac3/zTv6b9nb/iLx7t4a3vu4b7PjfBXrXD1LZdHIhPkOgbZUsmzuj6rRzYMsytN2zh3kee5LIdOY5WqzhGgoGhBvUlgYJSISwlmFvViYaiaL6IoViILQEn4JBMSiRCMcpWnnQ4y2PfrRKMpQh3dF536xuYmpjm6quuZnlunINHnqF3eBQvFGDvJTeQMBrc+9g/8dLrXsl3jt1LtC7Q6QN/2aZdAQIhPK+MGzLwLA+93EXMr7KitPFtnagUQw0mkTzANggEJQTVolkXCQcEWpYLUZuuZBJJrNEseRTaBj1d/XQFFVrNEp4k4ggymqgSEhykcIyl8grlSgSpXWTdri1UV4tgexiWiRIT8W0LwRUx3AD96xSaswqRdQPc85lv/OSQeFc64r/6qg1IQQ3VNYjKIeRMiuOzM2SjYUyrhRDuRbSKDAj9LEd1xEodx5fp1aLUNImNg1s4fuh7xJIBam2fhO8jd0ukhq6kduoJBNfDFjqYos9CzeDyK67i6aPPoHg29biP6wW5bdulfPWxg8SiPkgZmq7FzlSMVs8lTB15gLDtYscGuGZ0hEcXS7Ra48TcCHZbp96y0S7ZTjKfpxSOonoNpFaNHS+5jZXT99GSQvieSFRL0a7WkSQHI9khoMfI9fZRWDqPJUAwHmFk6BImHnmYZb1JqSPRm8rxyFPPsKm7l0hIRnMcCobP1TcM42plVL2PmrOMawcwGxae28AxYyyVVugIEulAjGGlGzOXI5uL0jPUxVBqB91j/azL7kAJNLmjK/m8Ndn5tftwG3WemHkcu1Di9OSTBK04ZyfnqaOSy8YJhlReuruLSO5VzM0s0bHyxGLdlKpt1IiIaJiUCkUATN+lVCqB42IZJgeuuQFCYSTXp14uYVot1FAEwfJRZBlZU+m0aximiawqz56C9PFrOjWrSaNVRfBcWh0LvW1x5RWXIkouWiLOqfETBJA5MzmBZ4Mh+lhtnYCvEE+FAB/bcREEhXBIRZbltTh7QUAU1zQaPVFCsUWCsQhdXRGGB7vJZrsZ6llHb1+OmfkZPvu5O7n+6usJJ2zy+SZbx3Zzz4OfZvOQi+ZJVJQohdoiqWgco1LHpI9q1WP79i4yH/vWC/bBiasGkLIGotrH8blVMjGTZEmhFk0RT/UyIqxgB1RW8nWqQZveeBfLUxeIZMdYnGySSsHYkE+7JVOutGj7YVo0mFq0MDsG2zbESSeGWW43SKp1CmWdjm6TG+mHuVUC2Tgly0LXZUKizDt++V30aP3gNfjwHSepVrvI9qX5xXdvYXeozF/97Xk2bStz7YF9VJwcfTGDB792N/cc/zq1vEh3TGY4GSEXqSOpNoFYjOkzZZZ9izNTAsGoxcbBNHbco9GAnohKkwodTaEwHyTqZxlKxThVmkQ1JAY3DEAxSHnlMNEN29nVPYYX0BjMZnnqwknOT54nHYrT8isE5TQBtUOraRNAQFcaXPuNyf8MOvuR+OfLB9FbImPDUXS/jhoJoRZMqpJPJiMh0UWZRQ59efEnh8S70yH/9TfvQg0oRNo+htAhHM1R8Gt054vkQwqDwT4ExaPqq5iiTdZso4dlNE8A2SOS2MO5qdPEVR9bsgmINp7VIH7JW1h4+mt4ro9nWFhBGUV3Gbz61SyOfwPJ9lhttfnavWf/r3/O227dQy67gQuzh4mHcgR8E9kx6Nl5E8ePP4DgqQQDMXZd8jLOHrqLmukgRCXSeoqDnXmGfRG3A5oLCypsDQWpih5y20Ea7mVQNvFMD9N32XbJTSxMPIIXCuL5HQZHrqI8f4xUWOafDz3Nm97wO4jWEfBFqraK99ufed5YH3rjLkI10NQEkupi6CK5no30p6Pc/JpfwLI9vvIPv0MgvANLlGk1aridDrIHDj6iYOM4DpYPq/UKpdVlGrZFX3qQ1738tSzOXUAMBWi2W4iKTKvewBJs/IZFOBrBchwqZgup1sGJaBgrJby4QsNpEW97+L7OdKnGzi07GBzso7CygoeAL4tYzQaNWh3b9XB1i1KxyOiGEWbnl5BDAZrNJqVSie7uXtrtNg5r+dzbgo/q/ZvuqiBIBDWFQCRKuVwkEtbI9KbYv2sP/bksu3fuYnZ5mQe++XX27t5BpVpHwccLCwQliTNTx5DdFjNVlahpkx6Kk+yEWHFXuOTuCy/4fjy0L0XP2Hqq+WXqDZtEpgtB9jFLNsFUAn21QKHjkekZpNhcxG35DIzKiEaa8WfOkchpXLk7xtn5Bq2aRs10SLQMbr/tEhamJyh7KvmqQ8s3adR8PN/Htj0ibpxOyMd0dfrCUcSQxlD/dl553WtxXJN2vcJffuwDkNzOxivexIC4jphRYc5PMjz2KMloD7fefD1NAhhnj3PXP3+IxopLQ4OFWJ194iLVQpZzYofXZUQ+eiLBWH+bc8eajG5JoFtB4qEOgYCN1bY4fqoCkTCb+gbIxnvwghbzMzWWVxtcvb6LvnUDVGwdrZNgsnaGvnVDbFm/DyEY4uhDByEMQmuFFdPGTxp0tcMQ0dn5pfP/OtcfeDai9v3Pob8PCM8v/1fjAwI/Fom/8JjffwFERMJoJEIxqu0SrXAAo95G9GVK/cOcu3CMmmCRyIaIk+SCY5BKppjXy8Q9BcsRGc0GaAttJFFEs1cx6EUS4ni06VGiLGcEgg0T0bExJBvZV+mSI9TbZQYzXf8pn7NPSyDUdLKJELGYSL1goEUCrFbnGM2M4igtLL2J2daRFJFcIImu2kRTObZPFogmFcRYDL/UxpMELNVEaDq0YhKuZeE3K4RDcSJykKAXwGy06TRMLNFkcEzBaLRp22m2je3k5JGDzBurUDe4dP2lXJzJZX9yAHXYIuAOUmqfJhxdR7s8wVxN4ND32lRKEkbPFTimTzE/RTAcAEXDaOpr/mZfoK1brBSXWFqZJ6LGuWHnlXSPjlLQ21ihIAFXIB5N4fs+utsmmkqS6opSb9bwzBZpS6Qa0bANAzukYOQrGJKHqTeYn2ny2l+4i
kqpA9KaGG5Y1dB1B00L4lFBUwVaiIxs34CiSciRICtzyyiqxP5LL2Vm5tmc7N5abnNFVvB9Ed///ulTB88ykUSFRCyNIspUaxb3P/Av2K0mAz3dvOTGy3j9627FNR1mJr9LrjtBs+wwsvMyHj1yjLqukI7VCGXitBahk1SJCFnghSSuhBI8fvQ4W0c34eglWi0Hve0zGE8TSySQvAZ2PcC+HpGjNQlhCJyZVebKJXI9MYbGBpgpLGLXA3iOiqybNHtiPPDMHOmUhG73MpmfQrFijG2OMbdYxMKlGbKIugpmxWKyXqNTbPPF099h/MljHDk2zsZdWymb/exU6/hH/ozHAqv0un1Et29hXegKTFmjXGpiqBa7du5l/FdPMXigB22mSdK0mGKMRXGWndke/vFEk5FkH6mAwlUHuglHwWh4zOWfolhx2Ln5JchTj7EnMIgkZrA0kYwTJnDtJVw3W+GRyuPk8l2Iqsu8M0tvfASzInLKOkEu2cVMe4XYUodK1EHuNBkQh6iGV9ggbgDOP4+8PyD8G3F/4DnHJC5u8/37/674b0Hitgg1ySJQq2EpNmOZFGXBRFM0JM8isWMXcS+IhQ5WiG2iQsYzcMMygY5HK+UjcIH+3g6q2YXW7KKpqjQ7DTYGUxx324TaGoNKjKZk0BREYpKAKkqE43GstvHsWWn/366wdv99/LBfLM/t+/22z33Ps+jtkfC8NGrNJBwU6OkL41smke5BzPmTWIEgmhxH00RIa1iChKFAxvMwNg4SEiw0U6YTVgmKDqlYGDPoUNU9bAu6UAgpQRxZRpVgMJulYTq0HB3PtpE1m1ZLRygXWL9xhEEjSizZTTMpcLGSdX9IZrrZhICD2TIRWvPIEZXBTI6Z+RCOC6JRQ68XiXtgFw00VULWTXTfpNbWKVbzyLJKNj3ATTe+Zk1l3tHpNDsEFYmmbiApMnqrTSoZxxRhdm6SjqljdFoYtRqGrJAOrYkXK5pMoCBRV1Z59/vewanxJ+kPpFBdmUAoim1axBNRXAlqzQqeYZAJhMhG4xTKBebOTrN+60Y0TeGJJ59m7/5LmZqaQfRFPFFAtHx8ec0Sd5y15fMFh2qziGU5KIJGS/Tpz2ZJJLOUSlW+9c2H+cbXD7Jv72UcOHA188Ua/vIJLiycobhiMzAcp95QKMw0MSWXm3alWDnz4mr3eQuGMyMUVwyWi3WCYYhEE/hOi0AgwkoZ0pkyC5ZE/6W9nDlWo+iOER+uUSvZWPUaraZLxXCQPAcxLNNYLBAeXs/hY2cIKIskgxl6d0WJh31auodjRqnYM2zdsJW5okY+b3LizCnGT5+iXCvhSzJd8TRW6xRX7v8Fsk0Ts2Ow4DZQogMcOjzBS15/Nd09Gc5NXmApKrEhHuHkw6v0bA9iCzGCWPREdpNwniAbGuBcc55sei+C3ERvB5lbPkep0iE3FEaVZHKhCCthmeMnJxiyyqy/bA99F05xaHKWT9z+a3zkG1+nUFxipH8Eq77C0uoKkcFh5mamuPzSSzjx9NMMhLsom1GmqhV2JNYxby3Q+0M46GIiv/jZxfiPWO0/qs9zn/+gsbwY/qPybP+/wrUcWpU2ddEn0BGYm12i3NTpVJpUqjVWFlaYKheYblVZxadmmUwIOg1FYN5qYbkGrZZMrRgEUcNOa6TjCt1yD4bfIRKwSXotcMsoIYPetAphGT+u0hJNoqn42kAuTiLl+z+cvF8MF7d/9vg7AIaGX2uiRRzKhkDLA08J0tJFFMFHED1816HVatGxBIJqHE2OIoVixF2XUFAilonRm0iS7kqjCQKhUIBsJEyqP0X3UA4lqaHHYDVq4zkWvmmi+A6K56F4KuG0zaZsFx0/TVkTsAWBnPVCKShJFBkM9tObShIJaGTiHkHRp6mvIxVJkIjIBF0bFQ9JEggFVVxFxHRN6vU67VYLQZAorNa4dM/ltFpNwkoQq9EhIGmsNmoEQhr1ehUPdy0McHaKRq2MrrfxXZt2q7Mm0lysYFkW5UaFWfMcH/6DNxG0LtA35CCHJOSAgiSJOK4Joofp2ICIIkkISOi6juN47Ny+m77uHEvzC1x55X6W5pfwnTWJPNu2UZ/Nz+44Fo5j4XkOprkmMSfJMuFYnNG+fnq7cvT09eIHQji2gofIqfOnefjxxxlIphm5bA+zp6bZvWUnuttmenyJlVob2YMLkzWOu5EX/epYlQKm1aBYbiC6MnQsRFPHVSUEMUBxuUp/LIpj1ymcyyPITSKSQ6ViImtQrJRRgimCgowlC3i2B11JDk+sYEf6WG4kqFlhVuaK5GcszI5MpWkTDl3CQHAXWaGbO798F08++QSC6FGplGm0mqy/dAtmB1LpbtalAyR7RHb1j1GxQ7RrM+iOxczULCODAyjROKf0OKndG6m0ZDZGE3zrwiKh2knOLmXZsP/N6DPT3H3vnRz87uM88vj9zC0XUIIiy0srpKNdHNizjwPDw7xh8wBCX4pqu8Cxs6dI+0EeOnIEKRomPTjMImUWRZH08CaKxSJCSOOJ7x6kKXiUjBrd3R5x2aXpzGI6L9Q0/WG42M3y/euLWew/qP7iNhc/ezH8e8j7+/hvYYm7AmhpcFSZpWqJXk1jUAnycMtkUNaoqB6e5NBl+GRbdc5ERVJFm07Eo65FCQgKsU6D5WCYenOBkBDAVjXK3hLR/Cxt18CpWVRTMZrVFnFTZEOizrFagz4zxpReWRvIc63pi63rF7Gs/xUvRtwv0raWUglEYVDtwddMhEaSmrtCR2riJJJYhVWC3V3IqkB8OEOzYJHWPFoRl3RTQHclHKOMFwljOB0kfMKihBIOsqi4dEQPzYvQZ+hEWx51JUQiZSH5MuFOAKE7jGRHaPtVpKDBqJ6kJuUJRUdeMNZzzQh2rcJG38Ho6BBbD5Fr0Wpl6p0iCkoWAAAgAElEQVRVVmpLvP7Kn6V6ocIwUZ448R0+b00yHA7wdHkOea6M6lq84c23E3UlSrLHaqdBKh2jnl8lHNDwzLWEWZqm0azW0SyPZbuJZEm4hoGqBjGqZcqyB1Y3giXx3l+5kVMnniCW2kPYmeLsxFEmC1H2Xr2BSDBDRAW/sRYFko5mscwmR48exXIdctkehLJBNtfF448/SSQSRVQ1XNdE9j0cOgQlDQsPUQ4gOh7hUJxAOIQoSziWieT61IpV6pJEJJykVF0mk8mityyefuYMrrGW8+OKA5ejaVHO/eMxLr1mE13iAOfml+lK9bGrfwg4/YI5/5mfeS8njn+dHSNZtL5N+DhItRWmp2eJ+TI3vfxVzJfKmJUWu8dsHp6sUi7mwfJQVBs3qFArXqDhhFHlOL7q4bkeyZaKIQTYlJRRuk0K5TCmvYpnq1hhm/7VCTZt3cv/uOs+li7M0agUmZ2bRAkHGD99iqWZJR781oP83C038Zpd29ndtY1bPvs+nvyld1EQRcRCB3VdgHMT0/T1poj36ET8XqDA/WcTbM3G6FgKflBl/qG/xBO7yfZYWL6B77pEkuDQYSyxh2NLF7g+3se22QvcIjr8w+ar+PPiSX73Tz7MprkJ
3vPkUyTcFuWVFRLREZYNHb2cZ+toN9MTc2TiUcqJECM4PFrPs69/DHdliby8lrztueT8g6ziH9bmYhfLxe6Yi6/f7//ctj8I/xErHH4MEhcEYQD4ApBjTcnnU77v/6UgCCngTmCYNXWfN/hrivcC8JfAywEduN33/RcmingOQrJMqexD9QxqLM7jxRbr7SbDI6Os2osszlcYinQRSyU4sjxHwB/lvKailOroqw6MhFFFi8mVEt2qhRBPMnvuETJdQ/QLHS6URNoI7CCM4Agshxyi8jJz8ydJrruEfi37wkE9l4R/HDL/QfXP6fuBD3z1h03Dfynez0efV/7k337uR/a563mlNwNwcVqnT/6fDOpF8Ps/oP6FUdcvRPmicuv/cCzfx+pz7s9f/PBdv/G84ve1Ct/Pn73gPX/+x7/HTQe+iNPVhz0xxTMXHmVmxWT3zmFqUoaOpdOpVZiYr/C623o4sjLG6249QF84QTwcIJ5J8vSjh6gpHoobpLlaxPDBS3n4bQPLC7AwPokvmlTUDFJthWY6Q2gOXv6xDzF95DSTzQV8WeTRJ57knnvvYXp6hgfuu5t911/HXd86SCgS5KlTh2lYBpOnTsLWPuREgkgiSSKWplVcxCePrqjUO73s7zLp2iRxzyMBNqR8cps3Eywv0LJtPAG6EjlcXySR2sz4+NO8r+/VjEb6yFdnOONWOSxXePuNr6U3J5CfDJOQwswUK0SlLhyjw4a4gJXo5+jpc8ybTaL1Mr16jjm1n1ulGIdmRBq4lOd/9Gr/OO6R55L2i5HyDyL5F+t/cd+Lif/HJfMfxxJ3gPf4vn9MEIQocFQQhAeB24GHfN//sCAIvwv8LvA7wM2saWuOAfuAv332+gPRcTxCMZ+Wk8D2BfavOsQ2ZPlWaY7oYoFLBgZoyjJLnTKCrePaNoXCPPsyveTjNqFoiCWzRdAyCa9LI+CxaXAHkmiQkFM4cY8dVoysLnG+2KSeDRFzc4SVJPPT8xxvlP6NgC8m4ovrfxxr/Ae946f4KX4E7vnOKX75misxtwd5yeCNvHPbPgbH9uHLYJo2/akE9eoCoc4i3oECRvUMhcI0q+YaKaajMYbSWfIrJoFUmqrhI7bbzBd1tm+KMnbLNTgTDb4w8y9MXPoKOpftQf/TK1k4cY786iorh07zqW/fC4rARz/xF5w+Mc75ep4r6w0+9TcfJReP07ttO1/88F/w0e/eyf9449t5xXU3oteqyEEF03fxgxtYntLoaKvUaibPfKebq8Z6+MLhg7ylax8ddQKeFTYPByJUdZ3G3AJWT5LPPHOUK5SneFWpxW+kRMzKHKEpn5X5E3zwzge4dO8ow9EUi+IKc3mLazffwOrhwwxFo7xseCOvuPkm3vueDxIeDXHEb/CyXZdxlI284fC/35yovvf93P+FjyBKLm+as36ghX4x2f6gdj/qV8B/9M/TH0niz2pn5p+9bwqCcBboA25hTWsT4PPAQdZI/BbgC/5a7OKTgiAkBEHo+b4G54sOQhboKau0+hRaK20mUibZ1SIBQWLzZRtp1Gw6hkdKBy2dRYnFiCeGKXstOoaPeKGMOhSjL5YmJCpUbQEt6LMwVWHTzho3q1kmAjqPLl0gkYiQQaBKg6Rj8/nvzPzHZu6n+Cn+LyAQAJ58nN/6+FuIxhSOLD/Co9/9e64+cBMPP/0YxcICuhGntrrEqtehP5njisvfSCgVY2zzZlKRKNF4hI2WS6e6QKvTxvGqTM1N0S4bJIJxsj9/Kd9920G+WjnBG797hE+W5jn60CM0OwaTOAx5Gm3TRAkoOJ7NNSP7MdQgP/+2d3L46SPs3bGb+WCE8weP8Jo3/jwXZqbZOtBPIp3iY//jt3ASe3jVvo1MN8ZYNutUp+5GDUUZ7dvF4UOPM7J1LyeeeJSRdaMYLYtGqcpAKMD+7Bhnlo7ytXVbORjv4dKpk4z8/KWMjm7ij++/hz+67rV8ST/FlJNHKfukQqPc/d15No0eoHby26h+m6e+/TDHrApX9u7izIROdXGS+uSd/Mg4vRfBDX0DyDtuIvrNu39ou//qyJV/l09cEIRhYDdwGMg9h5hXWHO3wBrBLzyn2+Kzdc8jcUEQfhn4ZYBwUKEdMjm/WOZqrRsnk0DqiaLb0Dx/Fic+yHBPjKq+CJaCmTRgvoMci7EvJjGl5unM6zzlFtg6lyUU86gXbdqCQMyP8c35YySTCbb0JPBQWT+4BUupcOOv3M7n/50T9lP8FP8Z6DMNXD9O+fQcg0EPX4uj0mB4zxa6o9sp2acIxW3WDezloa/fz3WXXIHrF5jLL2KWghSmK4w/eJRA7/9m7j2jJDuru9/fyady7urcPdMz0zOaqNFoNMpCQoGgAPY12YIXGS4YGxt8AeNrI0wwmGwwxmAsQASBJYKQhLJGcRRGk1NP93Turq7uylWn6uTzfhDci9TY4t5P/NeqVav22rXOrn2eZ9dzdnSpGCqX9W9kPrKB4w/ewWcrN/KZv/xT8kaB5x4tki8c4dY3vInX3/YjLsr1cej0QZozRbJ9A3RF4vj5KI888hD9sTBXX3sNU8fG2XzjH/GB176Tj3/soywszHL7889z4OjjtOYW2Tw8zMFHH2evPkRQPMzn372bf//WQ0R0mdjmKEp0mD+9Vuex52axqiWiUpJJu87K3PMku9cSzM2wvGYA5eoPs7FboHckRGahwZ1KCwIVvVXAEOKEOhXy+SwrS88QSg0yLYcpHRgn293LgbHTKK6Ce6bO8NZrgBcXV326t5ucF+BrIu16h62xJK+cn34Rz8e+833K1UXe/v/j/v1zMoEcyHyg/lIn3svj490CH4oPA7/fAfP3NuKCIESBO3hhgn3jt6e+BEEQCILw/+n/KAiCbwLfBMilIkHFM1mfH2T//DR+JE/v0QpL3VnWaAmOKvP022fRmeogeC5kPcSawWmjgRzWiYj9CMICO/vW02gVuFDKc0twksvjQxitCl1DO6jPF5m1XIq1MoWyxpXn7+Irn/sh3PDJF8l1y9+8gcNHT7Gg2Az29ZEVkxjDo4ztvZ3M6DAXCnmej0ZoLrWwhNPILYuK6xEPLKT8HtzJJ6ikddYJOpLnktvxGuaO3oUuxlmsVanKHsNuCFN3iIcU/vyC1/DQpEVt8mGOJkzkaoudO9/I/me+Rl3cQF6VEKNRao0Cqq7Q40XIZHtYLrVpe1VCCrguWLEoCcuizxA5JVcZ3Xg9zxy9nS16F2ftHma2FWVy6iSSqLK4PIceDnH2tj4WD04hdl8A//Li+3PtW95BRNRYmDuK68hsXrOHm895FZXKPPtTc2QFl6hk858/vpMHVyrIpkOmrx/Fh+LsLFeu3cZgXaOzJseE2KZjOdz0VzfxmY9/nKnZGQbzg6S6epmePUk4FCUQXUoljwvXq6zZeT6x0n7O1AMyGy6kPH8IGYN4zwCV55+mK5EndvZFhGeOUFZdAjuM63RIpDZi+UWeParQcmuIuNSbBqoocezYEaLxEIoSQghAlCVs3yMeDmGbFr4skw4nqNTKxONJ2u02iqIQIL4wE1U
RcQMXPRpBDEQ6dQPR9+jYPvVaiT0Xn0u73UL1ZTQtienWuWDTFjJagNQPMbWL0vJBVG2UiDvJ8qf/5/0x6CapLSzwJ6NbmVw+QXl+lvNGz+Xo+BHknipCy8aZa1FeLDMU72fmsWNkh3ewpEjYnWV6etcyvN0h3O3QTYd2SGFDuMryqc1w1Tmce9ePiC/7vGviCTLzXdy9ch9LSxXya/v51r/8jPd+8L10J7O47TI3Xf8G2gNpnnvgQap33Mct3/w8n37T3/Kjyy8g8vQiR1Ima/sifOEzzzDXnObef7+czrTNG1MnyMdzOI3NvOGDJdLCIBY2x58a40s/eIQBL0HVqeHbdTqBjxQJ064s0t0bo9tXcQ4/RjV3Ed/91/00u/r4xDveR00sYbc7TBye5rY7f4ZkVPE6LeY0g+wJD7vbR16wMR0HOZfl/He9l+VCnJcacaVWJojnWLIskrkEU9XaqnswvzJPNBpfRZ+87mIuedt1fPVz32NmYg5P8PlgufEinggqJcVc9d3PCTqxZJJYSKEtaywXZvk7x34Rz42hXXzdOPU/L5Dfwu9lxAVBUHjBgP8gCIKf/ppc/I2bRBCEHmD51/QF4Lf7mvb/mvbfwhNhfUNhqrZEuK+LfNun6tQIdw2i1JbZo2S5a/owG2SNc0b6OWYmUEcMdroqftNGzpg0DJVqZ5khLc6vKkXOyg2xILusFyLUjBqWZ9AfSrIjN8gaIYSJyWgm/f8Emn6Dv/7VHYz05uiJ5qg1G/Sl09iOgxtxkU7N8B/141x50Zvwwy7WySrnn7ubsbEJlpt1tu5ew6HaAdY7IUpRj5SkE/Zl/EAgbrmUIgqbtCRD8S6mjBWWinN84a6fsHPbnzCvyoxUwriKTtvy6e7eSF6NYwUCsiOya8suTi7OsC6/hjMzs9TCKlv0HvZNHSHp6ZQDm9F0N1JKZ00mjyqpdKezbA/p/Oru+xE3vopsJsr0XJFLdl+EHpjcd/AQQ5lutkeivLQg+ZkHHiCSjLF10w7K5Sp7Nmc4Gj1OIFrUpqd5dmKcs1SNS7tz3HVkis2jI3htiUx3F6btUixXiERT+ONn2Jkb4K60zS9vu52MmuBMy2Xem8NTIJnKYLUD6uUmu3cNszFTRNAF5ostRocyeIJFtbFIb9caZDFHIp9D7O0h1rKpRAO8qkAzJBBVRDYMdPHY0ecQtLNIZ4corSwTjkiookgoEkYLa6ha5IUhuqqKFwQIwQvDjf2wTr1YQc+GsPw2WkjE81wINDwvoN1pYLYNzHIZzzIJSQq6qBKOpYgP9+PYLpF4hERYp7RYZceuLchiFDFUYbFZpst28Jt17G4bV3ppVv5qLJdNOqk4OUlkoKuLpriC3/C4YFAgmggxHe/GWZlFyir0BF0kEzlqywFKwyMb78IomeR61xJJOcT9OisNGdFsUs/CPd/8NsOyzNN2lfdffxMPL06w/8QhBrp7uXzDLh49dS9vfd0VNDstgrLDV+64hc/97T9xxXmvYMye4TvXv50nqs9y8/hDqOoiidkCKzWXjgbr0xt57Og8o9FeNM1iYU5CDD3F5pFuasICei3B1ldrfGXPlVxx3ic4Pl8gKrXoiHNMTjyOvVLnzjufY35sgedLCpvTXdx4UR//enqBoDtHfzPCYiLErnQ/t956K0tBCcdzUWyXYlRiTXUT2159Ia85d5h/+sDHsY4+jbD92lX6ffPgKD+fmSAuh6gHZaxoZBVPIEdJt1ang9659zjPHZ2hvNwgpsdwg8oqHiWscv6yu4quxyJ4VFlohunKQCQWBl5sxL8nTbNB7mKM5suuE/j9slME4NvAySAIvvjbvwW4EfjMr99/8Vv09wmCcBsvBDTr/5M/HEAURMZ7ZLrqCXaHhrjTWSIR7qa7FaMSizKxssClUheNrMjBmRky52ymM3UMU4pw2GmxbimKJrRRhBhms8PaVIhGTWHRsamtazLUtDiiytRqNq2SwzP+PO9Zt5WCvFpJa/UMI2KYNU2fw1qV3kqcxvphKq5KSvLZevFm4pLB8YZJz0A/D546QMU0MGIBmlUm2vEZl5oEsxWcANbtjqB1bJqaiuJ6aLkIx0ozSD5E0il25tdQjQhoapgxt0qjU2GH6zC/pBGJ16jjkZKH8OouK7Ui8+UCITfJmNdE0SKsVWI4ySjh4V6K9QrLiklXxcNKDqNJAvcYFkJuhLDqIRgCuibxs72P0t3fheR2CMtRFhR9lR6W/QIjlkZhcR4zkBg78hwDu3cxMTVGvmTixlKsiLApFmJ7WmfB8bDMMs25FlZY5LnWIotBk51yDrtSIRISaWZsSkaN8y44j4OnDjEzPUl/fz9Gs8PZW4fYmXNxPJkoIWL9aZrRAFu0SAxkKDcspLbL7lSeE3Ml5G3rseccsqE4WttDico805ijq2sDLX+IUmuKkFTHE6OIfkAinkJUQUBAliUsx0FWFTqOjxYNI7YtIoNddCwHz27jej6peArbMUmmcghBDsts03IcCDzwXOyOTdGqkjBETnZarB/ux21JLK9U6BscYXlOIhrqQq/9lMGhS1g4VeRIa4kB2VxVIftSrJhzTC61GOo6G3PqEOnIRlKpJMUziyz1GuiSQgsXdWGBUiLGmdkj5HN7sCMKhcokjpcineqlWFnmuF1mx8hO3vHeW/nsd77H7V/+Et3n7yTshfnxI/cxWVlB9WzO3fUa7p86QXCoRETTyWVSnLftPL53+w9xdJ+//96/8OW/+DtuvP0bCIKA3lYJ7X8eU47ieQ7RWIbFhRKbznoHuxJ7ODVxAKUZolp7lPsmCrzp2g/zaPEz5Bhk9Ff7WNwJjXITVWtRD6qkugYw/Q47Lz+Li64LSA8rDK4RKYz/glcPdbNv7s+IEqE7/yZ0eZRb7v4hhbl78WWDoKtEV1vibTf+kF4H5E0Rbv6vP6dhdAhX9rHyEv3KbZMbugaxOjb7jSWqsSgvzjOC6plDrEQVXvNSe5XJ4YViEHZw12Swj5uA8SKeXkVkvvd3lOHYDUwlRFLXsAMFxNUr4QYjyz2VVXlO/y1+n2KfC3khf+xyQRAO/fr1al4w3lcKgjAOvPLXnwHu4YWMrwngW8B7X+4CQRCgtgNajsHe2QnOjsSZqi2jZSVWllusUbKccZs0qjYOMoFfJVfWqDRN+oQwlmKxIvtIioqg2RxxXaajBoJqkPR0Tkg+NctCEmC+U+JNqc3UfA+z4a+SJbmpj4rt8Eytw/R0g2R/F1EvypZUmKpqs1RaoGS66JEwJ8wFTMviyqFthAyLbEsmUONoIZUeNYcUiqHWHBb1gJLURNBzLJTLWE2PQLGwOj53Tx6k6jaoN5dQrDa+65AXQ/QnHHTDY62YoG4WufWZvShWgi4rTs1s0i9IuLSYjJg0K03mTy3Sadbpbdgo0w4xKctirUa92aSmNslrQ5ysLzHbqDOckHCrNZRkltn6JE58tSfsq1/4Ie/8X/8nvREZyXOYDme598A4Jw6c4sHlWeZqKyxOHWH/8Qn+9JqLcItzqE4Dv1nCnythmxLT9Sr3VOb5ducYe5+4jw
25rfT1xHj6uSdotlQky8KoWlx61hrOPXuQWakKtkhD6FCTFHyjRkzqQvMDtm/sZruQoG5U6Y1moGoQCqssWyZFe5ap5XmGiTMxvkxdWkELZHRtgFgoTDgaJxQKoQKK9MJMUVkRMawGyVAEyXYIhxQUxyYWlskk4uRSSSRNQNdErE4bEPEDCYQQQqAjEUZSoiQSMXpSaSRJYrZQprIyixYOYVkaNbeGZQUYDYtTlZPEh3t55XnX4NVfvs1DEIuR6HhEwjYNJc6CXGCmY3JCLlOYPMQKDpIUINBCMB3askdxskCrFWWp4jK/MM10qUq4HWGl3OGZQgslDLd85kuIjkjYFchHsqTXr6EnlkEPJei0Laxii7/7u+/y9ne8h1dddSnxZJgdg8Pc9KrXc/LJJ2gUx/ngO/8ZrRJAMo/jJ/DdACvwcNp1NF0ikAKqjcPMVA7x7YNfYqpYZfeFVzNWO8Dwuj9mtjVGR+zCf+ZuXnXHLdiZbrqUy7AW+xgcfB+tFRnBmKMyOc2Jx+7DLO0hkW6wfsnjstsM+vp3kAtcgq99EkEQaBWr0AhRLM3yoY9czOtel8fy78dzxvEiCxz79ure7c2RK3liZJhTQ+v4yNc/zbff965VPLNBQKtpr6ILcsDOvMx1N17PJXYaN7z6ySqoGbSWVx8SLTlNPN9LR/Ow3RqCt9qIP1hbhGz6ZdfIb/CyRjwIgieCIBCCINgWBMGOX7/uCYKgHATBFUEQrA+C4JVB8MIzRfAC/jwIgpEgCLYGQbD/5a7hA+MLiyihMPNp2Lc4QdgHUQnj1ZtUFcioYRpYrNRKhAWZx5QaU5VlTsxPcabWokvQkYKAgq7Qp0eRZZFkOoEhOxitKo7moIZENq9bw75gHkVxESVrlSxpw6LUraL3qOTXJDi6NI7SbNI0ZBJSirpdJY5Hp+2ScXX0VIo7505wXhAiYpUpxgwU20XNawgRGV0QiXg266I9VIsHCJk6x0IGtqxiGE3iAawLx5AiCmgCr926m4rQoaBZGFmZdtgiJLj4moAX9/HCJklNRhJkvHASsSVRsFu0owFL9RLzlQYF0SWCS7bUZuvICI25Rapxh2BmmSCq0A6ppLQ4upDBCkWxWX0Sv+iCPVx2w/U8vH8vw8NZls6c4NZ77kQe6qcvkiPqhwh39VOqWTz03H6GukJUqm1mrA5F2kSVDpKcpm12WC6tkM/2Ulo5yTl9AtdfN8jFa3UMz2TPpTlyPWWCtoNdbbFgm6T1MIFlccou0fQNFkpLPHn8GBO6Q2jnWp5vLtKUfUpLBYKeKD3JDJloipLrkE72I9oqjXoVIWggCKBoKooewvagYzmYHZtO2yKbzGFYNi4CgSghaBq+B54XYNsugi+QTGQAEcdxiMUipASXuOCQ0kViUkBKUKmLLuFAQQ1EiA+Q6VvPyZPH8YxlWrYLiTx1x8ESWpy872fo0ZdPAA7iMVqSwPRyg+7ePLV6CQQXoSdBYjRHJ6rQQMD0RJKJDF2RPEgicSlEvKePREznrLBKKS+yLpdlIKrzzjdcS2HiBIVCEdMwcTo2PakuynaV8GCKu+76CUZzkacfu5eTZ07y3NP7eezJR0hKMt+47/t84uP/xMZLruXzt36MQI/TXimQiAm4WkAyksKxJdqCxYb8Gg4dPEo4luFtb3k3y/MNfMFkwRUptTpk8xqnthscKC2zr1qjfmicB05+n5nCAmdWTnHexW/gwsu+Sjj8Xvbs+TrDW15PZOFvyArdLGxL0a2sxzj6EMrOK3Fb6+kfeTNJ6/VcfNGPGdHeR6d1Ba/bOE5n8iIGj1nEXzuwSr/eCFzUk2bnVRez/44T/PzY8iqekAPvf+tfr6K/ZfgiTh48w6UbL+CuI0+QXru6WG5OV3Ejq/fVT3/1Df7xvMsICxnaTZko7VU8bbeDKiovu0Z+gz+IsnsJgWw2S6NtMGSKCHENsgr2io8TlZF9n7Lo4Dsuvb29uGaHfDxFT08Po/2D9PcNUZFc1sTSqIKC0rbptyU6pkHdbCLqOmdFepmxmnRsh/lOi1a9SSKSWCXLWKnIaE1mbSROhg7rhQiSouLhUfFMLl37JszARpUDBL/DUDTC5lSWRj5KSXbp1+Ks1XOk9BhdoQSLis8aP8a+SZvuxAbESIT1ShI8jU0b1hCWVdqNOqIsYDomD04eQrYd4p5OTo7QMUxysQzn9G1kMJYjG0sg62F8yyNX9Rlyw+xI9bGpGaLPiZAMdZPUMxS1gPmkxuLkHKMDm9E7HgOb16LW2+RDOQg8BM/lbENjQ2u1UfnEJz7MN75wMx/44HuJJzI0KjbnXXwhz4+f4nBzjsMrs0wvLjIjeXjxYYIgwe6RTXTbKjFDo9OJI9l1Ltk9zM3vfwvvunYbYX0GPT2EuKiy6dw4X/jQuxiN64TkKL4mU5UD1g2dxVx9mVZg8cfbr2P65ATxwSxBKCCstFieKBKNtPGdFq+9+M2o5SZnSovUBBev49Bxq1QXp5AVAU1RCYcjKLJGSA8jSRLRaBTXswlpGuVyHcuy8AIf03YxbZdWvYXZsRFEGcf2aDY7RKNxdF3HMJpEsjnkZJKG4EMiSqBIJNQIgqagaSGi+OhApdEkCMcpNsoU6zoxSWVsZpz+/jghYXWw7KXwTIVybRFV95mdmKdtudiyRp8Z5djYMVxHYN6oM2msMFOpUFisUVda1JvLRD2BVN3jQLNKsppC90RKk+O4uszI7rU0zDZPPL2Pb9/6XR7Z9wRxK0Fzsc2mkR2IUojv3vZ1Zo4cZbywyOiOrXgZgR09W3nPH72aJw7eTbD9StZqEn3ZKK7tIesqqViafFcv4XCURnGeheoiy6USkVgUJRmmXqqRq5rYKzWcJYuSqLBhbAHh4x9h3ja5bPtbqKcqZNXNCHKdLcNXkU85VGYMjh14lHvHPo+5olF8RQ9la470rErsuotYt30Ho/lejhQP8Ivbfw49BSyjxf3Pfou2tUQqmWXX+avN3E2veSu5cIaTew9RjGUY2rR5Fc/CcsAHb/7YKnr3O27gcHGZHz14L4fbBk/+23OreKpmmcGuNavoX/zQl7nph99gceU03b0agvo7fPGSw1mJ/pddI7/BH0TZvQTkfIFILIbfMejzVLKZUZRIlVQ6iye06dTqBLpOtdnirFiIpVMGkqohKALNWpGIG1AyW6TkgLJh4VdbmOAyO5EAACAASURBVFGROBphBDw14FXBAANdfYz1d1BlDdF0VskyOthH0oJis0XO6qch6ETUNrt27aLRFJl68vvkr3gvmUqZsBpFEuBMp46ieGw1oxwslwjaJnpTwUnIrF0ock/UYFdHoeC2sFQFp+Ng1UxqUouG71MNhxBMj4Qew/FsHC+g3qpTEVQ2KDFqoTCt+hnadhyrWmZYT7Nl57kcmHqGfjlMYaVBJJwmIyoEnTYrgYtYrbMn0c0pt8HK1AJObpiYK9KdStFwPKKZCO1yh05vhEl1dRQ9Fe9CsZtUai6nxmfZsGGY+x69n34p4D2veRXNRofbDx9mobLIxJiFHQmzZ/cWrrvkIqKJAJUI9
ekC5WaButdkrHySi9PdtAOHakQhIaY4WVnGbZZI6EniyDhLK5wMFtkysoOyoHHk2DThZIKp43uJ5/oRJYeSU8NwFWRR4ZGjj1HzHSqKR7RtcHY+wnjLwzENlFCURLwLX5bxHZ+wolBzLDqCTzis4jgWIS2MZ7XpNBo4nks0EkeWBDp2B8u1SMbjmGYbTQshSRLJZBJPErCFgEQug9O2SUs6JdUj78hIikwHB1/xSGhRZMPDd1r09mylbRxlvZDn6MEJunfv4uXO4kFHRhMEmqUamUyatmVQEFqsTYaIlxRipkAbFU8WEcMa3eSZ9spk4zm29Of5SXmWTKXD0hoLs2zTToTplvJEohPIrosCDA8O0BF9MjGNVH83pw8fwZdFOrLIs0/tIziZ4dFH9qK0fEYv6uG2e4/wiz9+Nz97zblMXXonyd40cmAQ1QeQPIeJ5QI3/18f5am7TrC46BOaavKjg79k7frNtFs2W/K93Dt+iKgmE90xjOwoCIsq6XUhnj56mLPTZxEeiLFwwKFanSIZ6sNsFTh36+VktxZJfXeKyYU6uZEMe1szDM2UcdoORwuTnLfzdRx89i4azTSxngTEUrRNm/qSR7tvtbY//b0v8qHPfISTN3+PsCoxfvQIwy/hSdRNji3Nw7oX07c5ce6942Gu2X05X3n73/O1Rz7Px17sTuffP3QL9z33APBiV87Gy6/l4h2X04wIHHr4CQp6mRdnZMOeTRdgKKuzZf47/EEYcV8UqPoW5XoHKZFkvr5EWDC4tHc3QquJH4nhhAI0RcZNydgtF1eWMCwTVU4SbQXMWlXW5D3cuoRugZ2Jkw+F6QQiTdmmY1r4SodHjk+R6Q/TJw1guat9wWZboCcpEhddJgWZZLFFygh4Yv8z9GkaRixGXzjN6fllJlotEqTosiJMz0/RGdIwjhqkRjciW2VisoyrieRTQ0wYRTTFR6x2uGJtF88UlvEFGzHik0SnKmi0dYGrkts4I7kcCAx2eglWzAZyrIteNYHlC0ipHjQ9xPGZp9ic3ohdbWPrIqgL6D0pxCXYNpwnFvGZiITpR8TfqCEVXLpGNmCtFLAaK9jxECktSrRTZmGmRfIleihXZ7E8neM//inXv+cfkZrPsmXbejK5PA9ExhF1mU0Xp3ndjk8guiW6tRXuOzBDvHyE5TMu60cv44x0kFRXFyXDZb2QICJtZNZrEHaLLDQ90uk4Ch6H55fZFslw7prtjE/OIfdupi6pBJ1xQtFdbO4/H0cwqBpLLM7WyKTjICtMPX+U/PaL2R5tEw6GONxY5vB4gYimsq7nLHwhgS5LKCm47fanUGWRdWvX0zHahBQdwQto6SqoAqbRxsch5qroio7j2VSrVcRAwXE8JEXEF5NEdImeaB/1eoNUPIGFTbhlEIpG0TSdiCLimBZCKKDaLjMQS9PWPPoaaeqhZZI0sMTU73BgvQQZkR3eDiY6NTqhYbKNKqcby6z11rMSOky0XCAeGyEiLZN0BYxQhuhMkaWwwcR+k6u27+bQ4hg5ZKxkmFZLxu/VSXkhVgKfnkDC9QMc02XWNaiZHQJJRRB1QqrE2Mw8GdvmTLtFKN1D+ESJi7ds5h9Pn+LyWJretaNUmlVyuR5My2JmeYpHnv4lFW8CIZjhXCmM45URggYJq85SrEnhhEx6Wwv7jM703uc576pPoQcdzvG2MpgrUzdNakcmGY5vYX5uhv74KE4+yY+/+g32XGkyeXUKagZm0qT/tZcg+xHSqX6Ge8+l45e5bPf1WIGA2HEYSffzg50/YOH5MAmrsUq9l1/9v5j4xQRSo4bSM8iAPbSqk6ccDVFrr848mTr4PMNX7mal4BH2TV45fAXw4klN+zJJhDOrnwDOnHoKb3gbO/q2MX2hR7joAM+/iOeNr3kzX3zmAX5Xf53fhT8Id4rgw0ZVJmrp+JUi50kZRr0IMVfGQcRqL7JjIEfIs8iXLSKKhqgqdHSBqG2x0iWRz6TxNImUFuJMHPozSUTZwQsckrLO+cNdmE0HJ2piL7UxtUW+dtnOVbK8LjnE5Fib7aFtxJdOsGVPH4IQJx0XCG0ZRdRdwnPP0rLbrA2JlJUyhtTib9//dmS9xtmjA8QXa3REBa9u0UrqZJcMOmKEjh6nW/T5+UKZtBDCaAr4QhotkqDUcGlVDX48e5CW4fGqcA+iAq01CdrzRdb0rGOyvghhEXe+gd+zifHCYXpGClzT9zzDkV4iNYdW3KRcq7OsdKMrLl2KwImGgbFW5sG5g4SVDpf39tA9UUJXHEZ2/gX18urgTaLqMXnqNG1FYk1WJJHZxrXXXINmt5ANmzBhDF/i5KG7WKqN86sDDxMkFEop2DIyjJLTuabvMs7fdA7bURH9JuPhNoPyADsGN/Chq3Zy4YCCKgywbWuIkJZjSXaJ79xI1XGQRJ2Yk0ATO7TcMpFMGjW6AT0bQcuFiTaauGsz+A50UJhYHOO7//pz6tUaRqvDwUNHUfUQmuTyjX/9F9LpNKqiU65UUEM6ju/Rsk3C7YCICf2hJF1iCF1TsF0LQjKyG+BLAR3HpFwrU6ss4xgGeB3CGoiyheyLdKVyBL6LbXcIKSpiALKkgqBwqmZSWZlkpjOG6oTozvUTm51bpe+Xwq8ZnPZ9Bqo6+aEeXKHJRbEcTQnCTpiq06BHizBOnWZthSW/Sctu4o6vMNCV45mJWWZac8wXqjTbAVarRbQhMPKqVxAXJQLPQrRsfDwkQcQ0TbzAxXWaNKoV+vt6cC2brnSGbLtNxbV585/8KR/+0Ef4+je+hB+Y6CEZSfZImBG++4vvURw/QGGqxNykzZnjJRqzVVbmwhwvTmEeUVgWmvQ2VbpyAtde+CbcVoFkNoyhdQiGZwiPHkcfeRJp4DGM0G1Yye9TDT7P1e9YYN5dQHJbuJE2zdNlhs46h0Tap2b/nH2nPsLhwic5ufARCssfpBD8HY80byQVWHTWVOn8jg6GV59TJLd9kZbQwaGGoa8OQs4Vy4zu3L2KvvvGN/Dorx7iZ//yde5/ei/y+aOrePr/8i84tHa1n/2mN/81RqnAodM/Z2VphkZldTHQQ3PTvGn72S+7Rn6DPwgj7koBP2yAEZFQs1kOYjHmW1jhOLOFMs1omv1jUyQUnZJuIYoiqihjWTauadJVaGO3bVRP4vTyApGmw/ziIoZl4jdazByZYeZgmzoadjBMy9LpWYrzp3c/uEqWvXS4+I/O4q7xZzGHtnFs7xhSQmK80qZwqkBMSnLU7hCP5ZiVHMxqi4Qb4gs/vg2VKOOeSySSIeWHyY6sJbJi4iXDaPYySVuj4LtsTeYYU4pkAtBsE99rslarkJYF1ke6SAZlvKhE+9Qsoaem0HsyzJYLdGsKiU6LQtxjo+nT172ZMTtMJbyVfe0ax5t1NEdB0xzEYB6z1uRIpURxZRLdjBKTBcbHD2P3R6iFAxS3zb2PfpkrLl4d+LnHKPHKTcN86hMf4E+uvYFO2OL++x5BFRwS0RyqFKY7uQdRKlNaeZaVapuQGkKo2ozXqjRaBnfMPMIDh/ZTq0tEc71U56fo
1FeYcUt8875nOFXrpyNN4Tv9FPwO5qLDmZMzKN1JGvUFJhoVVNUnHO3FXljGt+sM+3FkT8JQHNamBxlIhrj/gSKPHVORM0nazRbz8/McPvA8+WyK79zyDXq7s3TnulgzNEIqm8EOPERNIZZOYkdVvKiGmk5iawpt1yScjCJaLpYk4voOWkgln8/jOA5m06W0VMP3wXE8dEWl3TJQFAU/cLE7JpFQmEqlQiKRoLD4INftHMF0HXqyfdQMi57k0MvuibwscVn3Gma0RSLTDYKIRm++B1+Lcn5qHVHJoV436Zn3KXcqSIFKz/Agzy6expBcJDosn2wTy4WoKjpn1BpaJM3pR46g62FMRE5Nz/HmG/+MmJ7A6oCAhu+JxDUNo14jFo1iNlos02BmcprE5j5CQcBysYLrgi6HqZdazFmHiWoxqnKMqKKwXJqj3hSpmf0szYdYfC7LgdPHiM+o1AdyNDoeKxWDjrvC4uwcy6Uy1YLBwulpimemKE5N01iuc2auSGu2TbEpkVTjGIqCZLjEUhqFlTJGs0VhcRpJ9gm3fcpum6oWYKPjGiFisRibzk6T37g6SDhldPPY959meUXkyMP7GJte3aOvWp6mPbu6anLx8Gka5SJ9m4dYidq4B0+v4hnMeizvX12ws/fYLxGSCr+8/XHqhSU8abURjzoWFWd1e+j/Dn8Q7hQJkctzAccWVhCtgL5ED/ML0yiSTTglEao0SIWiNFZqGFh0ixoHbA/T8qjYNr4A84UauQ0badY79GazzI1Ps27zZrqzORIDOeScQe+CyeZL11MpJljyTMLp1YFNZo5xuNpNf0TjyOwSZ49spt0yEA2LUjqgR9YQTahIBpGOjNU/wJmpAqMVEc+AtWKMhcYCQjRDbWKKrrO3MDNTQXW70W2DlXKDE0rAHnI0Qh6xUBS5oaJm15O1m9Src5RTfcydKdE3spGQaWJVLB6ZfpodZ21g+nSRGV8msBeJdMsUjAiTVoneCLyibz1P1VrcZVe4JBNhvLRCOCSxPX8OpdYyxrJJpnuUsadOs7JYwsgpzBvLFIPVRQlDZwrU+9I0agF96weYnVmkcPIw2655M888/x9sGN3MgCfR0BPkjSrDIZWNvsLjK0ssNItcsXYXX9t4GZ89cxq1v4d9Bx9noA7uwA6ksTbzskl3O8zSiXkuvP4iujoZjnonGHA1ChNjtJpLXGB18/CTzxOPNHluLMBtTnDunlFGlDRnig1+ue8JQu1u4ok67XqLfEynKgovVI+1G3zhM/+ArAjEk0kymQxGs4MnQkzX0CUFz3VJhuJYjolj2uRjSZqqgqTqdPV3UbbaJFUdSQ6QZZlEPEMsHKPRrOB5DmIgstQqIgcCmqcRiUQIRAEtpBMEHR575F52r99F3o7yybe+i6d/9gzXXpThwanVKW8vxcmFJTYMn8VmKYLWFeKC+Qx7H3gAac12zhndzR6lyf6pJs/JKsp4AS+9nsWpM6zZkOfYoSn29Mfp0cP8dO9t/N3bPkx1ymDe6LDUsEDzMTt1Pvqxj7Jz507+7Z//ia6eXiyrg6oI+B2TiKLgttvEtBAsNBG7NW77yF9x9MQCyWQcUQRZllHCEXSnl9YXP4q9bpj5OZO04dDtV5COjzFh1qlfojJaHmbq9K2MyWswX7GNHvkQpm+TFAVE3adKHXwLx7dpmiuEpCSK0aFh+siqDr5P12CCpaLHqWOfZPnMAsmeYcpLLfL5HpyaQ8s12Z6PYVoOrXKE0sAK2bk4U67NxY++WL8f/4t3ce4r3oC5ciuxgXMJZ1ZnmNz2s+/QPzCCuOfF9LY7S0UrMZzMcTA4xpZztvLSitBT7RJrencCLx5AXptsogkSl5+/i9se/AlOLMylL7mup3ZIdf/+RvwP4iQuoXD6lEE2PYDmh2i0BLyOQsjxMCWNRgCT7Tbhnjya6VDTbYpNg3UhnQ1xjUpYZfNgjt5QhvOyw/TXG2waGaLStGhWazzfKPPq4e3MWBbJII8Z60OLw0p5dXtKsSvGhTft4VezY7xq9GKeLsxiuhpTY9PEOkVOHS4iOhba2hAnV2xmT8xypNLh4VYRLzzAfYeeo+AOsFSymK+bmIbNfLNEJbLM6UkLpS/FxloGxQkxW6gQdBSqZomxiQXGyxZLpkYu20ssBL5sU/Or5KQ4qcgoST9LvFdiw7pRyHUjJjcw4Fv8w5vfyPnnv5IZXUGJ+bxFGqXPNenuGeLdV7we0XeIeBrXbB6gY7Qxszm6h9YysCvHVb1pQuLqBWOmdVZMl5s/+mm2nrODx396NytWlIXpJ3n7uTewWxtCGx1E9gxKfp7ZiMWxdoH16RE+d9ONJHyPv5/Yj7l4iPXJgK9c+xo2b+ghCLdJrl/DJcMb8Fjg6i3reeLwo7jOPDvP6+XCwW52yjLXXraLHe+4mh17LubUYhazk6PihHho33G++dOn2PvcSVKhXuIZmaopIMUVqrYHrocUQCKeRhTBdD1CiRRl18YIi+ixFI6s4UoybUGg7QcESgg5ksASJaKZLqKhGG0xIBGPISVjeMhIooLvu5SqJVwHjI5Dvd0goWrEo2EUGRTfpbAwwx0//y8eeOwhBEGm4JfY55h8+vmHCF1+Jd9ZhrXnvOVl98TOs9+JG4e1b/wzlEUHKzvKJz/4t9ywbSuHzxzEUZMkkhbnx1XSiQiJ5iEu33ke8uQ8561VaEoWDbeNoTb5yePf4ysP3cVKQ2GmeAxXCWiLYX7+ra/yune9gUysB7PjYLYtbF8AJYKhhiFQaCsukXiC3de8lt4LXomaHcQG2q0ODaNFR4Bld5mp9Vv5yc/28kyhyOFakaPTEzwuGrTcAEfezn+hsL+eIp9yychRXvGWG9h7dIJwT5STZ8Y5cnyMqXKZYmOZSqvB5KkicmYbJ44+SytIsvGVN1C8+z4WZztMz++n2JrmgbseZ2qpwI9+cQtvufkNLM8usK9wgF3nXMx47xhieYqZyBS2vjpwr5x9AT/6wbcQIylOnt7HL+66dRXPE4fvZu++H6+i//Lg7WzoiZPckOf686/iy1/93CqepfMu4MJLz1lF788lCKc1onqUXHIAVwmv4rnru7dQtksvu0Z+gz+Ik7gvBHRpYaLLJrHuLDMzU8S0EF7HI2FZKGqMDj71ioEUSaKIHWorZS4+eyN6J+AqW+Go3SEWVVmX6ie8Ls3+UzpXrzEIxDDvQMOYeY6rrxtm/Jm7qBgRGjvO5saNW/nAS2TJh1KEnmuxPdZHzW4QEwQ8pc07L38lTtPkg29cx53TaSqFRc4NuRzrTHNZ9wbc9jB9TpNsPIde3cdEwSEVA9c36K8JvCKyli/oj7JjJstEq4Cc1NlRM4kXT6AO5ZktLxF4Kpl4hPr8JMcONcjFLWS9zvpUP6pb5an5cbrcMHIwx6Jbw61Osn3HRj724N0kkUh4ScqGw8/ik7wieRHL5f3ccfdjjKkCl2by3D85haik6ek7i5qwiG17mFIPu0ZGeKn3zioLFNQi+Y3DTC7OsSacZak+RaWznVJ6CFUT2HH2Lp4+vJ+z+gX8Ro6/vvE
vKWoBqVSGoTNF/ihzE150iMRwipWjY1x4JcS7RxHmFzHDkMv0UpkucKlgkE330SrUWQgZpMJR7E6F9dlernlLjlgyhSz5CFoVt6MCFpLj4ss2jqyjKypmu0NYDdGyTUKhCJZjI6gqXssgL4TxVImKYBESJFRdo7y4SLI7g91ykcQA17XJJJLImoqKiOnYeF6AYRlonkdjpUYzsJBdAVkUCWthREWlajRYXl5meXkZ0Q+IhBPEYjEQJRBFVmaO8/ATFaJCkx888iTRHVdx8P7/RHvz/7wnjAc+xdnnbGHh1p+TTfaTNob51J+/g9FLz8E2yxx4wuTk0jHWJfs50HZRGi5Ws0Jo+HyOtxJQWuG619xM/5MP0pVez9bBDL+68xa8SgTBa6B6LaZ1mUFJwbAaSGGVSCxMIEoojohlt/EFD8dVqIg2yjP7ePjuMf7v936RT33p44iyBvhs3bYJPe3y8JOPcsMbbmJ/+SlKzwb8Um1jR/p53Ssv4fY7bmXHlkH6P/RW7v/mk1R/9QtOPXELX/vms9zxz2/DKS8TDSfwlhtcdekruOP2u4hvDMi2l/noP3+fH33krSxkCmy96WNsvPsOnsvA5jVJtqwr0J+L4tnn89Adn+V9n/4Ez/70s/znv72fD37uF8yf/DbPFhaIVOur9BvqHiKS6CXalgiJGufs6OOl3fCv2XY5mhoCnn4RfSS0jdLJBfbO/pJ8LIcUCLwwauH/RXL3hYSOr3bFNB0H2wdVUpk1Knzla1+hwk0v4mmFArrM3980/0FMu+/JJoOr9/SRaltc9eoLmT+xyESpyeC55/HAqWcRBR8heKEXieYLrB89h+P7n6YaD3NFdh3jCyWErMXG7echFhc4d0c3j04uojdVGv1RbKNBNnwWA4bJ6aSO2KjRk0oh1ld4/Xs+/iJZ/vMLH6Z/cB1TgkWXI2MLHeanZ7CevYcN69ezvMlmYVzlVZf/FW1ZJqIolGtFkrEsraaN4DWYXpzE7HgobQEzYiOZ0BIkpCDNrHGGgeggZWGFpAWmZ0CrRT2AeJAlKijMWCuIgoq5MEOzWyFm5zGtNlnbwo1UGFuIkM8HiB2fxVaLEA5CREcIybC4ghXXaUwsonUlqAU+cREko4GlqeiyiNN0sPwOYV1mcUUnFq5y/fOLL9LDna+9jE/91cf42/e+jSuv+z944LHHaNfLyOEKCFFiYphLB0bYl6hxgdNETQ8zfrKBopYpuQV+dWcVdVjgw6/ewuP1eVQnygfTI3xn/Blm8wKJRAY9SFMqTeFrMTbH07SLAceVMuf2DrK70+T9P5vAl3XEQEYPdLzAxZY9HM9G9DwUQcR3BfRQiACPjtFCVjWcAFRZQZc0BtYO00mGSKlhUmbAVGGeifExFBnankM6lkEUBBzHwrY6tF0bXVJwRRFF0RA1iXggYzWbNCX/hSIrx8GxbERZIhaL4fs+sigRBMELJ/+Qhu/7iKLKxa/dzZ5ch4LdIjPyRs489wM+8A+38/ORwVX74OO/ZQg+/YmriFqLbB78Yx698w5ymTQbtmVYKats0sOMyRVkK4q2O8z9n70LetbynDnHO6+8khOPP8dsKM4rdvUxuv5d9Dae5yf3/AfuyB+DHONfv/wlmsU6sq6SyOUQHQNBFpB8ESUUJSKqdPw2kugzt1TGazX5oze/G71d4BeP3oMbaKjhGHFFZ3pmmpv/7P3cO/8oHdmgt+GzEtLA9xDMNrlMHEf28StNkqkRrnv33/DvX/oUN1z8v6l7z3DN0qrO+3eHHZ/nOc/JdSrHTtWZnJGMKElU5hXxmhERVEQQUYZhZnz1VUcRFBMC4oyggAqogDDdpAYb2obOdKoOlU/VqTr5CTvdaT7sAmlK7PbDe13M+nKea59977zXXvd//dd/PZ7yy5/gSW/8A/7wfT/LZGcLSin8eICxkn4nZ1OtsrueRFz2VDbvu5O9Uwl+4UpO3fo58omLGKcxE8RshIa82ElnbomV5ZpoS0LnuGRli6bHPKEoyP7otx5yrb/2++9ib+Oor/kKH/7SZ3jjr78e/Z//x0PWWfv5XyLLMrLf+fWHLFe/9dt87G/+mswJTkQ1Umt+8sbbHrLOylVP5uiOaR7zqU8+dJtvegt23DA8vcyXbvsqm8urvLF4KJ3wA1dcznOe8gze++4/+L+n273OOlx28PtYdRWfWdzEK03odTl07/0s6C3U4xHZREaxvsp6VZDcejOb/SHJ4pDPr51FeYkqu5xc/DS7ypR3ffk2rvJDbmsUuztQrls0X2RG5rzoWU/kwe4ePvbn/x9DA3yHE/+Taz5GulHRn4w5c2aVfjbDD/zUj/O1OOOWwRmKz/aYDit8+IZraI58lSOjI/z8ru/D7lPMvPC3+d1XP4MTa5Ife+oMKuugh2OumL2ST9xzC6c2Rzzxwj1c941PQrpBt8rQJiPft5dKLbN+VNJM5/QWH2T/whO4Z26Jnpa4xTtYmxAc29Lj8rtW+e2XX8obP3EDv/u0pzFcgz+57wxDvUq2keP39Xj+7bdy79MPIkpPpjQqAuoOw+lplpYPs6u7i1Gxzmq9zAWXd6luPx9V++mX/SgHLrmaiW09tJAk3ZRYb+NJT7+EPG+4eHaOL9y3xr7DQ870RjTRkPHFBWYkuXTiQv7iU3/JD7/oxRxu7qOXdnn5U17GX939ZX7yzb/AdXfezJmjyxS+5uXPfgF7Zc7BSy7nl//0ffzsMx7N8KbD/M8HNsiSOUb1iChVjIfLhEiiXNqKVxmLrQ06jinqCq0laS9DkBFFgqYsGAxX2LPtSRgfOHr0MHcefYDBuKCTdkiShCADBktdVCQ6IU37yFATS9XypZuaju5gKkPam6TBIFAkiUH1AmVT803hBu89aNU2XTaeONG4EDjYqbjt2BI/c+Vzec/f/wUvf8nVvPZVj+P51/3b78Rdd44Yb+9x3Y1/xZ4r97McTXFJNsvi2i3U89PIpTU6T5vkxOeP85ynPpX1Q+tcmc4Rjj3ITOcKpL6H8fGGj3727Vw6kXFs5Dj85T9nY7iLp1x5JT/0knU+85Exq3qer17/FSYme4w2Blixyq59exGNoigMr3jJD/JbP/Wr/OgvvZKdz5xg250XcXZzCVU4NuOSiZ2znFy7l61hwB6RsDYZs9NvR3YVud8gFgnLC/PsjhrKtGL105/kcVc8jSPlgIULXsRX//JaFg7+FNnQIIUgm4XGBnxZEInAeNITn8xxc3tZXJtDuxHV5KMw3QQ9qtmMI1IXMczPUtBF9jR+3LC2EGNdoK/X+aczd/CdHJPHveEX2h+/DG8G/kU15F9MppqNYkD2nf5qXHD0yP089xnfz/zeWW77s0+eN3bihY/j9G++/7zlnSgh6Wje94kPcdVTnkzIUuChTlzOTZKnD0tC/ZZ9T0TiCzNZ+MlnX8iok3HPYJOLoz5Tswl3nDrNVh946ZMfzeeuv514fp47NFMdJQAAIABJREFUF4+T5oEHN2umuhmv3H8lH/zba5ELXU7FXWbzMfNZj7lyndf8Pz/MOz71DxxZc1ywMIkdd/lPP/
paPrJ8L/f+zUcpu33++ZpvPORYXveiq3nMxRfQ3+ZYOStZfeA4fz8+CUPF9ExEogJUk6zFMenwKPeujpjqL/DE2ct4zGtez5/95vdzevkAz3x0l9PLS9ztlrliyy663lMuGi64dD/18ZOIPOX42SV6IeeW+ZR5IfDNiBxB3pvjCf157rj+S4wP7OBEWRH5Pj939SxfOHMPt6/0MfefIZ2MeXS9wZkLD3Lf0u0c2Lmf/Z19HH3wTvYudKiLlLm6z7W9Za72EfcJS9ZXFOWIlzW7ecKFB3n7bV+gpyNm/+bOh1yHP917JTff+AV+6WdeTbwwwdqDJ9gYrPPY6QFB9jidNKx0HRf2ZjhSrqFkzInFU7zpCU+CVfjUMcXzX/RYbr/1w8QbgTtObfDYhQxXpFw4O8fuH3o0d193MyeWTmCalM1shu3KcNv6iDRTfPRjtzEZZTgCQbZ6J6aqSZIOwXmMqYnylKbwCOlQUYIQChcqCIG6LkmylIyUwXADJSOSpEPcTalrg1IK52oCmoDDe4v3nshJol5O4yyx84y8I5YKZxq01gQ0UkFwHiECsffY4HEeAhIbSURlyHpT1BS89seu4NjgDEEGpkLESlGyJemy5f3nsyG+PRKvq2V+8Q0vx6kGYaH2JRWOWgZsaamkYFs2RTmuCF3Dzm6PCybm6RrFJQcv5NMf+kvm9u/l8JHTvOxtH6A+dJJl814+9KnTnPIDLs0jZmTMaXcB+w88ga9+5bPsTiJiHXHzvXeS9ma44/gxNkebXPSoS7niaMPTXvcafuUtv4KZiFGNY3phHisCP/Dil9DTEaUZs7U7wcpwA60jEArnHEJ6lDFESmPQGBQ6eHSmcA3kskutGnyQiBBwtiHybe9di8ATCM4TCTB4GuUJNqC8aqN3oFKByThFVAYpIkxiGJua2bTLDZ+6lqfd/rDqH+fZ8Z98FSFS7H7Pe//dYz/y9MegvnGUH1l75Nj2t/b76p9hQkre9d4//r8nEh8SuNNbTp86RBgk3LJN4B8cYSLN6fGIO66/jp7owtoiPZlSnF3nrQefwOTlJT/y0Zu58Kp5Rk3Brl7DqJpgRTke0Jq167/O2vGCdOs2jq6s8qztF9EEw+joEeLtCdVt51dsxtEi0+PA//jKKUSUMhcisk7O8y+/ks8fv4VRk/CctMNpX/CZY8tcePEBDuye4oDbRTzRo45T9HiDS/c/l/uOXEMMbGwYzq449nQmUEvH+HxRkBanqBvPRAQLpwZcfeVVXJ4v8JbPfZhnRzVfM6f4ylTM0+o+8UbB/vkO19424Iq5S7nnzL3ku3PW1CxG7aRzbIV3XvR0HswDH7jnNupOj1MDTzyl6Gclu9Y0J3JB1PE8PVpg4uKruf/EMf7yno9z+ewellc3mf2O6/AjL3oKb33Lf4E4pdws0VFMdzJDP3qGe85uEK05+mPDkt5ENhVxNeaZYYrtG5qv330zi4drnvmnf8GfffS9zE3l9PfEbMQTVNOT7L/iKt7+wY/RsTuJJrdRlSeYzBuOlD227k245oMnmc66BB8RrEUEQRxFxEmKCaaFL+IEYww6UhhjwUlCsERB4o0lkQlmZJCdiP7sHFVV0TiPKQu0imlsjZQgG4OUEiEUSIWNFdVwTBzHeCVI4xTvHKBoREA2FTiBQBGAofRoKRFKYK2lNg2TvRxwTAXFNt8lrKwxOzHBOMAl89sol0vOZ+Y/1P7o597IwekDnK4aVJyABBFFBKWZ6EXYqZzUBBrVFssZLIMkYk177r/LMPmcn+O+5Q30pTkf/vx1pItD8q2P4tH7ejxuasDZUQ+Rd5k4cy+3Hr+XBx54gL0veA5lUXPJ45/Ih//2YxRVTZwo7r/7EE9/4rP5oz/+fea3z7DSVFjnKOqCpz7z+zjy9W9wwaOuYO/ei7njnruY70/gvUehiRTEsWboh6goQYnWueMNJkjQHqsNmIAPbeNsGWkaCRJB5ATeOWQU44IlFoLIGorEE7ygxhFCYAJNPR7hBBS+YKqOSbKU9Y0x9504xb7Xv44df/BHj8gfnX39G4gaTxw5gjqfY/5IbHzvSfZNTAL/fifezSY4bR6ZDC18jzhxnOfsqRPsnF1g65Tnqxsr7JrZRp4lLBYlYVxzRhXMzc5gJwQr44gv2/tZ/uwke/s1e8MFnJKLbOvO8c9r97A/m+f+I6sUK45hJzC9NOLSgxexd+EiwuQk2sOBMMEX8/MVcj+75rmjqVBll0vqMQ/MxFy4Zxt/d8dNbF3YReKO89XRKUgj9j7qCjbqMTcdPcNTfvhyzFLNJWErD7qzvP/zH8dPSLbWBp1nyB0rfO7Gs+zbM8tTt0/RDdPcnBhOnzjNf332C3n3N27kUZ0eT5zZwWO3zvMXq8vsDhln6xFV0vCff+iFvON97+Sj9V5IDNttxIH5JfbIC/n80PKGu25g587t/P7PvYHf+PBfcOLMOhevS77RdUQhMD0xw3oFd4s1/O3rXHNynSdt2cp9fkT0r6iwffrvruHgFZejuinri0tkSUwaGxbWPDccO8J8PIHOW3x9Pppn0EtZYo0PnrmbantFlkyyZyZnWiZsm55jW6440tScPXKGW1ZuZD6e45kHZvnMfXcynUZIJqgTxf/6nVuYnEqokkm0HaB0aKMzV2OtxSKQgPQCISKsbZkHQgiU0tTVCCToRJMnOTSK0eaIOI5ROiDjhKZpkFISQsDEbZQPghACjW2YSFOCddTSoxCE4BBK4gOkul1PCkUIov3rLFoElFB0RUKwHhMFSuswu55Cb6/DVp7S1JSuJN6S03Ddv/lKVDt2Mqwc0zM9Rs0GVkpK65BxxNkORMWQgbGQRWS2T9M4bNHK67owy8mjK8wrKKMRTWUZ9Brk5hQZq4zqhJ5bQ26c4Is33cyJI4u8+df/G5/88N+ysLCNCy44wKtf9zq292fYOjnFXXfdwzYEmyeW+dKh2ynjwGSvx9LGCvv27eErpxfZv28bb/qVX2Tfjt382E+8gqqqkDiklxhTk0TxOQXJgLOGxleYwpGkAuMqnO/jg0UokFqTOYUlYLyn8ZbIe4x0JFKjAkw53c6AQgAETjhirbDW0gUGrmC82RDVjn3bt3HJU57IkEfmxGOliTsJE1lKt9fD/ubvYN76y49o7M3PfSrdB85y2eX7uO6ur7A5vZ0cwXPXTj7s2BOv+0W6aIa2Ylacr6ny3ex7gmLoXWDn9l2cNGe4qTJk3YyljQ0OnzxGyGPmZ7cySUR96yG2HS9xpuHmB3o8YO5ml5A878lnOLO6yucOnSBxCVvUBJPTs0Q2cIWYIBJD9m7vk08orB3QTSxTGyV59p3F5rAjjqmSimy2y+JCn+mx4+577mfPzhy7dJoHj65zQ3GUW44e4vS9t+HXVxChS7+e5OSJw3Smu0xdtIWDvYx9aZ+VzZIshr1qL8960n7U3ASjcUOxFhHbnAt6M7ztho9wbNLy7rO3cktR8Y8N7Im6IAy6I9i5cys33XU9N/ZgZrJkas8O1I7tHDozzbWLx6k9vOuFP83vv+i1fOyTnyZZHtE/MMfxb
ZoJFbFnapZJ45nxhjseXGZpUPCirEtoPPF6Sf6vKHnEczH3HbqbcV1y6uQiURLTmcz5Un2Wy3bvQm9NuW9YMjU3Q7DLzKmTTO9bIJmaYvv2i9jS7zKuh+T9GawbM3AVmVTsvXSK08lhNoaL3HzmPvJuTjksyMQ8H/m7G4h72xkHg19bxzmHMYbaNNSmwQWP9x7vwHiHJ+Bc+wEyxrTFNygSC4kXhLqdaUVRgnMO7z3OWCQBJSTeBhLrSZ0gdYLMS2IdYYMH2TrrpAmkJpAbyJrAN+FH7z0OR8ChhUR/U3VOtYqH1lpGtSXRCn92nXxUERclWbyVYfHwEGauJf2JDptig1jPoOmTyWl6aorsDHif02m6zNWTbIQCE3t0L2Zkhmh/imRaMuymDBpJiPrsMVPEPlDGGucsNutx3Z0P8uCxUyzLii9dey2Petxj6ZGQpx1u/NL1LC0t8YfveQ//828+hESQZRmjKDA1MUVZ1igVsX/PfgZHT7Fzz27+6oMf5Fk/+DyqqsK6Bu891htc8ATnsdbiTNPCJVKRp5punjHRmyRo2Qbovl3PWvuteyulbAv8RBsVByUpBVTKUQmLEYGqqTHOU9cGVzdUTYlONEIE9u7eyXvf9cgcOEBVlIzHY1ZXVzlx8hirq4+8xZpKchbrkiue/HjSkeP4TMQDU48Msl45dYrFjSXkuGR9fD6j5rvZ90QkLoTk9MoGtdWc2tjkWTsu4GQzYjgaUtYNwhgWO30uu/hR3OqXkWqe9eIw08NpBn3Pn38lZm5uLxvNOjt7PY7qlG5vlvLMEcxV2ziwucDdN97H017xAxxbrrjp0AnmI09v/vwiF4Nn96nAem9IGYGfnWLy+BE2ljs0y0Pu+NvfYHH1FMONmr/9m5u4cGGOa2/+Jxa27eCOr1/H8q2HeOmTn85nT9/LkJorLruS5eEKNoYJkfHYNOZr1Qp7+l1Msc7GpuHKy17A8tlFdj5mJ5efPk2u4GjaY7KTUB8+w7A7xfvv3ODKbbuY7+3j8AO3MNh5Ab/zg1dSnT3CNV8/xJtu/gg7Hpgm9x61b5oFlWOiDWTSYeeOrTxw8lbmTY903356KyVmNqbfFIycZ0OfX9G2/cCjufPz/8ig2AE+sLqxytRERLwu2DUFz7r6Eu7wQ7ARhydylo6fZuLkIkkPThwd8KiFq3jTK57BLDA5rKj0bsTkiPpUyZ5kF4QSzsZkaYcrLr+Mt7znU/T9AWR0nIHs0J2MCF6gIokMYIKn9hblDdK10rHOe+I4bR20E6RJRtPJ8N4z8AYEZA6EkkRZSl2PiWqJjiAYSyI1hawR31LNkAhjCVrjpEDYwMiXREKjRYRUrUxtkAEbHFq1MEpTG5wPeKkZxoJulJKFmBBHjDcNPp9gUBTEKsOODVnPcT5z+aFWm8C49pBHyFASZxnDegxa0Y0i6lhDnrE2LBFkKCOoxgacpNQJkchJrCHLE5ZGsB5NYJJl+uUMlWq4/e57GdqaX/j51xFW1/jUP1+Prx0T3T6nVpd45guew6Hb7+Xlr/gJPvOVL2CWNtg2t5V169juJXnawTaSe+8+xPYrruQP3vlu+tOTXL77AELkKBXjXEOa5TS1a7Fr4RGRIjqXDTb1JpUTVCOPjwSxUigBSMWmcAjj0E6iAhgpiKWg9paQKryTqCgi1QmRjIi1wSJQSYz1NdPjDJIEGQWM7PCSC/ay+ItX0y08ZxpD4huqSOE9TEddNqgQxpES4X2NxOCcI4o0dV3x8LqTre1/zJO4cfU0l58+Q2k8L7rs8Yjtfc51ovw3rdPLUE3FuvJE7pG75u8JJ65qz2uvupqLnzSNGSp+6nc+zoUHDtCZ2MV65NhiPWFtg/W0YIvOiHsxPurR68wyMGM2CkHpS7YmU6yerVDJEv1cMHvVFlaOLbKup5gqxgyiOfT6EL+6SZJ5ds+fP2XRc/Oszwjm0g63r5xkYTyk3DHL0pk1rrj6El788+/gwc1FtnbnOLBlF7fcezcmylg7eQ9h/T78gSn+/MG72Lk7YVeeUqyt04kNIc5ZrQt2dDpcZacZlhvMx5Z8T5+VpQeIZMHSXSvkEwn3nvVcJAKcNcxmPWQFw7kKf3qdxxzoE23s5ujhY7z9xtvZcfAC9MI+njEaMV4es3p2AxWnhEnL1v401WCVwW33M+MnSZKEyZMNa9WYTtAsrTu2xH1Ecf6E7J8+9w+8/Lkv4p9u/2f2bt9C4jUzE7vwccHZoCmPTFMSk2U5FyJILz1AMZ+iSJAigUhzmcjoJJo0Tmh8QOIxLmCtJ9ER1tTINOWtv/br9GRKaFYI5HS9R4QG5yXBt1FMcA7lLVpIHBYfJFJp8KClIs+ylg4oY6SUFEVBnucMh5s00mJrT0d0MHFD0QRQoHVNbMG5hkgn1HXVslZcAAJKaFSkoLFUvpV7UFqhhSDVKU1t0d4TlG4phYBuavK0hzWemhpnxvTTmFFw4CWl2MCN5MOqGI6CQqcCTcrAQ1SX5DqlHBrWtUBUjkEYt7OFKiC0wghHPtGnrCtcUdA4SeMDiawJqiZuUuqoYLw2ZPHBb/C0pz6Tk0dPcPDgxfzkrh/nHX/8+7zyla+krgp8UXPjjTeQSclTdx4AU3Dk6w9yiZjkvqWzzO+coidzPv7xj/KmN7yZu+64lQMX7GNzUOBc0UJUwWFsCTLgjEcphbHnPLgMaBmhXEDFEmkagpJ4LzHWMp3mNMKjlEDpgKk9QmqiukFUliAMdROolKbGk4VWytnEjn4lUalmHGpqGUg6irEpSZA0SjOdJSADPRQhgKkbJlUKOoCQFMEihQAvMdZjq5r7XvUaksox1oGsCRQRZC6AFIzqgqNfvokrf+IHyZcWefHVz2AkPalTXPTkqzlx+/3c/erX0Pctro/wNM4QRRGusYgAcSTJdUQjNf1I48z5DWu+m31POPGQSd7+D19l5uatbA6X2LJzBps2xI1l26BAbN/GFj3J6vIqNkQsLZ9l62QfIsVodcCO/XMMT4yIts+gonVsAfs60wzkmIML2/iGknTnp/jSF9/BQTpcuKvLplQc+1eSB4+bmqJbwNfLgidN7CaYhhfvv5pbx1/jxLDESM1jd1yEW2tQG54dzZhV0+P9f/1GKqW5PJlD9gXV0QE7pmNuW15kYX6WbmE4uTliY90ho5SdborujoaJmS73HXFsWbiQZOxw3SHTY0Gvr5nNMjqdtsdfvWsH5alTnAh99j3uscwVmyRBUSPpZF0GdkDPKyZlxkoqiKQi2RzivCHrTeCCZFx68jRC5orB2VUujgJ5nlIVNSe49SHXIa0cX/zqFynLkvmLL+d53/8CBoM1BI4gJE4IdswLbPA0eFSk2xepLEmzjMoa0IqicoyGA2ScMCqH5HkHUzQYUSF7Gf/1rW9rWQgBvtlqWwYF53Bq7wPg22SYBBcCSscEawnWtYwG0crH+nrIWKSE0JbJV5sbRFKACURRTFNXeClII9s6i2FAdBQ6SrAuEHcyTFmjowgvwAWPdIFIKoRooZRQG5o4Isk12kFpC5Q6d7whIEgxVpJmKbZy
vPNP/pTx5oA3vfEXybKYxhR0uhnntyN5qGVJStM01LVFiITKNthmRBKlhOCQKCIhCEGCDFRl+wEabQ6QiUJYAS6AlEgfE6zAeMNEJ+czN17Pj/zof2Bzo6Qqaj735S9z0e49LC4uEhwsr2+wpTvNsCjZsWMH920uMbz2Zt5y82f46nNeysv2Pomb776dt/z3/8KNt93G2TOnOXLsKNt3bqOqKlKtCN6jhEQjMd6iIo1tDFpolDqHd7sGgsCLgBYaKRVGeKSH2hpsAG+hLmq8kESRACEQkcYTyJQC38JedVO0hVpZgguBIvJEtSBDUUWBWMRYa/Ai4AgQJI03JDoi6WSMyiGx0ucYR5I8Sqi8RUjHdN7FljXDFPrGM44dncbTBAsOtJA0iaKT9zi29ACJ7bG8vsZjn/oUbvrcV1A7Z9lVS0bKghaooOhrTekNvU5bmOYEuNoilMAZ2z5Tj9AeSY/NncAHgC20ZUnvDSG8Swjxq8Cr4Vvt694aQvj0uTH/GXgV4IDXhxCuOW/D32aJEGzZNcEUjm2XHuTs5jJioyKtIU0nWD/zIBfv3M+MFMSTPXbNZHTSPrgC38sZnhmQq5z+ssVXMbN6gvr4Jslcwl3LJ8ln97CdmnuaEld4oskJDgwatvTmufE7juXkmYSOF+zvbmOEp1GGD9/7INnOS1lQXWYv6iNiyZxL8N2IDe+5SibkcY9hXWJiSXR2gzP9CJ1EPHO0yjB00EKzq9Oh3Gi7pMdRW9Q3EJZtFwdKX5FnmnjrAnu2TFEUa8RIamGRUjO91tDdtZ0i9YTKEjWeJEsZr6yzurLGdLdDCA2nxiuILGE5CeQmxhNQjFg8vcSW6Zxadbn9xkPsnNnOdNLBeMewPJ8rsXP7Nk4Ml5kLCS94xrM5NjzLZNXilbVtKXrDLEe6QLCOyGsGriBPE6pyRKfXxQyHCNnqbNfVkI5X2FGNl7Bl3y5e9TNvRKsI7QOZ0hSM2ghOBAgC7S1CCAIO5/05PFpim4CUql2OI4iYOImoqwEylG3ELBzOO6zT7RhriWJFbRzWttc0SRJMUxGEbVUHfZs4dCEghUQg8C7gAzTBEysNui3qaZoGqUAJ3bJbwjef5QhnHU1VQWMRUaDTy3n3e/4EqQNvfNOvUDXVw0bixWiA1jHee0RUEwQkQWOrMUmS4b0DLxCADwKpFUiBSlut9FhE7b0SnjzS+GCwTc3S+ipFucHRE0dJ4ylUonjJi1/GB97/Hrz3jEYjtvdmWSpHLExMcfHjH8U173svT3vOczj9+RvZvPN27lcCL1I+8IH/xcrGOgvTC3R6OdYHkKGFTrz/Fr7tvGsTmsETgiNIRezBCY0TQCRxhUWrCGUbIh1jgydCo6XENZJICrwNhCDwweNkQHr/LScuJ7pgQQyGuF6XtHZ42cJuqfOUtqZxFq011lriKENICMHRNJaJKKdsakxwyOCpgkdKSRSgtg2NhhiNiyAXAteFqSymHhU0jeVxP/BsDh+6k7m5Ppu+YbrbRU1p8rRD0sBas4FMY3ztUR6IIpogsOMRlWlIOh0iBFFQOBz8O6jfjyQSt8CbQgi3CCF6wM1CiM+e+9/vhRB+99tXFkIcBP4DcCmwDficEOLCEMJ3b/EdFDsnd2CVYLqeoht3KFKYnN/C8OyACzuXAJrOvGJiZp7eTE5VKMZmnd27OkwWEWVfkOqIfUMJPc9msGRBckUUMzbrxFWfyf6IXZsN69EEZTNiZ3S+ZsjFlz0eaS3LzZj9UYcqgaRxKBkzqGuCDNA0bESKTAi6m5Kz8iw+HRI2a+gH8DlTq55MGg5rx6wSxKbizNoKItKkUymq7LPerNLNJD6Ilp+sY8YDyCKBSnPWFk+z7kvuf/AwozMGqWN8rKjHI8a2QSQRs90ueZ5zwfY+O2fmWVhY4KQf0mkCqhE0gPGe2ck+vdjxgT//AFc9/ukMVw1z+wXjYoMIQfkd1+Ho6imcDVz52CewtrpMNCEwsaYJgqzTwzlHCA1pnmJtIKSCsKGIVITzNaKRECeUtSF4R5wlhAC2MfS6Xd7yhjfwskuuZHm5LeKplGPVTzAYjdkcjCibCkKEjiNC0NSubp2DtG3y0FpQnq6ICDKiKhu0Sqi8J41TRkVBt9slmIZgBUIJamdRGrxL8Xi8LBEqIoSAV+Ica0WTRBHBtVG4VxIRBHiPFzA2NZmKiBw0qsXbtdLgwzmHG4h0Sm0NxJrQWETQaC1x0vLffu1Xeftv/Bbnq/Y81OK4deBxHFMHhyAm0wkmKEpf45F4PC5AX7TnMyjGbed3JalrS6ZjGmlQPkKlCQTNiQfOsGfXBWyuNzzthc/kZ3/uVVR1QASoTMPU3DzD4ZCZpsMrX/caPv6BD3HV3nkmn/Mo/t+3/SrNlq0sFyMuvvRqHn/1BTRNwyUHDnL4+Ak2NjZIIoU5l0QOok1KZklGUYyQOkJFCeOiwiYtpVMVjtRCpVpmCSGA80RaUTUWLyUheBpjyeIM5xy198RRCo1Fyra942A0JiEm1RqqBqM0BtBaoD34OOBLA2ikUCgU1jUYW5EmXcpRjchThPJI135sonY6RiHayFi5gNeCKIANHm8aYqnI8pQ1VzHfnScygclkBukDg1CwXjl2VYphR9OXEalrZx5FgMR5UgKpTGiKhkqC83GL47vz83XfzR7WiZ/rVH/63O+hEOIeYPu/MeTFwEdCCDVwRAjxAPA44IbvNiBNu+y+6vso6oqkcUwhsJGkdpbenmmMcGjriQCdKDY2BmRekuoIIzzjpKZYGVFIiYwjomFMHMB6yxlr8AiSaIPieMEhpYiyMRQN/Ctt7MaNxdQWnWacKAtmbU6hNWWwmOCYMDE+SjClwwyHpGmKdxHSSUKi8YXFMGJDR4yVpq9yBmZMqtpp/2TWwQ0Nm2KVONIUtUNJwW2HDnPk7kM0vYzpECPyGCkUtbF4D3QUaQQGgVQ5mezibYO1NUXhuf3+EXcdOcPy2WsphiMOXnwpWhuEc3gfeOoznkmT59x01130t+xhz8JuvnjNHTzjOU/GNufHhZ10guF4wPSerZg8Iq48Ig5kUUSqYsZNgYpa5ymlwhWGjoKqKmmUwNYlBEee59R1ja8cQw3dKObm67/KD+y9HEvDrulJghD4CB5tBWZKYkXAppp+FVhuCmoBK94xdg1iXDMuR8SdBBMsx88MqU1FrARBKDICvijpqBhfG6wBESuCDUTESAkOh0AgrMJK30Ih3hEDvqzxUcs9TrIUi8Mahw4CFWBCRy00YA1aKEoaYq+oZUBKKEMgFgHrHUE4oijGuIZOEjEY18xPz/J77/xDXv2doj3fYQWBJMsZlSWpCCgRqKoxQQuUVagQMChipVgbbqKSFK01SjjqyqMkFL7mm9TJwfIGSRpx8uR9XPL4x/Ho7RfxuS9+nCdcehH/++8+yit//me56cabODFYxQ4L+kSwtMYFl13G4cEmLzQT/OP0NNtn+mgkqXdsrK1ihp6P3/wxLnv0FfSzDBXFREpSlu2MyErJyBkSlaJlQjEa001jGusRvsYqiY4iYjzBe0x
VUuGJogzpDYIIhCSNM1ywKCmQtUBID1JiJSit6XkD1hKcwCuBxoIxKNtSEBORMBaGCRVRCYcOFuMCSdrDNIZ8OsEaj7CWEASZjEHBqBwx2e9T1B7TaStydRPIpUCYCoLFBU/UBDQCEUmkchAcXSuZ6aZUqWXOyZam6tuEuJIBlXQwwSMCJAKka1lwvnLYAAAgAElEQVRN1ou2AvgR2r8LExdC7AGuBm4Engy8TgjxE8BNtNH6Oq2D/3bFmJP8K05fCPHTwE8DTPS6VCtnyVWGyCNM45FSIbynMg4pIRIRjRaMqpoVb5mxgkQ4pHOMlab2oKVsxWjqEuNa7LZ14KptdJu3iczGWIaDATY5v7TVN02rjaEF3YkuUeHbKVQTgZYI06BliosUMumglCRUltQGjAHnFJ20w8gVOOfIdYRzkOiYtJ9RVRVCtInT0bggyRIm+lMsnrqekOTIIBl5gxoIvC/J8oi6GOJkDI0g6BQBpMEh4oggAt4HmqrCIZjK5pnMZlg6vcjQDqjLioWZrfz9P/wjiXQs7N7OcGWVsZhkVJSMygYlzu+4XY9HCAdz03PY2pIlGU0wOGgZAkoSJTGbxSZKKXSaYb0HpVBCEIIg0QmDcUGWZWghyaViZXOFzrFlkl7EaGwQUuACZEiKpiakGmssjS3wY4dp05joYMi8g3O46qBouP/sIrXXxHGMPDeFlwjQitrUREmKjg0utHCEQOJc+3L6IHBCIJ1ABFBSgVaohDYiRDAuS5RSSBmjtCQ4B0hM5RFKI4Oii0QJ1Tb2xuKcRAePEhFN4wmyhXw2hiNckMQGRPLwjZKnZISralKpqJzD+wapWjzX1xaQ6CQiVIaejnBCYIMl0HZjxwuEFzRVw7AZkiUppnGMx2OyxNKZUNSH1tm5bwf9gxfwlf99HbWS1KdX8VKx7moi3XKx93ZS/uANP8OxwTJT89soplMe/9gf4s5jt/JjL38N/3TtZxC0kXRjakrVfgRjNK425ElMJWqoC7JEMWiG9OIeMkrwHqz1COlx3qPjmFQLEhFh6nDufARlVRF8O1MSStFYg7eOBt/SE01NJDSxUMgQaApDlETUpkJohdAdJpuUUhjAM2g8cZrQWIsErBE4B8FrhBCMNS0urTqs2oquCm3iBvCyrdZtfISWClM7krSLqysgIAMIJUhkQhMcKoo5x44knCvzklJh6hKpNQKoKweyhQyVlMjo/4dGyUKILvAx4A0hhAHwbmA/cBVtpP6OR7xXIITw3hDCY0IIj8mzHBd3WBeBjVFB4yrKssBUNQpBZmFDWsyooBqOSaqACgqjJWUITKQ5CRJlPR0ZIYUmSTKsD6RRTG1aDLeqKoqiQPhAt9sly75TFQGSJKHf79NLc2xRoYTEucCGGWNSQTTVypR6KSitozSGNI2RucJlgpArCtE8hL9srcUpzfpwQJQmCK04ubaM76V85oav8vq3vQ1zZoVIKvqVxFtH8A0E1x5z2W4v4PDOEKyBLKbEMfaGyhqcDpSuYdOOsKmEbodOp8PcljmcNCjtOHTiLMPVda688nI2ZxI6nYROnuPc+ZWrzkuqomRhZoY4jlss0QsUAls3qADKCzIdE4sIjENIjThXBNNJYjCOiU4XGdoydZDc8NefJMjA0NZ401DUFYUpGRZjamcZVRXDsmBQlJxtGtZrw6A0+NIhB4al4Fm0liPrAyxZe1zGY5sGbxuMkngpEEpSNiXYGElOJDoEn+C8wTYNIFEqavFsWsdtfIuXhxAQolUrVELgTUPT1AThMViCBqXEuQRjwCaCWglGcQSxwkYCGwlUr4MIBqk81hVkqaQqx8DDR1mr1YiBqxn6isZaTPAYEXBKtPuWDmNqpPLU2mK9oRmMsWvD9lzqCmgj8k6nhSGsbal+zabnE1+4noO7L2PdwTv++6+xfWaW//Qff4LMCcRGSWkMp4cbuNpyeqVm6vufx0vf/Ga+/7Wv48Sdh7nn9Elmkik++v6/xBhD4yx1XRJpiQsBDzS2rYYtxwVKCHQUEYSgk2YE20IGTjh8MDgl8LHC4qnLquWaW09VlgRncK4tZPK+vT/KWsaRw0tPbB06TiidaStYNURZjAu+DSC0RBYVVSxQdUAYQ57nVFVFFLXPQBRFRFGEkK2jVo2j2RyRBkniBQYNRhGsoiKitpqARwRJEsc05RCdxCih26CqsUitkEq3+YxItkFdJ2lpi1WD04ImWBrffoy0yohVB0GCCucHVt/NHpETF0JEtA78r0IIHwcIIZwJIbgQggfeB9/SmFkEvr1VzI5zy7779kNoRfYTiZYOLQWpViRKIfCsS0deg+zmeCHIaRMoRrUUn7Kp8CKQJDG1bSAonA9I2uSHlJI4jknjBHxgottrCzrU+advQ6BqLMZ5pE4Y6EAlBTNphyzAeHWAHRmoDa6skbXFDwrGpwsoAm7UEDWCJEmQUmK8I8pTKu9RaUyUJQxHBf044/d+87c5cegI2/rzrNoNKgxj4TBCEHxNkmiCUCS9STY31zGmxtqmTcZtjBGDEjYLug3Mqg5TQpO5EVE1IneayHVJdB8lc+oq0N82x58878e4++tfZvW+u5iZnmQ8GDA7eX5zjNp44kiRZjGmqSiKIUIIvANXG6T1rAw3UVlCHdqHsK4NwguUh2bcwinCWZq6xJmaRVkxaSQrsmnzB9ZR1iW1NQyKMY1wVKahcYFxbVir1ilDydCNWKkGrIUSMaxYHq4xcCUipmVAKIWWChFAe4i9oisjZuIUzxjPGKEqIt0glEIisFWNtw4vPFKCkAEhWuctpcRbS6Q0WkOcaKJItR8A4TBYSlvhaCl0zjmmo4wtoksHQdo0RHWNKhtmJrYhXUqeTZLqFK0liIfHO2c6k2RCk4mYXEVkKkFaiWgEAY+UgqmpPgKPDgGlBDqPSaa7pDohjmMCBqE8vYlO23nIe6anZ7n/1L08/3lP5rqvXcuP/8eX8vwnPJ6ZXfM0weG1RCceU24QuYpxuYaKh/SCYWP1FBu33MAPvuKFnLrla6ysLZPmhuF4wHg8BOFZXV2mrKu2GCp4jLPoJEY4QUmgsJbgoKgMo6pmMB6AdPiqoRmMSSx0vAQhMMaQpx18Y0nyjOFwiAse5w2NCMySkDZtIlUZz5apGURweNvgVcvnDyGgUdTa0QmSsW9lG2xVowJwLmFeFEOsrfHeYF1NTKCXxEhrUM4inCc1gsRKVIjQLiLRUfsRNy0pwdqmDQCCJHhJbSwuCJQUeFOhlGA0GlBVBVGsSYUgEi0s3I1TnCvxoSH4CucfrpLgX+yRsFME8H7gnhDCO79t+dZzeDnAS4FvKih9AviQEOKdtInNC4Cv/Vv7CFIQI4m9xsoOpamIFZjQFnFMdLvYzSHR2LaOXClEECRKUo3GNJGjdg7lHI0PGFuTZjFR0ibW4m6Ory2V8IS8nUKpTpeN5nyi1/p4gyzvk6Fx3mKcRQVN7T11acjyHGEaSm+JE93CNkpjEkcsNFE3BmvxVYvNOSCpA/2uoohy1gcj4n7O4aVThCxncek0WoKRECU98qyDkILg/w
917xZr63red/3e43capznX2fa249gJiUuSpk2bkrQqIKFAqwqQIIJIXCCkgtQruEClcIVUCELiHgkp9Lb0gouKUkXcQBGHgFpqJRWOU3vb+7AOc805Dt/pPXPxDm+3Xq63L4hkf9LS3tJee2rMMcb3fu/7PL//77FcnCeLjBaSqBLL5Gm3DRHBg8y4ZabkxBw9JYBE0eo9AfDhQimCNEWQkqQKzJb/+Kv/G8url7jlnj//Z/8Ml/UlXN7FmQ6dIuobphgRjWFB0KaCbixZgMuZw7Blngu2a0neoYylXFNztm0oMuNSYPKBXT9gXr3lIzNze4zMQtEWiEZxNIm+SEQKnHSmOy+ILBFScx4dCMUcCi6sXATcrzN9v2fxkQ5JVoUsBJSKiCUgFGgSIDqyc1yyR1hJazS5CGKKqJzJ0iBLRAkNsVCUQJSElBKfBQsJTWZQBiMN+ITSgiQKzi9o1ZKnhVdiRVuDz7Dpe3KIOD/xKtzTNjt0MxAFGGPw8dMX8TVHlhTI3lNi5ng88sUvfpFXd2/4/Hs/yTk5ZJA8ONhbw7gsbHe1XOhmTxICITQhe15+9Ir9fgsl80f/2C/yW7/1WzzunvFrf/7X+U/+g/+UP/lr/xwPb+/ptgPnZWJoLH1pQVrO44n54VKbjatl7Qa2QrP58g5rNaEUclPwxxNuvyPJTH9ZOImJbhjYREHcCHIMmMYCghQFzaDx5xklDKo0GAOZRLEN62Ul54LqO1wMGEDmwuPhEa6XlNMJaTYs0dd7OyWi0MwhYoRBasPqF7b9lnmcsa3FLJ5JrvR9X+VaIWKVoZRM0aXWpoVh6Lb46JkzyBLREkTTkopnUhJZCjlFkpH1NCELRUD2kpxXsu1I60qjJckHRHKcG83GKZL00DSUJVR1sRHoGGvPCFlppJApBYT94cP0P0xN/FeBfwv4qhDiO9LcvwL8m0KIP0rFDr8J/LsApZTfFUL8deD3qGTLX/qBZAqQS60UrcvEpmnZ7/ecz2dyqaELTjO5M5xSoJkdySpizEQKN7u6i9QSlvNYjXfA0HWk4KrxTgvcZWEwDVHA0c3sNnse6XfLKYd+Qy6FNXiGdiCFfL2hF4ZhwPvakRaiCo+UNKRcsaGSE7ptkAJUa5hLpO1bwnEkFMjBIyV8tI78d3/jvyfPM71UJKVQTcPqHLv9LTHWHV4NTIBUihfNlo9P94RO00eBURWfyrmSBbFUZpt1xWhNyRnRmIpkpYQuhWbj2Z0kH33xGebvf4vPtIlYfpW77xkhBeBK4c/+kV9ET/7KQUiihmkZSalgrcV7jxCaEiNGa2bvaLTBGIXKmaZrSAKk1CghmU5HShE8XEaariXZht4XXsQa2DgTGHxhshI5Fy65JiGLiKxXbjgm2DUb8rRw0/f4mJBFVH5cVjGVKJIiM6SAFLKKs4Ijx4zLC1Y09EMLikqkFJAiU2TBh4SWlapAZLZoIoVUCk0C0w6VQskB29TdbkjiOgRCsxcQ1xWfI9Za+qxRWVPGtcq6jLz6Pn7wpZoe6Ry9bViXiRfPnzNNE35Z+eCbX+Wzh1u8gn/6ScvXX1/Y244SM5fo6E1DLxsCkUsUbHYda3Rc/IRxI//Sv/jP89v/02/ztd//Pf6ZX/5TLOuZvrHM44WutaAEw25LjJEXm8eEtZYjfcwIJb+rPyCjtOI8znyu2/MQHFZpGi1RotTGsrGEZcVHVxOV0tYmcwx0+21FM0PERVEbn9PKZjuwuJmSM8ZqRJGkAJhAWjIha7YKYipopWrjs0jmsFKEqJqGtOCPHqvbypTbhiV4ZEjkAqlcsVIqgdKpFolgXi5YXU92JdR+U8kepQQxR6TWaKPIpZCloskZoRRFabjK1JS0GNVSmAkKDllwtJ5sDZslo9vuqomobp6h63DOVwc7GWU14fuUOP9J1w9Dp/yd+pu+c/0PP+D/+avAX/2hXwVUB0KJrMuIuJxrs8polJKsPqOyoBcWMQi0FNhGUtTVqZAVS/LoXGuvUVhiEPhQOV7tE3a/I54mpBCYwXKezmT5bvMglUwEXjx6xkcvP2az2XA+num6Bn9tKDkf2fc7sp8QuqCUpNvtmU8Tl2UlpUAvG4q5Po2txi8rQUaSKLx9c8+XvvSz/P2/9zuYjUXSoJuOOTjOy4W22eDIiFQoqTLJYtPSNgPybmK+2bKJDqsNs/e1Vi5NrT3njHe+4ngxIqRCSYkQmYvP9DnxbHxO/OMD//Nf+6/45fd+jvs/9+vvxIpTEXzlF3+e1GhciFhrCMEz7AZiTFAkaZ7QVnGZR4Z2W+vT5folDJE4TRRgWh27bmA7bBinhV5IoltJQrIisN7jZA1gLG7liOdkwLqCC45SCkuKVQJVrjeNUCzBoaUgFE2jNZRAJl9r2rU0IoQmF4nRAzlmYhmBhF8DMTus2WCkIaYaTmpMLbHEFNFJcoUgiLoQDKi4VnmWEsQcKKGy7BKI84WsB5qhwyoFuVCI9fNTAh8D67zQ2nc3D997/Rf/5W/yl/7iv8fsCrbf451ju9lg2x1xXXgrDZwvfH38iF/6uZ/n//qdv8vNo1sQkY0dWJaZYbOh5MjuycDsHN2iid7x5OaWv/hv/zvcn860bYtcJa1s0aagEAQR8CmjpMZHR9da1lQHRocQaKRBaEHKAa01T2VVwO63Fi00Fzw9ilYo7sNCM0fa2w0+OIQqDO3AdJ5ruUcIYqh8fk4aKQSpRDLVjVO3cQmZM2suDFly12S89xhjKihQJK0yWGtrCU8UrNpwPJ6ZQsKpSIsh5oSRGqkUKoPVmtqLl2jR4vyCbfrKo6epys2EoKiC0QZRFCF4mqYhhnAtRld/T/K+unxwWGtYlglra4lvlpkGhfISbzXSZRoPQQuyNixrtWSK63e34rs//Nr5IyHAErmwy5Jb1fLU7rhperqs2YgGGxOq1aRUkCgyhUhhviJMYVm5H88sqdbzZKNpGljdhYwnisgmCMZxpO272qQTEhMD3zq+fOe19KrFZMMH738Lay0P5wd8WdGihkYygrbfsEweY5rayZ7PTKczvbaQYNtvGZeZsnikT7RtS3S1OSkRvPra+6xvZn7yJ7/McZy46fdsZcPTzZZ0uVAuF3plUaEgfaD4FTmP9I1El0DxI6VUFldqjZASTaHkWBltUfDRUWJtjLrgcTGgV83QH2i7O8bzlr/V/Rz/5+99Ff7rd2cETucLL977HEEUhNW4FOkwpHGBxdd/T1cUrzEYqZCjYystZXaEacEYU+uV1Ppm09Vd0eIDJSVG71l8YB1XTilw72biEnApY8ZIDFWAFUskSZhSwouIJ5O0ZkqFKGqzbg2e1TsSiZgqRJhr35+iAmtaiMLV4b7GAHU3mdJESQvWFFZ3IserDlXV01YSYIqg8RlCLdmZUrBkdM5o1db4OIVGFJJwnOd7LsuR4Edc8MSciDnURURpkvs0ES0IUfhv/tu/xt/96u8iZaBRmTyfsX7hYgoKxdRWOdw3/+HXuX3+lHYz0CnF+f4eve15OJ/osuRyvqdTki5LUsxkD6JUy6Mqhr7v6fuWZb0gl
GdnBzphGWTDgCWujkYoZMxshaEzls5YWmXqCbQUvKko4+Iqz+/JrKXOO9VdgwD8uhDcxDI94EuiLHXjhRZoU9Ay0zQN58sFHRPKJ/ARg0bkRHYKYVrIK0pbIoKUBblUgOD+dE+MkWWaOcaI6jpuNxs2PiJjRoTv/swQAsfLmZQdFMc43iFNJOSVmD1DayAXtLXoocPHgPfVKR9Why4ZWzJR1Ua3hqsNMxLCilKglaBrW7LUNKIjtZE2OaJyhD7QtwopM1EEXBqJSSC1wYdUT3c/5PWjsYhrTdKGu+OJ1ZaKq0lIBi4pQUhQAkokTBG0IaOLgjWxeIdqNV1MdZfd71DSIKWhMQNWWL59OeJ95MPXL2sNa/UM7ZbP7z/zzmsZU2BJju1hz2az4fHjpzx9+gKHxDYdl9mzLgmhPcc8sYTIwT4GbRhVxlqNKYLNbqAZGpZxYjydKa1g39xg24aXLz9CNInLm3t+6rNfImXBNF6Y5hWfC2sJ+LISWCkapFakkDlfJs5G4ZdIUBaRCmoNCGNhFujUkOgIaGIQ5JgwCYixNkXFyjHMXLxmECe+DIxf+RM8N+/uDHdDjwwOm0AXwbJOBCLKtPhrzL1kjTENWvSMy0roJPfuAlazOewZpwVjNbc3G3KIPHn6gg/u3jDLglMaESpps5oM08KaA9/WgblkJiJpWrlTkWA0IWayUthST15KKVplyKGifUaJWrZBoVWhAbJUSGEhXBleUcsta45IbWjtgJANSymMi6dVPUpbZJHILJAqI0xL1JJVVMdFLh6XHC5lCro+YASEJEiyYabU5u5aF4q0TMjo8fNEDEsdZqA/vYpphETlwN/7v/93/sZf/5tkIUmNJfQ7tgmszuwbw3h/j2gGOmWIq8P2O+Rmg06JIjIXI7DbZ7z/+sRxXWqYxgjmsBBzRipY5hlXAk+ePsc2W1Y/seaV0ku8KZxKobQ9oUhOi2P0K3P0RClJStHZhmAkSqlrUAq6qFhVQVOhgvm0cLN/zGA3NLH+/klIkjQEDH6RRASX+YJpDafF0Q49OUfG6UzQAiEjDw9v6UPDm4++zRJntjtLqxIYURubQlQaRBq6rmONiSlmFhyiUehOUaiAAAFwMI2RbrNFYCi+jvYb54I0DYtfKTGhmhYpJc55hNaMOXEOsd5nobBef3+rLKUIEBrvEt4liAHvR0zQSF3d+MIcWHysD0efEFFhFUwi0Qjzh8eJ/2FdOSecc3RDjyxwnI60bcf5eKkYoNEoJG4NCK2QnSaLxOimylQWfd2FBiiByc+0xkKE0mt2zYBbHM9ffPZ6/FJ8/c2HPD08eue1aETd5Swj5+WCLLDb7ch4sou0nUbmSNKam6LxJbJoR4Phss40TcNpmeiMRlDFRK1tmOeZ+fKG+3XCR5iOb2n6gdEtLMvE7e5RjYGH+sRfSmQYNqRYk4AqFpKt0epxmdGvXnIvMmbTcYjgNEhZQFwTu7Iwh+U667Hu1o115AhamjpXctjyf7x6xZ/7hefMvPrH3oc//Su/Qrq6SnyObG8OpDkxLSsuRbCSbWvw04W2a0khY5u2dv5DQmpLbxrmcaZtG5pNDf1Ya8nAw+kE7XUHLyVRZtZ5rfxsiqQU8RpMkbxaZnaiYU6BjdCkVI/hRgtCkiyu7rC1MThfm4Yl11NCKAmtNTLXm3ONVzadgpDVz6KFIIW6U17dQtO2tQGZEpqKtymt0Frjp1CP2DFSYgGREdcyksiCnCKxVGom+UwRihwDkso+Y8C7+VPviYhAa0NKhd/92lf5N7p/hfUykfOCcwHtI1kKOttByngF0jYsPiBLYSEhtKGXHZfTG148u+H169fsd7dMy0rb9FziCFLTdgMgKSXh4kwuitZaRJEooXnR9zzcnxFWElsYRMVgs6xReq0VJSWiAKkMvRRIMg3VHuljYXPYc3f/liePHjOtK8Nmy8N8QsQazDqliEXiXS3fNG3L6t01wCSRqaZYkYKUM82jG8pxwgvDaV3o7FAVtGtthIsUcL6Wvmp9f8KalnX1SK1pi0AqS0gBqRU+zJQsWL1DKFUdLD5gpGC5nJGyitxEKazrUtlzKZBXKEOWqibOuaKNymZKqsEha1vmGGljQFwnSXn/FmNaQi5I1dA3GhYPnaSkiJOfflr7zvUjsRMvpbA97JHaEn1CdS1L8DSbHl8Sk5+JIpFbyZRWpDIczyeG3YamachXikQYS0qRbb/F+4jtB0quN2rTtTxczhznkaV4nm1vkLxbeAoxsrs50Pc9qWTatuXycKQtK147tniCuZCmlbMIhJJRc2Ce64MjpUQ79GitQUlUa7FDx6bbsNs3fOvjj1FKf8KnDt2G7bAhkXh4eMs8jwgtMBjcXM13nR6QUtL3PZvNwLbrUNsNjW4wTpJQhDISyowQDiU9TStRKFKJzOuF8XJiHSecc/iwsiwTLx/+gOQu/ObvTO+8D7/6J/8U6zIR14W8RNxlQcaVzgi6VmF0JktFERLvI7eHR/WBIRTGGM7TTFaFpm15e7oglIG+qTyslPTa0maB8gnpItJXjCu7hEiqkiJSIIvmveEJ9waeJkO5IoG1HFIDGVprovcE57BafvLeKlENh1AfRimla51cQC4oISqylhLKmDqMoK8PonVdq+K2yOsIN40QLUlIMhVtzSWCtIQk0MIiskJLUwMsudSHp9XYtkEoxRoDRel6cvq0KzrWeaQ1ln/t13+d48MZ7z373Ybu2vw3xnB7e4tsNc57NAK5JEzXE3xi6Lfcn09I2yCMRbcdtIamr+/jZtNjra4Rcy2rVRKBUJonTx9Rwso4jrhW8Ohzz1AStsIgjMYMHdLqmmoVBWVMHV9XCk1S+MawKZplWXAEztlz89nnvHUTadPgwoqkYJSuzhcEjbXsNlvIhX5oSSIzL455CeRtyxx9fXiEyKHfsH/6lNfHe1prSSlgG401gq6tBFXTVI98SWDMDu9KdaZcA2IhReQVOS2lEJOnyILQEjd5QojkLOrAbCkrhUJBGYvte7Qy5ALL6oi5oo7OLRijAEkJDWMOmOAYpglJ/c6klGibpmKfIlNkJBcPOiFFROiE6X7MVLRSCt7e32N1BymhkkQLw24z8PDwADFhG41QkqAlb1+/4dHtLfM4ItC0Q4OPicUHFh/pkyVryWmcsFmgs8BHx6PbHfM8k0rC0hG+z5CstDgu5Uh36BEznM5Hbvot5xy4XRX6pmE3jUyNZJMkk19xjUXE+jAKq8NIxXmZyFKA0pRxRK4B3Wc+/PgDXHBIUWt0JSsMFkEBWx8CqWSW0x39ZiBlQ3AzuuuZ5onL5YQgswhHmzXEhIs9MWeUCFjdIFBoLbDaYlpV2dNYWI4LydTdo6Rg2wblnyDjt955Hzb9wCVNtG1D8OG6020JIVCUQBrD5TKx6XuW8wjnCd1aXPCs60q/2WFjwE2egxko08riKyG0BH/lrjOZhMx1yMNaPAJLzJlYIqfJ8QvvfYWvv/yInVSc40rKkcZaSpKUCFlERKmLgSi17ipMixC1xqqwOJFRuv73FCMiC0TKqCKuadBKGkmt
0FKCKkhRd8HLOiJEQqSCEOkqdiq0bYvQmpUJqzUpZkiZFA1FSoxQuMWxE4bf+I3f4Kd/+qeJMfI3//bf4n/9X/7Op94Tbp7IGcbjSIy/hFIGc62X5uJZl7oDTnFlUxTNZkuWCr1VoDXpeKTYhnbXI6fAcT4icsFdpqp+VeB8YNh3SNVDzLUJbjuWlLi/v0OUzH4zoNaV8TIybHasOdIUSQqBTmlKLniraZRBh8g4jaxZkTUUZei7Btto1tGT7i/0QrKeJmglj4YtEcFSEr22qAyXZWG7HXBu4jKubDe3yKLhNEPOJArdZsAfZ05lZXe4QQsYtj3LMlFyrPIyJXExMXRbcky8Pb6lb1rCuqJVIieL1KqSW1KzOEfbtqTiiNkhyWgtWeOK0IKcDT6mqzdeMo0T+1ZVqqmBpBPZK/a7lnWJpKBoGkEbLT7A3WHP7epIeIw2GN0z+wv/yuYAACAASURBVIWQAiX7Oj+0GErUuKzJ/v9fAdYf+lUyqGu9MWpNowRCKt7c3V1TVJrD4Ybzw5Hb/WPehtd476uhVGZ8DAQinZJIYZmS54XdMqmMCYFZarSC88MDCsFhf2AWiUZu3nktXknIHj0rGizdYY8PC1/YP+ObLz/EvVnYDxuQifNlZLvZcxwvbDcdl2VEqWuoYrtnWmZSKXQykzrDfQr1dxINkMki4a6NSFEKRSqMssSwInc7Fh8x54mpN+TpgtAK3Vh8DDSxIlFBgckJhSWWQoqZRihciZiSYK6oX5aZ0lYm1adKVLTJktRrzpfv8zDTBVwkhozRhsUHsneIKdRGT/bsNhsezg9s9wMGzduHe2Jw3Ox24D1FSMSmhTUhrKYYQUHSqWr6s1oRUo2SuwxNMKwyk0NACcnPf+aP8OV9R/QtH68C1QjM7BFZXSmUiFaSSOXESykoUR80Esha1/q3lCAUJdVTQMwZoRVJCNprY0xKSS6J4mskOqeIkHVnZ0QtN0DBqKrHLcGTUsQWXVN65KtLN2KUJefIf/iX/32M6hkayzwupFT4C3/mX+Bf/bW/wN11xvo/6frPfvM/J+VaQrh/c1eDViFyv4zcbHeEZeV8d0Rqxeb2AD4CBaEEXC502x1vHo5Yq5EopK3a2pC4Dpxu2PZb4nllVYppntEGVPQcNgeWyxkyJJVpuw3j/cjd+Z7Hpic3GqRk9Y6239CpXBdQY9hsNigtELq+3+6ysPpKkpWU6GzLYHriVSe8REdIBamrete2DS/f3NNZxWGzpW0Nl3Fmu++Zp5VGa+Z5xbSGg1IsywLKMOaFt28f2N/eMKWESpGsICaHFNXy2CWBuLlhGkdEiKScq4RLJoxpKEXQ95t6qulMRRjtNcSTI23X4VwiupXBWkKssxCygJALKa6kcyXqpAoE21F8YOoSLxyc1pXNbsviHPN6QsvKnZMNSrX4NkBMZCVp5A8vwPqRKKdIKenbjrJ4TCxXSD5/wmXjInfHB+6mM8vlTH/YoRF0ymCNYZkW9PUoLFTh9rAlanAhYA8HGiTnEmrk/WZDzIkb3cHyve4+2O+3SCE4TSO+JOa0YoaOf/DhNzk8fcx7N09YzyOqJJSSTPOlMqVZst/sq3u6UOX30qJNQ8gC7VKdgp41cnJkacmlKlJlLhAr4VBYyUVQnMMIyK3EhZUsIDpffQsUZGPqjWItRXc1farqQpPySooLfh0Zw8RpPDHfP6BEYdO3aAlWS6QQxJjR9l2HzPFypusHVheQBW77LTebA/YzT5nSyl43LMuF1mjm04SRBqM0j26f0G333F9OtEPPej0qT2+PbISpu0FXa9olZUqpi5+WhWggGYnS9bhrlWMQkj/xEz+FWSYmMllmfPKIkjDy6j0pQC61FJAimoLIibAuEF2NahOIxX9STiml9hqQFkGtW2e/kjP/SFMpX4mDejRv2550pSLiVVwkyKTgKaVKj1Kpf7+3LU/2N2hZJ777WMscxhiW+dNr4m9e3fH6g4+4f/kagNPpVNUFKVOmlegD/WFXAYCUqtlxdZzuHwglcBlP7LYDfdeQiyLEapAUqdIeskhO88K01rLNtuvZNANGGI6ne1zwjMtMjJnpGuG/1ZbJJNJlJMSFoxvRBtZpZRkX/LrU00+W+NmjpeHp06fEWI2iwtTSSVoczTgzFocqgkf9lhgc1rYUYHvYYpsBIRTjeMaa6kxS1uB9rNI5JarD3tTeCyny4vlTiAFREje2rxK0mDCN5WA63K5huj+ii8Aoi5aK6Cu/LmWNxUfnIQZyrq6dnARaW/puz3hxBF8JmpgqeeOsBKN4bHqsskDVCbiUUccLaw7ssuKYI+1mYFoWSkpoKbFa0/c9PnpCWmhnT+MCcnWo8GO2iBdg8isYRTGKFbifZ2Zfa4jxKjd6crhlWRbKeUZrzZQDcwr16ekcORUO+0eoWLhzE9olzmGtis1hi9SK6XRm8Y7705k5vJvY9N6ji0BLw2HYYhMI57httvhx5f27j3jy/Anr6pm9x6VI23f4UpuzSinmsDBGVyPaqRBE4VISry5nUqurUyMX8tWzXBOE/urtEKAkG9WilGG1mk7XEWSkDD4S14UU6lAEXUT1k5eFJi8YsWJExKiEti1dY1GiYK4CpJzAmhaja/Jz9b7uZt75UOpx1PQtyhqEKNw93BPHmcP2wN35QtE1ZaaU4eO7l5i+RdqG+/sjjx8/Zj7e4ccj43xEbzWrmxFSX+vKGS0UxtgalhISrwprSUxkZhl5NT9wcYGjm/jln3iOPT7UXfRVlxBSJBSQolIpIMlKgKw7QWl0HXQcImFZESnXhawUrFRYqQnf+Q5IUScFXW+J7yzeQurrTiuBrAraVhusMCihMbYeyxMFqRRN0+DWwF/+K/8Rb+/v0CRkDgxGYoonCMEnNqQfcB1PF0zXIoyuYaRU6G2DyIVRZWRrMUXwUy/eY52XTz7D/WaLtIab3Q0iX4kdXemummLukH3D/enIZzY7TgRuGouJBaaADhllJNvDnnY70NkGmSvGOYpEt2acr1rYF4+fYmyLaTpCThxuHyG0wecCUvHyzR3n8cLusKcIGLqe8/mMMJpRSbogEdpw9K4uZqEmZa2WKKM/cchP01RLXi7W5vS80hZJcoEoqxLY2pbT6UKMGaMbFgOxqU3ztDiSNSx3Z7KGKBLLdVi2NQpS5jLN5Fy4v7+vQ0YuZ9w0E5YZPy2UZaGXksE0tclKtTPuiqFB89F0wueEthVNNcowKYmVmpNfsFKxjhOdMmgkjajCsNNlrCloUhWQkVllYVE/ZohhSRWF22935Jjo2pb9sMGfRjbS0O42pDXWD4NMu99yzr42aRJEt9bJ5tJyPK/kxrDr6447jRPFKMq4Mk0Tz7sd53liUvXN+t5rWWbIhc3tAV8Spm2IMXLWNQ33RPek80xGsb25ZXO4QZiKmg27gVgCQgmEdzweNuys5dANPN7sef8ffA0bCtZnVEl4N+F9fchIUZWzJVf6wimBpH4JpDYEK9GbjmbbES31lBA9S5xxea5MdUqsMTMHGKNkjQmVCq1uEbahG3b1OJ1g9YlQMqt
32O+DvPV9X3eoUhHILNGjjYRc8BHWmAgxs4RIEaBaRTu0PJyOmKYii6+d58ntM5Tu2OotdIZkK+KHknifcWug7nkkgzT0uqVRDUpKetXhE3BeOTTwx7/yZayuFEUWGakFmVLFVSWTlSAiCFKSpSIiyH2L6Bv0MJBlVcYuJC7Zc4orndEYq2rjVdXGXE61v5HLNRJ9HXAghMD0ug4bMIbY1MkeLmWUtqBrUzOliG0btjcHgmmJumVJkEXD0c1E8en1zqfbA9kHrFS8//pjNo9vWEioruFpu+V4PvHx5YF/eHqDSJkQArOrxr5l9TX52G+ZzhP7TcOzZzcgwTQWEwLPHt3yjTcfcuvh5sljJr9StOb1w9v6fR9PGGOY55WLTJzczON+y4nAF77yM4SQEEHw9u1DTVciuXt7IgTJwzRhNz3t0DPO1eYZtaRRmmdPniI6yzZ3hK4mF3WuDUNtDSlkTBEkEpvNBh8ST548w6KxqsqllFJMwVWKyQVESCyrp216bNMBkhRrGS1csT2AbdcjrGaJnmG/Y1pGWtsgEQzbHT4nHj19grIK3VtoBFEW5rRymk94AklkkhKIxtJJw0jEpcAWg8uRxTu0EnXTUDSjNTRJ8UDd8AVRmIJjzhHvE5WHk8SS0aHQy4adGjA/PJzyo7GIU2Cz2RBOY601upUYVppNywrsjnUAQhMlP/P0CzTJwBrYScP25sDt9galBP78gNUwTgtd07O9ua1deSVYo0cZy4VEp1vapT5Fv/famYFl1zOdzhjdkBOMp5kv3DxiaCzeapLRKK3xJXEeL7hp5PnwmOO3XvFieMQ2WbKxvLqcmYtnnmcuwfPqzVtcyjg8IQSsujLJJZN9gOTJwbERLSUEljgh4gxp5VAUVihWD7v2hl4p2usuXQjFoi2+aM4PZ+bpgvQzabpwcQtjGPHLiYf7l8zTA0aByAHhJnSuONz3XspIJJm0jmgK/uyISdD2DSUtPLnZse0Htl2LW2Ya2XD/6hWKQI4zH778mKc3B4pf6XcNQSSOD29hSnRG4rwHlbBG0UgLopBjVc62BIwH93DifHnLuE7o4Qlf2Bz4Yy+e0iqJxl79KB6bM1tlGVyiyYpBtRgh6W2DTdDkggyB1lg6bRnQ9ELTSE1StdyhhEAKTbvpkKbWzRWKXmtKclglKSFgRUfbbNBF0emWjVHYWNjojqYUtl0LItMU8MeFpiTW8VJNlypzq1ry5fucfL7nGnYDm6EjRc/Pfu7zdMrw0YevOb6+8Gad+dzhMZxHtkYjo2Ez7Oi6BlUSt7st4HhY7tnsey5j5v5+RnjJ5XwEaRAonu0f0xy2fOujj9GmcNgbfuGXfo6dsDxpb/DHmU3fYYVhv+sJUrLf33B8+YqcI5jC5VLBg+efew+yoB8sn3/8CBsTVmT2NwcoGhXCd08MoRD6xLpcIKwU46sATyS63jB7h5WK0/FI17bM88wUp1pHVrWhq4SjMT1CZ9B1cUwxkkKsJU0pOb89YZLE6gapC0hD46BVhiFlNtIQpwVDIYwXNij0HIiXlYEWmQSH/Z7bfs9hd4MpirJ4hgg7BNkUSg4UXRhFxKY6uDu4aiYUOtCfR7RPtDmjNKzzhEgJQ0ZbhRaeEgJGWoqsg9qD9GB/zOiUrASXeWI2mUe6w/tIKZlIxFhBOOzYzo5wMPy/L7/NYbe/ep4V3/zmN3n69CmneaTTFqEFbk2cxgvzOFUVpbXY3ZaiZfWTbDb4XnJ+9fad1zL5mTa3RCL3d6/puo7N7YFvfPhtBtOAUHijq5I0ZHa3jyoDvpx4/OIJH7x+iW4s2mqWaYYgGWzDh9MD82VEonBE4lWPq64TTVKCIjyhrFzUik6SInJVY2YBZOK80DQN6/mMhBo/LyB0Q2cFqmnYXlnzytgaXPKE6BA5IWn41V/503z48Qe8/8H7rFfF7ZreLSs1UbO4C5um4+39kX53Q3Yzy7KQc+Ht2weyyBy2B4StOKVRmlKozmrbEOaVh3HkxYsXV4F/j9CR1SWUlAQRUbLWERWSDsFDckQFMsPJaLyb6GXLN775+7Rty+d0z8+/eMb/8/IjVNIsSpCLYM4LjZVQBNc3ExC1/CIqz1tyJF8npuTrIFpJJoW6a5NSclw8Vl8nCsVEylCKwDnHtuvRRbPGhc1mwyUuvBwviM7QNApptsS40t8cyFIhtGKeVqy1XC4TSgk6owny070YdSwdxJRoDhvevn3LT7z3rJYXlOLu1QP7m2ec5sjnP7/nWx+9xKoB2WxwYQFhaJqWZZ7IriCsxKB48vwzfPTRR5xOJz7zuc8yB8ej/QG/Otp+y92rN+TdljULxHbgnBydF7TDhvE8IpWibBpUyjzcPfDsyfNqEl2q62cdJyZ9pYWodXifoW+rF0VqhVKGVKrPW7cttrOc357IuUIKfd8zzyPJGqTUnO7PdIcNfdvj5pWMQLfV3a+EpoT62TZNy7jMLC6ThWa725OXleQCDsFe9yyLQ24aZlUIh45YMmVcodeMq6PrBoLLLKd7tDWc7quLSMTMrAvFKi7FoxePKYJgBIWCibGeWq8eIWsUIsI8aJqUaZZAlAKhFKZtmecRLQ2l1Nh/TpV8AShK4sKPGSeuhcCEyE4bugJd16GUwZqWx48f07rM1AjEGnh0c8u8jLTdwJvXb3nx4rNst1sQgm5T5URDUagl8GJ7wwZNToEQEj4krG3xYaW8ObNr3k0q9n1P31WG8zPPX7CutdzxpS9+GZkEN9sN0a3sksKOnk0QbFJ9G9+8ecN2u61HwyQxaLquIynJZZ0RWl0fPt9lmaWsSS9xbcwZAaJkCjW4RAmkEomx+lCC99fyR6RyHYGUHCnEGliJlTyx1qKUwBhD27YMw5bdbodbquJydXV6e8Wo302Haa1rf8J7dt1QcchS6nxCazHGsDF1RuVhuyP5RNPUIcX7/Z5Hjx4xXWZ2m4p4tbajXAMlNVhSJ+EUIlIVpKyukyKvHLcQsKw83+yR54lnl8SXL7VhZlbPz9oDuwi9K+ySYicM7RXL+kebl1x/3nf+fOc911p/959aXdOcMGQFi0ekKk1JqdZpY4x4v6I7zY02zGHCeMdGNNyanoNsaJLAWk3XDXz48cv6842sw4Nby+3tLfM8sdttP/WeuH+4qwGTtse7TElQssK7yCEJXvzMl0iNZl8ajNJ02tIay+V0rsMMruPijDE8+cxTQowIq/nGB99it9uxvzmweEdKifv7e6SU/P7X/oBpXfDHiXA6o33gxgyffO7yuuE4ns8czyceP35MSong6slSC1lPmEJ9Mns1hcCmqT5zbavfpG1bOl9o5kybBM1cy1XDMLDZbIgx0ne72sQtgUePd4hr/wgy2hSkuCHke3IYIPRXRtt94gXXCKbTmRwig2mwZ8eUAmNbEC4S7yf82xE1RbbFMiRJmwVDkmzQDMOAEKI6iErhUTOww9AnSVckO2krUhyq2rZcZSffmb8aY8SlyN6LGvO3NTillMD7labpPlkLvnPVvIImhfBJvuGHuX4kFvFcCvnJFtM23Fs4Xo51inpMXO
dEwMSyUTSkpK1DgDQUJTTJpKxHQ7hGlBUdaISGRRhinopN+BdA/Q1nRCtUCUwPc9YEcequOQlz//aa4nM9SwJPG37A9GCIJA1dS89MqrxGnBcDTm8NYN7u7d5PV7L3K2mbGdTymiDZnvMWy1CNcbKrlCSAs+XE0Zqx3abmsnkhF1Xn3jM3zmC2/y7/7cv82g1+eVT75BX5d5+2tfQ8pyCDK+8MIn8C+nFJqEpiucvfUBpBWn3pxKaqiCEFswuHfjHp/r3+NovM/mg0eIuowlKtyrDNI05fbdO8iyilALzyAMEiAjazr5PMSfrrjZGZKFPuNYxltt6Eom2yDCUlUUWcQyDOqspPfcEdf+gqPRHlbZIKkGdSOhaQaqZaBsc2bBhvcn59zojxFFkbIsuXHrDpfTKbVj8tHsGnO0K59TZHbRb56xWi3I0oiiKFBlGUMxabcGaLqJVknEUUoYRDRRiuu6JP4WqxKR2S1KCiJOvXPm6w46ZELBw5NHjNpDcm9LR9QwSwHPUVBbLpKoIBYSURRx++gG2zKjLEtEzcSwW1h2i363T7u26eoaUhpSZTVD1aXV6/PQu6KrKhhJglsJ5GlBZEps5ZoD1WITh2yEAr1qqM4/HolXRQ2igBcGmJ0BUZBwf++Yq8mEw30HeSiCU5FHK0ajEUdHRwT+lkaSUQSRIsswVINvv/0uqVxQxiFjXSM1GoSWjqYo2JnI/VdfwlJ6JEVJuPVwLBe7gqFkMp/PqXWRi9kT3MEeptmnzLZkVY1muTSSiqubeErBoNOnLxk0YkOWNmQV7LX7tPeG1FKDVEQ8np9QiAVKVnH26Jx1FNF22ghpQzYLWZ5dkvhLokWBJWsc9IZUKpzmG6xem1rcpXpEVaHVanHx9Jz59ZzrywmK02K5XGPJBnEQc+vG3V09umVRCgVOLfHqrfsYNYhZiWiINMDrr3+C64sFtimDH7KvuTRJjt80qJZLFqZIcUErrbn34B7dUqDddrmp2ARZxt3hEZN4i4VGJdZUCpimzn63zdPLS3TXJYoiKlXAsC2ShUcVx7S6LfKNT1ezcGWLyFvSHR9jtU1kWUYTPw6s+dPadxuJ/yrwK+xsbwF6wKZpmn+aj7gEDp79fQBcPJv8SsB/Nv6faYIg/LwgCN8QBOEbaZqxKVO8NGK+DWjUiuvVNUbX4WR+iVo0FEJDE8UYNZx/+D69lktCxujWTcogYmhZHDktCr2klBssTUYTBB5dXTJ02ohpRsvWOTwY0KgltmHj7jkf+6BFVOCmKpsgJQh29aWr1TU/+oUv8j/9g99gPO7gFz4/9NnP7nKjksgbz71MGKV0Dse0+yP2O4e8+843+JP/44/YN3qURc1gOERoQKpFWk4boVLwMh85T9hUKR0KjtwjRrduMmq5/OCX/jXc515kb3wDfV3wzW+/w9rzCYKQP/ev/wxJHiEqIpPVgn6nT5am6IrKwvfZO77D5174DH1RZbqeEkgZyzRkW+csy4BUh1m8pagExjdu4Z/PCNNs52xXiigVpIXPY++amIYnm2u63S7X/2d7bx5rW5bX933Wnoezz3zOnYc31auxq7qo7qbpbowh4GawE6BRQMhBBAtHQRGWURKjSFHnH0t2wOBEDoljDFZihwDBhGDHDM1gxQ3dXV3dNb6qV2+483Dms/fZ87Dyx7ltF12ddAUI7z3rfqSjs9dvb139vvuu/dt7r7PW7xfPcQyN3KowPZ1zGbNIMzzNZRCH3D8+QeYVb5zucxLOaHQ3EZVKHi8IFmOG+QLNrnGt3iUvc0xZp3AUyskCSzVZjM55cnudfHiGVxZUWUCRLdPh3ti9TlN3cHSdqozJwpTD269ycHbErbduk6uw2mnz5GPX0YOUEklYJviTCXbDxquZSCWl0uBo/xhtltHsr+InCe0rm+S2wiwOaOZQBAGaLqjUEr1Z58T38ZwGpa7T0xQ8JaPXrBMGAa2VOpmtYdkOqq5QiYKyyrjZ2sBudUgMlZWr24gsw1SgGamESUW93qTfbOK0XLo7765srgmFUIdn+5ukkU/TdDiaDGnYDqfDIeY45c7JEYVncjpbcO/ogFIsUJWCTAhc1wSZU3Ms6rZHGERMKVBQycIcUzUpXI2jt++w6uoISxIWIXlZoGYKNJzl4pwK1ptNhmd7VFXI3cN9Wpt9rq9vkI4mpH4KscDSBLkiyaOEm/1NANrNJvlsQavZQ2DS8nokRY5f+az0+xiVSTwLoANrG+uEWYKi2HR6dUahzzzJCMucRIQoRUZW5ORlgWvoWFIhLVKa9Rq6ojIMTrh28ymiYMrRndscHZ5yPh2zurOOWkqcXovBbIbrepyPjxHUqdKKz9+/BQ0H29Ao05BIzwnyhDVd53h8ssxg2LaJtZKjwRkzSkwURvEcr24zGE9Y667x5Dc+RzKKaJsec3/K0WxELVXQUgmVQC0F5SKmbpqgSE5PT3FaDpZl0Op6pJVGGpzgWC5xCpr3pzjFUAjxHcBASvn59/xX3wNSyr8vpXxBSvmCoRsYVo3Qj6k3OrTUGtudNdSs5ObODompEKsStGUJNtOrczieYPc7vPjiiziqxVkQ4EcRMq04ms1IbYNJGuBoCqWhI2yXstQ4ORrjKA2C+Tnp4ehdfjWbddS6watvv0FW5Bi6Rb+3Tq5Jdja3SOOEztoK8zjA1U0219ZxOk1W211aQsWh4jd/65+RVAWKbTLPQoSpEyNxXZcqi0nykCrP2OivstLvUuUZqeZy+85bPPPEU/SaO/zkf//fcnZ0yDSacuv4Dm/fv0cQhXz3v/8JTmdDjmXE+fEJq+0u8yRCmiZZkqMlBS9+8bPcO7zDxke/hjtvvs2towMWBwNmeUgcpei2g3QM/GDEzRtXWCwWZEm6nAOuq+QqVIbB4fEpjmKSpznzKMWsmQxnI9xJgjnP0acxTz/2OIsipd1oIlDpeU1anSZpWTA8H2C4NkEcYTsOpSoIhmP67Q5pVTApYuxMMhcVwjERjSav3Nujtb5FUmrkaGgJ9BWP2ck59ZUuIlVwnTaZEGSaRdOrc2Vne1nGLQx5+fyAcV3FrjsMqhiv22EwmTDy50yikH6tznq/x9CtSJMFZ+NzFv6CmtWg3eoxTRPMuouqayRRzGazwc2tDfLFHNdQiKIK0/AIphH91ir+6RClWJZtK6oSx3KYDMcoYvma7w/mlElFlhbUzBpmVxCJBabtYlp1PvA138BweP6uflgYKutuk/vVgk3TIy5z5CLDyVSanS6LquSp3ccw5hmqrpDkCTWvSZjkfPCF59ja2iHNKurNLvNwQaSDmkmUoqIoCoSpYnUalIrCy/4QERUY0sAtVHqeS5JGUEimaUJZLldVe/U2/e1t5nsn3Ll/h9ZmHy3L6eo17EphZ32TNM+YR2Pq/Q6v3LrPSq1PFE2X47xZhqNZmJXCnaN98jylvdIjC1NKE7x+C8U1mKcLXNNg6s8hK+iqy0LTtmnhWDZplFLpJgUC1bTIJezsXgEkNcdj9/oNOqlGcj4jGi2wc40iyQhlwUkZsvvcU2TliMhJearWpxsbLCq
FzKszmcd0RI0IFceoY+g1ZklBzbLJ4wTP8zgdnJOWBX23g6KAgeD2Z1+i3qpxcnJAs1kniiKsjsc8DlENnUarzma7R5gm5KpACrDMGkWUcXx8TGtzFaFoHO4dosiCLErecyx9L0/iHwH+khBiD/gFlsMofxdoCiG+lAVxEzi+2D4GtgAu9jeArzAh+9+gaQqZTKnZBkJJuXN+yuF0zJycYR5RBhEOKnmWYTU9JlGEWqhYYcX1q7tQCaRlIlUNmVVccdowC2jWPLI8YRaOUYyKk8EBlqkSzidIKoqa/S5fwpnP/vkJWlmiliWDozO+/7u+l3/+O7+FEII0itna3MGu15ifz6g7TaKi4N7RAaenx/z+v/wUmqtQs1yElBiaQpkWLAY+QZ4wykNszeDG1hb+ZEqQpmy2epwdDmipgo1al5Ef82N/9a9yNjzBXRRklUSXgu/8+LdjVYLN/ip2XPLMM89Q83Nm947oVCZKWrHutfjYzk3yMiP87Fu0NJtrbgezUWMbB+V4TnNW0vSXyfIbjQaqgPc9/ST9RhtNFctCwJWCjkbuB2y0e2hSRYtz1np90n6dyNWwTJ1XXnuZQTBhVbVQFymRUmBUElFWWKVgvghQPJsqy9E8h53uKi++8gp+HGMkOdNwgSU19LLECEJ2+31m8ymGZxDrKnrdA01nvFhw++SI5maHIA4xtZLNTp+uW0cvJHpVYSB4rNbj9M27uIXAtk2iICILEp64/ji2YZOHMa+f77Ol1fBMG83UcFULXTXYHw6Ii4wwTWk2mxRpwetfvIXMFWy9huc1qSy4e75HZcF4MaYUCrrtEFcVtW6XspDomsnJyRGVLFjpr3E0GWOtdCmlIF6UNNwWyWKOLks+869+m07nXS+qrJoNDicjHKlz63CPlmbQaHeIJMh5wubVq8RhQqGqmEnOkzs3ODs5R5gOL7/0BU5OzqhQOD06ZnV1nTBJMYS6LMFngmNZBGcj6t02fbOOF0GS5pxWKUrbISpzDKlgVxq9/ir9Tp/xcMIw8BH9BkbNYTgcQtOlv7nOJAzZHw6ob6zRMR0sBF6tzSSKmZIyyiNKHeIy5+BsiNRVXnj/cwRpzBNXH2drc5UkmCHyEllW2G5tuQQ9zkjGAfM0ZeBPmYYBuawIk5TqInq1O01OBlPu7d9ByxRCWTGvF9Q3miyqBXtyRh5kiLLCzEvMUUzu1Vlx+9yZnBC5CdYooKZqGDWdo3TI0ck+HcdiMThHCxbEfoRtu1AJTMshqyT3Dg7JFMks9rENk3E4QTNUbNulqGBtpUeVFMzOh1RJwq3795G6iswrzFyQlhJF1fGaDYJkgdPt0O12sch5bG3nPYTmJV81iEspf1xKuSml3AW+F/gdKeX3A78LfOLisB8A/veL7V+7aHOx/3eklO+ug/YO8rIgrqDl9ciSEqvpUm83KEpJGC7r4QVBQKPZ5Hh4DrogCAJWVlY4m54z8MfoUmJZFlarxng+pGU79AybOEzY6W5iqjZXrlyj1WmCqKj31rDTd89OibOc0f0zFEUBRbBzbYejwSlNxyGRJZs7uyiZJAlSrj/+NIbbotnq8sEPf4gvvP4qru0QDecEQUCSJwwmy/tXv9lFNXSMeg1Vd7k3GmB7TeSs4CANUHs2kZbxc//o57m50+FHv/N7eOUP/4CT+0eYlofTrPO7//L3yeMStTJo1DxeOd+n6NZ47iMfItEqhiKm9/7HeH3/LtRtjmsFymod0XaYRT50XWpPX2NPxkSOQVVq7N0/QAG2N9bZ2thAypIyT9EUnUQpOEvnHAVj0FUWlklqGlhBxuz+CVqzhlercaO5wu/de43Hb95k6I9RqgJHN0lMhZrQ0Wcxa5sblOcz7ixGXNnZYT4PiBsaG80uoumRlstX/TTN8OcBruGgyIJUqyibJs2tNWqOSxLE6LpBoRbcPT3jLJyS6RLFtZiEPq8PT+h2OhwFE8o8o6TEtA1eeeMVpAbdK9s8bfY4LxKivMCsSuytLtPMZ7PuUded5bzoyTLpVufKOufhlMJRWVQ5sR9xZXOXXqePrASTYI6Uy5usXkgOzw6RGriNJgiVuVnSdBweb/QYDc7QLJUg8RnOpxyNzklkhRDvfnXeH52hGQbD4ZD1zjrH8Zx6q04WJ/TXVjk+PaUKE2Klwnc07g9OsF0HU1cphUat1QVF0Gp7TM+H9A2XWZpw7/gQFMli5tN2Pe7vH9IUGnnL5MpjVzFrFqcznzTNSZIEpcw4OtxnFs7p97usCpN45JPqOkqvQxJmDAMfaWpkKhwPz9EbHoPZhCid0V1tQVpi12vkZcl05rNx5Rq+7/P65z9PEEecHZ7yuT/8DG2vhW27KIpGri1r2sYyQ+m7dFt9CTTVJQAAIABJREFULEWj7dRoGC6NmkuVJKx2W4g8J5tEvO+pm+zPxrQqjVazQ8uu42Lh6C5FmlMFMcQZ08kcfR4yH0/wrDp6IhB5vlxdGhXs7DzG86tX8CqVqMrZuLqLYVsoikJvpY/v+6gI+hsrxIuAKzu7DCY+lutQrzeZz0J6vTVOT48xdYNOt8sknFAZKo5mIZKcPM/ZO77HxtUNhBDUNYfKT1AVBaWuMBZ/NkUh/nPgrwsh7rAc8/7ZC/vPAp0L+18H/sZXdUJobLV6FOmyRp+WlgTjZcEGmYUMkoD/+Mf+Gm+NTyhVQY06tbrK7YO72IqBaugQxARpzCxYUF/pMi1iJrMp640Wo8k54+mIvbfvMp1McPstprMxufrui2ccTCnTBXoJx+M53/EN38pv/PZv0GnUKMOQ1abLVk3D0+s4TY96TedKo8kv/aN/SKIXlEJCzUI3TXJFw+mukJUKd86O0MKM2kSgJgXtWhO9kNQ9mw8/+Qxy6NNtbyGrlCeffpLHH3+M6fkQsdIgnU5p97qEZUauVhxOT6mEwpbTYXh6xkuf/gwvfOg5ttttGoVCJgRpWTF4+xilFBydnWPoDofTIWuOx/D4mDSL+cT3fhc/8fd+GlXXEZrJttvGsx1KQyUzwEor3r95jbbjsWpATaYki5BYSKyazWgwJa4K7k/OeKKscTAZYAmDMF9O6VtMJ/ik5JbBm2/dJSmgb3tkisZmd40qjFikMdloQLfewBUuda3G9tYmx7MjvLGKnmTkqc/geB99XjEfjqkpDiudK/TX1xCLAiMD1joouaChmuT+Alc1uLZ+DUVTaegWq5sb1ELJm7duIRo6fUoaZU4oK0w/QUkyPKHTN0yGszl5WqAkCSQhcuET+wsWs5zVG1vMo4yz0xF5VFDDxcgFulISFAHPPPMM4/GcRqNDkcREixnRYsGd8YBcVmyvP0EuYXdrlcL3yYcxRR6/qx9u1us0LYtIrZhUOZZqsnd2QKnGnExH6J7DwD8nn/mYi4iGYVPOQgx/ii1KFsf7dOs1JkmIYZlEScI3fdM3YdoWbdMjmfrLnPyuQ1wIlFJlPJ1Qzny0osLOSgJ/hq24GJ7J1ZUNzs+HzIMFUi2pZRViFtGxDcKzA+rtHvNowY5iMlWW1bDWbIc333iLjc5VevU6WTin03LIkx
m1eptSVSnKjE67zuq1LcKzAWfnB0ivQpUVlBn1msdqbxXdrkjziCxLOFNiRucHNKqK/ZMDZvGM2orL2XDKxkqfUZYwXwRIDZJFgJOWFE5GLGJU1wBXp7QU9JrD1voGmqVxJkqGx6eYuuBo/23uBufcn5zSrTe5c+cOimGSBCGz0RDDMNClShDMcVodjs4OMCI4Hw25d7bHLJsgkjnd9gphHDEI57i1JqahICyJ6ik0ui6e1eDo/JQqTVHmGfN8TlmmhIuCcPLuod7/x/j5no8EpJS/J6X8jovte1LKD0opr0spv0dKmV7Yk4v29Yv9977a3xVI8jAkU5cnPbMFG+0OwXhK12pxU23zk3/zv2ZLreNhklEidJ2srCgVcC/m7YaLBe1anbPBGR3NpvAcfFOhUgVOzUWoCpPJhDAMadUshPrumhjHkyHnSUxSFXhujVRUZLIkz0uaqkPNbTLXNSI75pV/9fv89m/8H/xPv/lLlJaGm4G30qWFQZjGNFSd6niZRra31UXpO4yMmMc+9j5ee/s1nv3YCyQ6vPTKq2w1m9zfv8e3/MWP015t8+n9N3nh6z/C7dtvEhUZcSHpNrpEQx+lVNFrDtPplHEaUlQlv/+5zzGMQ/6vL75Eq9UhHcz5wFNPU/pzRBgjw4hVvc4sCSnThPODU37gL/+HnBwdsbm6QhYvGCcLrj9+kzzNmJ4P+ZH/5D9iGM1wSsH+bEiZaOipRpkp6FYDr+YgTqbYisagZ5BMpqSLgJtPPkFQ5LQcB0dXUaqMbsPEcRx0XWc4XlZvt4WGIRVkzeK1115npsSkasno5Jwts0Pvepv1lRXcXGOrv4a76lK4GoE/YXR8TLjwafa75LoCByNKU8PzPFY3N6nygtPxObZmMUgi0jBCadkgdPx5ijQbrDz1Phpeg9PxiIUGZ6T4Ssnm5iatTptElRh6lygRiLLCNUuy4wEegjJb8Oz7HyerIqbJFKEbqKnkjdN9Gp0mpwcHFKpgu7NG2/GQSUbTqeFWEZZUGEtB1evQf2ybefTuIB6UBTgmtTDHms/xtQrFrlModeo1D0cz2FzfoLe5SZCEJGXK9s3HkJbF+GRI1a4ziULqqQK2QlZlvPyFlzBVBV0olLLiykXBkEJIFFUliiLcQuAqFuvrmxzPRtyZHTM7GuHnGc2tPrJmINKKxFLxNB3Lc8HSGJ0do0c5zXaf/TfvUmUVu9dvsLG7zen5MZEfUUqbdmuDLEjQRYVmGGw7HWZFgeErVP0+vd4O4ryizJc//ubpsgLU3lv77G5ep5Q6WZBjul0Mp0NcGXitTXTD4d7ePlGYsLK2xjOrO2R+jHAtckOha6+iVQ5FpTNYhHQcDzOXvP3WbYZZCIZGZ6WPH4TkaYZtLxOeRUlCt9snTJJl5lHdwKu72LaJKQT5aE7kR2SmhpgW/DtPfpRdd4PFJGM0HtPw6qx1+4g8p5hlxH5BURj4cUnLraEWEr1mc5TOWe+tUeVgaCYq6nuOyw/Fik2BZDodMw18Oq023RTuJVO2233u+UOOrYLd3V0qx0DVNYo8Ra3AlBrxLFwWuF0sqNkOWZLSrDXoPH2d5GTEiloDoaJoBo1Gi3anx3g8ZjIIUNV3j/KIFPpGk8liwTd88MN85uUXWV1fwdMdalsrDBYLbOlgZJJK02i4LTbNDhvNLjElp2/dYxQHmIagMgvsDQ/NyPEHAZbqMDodIoKMr2mt8alf/2f4MmX12g7WVo9mzeVv/sxPsdXu8dT7n2Xvztt83TPPcm17l+H5MY26SZ7N0JQC4gxLKrTrDbb6a2iFRBYl/fU13E4Ty6sTBlNWW112N7fottrsrG1x7/AuNddlpb3KL/7iL2IZJs89/QxN2yUXktX1NUxF49u/+S/w9tF9xrMJc62ihkHhqVQyQU1C3CJDa7u4O6v4RYY+T6ivdTE9ly+8+HlEkmPoNRTNRnNcMgRJEjEcnrOzs8VoMqS51Wfgz5n7Cz74zPOYQhIH/rLGZxQhc5+D8SkTtWAYLijCCA+Va7tbJErOXBQwXvDNH/g6zoMZtmWRlxmn4zE1YTJJfDSpY3oujmlxNDpnpWZhNm2CZMH+rTfI85yOW6eeC9alRb3VJBrPiOYBWRRDEZCrBZ7bxB8tGKUZmWpg2B5H945wZImjGghdpzAMhJ9QJDGqZ5JoMBiP8EOfhJwoifnsmy8xlimT28cok5iRjLm+duVd/dAzbeZ3DilX6kSOSz3TqWsqRTJhOhsjgxhRVLx+9zbrXodsEXF8fMxwOsNut9AyyXw8RdQdnFyy21+hFJLAACOt6Kyv8ulXv4CjGcyLiHEwx61U5nnGSq3NwZ09am6dJ596jJZdx18EJL5Py7bRLBstzpkoKfcODhGOThDM0QvJNErYbfVJ5sGy7qwqyIM5oT8iy2JmsxGtRg2yjF6twZySs7cPmGc+ZbIgGo1w6x42KvPRDF0oWIrBRz70tYTzGYpS0dB1dAH75ZxrtSZ6EDEajbj52BN0u11Ojo74wy9+nlSRSFVja2WDmzcfQ9UFqlZh6oJYVDgND8cw0SpQDZUkS+l0ejhOHU3RsRyHKEnxfR/LspCAoiyTk02nU9aaPaoopaCif/0qO1fXefWLnyXNQtZ212g0GkxnY4LZlJpl0nZsPF3BVCRKUTCJ5iRZytnJKQ3HJpgFrG9tUpYSpfx/nZX9R3gogriqqoynM/qtFWZhQtRwaSQK1G2e3b1GOVowLzIyP6aqIKkyskpSKTrd1TVIcq5u76AoClGeIgqNlz71B3S21zmOR2iFhKygzAvSNEXRdDQb+q3Gu3wpTJ1QkZRInti+yitvvA5ViV8lrLfaKEXFLE2YjH1ModLsNImVgluH9yktjcxU2NjapIwFwTBBxgayqiFsDUWBr/9zH2WQ+vzFT3w33/zch3jabHHw4kt8+vA2imFSL3U+9LGPUU4jqiSh0fDYu3+fPIiQouJDH/s6siyj2fDIKMiSmDSL0dsuI39C4QfkoylVmnDv7JTT8ZhFkmLXW5wkC8bHpxweHfG3/85P8z/+3D+g0WqSJBmaalCqgtFgiFZUXFvZQB0GdJtt8DO0Xoc1LKJ0gbXWZPWp64z29qmpBopUmJIRjRcYQmd7e5uoSkmTBZqm4Ps+pm6RJMs53PcP9pGK4HRwTsOro5WSL9y5vaxmIwV+EJBSMpqENEqVDcVmRbfx5yGdzXXu7N1nrdZmpbIoOy7/56d/n0a7RaRU5HFCXTEQDYfaoiDMIjqayyyO6dstChTuHJ+gXFy8HbuxzB3v1ZiIclkT0raYlgk1x2V3rUetUSMpUjrbq3i2iaJW6KZBEIV0drYoS1BTQZTlbFAnKiqmqkRLBVGa4bWb6KYGlcCptbA9l5oi6dkulaJycLT/rn64+9wzy/zfKDx18zoogrIssSyLo7t38ZWSIAzZtTssNJ32ap9qEbLrtQhmc+6fH9HoeHRsi3sLn1FWMJvMWXFqbD/7BNF4xqrl0VrpcT4dcuX6FXzfR2/WGKUzav0OpuPi6Q5VltLrdYiCBamqYaLRWu8RHpyh6
hqDsxFms0FlKozyBaFdce3pGyhBwmLiU9tYZTBdcGN3g9l8RKRoKPUaZ7HP4P4+je0VbG1ZKnFhSRK15P74BNGySR2FSK+4s3dImpQ4bgNHqmSyJBiPuTvc487+G6yu9snSkNl0CGXK+vVrhGHMYjLj9r27vHp2h87mClohaecqW+sb7B8cUCmClV4f0uWCPFlWFHmFYRgomo5hmvR6K4xPT3CNZcbQ0WiGUA1e3LuHaNWpuw6LvUNyIRCuhZ/OSZM5YTqn3vQYjCc0uytERsVCrUgNhdxQlmkwbB274WFbGnmVMpmNScuYSnnEKvukZUFvewOmMRu1FotpwKrjcPLWbW7fuUuz1yCbz2h36gTTEXXHxTAMbFNSLQJwdY5Oj8jzlKxIKcucmu2g5hWyhEbPI8sjFFNQyIJus46Cyu29rzDSE0yJpc81o0mhFrQ1hTqCegFX13eIiowsS8iEoCwvso35Aa3NPivzDON0xOdufY5zJ8F73ybb79vlr/yV7+N7rj2PPJxz/7U3+cLvfYb/9Of/O2YI/vDV1/jox7+Z3cIkjHxis+SNO29wGp/yvsefQFcN5r5Ps9nm/U89yxuHe7TqLWajAaJeAzSkJunGDjvOOnqjvnw9FCpNx6aoCQIlZ2tnm9F0hGV6ZFHK63tv4VgGf+7DHyLPUzTXw9U0jDTnmeee4SCe4IcReZlTxCH6POSl0QEr21fJC5U3Xn2LKJcM84iGZbGOh6hZmEJwPDjmxuoKuVQwpIVjufiTMUFLJz8fUe83UUrJmuqSFylFWXLtiRs4umSn1UZHI80zvHqfUgoqSqaLKR3XITw4xTEtEn9GHs2IogWGIvDvHlEN5lSayTwOGR8esrq1iuJqTPwhDdcmtCqkpvPY+jaObaKttZjGc6o4Rg9ieuurCCEwpeTJ1gpFmnM6C9m2mziaYD4ZU9dtavly8Vh9a503D/bobK9SqRKnqNBtlXrdoZGXtA2DjuUwHE+RaJR6hVtvYEwrYgWmVUp5uqCqvfsyvPXZT7PSaRFGEXfvHRBLSZCX2LU6tWYHRSrEUclcKRguBoTzOa1ui8p0sYySummQlgoTqeMkFXqast1ocvzqHnfunLEQCWmSMx0d8y03nufWy6/zzPPPYxYKoyxHR2en1SeJFwQOy0yiSYo/GhHJjIO7d7h6ZRsrzvFW1nF0FzMTbEkbOc8Y7J/hdRv0V5rYecrNnQ3uv3nIan2V4ShAiRJMr0HPajEcjknTGJkn1IVKFCXEfgxJRcdooAYlLbtGo1XHDyYM4hmWpdGpt2k3N/jgRz7O6f4hilSw3RqZojE7PqCx2kImGR/88x9hejBkb++IDFiY8PqLX6Cz2mdRlYwmUzTLZrvdJysLClFieSaKVqfR7nBw9z6droflGrzy+itcf2wHoZXYRkWz12FjZQtsyfhsRKXbZIbDcRiSzTIs02Fnc4v7+/tkuSBbRLiFpJErNG2bbLyg3ewwCUPCNCPNTbRIJ4gX7zl+qp/85Cf/2MH3T4uf+Ft/+5ONdpPW5hpus4aRSoYkeIaFZjskQYBpGPhRSFTm2LrKbO5jmBYSQRDG9Np9JuMpUqiEChiWga1bzM8GKLqJrBQUYaBpOlmWY9sdyjLntU98zx/xxfnJn0BoBs889gRlw+Ltu7fRTRNfq8hkhePVcC2LD6xf4d74lFhWbLb6bD1+hRdeeJ6vff5reEppMb91wE6jSzqP+Mf/9FeZ5wG5LpGawG3UuLl+hcl8yu4TN3jtpZfIc1A0je2NLTzbpWba1OtNXnn7NkJR6a6t0Gy1KKucMs8JpyNkzeSxZx6DIuUkmaK5KmoQoRYlhVJRCEG9svnQBz5KbBj8wad+k6PjY37+Z3+eH/zLP8i1rU3qros/9UFKKl1jvb9KOQ252V5FXbGJggjheiipRmejTzCeYkpBMB7x2JXrDAbn+FkMdRuzLKkMFV0oJIrAVVUOh8e0V3rMhkOeMnqkpoKYh2iWQVBlSEXgahbBYEyqCSJZkukKju1yMD9HBRRVIxOCw9EIr9NgeDag/8QNoqzEQCWRFaqi0ep0eOzpG0wmY6okZ2N7m2Du0253KNKMNEoYn5+j6RqKY/P2wT7dZou0yNEUFZlmjMIFUtfwiwzVtrAsm8UiRLNtUFWmRknmL2jqLrPxiK3dLfbv79Fx6vgnQ2TTQzeXyaEQkhKFIq8wDQtVESxkCkHB45vbkOdUhSRH8tp3ffcf6YfP/sqvcFLEtOotpr6Pogi0NCceTrAbdZI0IUszdq5eIR352BlUNRMEZNLBUHWi2QxXSlzbYTCf4ZkOtFwm54d4nTUCOUErHU7GAxqdFvfv3mPoT1G1DFWBqpTs751clCDMSJKMmutxtphy9fo1JrM5hVSYxD6mYdBfXyGhICkzKgVM22Y6nXF0fIxegL3SYXFwRGYqICVmCVXLRqsqNNeiFBZ5VjIJhqyv9ZBFShpHJFXB6OSYKE0xvBrS0EmDktX1VQ6P9qjKlKwqkEjmwQJVEegNG6kKSCvOb+3R31ghzXKCmY8soeY5tG2PPEyYTCdERYZpmmRCkmUZ8zAliU7oeSusbKzgxxE1r0FelEzmAVIIal4dUzE42L9La7PHZLJgfX2dmqLQNk021jeZ+j5lVVFVglKG1JsNxpMFuuHhdT3KsiLxYwxFxdZMVLVCGuB1urz+6sunn/zkJ//+V4uf4qvM/vszodfvy3/v27+Nch7S2F5jeHZOEIWktoaTa9R7Nc5OTlnrrpIlOVkZo6sG4SLGdh1s22Y+nS0L4VomRVGxvrrC22+/zer6CmkW4Xl15tMZ8/mcbrvFPIjor/b4mb/3Vc/RJZf8mfPDP/hDODkkaslgEWLaBqurfY4Pj7hx5SoLP2Q8n1FVYLbrWIucWCmopj4LB7Zbq0zzFCXJ8QybQRhg5RDVFLZ2Hmfx1mvMwozOTocgyjGlgLxcphnYvcZZMEa1NAhCZkHK2toa4/GYGzdu8Pq9t2h6TeJFiGs4xOWCKIqotdrcPz3iam+FYDZnMpry+OOPMxwO2e6ssBdPac0jxNYaw9MhV5+8ycEXX6O9s8XR/gEra7tsrXZ56ZU/pNnqMRrPaXgNkiDGapt0621Oj44Rmo7n2dTaTdJFzgvPfg2/8Ou/QFd3qGwdigolyal1W2SloOl4nA1OWGQJrm5j2g5+OkeU0Km3iYqEpmIyyEPMpARdRW3YrHlbhLMTgrLClstUu5a1TIk9Go2o76yxmMzQFFA8HTsxORme0+zU6XUavLm/h2s4qMLAtRySPMYPJgghMEybpmmBahBlOaqhUugK/r17dLc2kbOSf/LPf+nzUsoXvlpfeSiGU4os43wxgXaNo5MBmQpbvVWe6O5g1VzSi5qJWbZceqvoNopu4LouZZkTRwtM2yArUqBCUyXHp0eUZPjBDH8RMByMcV2Pa7vXKIpl/mp/PH3Q0i+55CuiGDppXUeGIc26hyzy5ZBBWjE4OydPYqQqUIXESRMUBSokz3zkBcxYME5T9NECxdVIshzXsDBNk8IPOH/lJQaaht0yGJ8G
pOcT/MGYsUxpb62xPzphfHRMfDakNC2EIpn7U6aTAa+/9kUszeTt27exDZNuw2EuM0pFZXx0zo3GBuOJT5JV9NZWicuUaeYzmkxZsTwqVefsfApVwejlNwm7NaL9IQ2nzsnpfc5O96GERZLiILBMg3G5YDSfs3/vPkpRYCPp9VZ47bXXmM2m/OZv/Qs+sv0k4yKmp9iINKXeW2WQpBRBxN7BHn64QLd0FB0W8YzCD0mzjHmeIIuS++GYK1vbzJWCwWKODA3u7X2BOJeMhkN8kdHd3eR4PibWSmprbaKFTyoKWo0m2gKOh0cUOkzDkKOzMU+2tpGTCNc08GOfeTDDVDQaTo0iSZkFPgiBpmlkcUZ2MmX3qfcxPxkx0977is2H4km81WnLf/db/wJhlpBnJVmR02o0Ccdj+utr5EpFnuZoio7v+zz+xBPs3d4DT6AkObE0ScMFrZqLUhQ0HQdNU1lUCqJYZmRTkAwjH9up4WgWSZaS5AlhEmI6Nr/8T375q/p5ySX/f/Ot3/Od1KSGVa9jKQq6pRD4IStr6wzPTpmEAZaiYSgCS2/i+3OaHZd5mFD3zGVRB8chm84x6038eYhlqESmZMVtkUdzZCKRhsF0McNUFXZWlwm+ptMphazQqgSv00LGGkLJWcwSSlehyBJadp0UFSXPMF2LmT9lPh6xvb3LPIwoK9CylI2rVzm/s8cgm/LM2g1O0hDDNCnjlMnCZ31lnazMyPMFvdVtTt68T6kIKsCr1/CLFLcsKQyVIkn5wOPv5/de/gx1Q6O51uVsOqetNzBMweRsQGWZOAhmRcKV9XX2989wnSaLxQjHtPBsh2m0wNJ0TFNnXqbLyQ4CHNMhLSs81SAuEtxaDdeySZKM0+mAXqtJPolJdQGiwKoKhOvhhylOpRDmKd1mCyrJ/ekpHcelZ3skVUEuYT6fk6iSplZHMyENfQodHK1NuJgiSVA1G1lpFLrCbrvB6TTn1371f350nsRlJUnTnKoEy6qxtbJBVpR4K32yJMfSTLK4pKwUmq0Ot27dwjAMZoGPbdvIIqHfWy5d1u0aJ8mCvfmIURGTOtqyxFiyoFQFORWLKMS2dUxNp1tv4yrmAz4Dl1yyxJAKmmVQVcuZNpoUxHHM8PxsWY7N9fCDGYqu4S8CRM0iTGIcx2IyGdPstFFNg7XVVeJFiK6qUKSoouT07IwgytAci1xIPvh1H6Hh1Xnr3h2m82UBBk1V6bT7+POQaRAwWURobY8yz5eVhtKIIAhQVZVgMafV6rC5c5Oz0RSJQpylrK9vcnh4ytPve55mt4+fZViqyWp/jUWcsLq5RTQPUBUFmRXcu/02tmWgqdBuelRVgRACygopJa1Wixe/+CKWYxOHGWeHQ0SYEg+nVEGMWVZoqVwm9tItppMAy1rOhmo2m6iGzmg2BVXBsHSOz88oFjkrzRUMRQdRIcnRDYFj2eRhyvHpEYuFT7vdJktSCk/FKkDJJO3dHdKTORqSosyQGsxmE6aLGU/tXsc1HPw0Zxr4lLLAMS26tQaqLsiSmLrt0jA8HCGW+7orCHQsTcOzTU4Oj8gT/z33mYfiSVwIEQBvPWg//pToAu99udXDzaWWh5NLLQ8nf9padqSU7042/2W8e8nig+Gt9/La8CgghHjxUsvDx6WWh5NLLX9yHorhlEsuueSSS/54XAbxSy655JJHmIcliP/bNFn7UsvDyaWWh5NLLX9CHoofNi+55JJLLvnj8bA8iV9yySWXXPLH4IEHcSHEx4UQbwkh7gghvmoBiQeNEOIfCiEGQojX3mFrCyF+Swjx9sV368IuhBD/zYW2V4QQzz84z9+NEGJLCPG7Qog3hBCvCyF+9ML+yOkRQlhCiM8KIV6+0PJfXdivCCE+c+Hz/yqEMC7s5kX7zsX+3Qfp/5cjhFCFEF8QQvz6RftR1bEnhHhVCPFFIcSLF7ZHrn8BCCGaQohfFkK8KYS4JYT48MOg5YEGcSGECvw94FuBJ4HvE0I8+SB9eg/8PPDxL7P9DeBTUsobwKf4N9WMvhW4cfH5YeBn/ox8fK8UwI9JKZ8Evhb4kYvz/yjqSYFvlFI+CzwHfFwI8bXA3wJ+Skp5HZgCP3Rx/A8B0wv7T10c9zDxo8Ctd7QfVR0Af15K+dw7pt89iv0LlrWF/4WU8nHgWZb/nwevRUr5wD7Ah4HfeEf7x4Eff5A+vUe/d4HX3tF+C1i72F5jOe8d4H8Avu8rHfcwfljWSf3mR10P4AAvAR9iufhC+/L+BvwG8OGLbe3iOPGgfb/wZ5NlQPhG4NcB8SjquPBpD+h+me2R618sC77f//Jz+zBoedDDKRvA4TvaRxe2R40VKeXpxfYZsHKx/cjou3gNfz/wGR5RPRdDEF8EBsBvAXeBmZTySxWx3+nvv9ZysX/Oslbsw8BPA/8Z8KXKAB0eTR0AEvhNIcTnhRA/fGF7FPvXFWAI/NzFMNc/EEK4PARaHnQQ/7cOubztPlJTfoQQNeB/A/6alPKPJG14lPRIKUsp5XMsn2Q/CDz+gF36/4wQ4juAgZTy8w/alz8lPiqlfJ7l8MKPCCG+/p07H6H+pQHPAz8jpXw/EPJlReAflJYHHcSPga13tDcvbI8a50KINYCL78GF/aHXJ4TQWQbwfyw0zj5+AAAB0klEQVSl/JUL8yOrB0BKOQN+l+WwQ1MI8aX0Eu/0919rudjfAMZ/xq5+JT4C/CUhxB7wCyyHVP4uj54OAKSUxxffA+Cfsry5Por96wg4klJ+5qL9yyyD+gPX8qCD+OeAGxe/vBvA9wK/9oB9+uPwa8APXGz/AMux5S/Z/4OLX6q/Fpi/49XrgSOEEMDPAreklH/nHbseOT1CiJ4QonmxbbMc27/FMph/4uKwL9fyJY2fAH7n4knqgSKl/HEp5aaUcpfl9fA7Usrv5xHTASCEcIUQ3pe2gW8BXuMR7F9SyjPgUAhx88L0TcAbPAxaHoIfDL4NuM1y/PK/eND+vAd//xfgFMhZ3p1/iOUY5KeAt4HfBtoXxwqWs2/uAq8CLzxo/79My0dZvv69Anzx4vNtj6Ie4H3AFy60vAb8lxf2q8BngTvALwHmhd26aN+52H/1QWv4Cpq+Afj1R1XHhc8vX3xe/9L1/Sj2rwv/ngNevOhjvwq0HgYtlys2L7nkkkseYR70cMoll1xyySV/Ai6D+CWXXHLJI8xlEL/kkksueYS5DOKXXHLJJY8wl0H8kksuueQR5jKIX3LJJZc8wlwG8UsuueSSR5jLIH7JJZdc8gjzfwMePDXK3TAM9gAAAABJRU5ErkJggg==\n", + "text/plain": [ + "

" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "if PROTOCOL == 'grpc':\n", + " np_image = np.repeat(np.expand_dims(np_image, 0), batch_size, axis=0)\n", + " channel = grpc.insecure_channel(SERVER_URL)\n", + " stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)\n", + " request = predict_pb2.PredictRequest()\n", + " request.model_spec.name = 'ssdmobilenet'\n", + " request.model_spec.signature_name = 'serving_default'\n", + " request.inputs['inputs'].CopyFrom(tf.contrib.util.make_tensor_proto(np_image))\n", + " result = stub.Predict(request)\n", + " visualize(result.outputs, np_image[0])\n", + "elif PROTOCOL == 'rest':\n", + " predict_request = '{\"instances\" : %s}' % np.expand_dims(np_image, 0).tolist()\n", + " result = requests.post(SERVER_URL, data=predict_request)\n", + " visualize(result.json()['predictions'][0], np_image)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Measure Performance" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "def make_request(batch_size):\n", + " if PROTOCOL == 'rest':\n", + " np_images = np.repeat(np.expand_dims(get_random_image(IMAGES_PATH), 0).tolist(), batch_size, axis=0).tolist()\n", + " return '{\"instances\" : %s}' % np_images\n", + " elif PROTOCOL == 'grpc':\n", + " np_images = np.repeat(np.expand_dims(get_random_image(IMAGES_PATH), 0), batch_size, axis=0)\n", + " channel = grpc.insecure_channel(SERVER_URL)\n", + " stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)\n", + " request = predict_pb2.PredictRequest()\n", + " request.model_spec.name = MODEL\n", + " request.model_spec.signature_name = 'serving_default'\n", + " request.inputs['inputs'].CopyFrom(tf.contrib.util.make_tensor_proto(np_images))\n", + " return (stub, request)\n", + "\n", + "def send_request(predict_request):\n", + " if PROTOCOL == 'rest':\n", + " requests.post(SERVER_URL, data=predict_request)\n", + " elif PROTOCOL == 'grpc':\n", + " predict_request[0].Predict(predict_request[1])\n", + "\n", + "def benchmark(batch_size=1, num_iteration=10, warm_up_iteration=2):\n", + " i = 0\n", + " total_time = 0\n", + " for _ in range(num_iteration):\n", + " i += 1\n", + " np_images = np.repeat(np.expand_dims(get_random_image(IMAGES_PATH), 0), batch_size, axis=0)\n", + " predict_request = make_request(batch_size)\n", + " start_time = time.time()\n", + " send_request(predict_request)\n", + " time_consume = time.time() - start_time\n", + " print('Iteration %d: %.3f sec' % (i, time_consume))\n", + " if i > warm_up_iteration:\n", + " total_time += time_consume\n", + "\n", + " time_average = total_time / (num_iteration - warm_up_iteration)\n", + " print('Average time: %.3f sec' % (time_average))\n", + " print('Batch size = %d' % batch_size)\n", + " if batch_size == 1:\n", + " print('Latency: %.3f ms' % (time_average * 1000))\n", + " print('Throughput: %.3f images/sec' % (batch_size / time_average))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Real-time Inference (latency, batch_size=1)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Iteration 1: 0.059 sec\n", + "Iteration 2: 0.098 sec\n", + "Iteration 3: 0.055 sec\n", + "Iteration 4: 0.052 sec\n", + "Iteration 5: 0.056 sec\n", + "Iteration 6: 0.051 sec\n", + "Iteration 7: 0.056 sec\n", + "Iteration 8: 0.052 sec\n", + 
"Iteration 9: 0.050 sec\n", + "Iteration 10: 0.048 sec\n", + "Average time: 0.052 sec\n", + "Batch size = 1\n", + "Latency: 52.392 ms\n", + "Throughput: 19.087 images/sec\n" + ] + } + ], + "source": [ + "benchmark()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Throughput (batch_size=128)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Iteration 1: 4.414 sec\n", + "Iteration 2: 2.699 sec\n", + "Iteration 3: 2.654 sec\n", + "Iteration 4: 2.409 sec\n", + "Iteration 5: 2.485 sec\n", + "Iteration 6: 2.476 sec\n", + "Iteration 7: 2.457 sec\n", + "Iteration 8: 2.497 sec\n", + "Iteration 9: 2.575 sec\n", + "Iteration 10: 2.539 sec\n", + "Average time: 2.511 sec\n", + "Batch size = 128\n", + "Throughput: 50.967 images/sec\n" + ] + } + ], + "source": [ + "benchmark(batch_size=128)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.12" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/object_detection/tensorflow_serving/RFCN.ipynb b/docs/object_detection/tensorflow_serving/RFCN.ipynb deleted file mode 100644 index 2f96cf5e7..000000000 --- a/docs/object_detection/tensorflow_serving/RFCN.ipynb +++ /dev/null @@ -1,207 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Object Detection: R-FCN" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from __future__ import print_function\n", - "\n", - "import os\n", - "import time\n", - "import random\n", - "import requests\n", - "import numpy as np\n", - "import tensorflow as tf\n", - "from PIL import Image\n", - "\n", - "from object_detection.utils.visualization_utils import visualize_boxes_and_labels_on_image_array\n", - "\n", - "%matplotlib inline\n", - "import matplotlib\n", - "from matplotlib import pyplot as plt" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "SERVER_URL = 'http://localhost:8501/v1/models/rfcn:predict'\n", - "IMAGES_PATH = '/home//coco/val/val2017' # Edit this to your COCO validation directory" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def get_random_image(image_dir):\n", - " image_path = os.path.join(image_dir, random.choice(os.listdir(image_dir)))\n", - " image = Image.open(image_path)\n", - " (im_width, im_height) = image.size\n", - " \n", - " return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)\n", - "\n", - "def visualize(output_dict, image_np):\n", - " output_dict['num_detections'] = int(output_dict['num_detections'])\n", - " output_dict['detection_classes'] = np.array(output_dict['detection_classes']).astype(np.uint8)\n", - " output_dict['detection_boxes'] = np.array(output_dict['detection_boxes'])\n", - " output_dict['detection_scores'] = np.array(output_dict['detection_scores'])\n", - "\n", - " # Visualize the results of a detection\n", - " 
visualize_boxes_and_labels_on_image_array(\n", - " image_np,\n", - " output_dict['detection_boxes'],\n", - " output_dict['detection_classes'],\n", - " output_dict['detection_scores'],\n", - " {1: {'id': 1, 'name': 'object'}}, # Empty category index\n", - " instance_masks=output_dict.get('detection_masks'),\n", - " use_normalized_coordinates=True,\n", - " line_thickness=8)\n", - " plt.figure()\n", - " plt.imshow(image_np)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Test Object Detection" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "np_image = get_random_image(IMAGES_PATH)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "predict_request = '{\"instances\" : %s}' % np.expand_dims(np_image, 0).tolist()\n", - "result = requests.post(SERVER_URL, data=predict_request)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "visualize(result.json()['predictions'][0], np_image)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Measure Performance" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def benchmark(batch_size=1, num_iteration=40, warm_up_iteration=10):\n", - " i = 0\n", - " total_time = 0\n", - " for _ in range(num_iteration):\n", - " i += 1\n", - " np_images = np.repeat(np.expand_dims(get_random_image(IMAGES_PATH), 0).tolist(), batch_size, axis=0).tolist()\n", - " predict_request = '{\"instances\" : %s}' % np_images\n", - " start_time = time.time()\n", - " requests.post(SERVER_URL, data=predict_request)\n", - " time_consume = time.time() - start_time\n", - " print('Iteration %d: %.3f sec' % (i, time_consume))\n", - " if i > warm_up_iteration:\n", - " total_time += time_consume\n", - "\n", - " time_average = total_time / (num_iteration - warm_up_iteration)\n", - " print('Average time: %.3f sec' % (time_average))\n", - " print('Batch size = %d' % batch_size)\n", - " if batch_size == 1:\n", - " print('Latency: %.3f ms' % (time_average * 1000))\n", - " print('Throughput: %.3f images/sec' % (batch_size / time_average))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Real-time Inference (latency, batch_size=1)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "benchmark()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Throughput (batch_size=128)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "benchmark(batch_size=128)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 2", - "language": "python", - "name": "python2" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 2 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython2", - "version": "2.7.10" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/object_detection/tensorflow_serving/Tutorial.md b/docs/object_detection/tensorflow_serving/Tutorial.md index d3256e7c1..224943f5c 100644 --- a/docs/object_detection/tensorflow_serving/Tutorial.md +++ 
b/docs/object_detection/tensorflow_serving/Tutorial.md
@@ -1,15 +1,17 @@
-# Object Detection with TensorFlow Serving on CPU using R-FCN model
+# Object Detection with TensorFlow Serving on CPU
+Models: R-FCN and SSD-MobileNet
 
 ## Goal
 
 This tutorial will introduce you to the CPU performance considerations for object detection in deep learning models and how to use [Intel® Optimizations for TensorFlow Serving](https://www.tensorflow.org/serving/) to improve inference time on CPUs.
-This tutorial uses a pre-trained Region-based Fully Convolutional Network (R-FCN) model for object detection and provides sample code that you can use to get your optimized TensorFlow model server and REST client up and running quickly. In this tutorial using R-FCN, you will measure inference performance in two situations:
-* **Online inference**, where batch_size=1. In this case, lower time to result means better runtime performance.
+This tutorial uses two pre-trained models - a [Region-based Fully Convolutional Network (R-FCN)](https://arxiv.org/pdf/1605.06409.pdf) and a [Single-Shot MultiBox Detector MobileNet (SSD-MobileNet)](https://arxiv.org/pdf/1704.04861.pdf) - for object detection and provides sample code that you can use to get your optimized TensorFlow model server and client up and running quickly.
+In this tutorial you will choose between R-FCN and SSD-MobileNet, and between the REST client and GRPC client, and then measure inference performance in two situations:
+* **Online inference**, where batch_size=1. In this case, a lower number means better runtime performance.
 * **Batch inference**, where batch_size>1. In this case, a higher number means better runtime performance.
 
 **NOTE about REST vs. GRPC**: This tutorial is focused on optimizing the model server, not the client that sends requests. For optimal client-side serialization and de-serialization, you may want to use TensorFlow Serving's GRPC option instead of the REST API, especially if you are optimizing for batch inference (here is one [article](https://medium.com/@avidaneran/tensorflow-serving-rest-vs-grpc-e8cef9d4ff62) with a relevant analysis).
-We use REST in this tutorial for illustration, not as a best practice, and offer another [tutorial](/docs/image_recognition/tensorflow_serving/Tutorial.md) that illustrates the use of GRPC with TensorFlow Serving.
+We show both GRPC and REST in this tutorial for illustration, not as a best practice. Feel free to compare and choose the protocol that works best for you.
 
 ## Prerequisites
 
@@ -19,140 +21,178 @@ This tutorial assumes you have already:
   especially these sections:
   * [Performance Metrics](/docs/general/tensorflow_serving/GeneralBestPractices.md#performance-metrics)
   * [TensorFlow Serving Configuration Settings](/docs/general/tensorflow_serving/GeneralBestPractices.md#tensorflow-serving-configuration-settings)
-* Ran an example end-to-end using a REST client, such as the one in the [Installation Guide](/docs/general/tensorflow_serving/InstallationGuide.md)
+* Ran an example end-to-end using a REST or GRPC client, such as the one in the [Installation Guide](/docs/general/tensorflow_serving/InstallationGuide.md)
 
 ## Background
 
-[Intel® Math Kernel Library for Deep Neural Networks (Intel® MKL-DNN)](https://github.com/intel/mkl-dnn) offers significant performance improvements for convolution, pooling, normalization, activation, and other operations for object detection, using efficient vectorization and multi-threading. Tuning TensorFlow Serving to take full advantage of your hardware for object detection deep learning inference involves:
+[Intel® Math Kernel Library for Deep Neural Networks (Intel® MKL-DNN)](https://github.com/intel/mkl-dnn) offers significant performance improvements for convolution, pooling, normalization, activation, and other operations for object detection, using efficient vectorization and multi-threading.
+Tuning TensorFlow Serving to take full advantage of your hardware for object detection deep learning inference involves:
 1. Running a TensorFlow Serving docker container configured for performance given your hardware resources
-2. Running a REST client notebook to verify object detection and measure online and batch inference performance
+2. Running a REST or GRPC client to verify object detection and measure online and batch inference
 3. Experimenting with the TensorFlow Serving settings on your own to further optimize for your model and use case
 
-## Hands-on Tutorial with pre-trained R-FCN model
+## Hands-on Tutorial
 
-1. **Set up your environment**: We need to setup two things for this tutorial
-   #### 1.1 Install the [requests](http://docs.python-requests.org) package for making REST HTTP requests.
-   We will use a virtual environment to install the required packages. If you do not have pip or virtualenv, you will need to get them first:
-   ```
-   $ sudo apt-get install -y python python-pip
-   $ pip install virtualenv
-   ```
-
-   Create and activate the python virtual envirnoment in your home directory and install the [`requests`](http://docs.python-requests.org) package.
+1. **Download the data and clone the Model Zoo**:
+
+   1.1 Download the 2017 validation COCO dataset (~780MB) (**note**: do not convert the COCO dataset to TF records format):
+
+   ```
-   $ cd ~
-   $ virtualenv rfcn_venv
-   $ source rfcn_venv/bin/activate
-   (rfcn_venv)$ pip install requests
+   cd ~
+   mkdir -p coco/val
+   wget http://images.cocodataset.org/zips/val2017.zip
+   unzip val2017.zip -d coco/val
+   export COCO_VAL_DATA=$(pwd)/coco/val/val2017
+   echo "export COCO_VAL_DATA=$(pwd)/coco/val/val2017" >> ~/.bashrc
    ```
-   #### 1.2 Install [Tensorflow Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection)
-   For detailed instructions, [click here](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md). Following are the instructions for Ubuntu 16.04.
-
-   1.2.1 Install Tensorflow Object Detection API dependencies
-   ```
-   (rfcn_venv)$ sudo apt-get install -y protobuf-compiler python-pil python-lxml python-tk
-   (rfcn_venv)$ pip install tensorflow Cython contextlib2 jupyter matplotlib pillow lxml
-   ```
-
-   1.2.2 Clone the tensorflow models repo into your home directory.
-   ```
-   (rfcn_venv)$ cd ~
-   (rfcn_venv)$ git clone https://github.com/tensorflow/models
-   (rfcn_venv)$ export TF_MODELS_ROOT=$(pwd)/models
-   (rfcn_venv)$ echo "export TF_MODELS_ROOT=$(pwd)/models" >> ~/.bashrc
-   ```
+   1.2 Clone the Intel Model Zoo into your home directory:
+
+   ```
+   cd ~
+   git clone https://github.com/IntelAI/models.git
+   ```
+
+2. **Choose your model and download the pre-trained SavedModel**: Select either R-FCN or SSD-MobileNet.
+   Then download and extract the pre-trained model and copy the `saved_model.pb` to `~/obj_detection/1` (the `1` subdirectory is important - don't skip it!).
+   This is the file we will serve from TensorFlow Serving. Finally, define a variable for your chosen model to use in later steps.
+   Refer to the [TensorFlow documentation](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/python/saved_model) for more information about SavedModels, and refer to the FP32 model READMEs for [R-FCN](/benchmarks/object_detection/tensorflow/rfcn/README.md#download_fp32_pretrained_model) and [SSD-MobileNet](/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md#fp32-inference-instructions) to get the latest location of the pre-trained models.
+
+   Highlight and copy one of the following download links:
+   * R-FCN: `https://storage.googleapis.com/intel-optimized-tensorflow/models/rfcn_resnet101_fp32_coco_pretrained_model.tar.gz`
+   * SSD-MobileNet: `http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_coco_2018_01_28.tar.gz`
+
+   Then execute the following bash commands after customizing them for the model you have chosen:
+
+   ```
+   cd ~
+   wget <chosen model download link>
+   tar -xzvf <downloaded .tar.gz file>
+   mkdir -p obj_detection/1
+   cp <extracted model directory>/saved_model/saved_model.pb obj_detection/1
+   model_name=<rfcn or ssdmobilenet>
+   ```
-   1.2.3 Install COCO API
-   ```
-   (rfcn_venv)$ cd ~
-   (rfcn_venv)$ git clone https://github.com/cocodataset/cocoapi.git
-   (rfcn_venv)$ cd cocoapi/PythonAPI
-   (rfcn_venv)$ make
-   (rfcn_venv)$ cp -r pycocotools $TF_MODELS_ROOT/research/
-   ```
+3. **Set up your virtual environment**: We will use a virtual environment to install the required packages.
-   1.2.4 Manually install the protobuf-compiler v3.0.0, run the compilation process, add Libraries to PYTHONPATH and to your `.bashrc` and test the installation of Tensorflow Object Detection API
-   ```
-   (rfcn_venv)$ cd $TF_MODELS_ROOT/research/
-   (rfcn_venv)$ wget -O protobuf.zip https://github.com/google/protobuf/releases/download/v3.0.0/protoc-3.0.0-linux-x86_64.zip
-   (rfcn_venv)$ unzip protobuf.zip
-   (rfcn_venv)$ ./bin/protoc object_detection/protos/*.proto --python_out=.
-   (rfcn_venv)$ export PYTHONPATH=$PYTHONPATH:$(pwd):$(pwd)/slim
-   (rfcn_venv)$ echo "export PYTHONPATH=$PYTHONPATH:$(pwd):$(pwd)/slim" >> ~/.bashrc
-   (rfcn_venv)$ python object_detection/builders/model_builder_test.py
-   ```
-
+   3.1 If you do not have pip or virtualenv, you will need to get them first:
+   ```
+   sudo apt-get install -y python python-pip virtualenv
    ```
-2. **Download the Data**: Download the 2017 validation COCO dataset (~780MB) (**note**: do not convert the COCO dataset to TF records format):
-
+   3.2 Create and activate the python virtual environment in your home directory:
+   ```
+   cd ~
+   virtualenv od_venv
+   source od_venv/bin/activate
    ```
-   (rfcn_venv)$ cd ~
-   (rfcn_venv)$ mkdir -p coco/val
-   (rfcn_venv)$ wget http://images.cocodataset.org/zips/val2017.zip
-   (rfcn_venv)$ unzip val2017.zip -d coco/val
-   (rfcn_venv)$ export COCO_VAL_DATA=$(pwd)/coco/val/val2017
-   (rfcn_venv)$ echo "export COCO_VAL_DATA=$(pwd)/coco/val/val2017" >> ~/.bashrc
    ```
-3. **Download and Prepare the pre-trained SavedModel**: Download and extract the pre-trained model and copy the `rfcn_resnet101_fp32_coco/saved_model/saved_model.pb` to `rfcn/1` (the `1` subdirectory is important - don't skip it!). This is the file we will serve from TensorFlow Serving.
-   Refer to the [TensorFlow documentation](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/python/saved_model) for more information about SavedModels, and refer to this [README file](/benchmarks/object_detection/tensorflow/rfcn/README.md#download_fp32_pretrained_model) to get the latest location of the pre-trained model.
+   3.3 Install the required packages using `requirements.txt`:
    ```
-   (rfcn_venv)$ cd ~/
-   (rfcn_venv)$ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/rfcn_resnet101_fp32_coco_pretrained_model.tar.gz
-   (rfcn_venv)$ tar -xzvf rfcn_resnet101_fp32_coco_pretrained_model.tar.gz
-   (rfcn_venv)$ mkdir -p rfcn/1
-   (rfcn_venv)$ cp rfcn_resnet101_fp32_coco/saved_model/saved_model.pb rfcn/1
+   pip install -r models/docs/object_detection/tensorflow_serving/requirements.txt
    ```
 
-4. **Discover the number of physical cores**: Compute *num_physical_cores* by executing the `lscpu` command and multiplying `Core(s) per socket` by `Socket(s)`. For example, for a machine with `Core(s) per socket: 28` and `Socket(s): 2`, `num_physical_cores = 28 * 2 = 56`. To compute *num_physical_cores* with bash commands:
+   3.4 Choose between the REST example or the GRPC example (the environment dependencies are different depending on the protocol you use,
+   and GRPC is usually faster, especially when using larger batch sizes). Define a variable for your desired protocol.
+
+   **REST**:
+   ```
+   protocol_name=rest
+   ```
+
+   **GRPC**:
+   ```
+   protocol_name=grpc
+   ```
+
+4. **Install [TensorFlow Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection)**:
+   For detailed instructions, [click here](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md).
+   We have already installed the required python packages for the API. Following are the rest of the instructions for Ubuntu 16.04.
+
+   4.1 Clone the tensorflow models repo into a new folder in your home directory.
    ```
-   (rfcn_venv)$ cores_per_socket=`lscpu | grep "Core(s) per socket" | cut -d':' -f2 | xargs`
-   (rfcn_venv)$ num_sockets=`lscpu | grep "Socket(s)" | cut -d':' -f2 | xargs`
-   (rfcn_venv)$ num_physical_cores=$((cores_per_socket * num_sockets))
-   (rfcn_venv)$ echo $num_physical_cores
+   cd ~
+   git clone https://github.com/tensorflow/models tensorflow-models
+   export TF_MODELS_ROOT=$(pwd)/tensorflow-models
+   echo "export TF_MODELS_ROOT=$(pwd)/tensorflow-models" >> ~/.bashrc
    ```
-5. **Start the server**: Now let's start up the TensorFlow model server. With `&` at the end of the cmd, runs the container as a background process. Press enter after executing the following cmd.
-To optimize overall performance, use the following recommended settings from the [General Best Practices](/docs/general/tensorflow_serving/GeneralBestPractices.md):
-   * OMP_NUM_THREADS=*num_physical_cores*
-   * TENSORFLOW_INTER_OP_PARALLELISM=2
-   * TENSORFLOW_INTRA_OP_PARALLELISM=*num_physical_cores*
+   4.2 Manually install the protobuf-compiler v3.0.0, run the compilation process, add libraries to PYTHONPATH and to your `.bashrc` and test the installation of Tensorflow Object Detection API.
+   ```
+   cd $TF_MODELS_ROOT/research/
+   wget -O protobuf.zip https://github.com/protocolbuffers/protobuf/releases/download/v3.0.0/protoc-3.0.0-linux-x86_64.zip
+   unzip protobuf.zip
+   ./bin/protoc object_detection/protos/*.proto --python_out=.
+   export PYTHONPATH=$PYTHONPATH:$(pwd):$(pwd)/slim
+   echo "export PYTHONPATH=$PYTHONPATH:$(pwd):$(pwd)/slim" >> ~/.bashrc
+   python object_detection/builders/model_builder_test.py
+   ```
+5. **Discover the number of physical cores**: Compute *num_physical_cores* by executing the `lscpu` command and multiplying `Core(s) per socket` by `Socket(s)`.
+   For example, for a machine with `Core(s) per socket: 28` and `Socket(s): 2`, `num_physical_cores = 28 * 2 = 56`.
+   To compute *num_physical_cores* with bash commands:
    ```
-   (rfcn_venv)$ cd ~
-   (rfcn_venv)$ docker run \
-        --name=tfserving_rfcn \
-        -p 8501:8501 \
-        -v "$(pwd)/rfcn:/models/rfcn" \
-        -e MODEL_NAME=rfcn \
-        -e OMP_NUM_THREADS=$num_physical_cores \
-        -e TENSORFLOW_INTER_OP_PARALLELISM=2 \
-        -e TENSORFLOW_INTRA_OP_PARALLELISM=$num_physical_cores \
-        tensorflow/serving:mkl &
-   ```
-   **Note**: For some models, playing around with these settings values can improve performance even further.
-   We recommend that you experiment with your own hardware and model if you have strict performance requirements.
-
-6. *Measure Online and Batch inference performance**: Clone the Intel Model Zoo into a directory called `intel-models` and run `rfcn-benchmark.py` [python script](/docs/object_detection/tensorflow_serving/rfcn-benchmark.py), which will test both Online and Batch performance.
-   ```
-   (rfcn_venv)$ git clone https://github.com/IntelAI/models.git intel-models
-   (rfcn_venv)$ python intel-models/docs/object_detection/tensorflow_serving/rfcn-benchmark.py \
-   -i $COCO_VAL_DATA
    ```
+   cores_per_socket=`lscpu | grep "Core(s) per socket" | cut -d':' -f2 | xargs`
+   num_sockets=`lscpu | grep "Socket(s)" | cut -d':' -f2 | xargs`
+   num_physical_cores=$((cores_per_socket * num_sockets))
+   echo $num_physical_cores
    ```
+6. **Start the server**: Now start up the TensorFlow model server. Using `-d` (for "detached") runs the container as a background process.
+   We will publish the ports for both REST (`-p 8501:8501`) and GRPC (`-p 8500:8500`).
+   To optimize overall performance, use the following recommended settings from the [General Best Practices](/docs/general/tensorflow_serving/GeneralBestPractices.md):
+   * OMP_NUM_THREADS=*num_physical_cores*
+   * TENSORFLOW_INTER_OP_PARALLELISM=2
+   * TENSORFLOW_INTRA_OP_PARALLELISM=*num_physical_cores*
+
+   ```
+   cd ~
+   docker run \
+        --name=tfserving \
+        -d \
+        -p 8500:8500 \
+        -p 8501:8501 \
+        -v "$(pwd)/obj_detection:/models/$model_name" \
+        -e MODEL_NAME=$model_name \
+        -e OMP_NUM_THREADS=$num_physical_cores \
+        -e TENSORFLOW_INTER_OP_PARALLELISM=2 \
+        -e TENSORFLOW_INTRA_OP_PARALLELISM=$num_physical_cores \
+        tensorflow/serving:mkl
+   ```
+
+   **Note**: For some models, playing around with the parallelism settings can improve performance even further.
+   We recommend that you experiment with your own hardware and model if you have strict performance requirements.
-7. **Visualize Object Detection Output**: To visually see the output of object detection results, we will use Jupyter notebook via web browser. If you are using a system that does not have a browser, such as a VM on GCP or AWS, a workaround is to use local port forwarding of port 8888 to relay the jupyter service to your localhost. You will need to quit your SSH session and log back in with port forwarding configured.
-For example, with a GCP VM, add `--ssh-flag="-L 8888:localhost:8888"` to your ssh command. Once you are connected again with port forwarding, reactivate the virtual environment, navigate to the tutorial directory, and start jupyter notebook. Continue with the next instruction.
-   ```
-   $ cd ~
-   $ source rfcn_venv/bin/activate
-   (rfcn_venv)$ cd intel-models/docs/object_detection/tensorflow_serving
-   (rfcn_venv)$ jupyter notebook
+7. **Measure online and batch inference performance**: Run the `object_detection_benchmark.py` [python script](/docs/object_detection/tensorflow_serving/object_detection_benchmark.py), which will test both online and batch inference performance.
+
+   ```
+   cd ~
+   python models/docs/object_detection/tensorflow_serving/object_detection_benchmark.py \
+   -i $COCO_VAL_DATA \
+   -m $model_name \
+   -p $protocol_name
    ```
-   After running `jupyter notebook` , paste the generated link into your browser and open the `RFCN.ipynb` file. You will need to edit the code in one place - in the second cell, insert the path to your downloaded COCO validation data set. Then, execute the cells in order. The output of the "Test Object Detection" section should be an image with objects correctly detected by the R-FCN model.
-8. (Optional) **Using a single core**: In some cases, it is desirable to constrain the inference server to a single core or socket. Docker has many runtime flags that allow you to control the container's access to the host system's CPUs, memory, and other resources. See the [Docker document on this topic](https://docs.docker.com/config/containers/resource_constraints/#cpu) for all the options and their definitions. For example, to run the container so that a single CPU is used, you can use these settings:
+8. **Visualize object detection output**: To visually see the results of object detection, we will use a Jupyter notebook via web browser.
+   If you are using a system that does not have a browser, such as a VM on GCP or AWS, a workaround is to use local port forwarding of port 8888 to relay the jupyter service to your localhost.
+   You will need to quit your SSH session and log back in with port forwarding configured. For example, with a GCP VM, add `--ssh-flag="-L 8888:localhost:8888"` to your ssh command.
+   Once you are connected again with port forwarding, reactivate the virtual environment, navigate to the tutorial directory, and start the jupyter notebook service.
+
+   ```
+   cd ~
+   source od_venv/bin/activate
+   cd models/docs/object_detection/tensorflow_serving
+   jupyter notebook
+   ```
+
+   After running `jupyter notebook`, paste the generated link into your browser and open the `ObjectDetection.ipynb` file.
+   You will need to edit the code in one cell - in the second cell, insert the path to your downloaded COCO validation data set and the names of your chosen model and protocol.
+   Then, execute the cells in order. The output of the "Test Object Detection" section should be an image with objects detected by the served model.
+
+9. (Optional) **Using a single core**: In some cases, it is desirable to constrain the inference server to a single core or socket.
+   Docker has many runtime flags that allow you to control the container's access to the host system's CPUs, memory, and other resources.
+   See the [Docker document on this topic](https://docs.docker.com/config/containers/resource_constraints/#cpu) for all the options and their definitions.
+   For example, to run the container so that a single CPU is used, you can use these settings:
    * `--cpuset-cpus="0"`
    * `--cpus="1"`
    * `OMP_NUM_THREADS=1`
@@ -160,33 +200,39 @@ For example, with a GCP VM, add `--ssh-flag="-L 8888:localhost:8888"` to your ss
    * `TENSORFLOW_INTRA_OP_PARALLELISM=1`
 
    ```
-   (rfcn_venv)$ docker run \
-        --name=tfserving_rfcn_1 \
-        -p 8500:8500 \
-        --cpuset-cpus="0" \
-        --cpus="1" \
-        -v "$(pwd)/rfcn:/models/rfcn" \
-        -e MODEL_NAME=rfcn \
-        -e OMP_NUM_THREADS=1 \
-        -e TENSORFLOW_INTER_OP_PARALLELISM=1 \
-        -e TENSORFLOW_INTRA_OP_PARALLELISM=1 \
-        tensorflow/serving:mkl &
+   cd ~
+   docker run \
+        --name=tfserving_1core \
+        -d \
+        -p 8500:8500 \
+        -p 8501:8501 \
+        --cpuset-cpus="0" \
+        --cpus="1" \
+        -v "$(pwd)/obj_detection:/models/$model_name" \
+        -e MODEL_NAME=$model_name \
+        -e OMP_NUM_THREADS=1 \
+        -e TENSORFLOW_INTER_OP_PARALLELISM=1 \
+        -e TENSORFLOW_INTRA_OP_PARALLELISM=1 \
+        tensorflow/serving:mkl
    ```
-
+
 10. **Clean up**:
    * After saving any changes you made to the Jupyter notebook, close the file and stop the Jupyter server by clicking `Quit` from the main file browser.
-   * After you are fininshed with querying, you can stop the container which is running in the background. To restart the container with the same name, you need to stop and remove the container from the registry. To view your running containers run `docker ps`.
-     ```
-     (rfcn_venv)$ docker rm -f tfserving_rfcn
-     ```
+   * After you are finished with querying, you can stop the container which is running in the background.
+     To restart the container with the same name, you need to stop and remove the container from the registry.
+     To view your running containers run `docker ps`.
+
+     ```
+     docker rm -f tfserving
+     ```
+
   * Deactivate your virtual environment with `deactivate`.
-
 ## Conclusion
 
 You have now seen an end-to-end example of serving an object detection model for inference using TensorFlow Serving, and learned:
 1. How to choose good values for the performance-related runtime parameters exposed by the `docker run` command
-2. How to verify that the served model can correctly detect objects in an image using a sample Jupyter notebook
-3. How to measure online and batch inference metrics using a REST client
+2. How to test online and batch inference metrics using a REST or GRPC client
+3. How to verify that the served model can correctly detect objects in an image using a sample Jupyter notebook
 
 With this knowledge and the example code provided, you should be able to get started serving your own custom object detection model with good performance. If desired, you should also be able to investigate a variety of different settings combinations to see if further performance improvements are possible.
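[Editor's note: the tutorial above and the benchmark script below both reduce the two performance metrics to the same arithmetic: latency is the per-request wall time at batch_size=1, and throughput is batch_size divided by the averaged wall time. The following sketch is illustrative only and is not part of this patch. It assumes a serving container from step 6 is publishing the REST port on localhost:8501, that `model_name` matches the MODEL_NAME the container was started with, and it substitutes a random 300x300 image for a real COCO sample.]

```python
# Minimal REST latency check (a sketch; not part of the patch above).
import time

import numpy as np
import requests

model_name = 'ssdmobilenet'  # assumption: or 'rfcn', matching MODEL_NAME
server_url = 'http://localhost:8501/v1/models/{}:predict'.format(model_name)

# A random uint8 image stands in for a real COCO sample; the shape is illustrative.
image = np.random.randint(0, 255, size=(300, 300, 3), dtype=np.uint8)
body = '{"instances" : %s}' % np.expand_dims(image, 0).tolist()

start = time.time()
requests.post(server_url, data=body)
elapsed = time.time() - start

print('Latency: %.3f ms' % (elapsed * 1000))            # online inference, batch_size=1
print('Throughput: %.3f images/sec' % (1.0 / elapsed))  # batch of one
```

As in `object_detection_benchmark.py`, a real measurement should discard several warm-up iterations and average the remainder; a single request like this only confirms the served model is reachable and gives a rough number.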
diff --git a/docs/object_detection/tensorflow_serving/rfcn-benchmark.py b/docs/object_detection/tensorflow_serving/object_detection_benchmark.py
similarity index 54%
rename from docs/object_detection/tensorflow_serving/rfcn-benchmark.py
rename to docs/object_detection/tensorflow_serving/object_detection_benchmark.py
index 6948df969..c30c1aeae 100644
--- a/docs/object_detection/tensorflow_serving/rfcn-benchmark.py
+++ b/docs/object_detection/tensorflow_serving/object_detection_benchmark.py
@@ -14,7 +14,7 @@
 #
 ####### USAGE #########
-# python rfcn-benchmark.py -i <path to images directory>
+# python object_detection_benchmark.py -i <path to images directory> -m <model name> -p <protocol name>
 
 from __future__ import print_function
@@ -25,8 +25,6 @@
 import requests
 import numpy as np
 from PIL import Image
-import tensorflow as tf
-from object_detection.utils.visualization_utils import visualize_boxes_and_labels_on_image_array
 
 def check_for_link(value):
@@ -40,7 +38,7 @@ def check_for_link(value):
         raise argparse.ArgumentTypeError("{} cannot be a link.".format(value))
 
 def check_valid_folder(value):
-    """verifies filename exists and isn't a link"""
+    """Verifies filename exists and isn't a link"""
     if value is not None:
         if not os.path.isdir(value):
             raise argparse.ArgumentTypeError("{} does not exist or is not a directory.".
@@ -48,6 +46,20 @@ def check_valid_folder(value):
                                              format(value))
         check_for_link(value)
     return value
 
+def check_valid_model(value):
+    """Verifies model name is supported"""
+    if value not in ('rfcn', 'ssdmobilenet'):
+        raise argparse.ArgumentTypeError("Model name {} does not match 'rfcn' or 'ssdmobilenet'.".
+                                         format(value))
+    return value
+
+def check_valid_protocol(value):
+    """Verifies protocol is supported"""
+    if value not in ('rest', 'grpc'):
+        raise argparse.ArgumentTypeError("Protocol name {} does not match 'rest' or 'grpc'.".
+                                         format(value))
+    return value
+
 def get_random_image(image_dir):
     image_path = os.path.join(image_dir, random.choice(os.listdir(image_dir)))
     image = Image.open(image_path)
@@ -55,15 +67,38 @@ def get_random_image(image_dir):
 
     return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)
 
+def make_request(batch_size):
+    if PROTOCOL == 'rest':
+        np_images = np.repeat(np.expand_dims(get_random_image(IMAGES_PATH), 0).tolist(), batch_size, axis=0).tolist()
+        return '{"instances" : %s}' % np_images
+    elif PROTOCOL == 'grpc':
+        import grpc
+        import tensorflow as tf
+        from tensorflow_serving.apis import predict_pb2
+        from tensorflow_serving.apis import prediction_service_pb2_grpc
+        np_images = np.repeat(np.expand_dims(get_random_image(IMAGES_PATH), 0), batch_size, axis=0)
+        channel = grpc.insecure_channel(SERVER_URL)
+        stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)
+        request = predict_pb2.PredictRequest()
+        request.model_spec.name = MODEL
+        request.model_spec.signature_name = 'serving_default'
+        request.inputs['inputs'].CopyFrom(tf.contrib.util.make_tensor_proto(np_images))
+        return (stub, request)
+
+def send_request(predict_request):
+    if PROTOCOL == 'rest':
+        requests.post(SERVER_URL, data=predict_request)
+    elif PROTOCOL == 'grpc':
+        predict_request[0].Predict(predict_request[1])
+
 def benchmark(batch_size=1, num_iteration=20, warm_up_iteration=10):
     i = 0
     total_time = 0
     for _ in range(num_iteration):
         i += 1
-        np_images = np.repeat(np.expand_dims(get_random_image(IMAGES_PATH), 0).tolist(), batch_size, axis=0).tolist()
-        predict_request = '{"instances" : %s}' % np_images
+        predict_request = make_request(batch_size)
         start_time = time.time()
-        requests.post(SERVER_URL, data=predict_request)
+        send_request(predict_request)
         time_consume = time.time() - start_time
         print('Iteration %d: %.3f sec' % (i, time_consume))
         if i > warm_up_iteration:
@@ -81,15 +116,26 @@ def benchmark(batch_size=1, num_iteration=20, warm_up_iteration=10):
     ap = argparse.ArgumentParser()
     ap.add_argument("-i", "--images_path", type=check_valid_folder, required=True,
                     help="Path to COCO validation directory")
+    ap.add_argument("-m", "--model", type=check_valid_model, required=True,
+                    help="Name of model (rfcn or ssdmobilenet)")
+    ap.add_argument("-p", "--protocol", type=check_valid_protocol, required=True,
+                    help="Name of protocol (rest or grpc)")
     args = vars(ap.parse_args())
-
-    SERVER_URL = 'http://localhost:8501/v1/models/rfcn:predict'
+
     IMAGES_PATH = args['images_path']
+    MODEL = args['model']
+    PROTOCOL = args['protocol']
+    if PROTOCOL == 'rest':
+        SERVER_URL = 'http://localhost:8501/v1/models/{}:predict'.format(MODEL)
+    elif PROTOCOL == 'grpc':
+        SERVER_URL = 'localhost:8500'
 
     print('\n SERVER_URL: {} \n IMAGES_PATH: {}'.format(SERVER_URL, IMAGES_PATH))
 
-    print('\nStarting R-FCN model benchmarking for Latency with batch_size=1, num_iteration=20, warm_up_iteration=10')
+    print('\nStarting {} model benchmarking for latency on {}:'.format(MODEL.upper(), PROTOCOL.upper()))
+    print('batch_size=1, num_iteration=20, warm_up_iteration=10\n')
     benchmark(batch_size=1, num_iteration=20, warm_up_iteration=10)
 
-    print('\nStarting R-FCN model benchmarking for Throughput with batch_size=128, num_iteration=10, warm_up_iteration=2')
+    print('\nStarting {} model benchmarking for throughput on {}:'.format(MODEL.upper(), PROTOCOL.upper()))
+    print('batch_size=128, num_iteration=10, warm_up_iteration=2\n')
     benchmark(batch_size=128, num_iteration=10, warm_up_iteration=2)
diff --git a/docs/object_detection/tensorflow_serving/requirements.txt b/docs/object_detection/tensorflow_serving/requirements.txt
new file mode 100644
index 000000000..c4c13fc19
--- /dev/null
+++ b/docs/object_detection/tensorflow_serving/requirements.txt
@@ -0,0 +1,16 @@
+# rest
+requests
+
+# grpc
+grpc
+intel-tensorflow
+tensorflow-serving-api
+
+# object detection api
+Cython
+contextlib2
+jupyter
+matplotlib
+pillow
+lxml
+absl-py
\ No newline at end of file

From ab5c13d9c9ae15e2f7492a80aa5b94053020b07e Mon Sep 17 00:00:00 2001
From: Dina Suehiro Jones
Date: Thu, 13 Jun 2019 16:24:02 -0700
Subject: [PATCH 50/62] Add arg validation for paths in generate_coco_records.py (#328)

---
 .../inference/generate_coco_records.py       | 28 ++++++++++++++++---
 1 file changed, 24 insertions(+), 4 deletions(-)

diff --git a/models/object_detection/tensorflow/ssd_vgg16/inference/generate_coco_records.py b/models/object_detection/tensorflow/ssd_vgg16/inference/generate_coco_records.py
index 6badc74a9..5cc72cf7a 100755
--- a/models/object_detection/tensorflow/ssd_vgg16/inference/generate_coco_records.py
+++ b/models/object_detection/tensorflow/ssd_vgg16/inference/generate_coco_records.py
@@ -135,16 +135,36 @@ def get_record(filename, buffer, width, height, bboxes, labels, label_names, dif
     return tf.train.Example(features = tf.train.Features(feature = features))
 
+def check_for_link(value):
+    """
+    Throws an error if the specified path is a link. os.islink returns
+    True for sym links.  For files, we also look at the number of links in
+    os.stat() to determine if it's a hard link.
+ """ + if os.path.islink(value) or \ + (os.path.isfile(value) and os.stat(value).st_nlink > 1): + raise argparse.ArgumentTypeError("{} cannot be a link.".format(value)) + +def check_valid_file_or_folder(value): + """verifies filename exists and isn't a link""" + if value is not None: + if not os.path.isfile(value) and not os.path.isdir(value): + raise argparse.ArgumentTypeError("{} does not exist or is not a file/folder.". + format(value)) + check_for_link(value) + return value + + def main(): RECORDS_PER_FILE = 1024 RECORD_FILENAME_FORMAT = '%s-%.5d-of-%.5d' parser = argparse.ArgumentParser() - parser.add_argument('--image_path', type = str, required = True, help = 'path to the input validation image files') - parser.add_argument('--annotations_file', type = str, required = True, help = 'name of the input validation annotations file') - parser.add_argument('--output_prefix', type = str, required = True, help = 'prefix of the output TensorFlow record files') - parser.add_argument('--output_path', type = str, required = True, help = 'path to the output TensorFlow record files') + parser.add_argument('--image_path', type=check_valid_file_or_folder, required=True, help='path to the input validation image files') + parser.add_argument('--annotations_file', type=check_valid_file_or_folder, required=True, help='name of the input validation annotations file') + parser.add_argument('--output_prefix', type=str, required=True, help='prefix of the output TensorFlow record files') + parser.add_argument('--output_path', type=check_valid_file_or_folder, required=True, help='path to the output TensorFlow record files') args = parser.parse_args() From f0aa7abab24ba07c21499f2f13d4fb4e1f2eb80b Mon Sep 17 00:00:00 2001 From: Dina Suehiro Jones Date: Fri, 14 Jun 2019 14:10:44 -0700 Subject: [PATCH 51/62] Specify scipy==1.2.1 for MaskRCNN (#329) --- benchmarks/common/tensorflow/start.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/benchmarks/common/tensorflow/start.sh b/benchmarks/common/tensorflow/start.sh index 7c37309d3..923cdecc8 100755 --- a/benchmarks/common/tensorflow/start.sh +++ b/benchmarks/common/tensorflow/start.sh @@ -443,6 +443,7 @@ function maskrcnn() { if [ ${NOINSTALL} != "True" ]; then # install dependencies pip3 install -r ${MOUNT_EXTERNAL_MODELS_SOURCE}/requirements.txt + pip3 install --force-reinstall scipy==1.2.1 # install cocoapi get_cocoapi ${MOUNT_EXTERNAL_MODELS_SOURCE}/coco ${MOUNT_EXTERNAL_MODELS_SOURCE}/samples/coco From c895e4752ac7cf37247cd54b834cdd143f3ea370 Mon Sep 17 00:00:00 2001 From: Melanie Buehler Date: Fri, 14 Jun 2019 14:17:14 -0700 Subject: [PATCH 52/62] Remove grpc package from tfserving dependencies (#330) --- benchmarks/common/tensorflow_serving/start.sh | 3 +-- docs/general/tensorflow_serving/InstallationGuide.md | 2 +- docs/image_recognition/tensorflow_serving/Tutorial.md | 7 ++----- docs/language_translation/tensorflow_serving/Tutorial.md | 4 ++-- docs/object_detection/tensorflow_serving/requirements.txt | 1 - 5 files changed, 6 insertions(+), 11 deletions(-) diff --git a/benchmarks/common/tensorflow_serving/start.sh b/benchmarks/common/tensorflow_serving/start.sh index e611cc931..5da3f31c5 100644 --- a/benchmarks/common/tensorflow_serving/start.sh +++ b/benchmarks/common/tensorflow_serving/start.sh @@ -103,8 +103,7 @@ function resnet50_or_inceptionv3(){ virtualenv venv source venv/bin/activate - pip install grpc \ - requests \ + pip install requests \ intel-tensorflow \ tensorflow-serving-api diff --git 
a/docs/general/tensorflow_serving/InstallationGuide.md b/docs/general/tensorflow_serving/InstallationGuide.md index 60ffcede1..0aa6a03b7 100644 --- a/docs/general/tensorflow_serving/InstallationGuide.md +++ b/docs/general/tensorflow_serving/InstallationGuide.md @@ -260,7 +260,7 @@ $ curl -s http://download.tensorflow.org/models/official/20181001_resnet/savedmo $ cd ~ $ virtualenv tfserving_venv $ source tfserving_venv/bin/activate - (tfserving_venv)$ pip install grpc requests tensorflow tensorflow-serving-api + (tfserving_venv)$ pip install requests tensorflow tensorflow-serving-api ``` * Run the example `resnet_client_grpc.py` script from the TensorFlow Serving repository, which you cloned earlier. ``` diff --git a/docs/image_recognition/tensorflow_serving/Tutorial.md b/docs/image_recognition/tensorflow_serving/Tutorial.md index 71f94f76c..e5e9b0153 100644 --- a/docs/image_recognition/tensorflow_serving/Tutorial.md +++ b/docs/image_recognition/tensorflow_serving/Tutorial.md @@ -63,13 +63,10 @@ For steps 1 and 2, refer to the Intel Model Zoo READMEs: $ pip install virtualenv $ virtualenv venv ``` - Then activate the virtual environment and install `grpc`, `requests`, `tensorflow`, and `tensorflow-serving-api` (at the time of this writing, the order of installation matters): + Then activate the virtual environment and install `requests`, `tensorflow`, and `tensorflow-serving-api`: ``` $ source venv/bin/activate - (venv)$ pip install grpc - (venv)$ pip install requests - (venv)$ pip install intel-tensorflow - (venv)$ pip install tensorflow-serving-api + (venv)$ pip install requests intel-tensorflow tensorflow-serving-api ``` 5. **Create a SavedModel**: Using the conversion script `model_graph_to_saved_model.py`, convert the pre-trained model graph to a SavedModel. (For ResNet50, substitute the name of the ResNet50 FP32 or the ResNet50 Int8 pre-trained model.) diff --git a/docs/language_translation/tensorflow_serving/Tutorial.md b/docs/language_translation/tensorflow_serving/Tutorial.md index 1d8ebff71..c584495c1 100644 --- a/docs/language_translation/tensorflow_serving/Tutorial.md +++ b/docs/language_translation/tensorflow_serving/Tutorial.md @@ -76,13 +76,13 @@ Tuning TensorFlow Serving to take full advantage of your hardware for language t sudo apt-get install -y python python-pip virtualenv ``` - - Create and activate the python virtual environment in your home directory and install the `grpc`, `tensorflow`, `pandas`, and `tensorflow-serving-api` packages. + - Create and activate the python virtual environment in your home directory and install the `tensorflow`, `pandas`, and `tensorflow-serving-api` packages. ``` cd ~ virtualenv lt_venv source lt_venv/bin/activate - pip install grpc intel-tensorflow pandas tensorflow-serving-api + pip install intel-tensorflow pandas tensorflow-serving-api ``` 4. 
**Download the pre-trained model and test data**: Download and extract the packaged pre-trained model and dataset ```transformer_lt_official_fp32_pretrained_model.tar.gz``` diff --git a/docs/object_detection/tensorflow_serving/requirements.txt b/docs/object_detection/tensorflow_serving/requirements.txt index c4c13fc19..1e77692c2 100644 --- a/docs/object_detection/tensorflow_serving/requirements.txt +++ b/docs/object_detection/tensorflow_serving/requirements.txt @@ -2,7 +2,6 @@ requests # grpc -grpc intel-tensorflow tensorflow-serving-api From fc16c086dd303cdda99fafdd0cc0a65de29601e1 Mon Sep 17 00:00:00 2001 From: Wafaa Taie Date: Tue, 18 Jun 2019 10:05:16 -0700 Subject: [PATCH 53/62] fix the path to the calibration script for resnet101 int8. (#332) --- .../tensorflow/resnet101/inference/int8/model_init.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmarks/image_recognition/tensorflow/resnet101/inference/int8/model_init.py b/benchmarks/image_recognition/tensorflow/resnet101/inference/int8/model_init.py index a53cf6884..36a9f479a 100644 --- a/benchmarks/image_recognition/tensorflow/resnet101/inference/int8/model_init.py +++ b/benchmarks/image_recognition/tensorflow/resnet101/inference/int8/model_init.py @@ -101,7 +101,7 @@ def run_benchmark_or_accuracy(self): self.run_command(cmd) def run_calibration(self): - calibration_script = os.path.join(self.args.intelai_models, self.args.mode, + calibration_script = os.path.join(self.args.intelai_models, self.args.precision, "calibration.py") script_args_list = [ "input_graph", "data_location", From d6c0cb89e0302edd38f686f545fec4843192f4bc Mon Sep 17 00:00:00 2001 From: Melanie Buehler Date: Tue, 18 Jun 2019 15:58:44 -0700 Subject: [PATCH 54/62] NCF doc hotfix (#334) * Update NCF doc for TF models code modification * Creates a backup and adds *.bak to gitignore --- .gitignore | 1 + benchmarks/recommendation/tensorflow/ncf/README.md | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.gitignore b/.gitignore index 82da18448..b3e91143c 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,4 @@ .coverage .tox test_data/ +*.bak diff --git a/benchmarks/recommendation/tensorflow/ncf/README.md b/benchmarks/recommendation/tensorflow/ncf/README.md index ccde269ff..53a21a3b3 100644 --- a/benchmarks/recommendation/tensorflow/ncf/README.md +++ b/benchmarks/recommendation/tensorflow/ncf/README.md @@ -14,13 +14,13 @@ This model uses official tensorflow models repo, where [ncf](https://github.com/ model automatically downloads movielens ml-1m dataset as default if the `--data-location` flag is not set. If you want to download movielens 1M dataset and provide that path to `--data-location`, check this [reference](https://grouplens.org/datasets/movielens/1m/) -2. Clone the official `tensorflow/models` repository with tag `v1.11` +2. Clone the official `tensorflow/models` repository with tag `v1.11` and make a small change to `data_async_generation.py`, commenting out a line that causes a crash in the model script. ``` $ git clone https://github.com/tensorflow/models.git $ cd models $ git checkout v1.11 -$ pwd +$ sed -i.bak 's/atexit.register/# atexit.register/g' official/recommendation/data_async_generation.py ``` 3. 
Now clone `IntelAI/models` repository and then navigate to the `benchmarks` folder: From 2f46653463edab0935f713ea451d98c8444bb525 Mon Sep 17 00:00:00 2001 From: wenxizhu Date: Sat, 22 Jun 2019 02:35:00 +0800 Subject: [PATCH 55/62] BKC for mobilenet-v1 int8 inference (#333) * Add affinity and config setting to get better performance on mobilenet-v1. * Remove redundant config settings for mobilenet_v1 inference. --- .../mobilenet_v1/inference/int8/benchmark.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/benchmark.py b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/benchmark.py index 7cccb9f23..0e7a41f31 100644 --- a/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/benchmark.py +++ b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/benchmark.py @@ -113,18 +113,19 @@ def load_graph(model_file): name='synthetic_images') image_data = None + + config = tf.ConfigProto() + config.inter_op_parallelism_threads = num_inter_threads + config.intra_op_parallelism_threads = num_intra_threads + config.use_per_session_threads = True + with tf.Session() as sess: image_data = sess.run(images) - graph = load_graph(model_file) input_tensor = graph.get_tensor_by_name(input_layer + ":0"); output_tensor = graph.get_tensor_by_name(output_layer + ":0"); - config = tf.ConfigProto() - config.inter_op_parallelism_threads = num_inter_threads - config.intra_op_parallelism_threads = num_intra_threads - with tf.Session(graph=graph, config=config) as sess: sys.stdout.flush() print("[Running warmup steps...]") @@ -134,7 +135,7 @@ def load_graph(model_file): elapsed_time = time.time() - start_time if((t+1) % 10 == 0): print("steps = {0}, {1} images/sec" - "".format(t+1, batch_size/elapsed_time)) + "".format(t+1, batch_size/elapsed_time), flush=True) print("[Running benchmark steps...]") total_time = 0; @@ -145,4 +146,4 @@ def load_graph(model_file): elapsed_time = time.time() - start_time if((t+1) % 10 == 0): print("steps = {0}, {1} images/sec" - "".format(t+1, batch_size/elapsed_time)); + "".format(t+1, batch_size/elapsed_time), flush=True); From 41977d78058318a69fe71395e0574861f44371ff Mon Sep 17 00:00:00 2001 From: Jitendra Patil Date: Fri, 21 Jun 2019 12:42:11 -0700 Subject: [PATCH 56/62] TF Serving: tf version fix (#337) * updated order of intel-tensorflow & tensorflow-serving-api installation. --- benchmarks/common/tensorflow_serving/start.sh | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/benchmarks/common/tensorflow_serving/start.sh b/benchmarks/common/tensorflow_serving/start.sh index 5da3f31c5..b27e40ed5 100644 --- a/benchmarks/common/tensorflow_serving/start.sh +++ b/benchmarks/common/tensorflow_serving/start.sh @@ -103,10 +103,12 @@ function resnet50_or_inceptionv3(){ virtualenv venv source venv/bin/activate - pip install requests \ - intel-tensorflow \ - tensorflow-serving-api - + # Make sure intel-tensorflow is after tensorflow-serving-api, so that + # tensorflow from intel-tensorflow gets installed effectively.
+ pip install grpc \ + requests \ + tensorflow-serving-api \ + intel-tensorflow # cd to image recognition tfserving scripts cd ${WORKSPACE}/../../${USE_CASE}/${FRAMEWORK}/${MODEL_NAME}/${MODE}/${PRECISION} From 6a13ce8cf368f7a63aed661f63ed68b8dab2789c Mon Sep 17 00:00:00 2001 From: Abolfazl Shahbazi <12436063+ashahba@users.noreply.github.com> Date: Mon, 24 Jun 2019 14:57:19 -0700 Subject: [PATCH 57/62] Install the development package for google-perftools (#338) * Install the development package for google-perftools * Creating symlinks for /usr/lib/libtcmalloc.so if doesn't exist --- benchmarks/common/tensorflow/start.sh | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/benchmarks/common/tensorflow/start.sh b/benchmarks/common/tensorflow/start.sh index 923cdecc8..d81504d83 100755 --- a/benchmarks/common/tensorflow/start.sh +++ b/benchmarks/common/tensorflow/start.sh @@ -67,9 +67,15 @@ if [[ ${NOINSTALL} != "True" ]]; then pip install --upgrade pip pip install requests - # install google-perftools for tcmalloc + # install libgoogle-perftools-dev for tcmalloc if [[ ${DISABLE_TCMALLOC} != "True" ]]; then - apt-get install google-perftools -y + apt-get install --no-install-recommends --fix-missing google-perftools -y + if [ ! -f /usr/lib/libtcmalloc.so ]; then + apt-get install --no-install-recommends --fix-missing libgoogle-perftools-dev -y + if [ ! -f /usr/lib/libtcmalloc.so ]; then + ln -sf /usr/lib/x86_64-linux-gnu/libtcmalloc.so /usr/lib/libtcmalloc.so + fi + fi fi fi @@ -827,7 +833,13 @@ function wide_deep_large_ds() { if [[ -z "${LIBTCMALLOC}" ]]; then echo "libtcmalloc.so.4 not found, trying to install" apt-get update - apt-get install google-perftools --fix-missing -y + apt-get install --no-install-recommends --fix-missing google-perftools -y + if [ ! -f /usr/lib/libtcmalloc.so ]; then + apt-get install --no-install-recommends --fix-missing libgoogle-perftools-dev -y + if [ ! 
-f /usr/lib/libtcmalloc.so ]; then + ln -sf /usr/lib/x86_64-linux-gnu/libtcmalloc.so /usr/lib/libtcmalloc.so + fi + fi fi LIBTCMALLOC="$(ldconfig -p | grep $TCMALLOC_LIB | tr ' ' '\n' | grep /)" From fba107ab1ac1e81b654cd82943dccdd556a09138 Mon Sep 17 00:00:00 2001 From: Dina Suehiro Jones Date: Tue, 25 Jun 2019 15:51:01 -0700 Subject: [PATCH 58/62] Update TF image tag and updates due to using a non-dev container (#339) --- .../tensorflow/dcgan/README.md | 2 +- benchmarks/common/tensorflow/start.sh | 7 ++++++- .../content_creation/tensorflow/draw/README.md | 4 ++-- .../tensorflow/facenet/README.md | 6 +++--- .../tensorflow/mtcc/README.md | 2 +- .../tensorflow/densenet169/README.md | 6 +++--- .../tensorflow/inception_resnet_v2/README.md | 12 ++++++------ .../tensorflow/inceptionv3/README.md | 16 ++++++++-------- .../tensorflow/inceptionv4/README.md | 12 ++++++------ .../tensorflow/mobilenet_v1/README.md | 12 ++++++------ .../tensorflow/resnet101/README.md | 14 +++++++------- .../tensorflow/resnet50/README.md | 12 ++++++------ .../tensorflow/resnet50v1_5/README.md | 14 +++++++------- .../tensorflow/squeezenet/README.md | 4 ++-- .../tensorflow/maskrcnn/README.md | 2 +- .../image_segmentation/tensorflow/unet/README.md | 2 +- .../language_modeling/tensorflow/lm-1b/README.md | 4 ++-- .../tensorflow/gnmt/README.md | 4 ++-- .../tensorflow/transformer_language/README.md | 4 ++-- .../tensorflow/transformer_lt_official/README.md | 4 ++-- .../tensorflow/faster_rcnn/README.md | 8 ++++---- .../object_detection/tensorflow/rfcn/README.md | 8 ++++---- .../tensorflow/ssd-mobilenet/README.md | 8 ++++---- .../tensorflow/ssd-resnet34/README.md | 8 ++++---- .../tensorflow/ssd_vgg16/README.md | 8 ++++---- .../recommendation/tensorflow/ncf/README.md | 6 +++--- .../tensorflow/wide_deep/README.md | 4 ++-- .../tensorflow/wide_deep_large_ds/README.md | 12 ++++++------ .../text_to_speech/tensorflow/wavenet/README.md | 2 +- docs/general/tensorflow/LaunchBenchmark.md | 4 ++-- 30 files changed, 108 insertions(+), 103 deletions(-) diff --git a/benchmarks/adversarial_networks/tensorflow/dcgan/README.md b/benchmarks/adversarial_networks/tensorflow/dcgan/README.md index 688844ae0..e6f572916 100644 --- a/benchmarks/adversarial_networks/tensorflow/dcgan/README.md +++ b/benchmarks/adversarial_networks/tensorflow/dcgan/README.md @@ -60,7 +60,7 @@ $ python launch_benchmark.py \ --socket-id 0 \ --checkpoint /home//dcgan_fp32_unconditional_cifar10_pretrained_model \ --data-location /home//cifar10 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 ``` 5. Log files are located at the value of `--output-dir`. diff --git a/benchmarks/common/tensorflow/start.sh b/benchmarks/common/tensorflow/start.sh index d81504d83..4eeda5648 100755 --- a/benchmarks/common/tensorflow/start.sh +++ b/benchmarks/common/tensorflow/start.sh @@ -196,7 +196,7 @@ function install_protoc() { if [ ! 
-f "bin/protoc" ]; then install_location=$1 echo "protoc not found, installing protoc from ${install_location}" - apt-get -y install wget + apt-get -y install wget unzip wget -O protobuf.zip ${install_location} unzip -o protobuf.zip rm protobuf.zip @@ -642,6 +642,7 @@ function ssd-resnet34() { do pip install $line done + apt install -y git-all old_dir=${PWD} cd /tmp git clone --single-branch https://github.com/tensorflow/benchmarks.git @@ -762,6 +763,10 @@ function transformer_lt_official() { exit 1 fi + if [ ${NOINSTALL} != "True" ]; then + pip install pandas + fi + cp ${MOUNT_INTELAI_MODELS_SOURCE}/${MODE}/${PRECISION}/infer_ab.py \ ${MOUNT_EXTERNAL_MODELS_SOURCE}/official/transformer/infer_ab.py diff --git a/benchmarks/content_creation/tensorflow/draw/README.md b/benchmarks/content_creation/tensorflow/draw/README.md index 310b789ac..069dfd7a8 100644 --- a/benchmarks/content_creation/tensorflow/draw/README.md +++ b/benchmarks/content_creation/tensorflow/draw/README.md @@ -48,7 +48,7 @@ modes/precisions: --model-name draw \ --mode inference \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14-py3 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3 \ --checkpoint /home//draw_fp32_pretrained_model \ --data-location /home//mnist \ --batch-size 1 \ @@ -61,7 +61,7 @@ modes/precisions: --model-name draw \ --mode inference \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14-py3 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3 \ --checkpoint /home//draw_fp32_pretrained_model \ --data-location /home//mnist \ --batch-size 100 \ diff --git a/benchmarks/face_detection_and_alignment/tensorflow/facenet/README.md b/benchmarks/face_detection_and_alignment/tensorflow/facenet/README.md index 5d8bef1dc..4a5322bad 100644 --- a/benchmarks/face_detection_and_alignment/tensorflow/facenet/README.md +++ b/benchmarks/face_detection_and_alignment/tensorflow/facenet/README.md @@ -59,7 +59,7 @@ python launch_benchmark.py \ --checkpoint /home//checkpoints \ --data-location /home//dataset \ --model-source-dir /home//facenet/ \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 ``` Example log tail for online inference: ``` @@ -94,7 +94,7 @@ python launch_benchmark.py \ --checkpoint /home//checkpoints \ --data-location /home//dataset \ --model-source-dir /home//facenet/ \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 ``` Example log tail for batch inference: ``` @@ -126,7 +126,7 @@ python launch_benchmark.py \ --checkpoint /home//checkpoints \ --data-location /home//dataset \ --model-source-dir /home//facenet/ \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 ``` Example log tail for accuracy: ``` diff --git a/benchmarks/face_detection_and_alignment/tensorflow/mtcc/README.md b/benchmarks/face_detection_and_alignment/tensorflow/mtcc/README.md index 84017e33b..a79584c36 100644 --- a/benchmarks/face_detection_and_alignment/tensorflow/mtcc/README.md +++ b/benchmarks/face_detection_and_alignment/tensorflow/mtcc/README.md @@ -55,7 +55,7 @@ Run: --mode inference \ --socket-id 0 \ --checkpoint /home//MTCNN_model \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 ``` 6. The log file is saved to the value of `--output-dir`. 
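As an aside on the tcmalloc handling in the previous patch: the shell pipeline `ldconfig -p | grep $TCMALLOC_LIB | tr ' ' '\n' | grep /` simply extracts the library's path from the loader cache. The Python sketch below mirrors that lookup for illustration only; it is not part of any patch and assumes `ldconfig` is available on the PATH.

```python
import subprocess

def find_library(name="libtcmalloc.so.4"):
    """Return the first absolute path ldconfig reports for `name`, or None."""
    listing = subprocess.check_output(["ldconfig", "-p"], text=True)
    for line in listing.splitlines():
        # Entries look like: "libtcmalloc.so.4 (libc6,x86-64) => /usr/lib/..."
        if name in line:
            for token in line.split():
                if token.startswith("/"):
                    return token
    return None

if __name__ == "__main__":
    path = find_library()
    print(path if path else "libtcmalloc.so.4 not found")
```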
diff --git a/benchmarks/image_recognition/tensorflow/densenet169/README.md b/benchmarks/image_recognition/tensorflow/densenet169/README.md index b1ecd8832..8f4a0b3f2 100644 --- a/benchmarks/image_recognition/tensorflow/densenet169/README.md +++ b/benchmarks/image_recognition/tensorflow/densenet169/README.md @@ -58,7 +58,7 @@ following modes/precisions: --batch-size 100 \ --socket-id 0 \ --in-graph /home//densenet169_fp32_pretrained_model.pb \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ -- input_height=224 input_width=224 warmup_steps=20 steps=100 \ input_layer="input" output_layer="densenet169/predictions/Reshape_1" ``` @@ -74,7 +74,7 @@ following modes/precisions: --batch-size 1 \ --socket-id 0 \ --in-graph /home//densenet169_fp32_pretrained_model.pb \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ -- input_height=224 input_width=224 warmup_steps=20 steps=100 \ input_layer="input" output_layer="densenet169/predictions/Reshape_1" ``` @@ -91,7 +91,7 @@ following modes/precisions: --batch-size 100 \ --socket-id 0 \ --in-graph /home//densenet169_fp32_pretrained_model.pb \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --data-location /home//imagenet_validation_dataset \ -- input_height=224 input_width=224 \ input_layer="input" output_layer="densenet169/predictions/Reshape_1" diff --git a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md index 7d62cdf10..034651a7f 100644 --- a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md +++ b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md @@ -84,7 +84,7 @@ python launch_benchmark.py \ --framework tensorflow \ --accuracy-only \ --batch-size 100 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph /home//inception_resnet_v2_int8_pretrained_model.pb \ --data-location /home//datasets/ImageNet_TFRecords ``` @@ -100,7 +100,7 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph /home//inception_resnet_v2_int8_pretrained_model.pb ``` @@ -115,7 +115,7 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 128 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph /home//inception_resnet_v2_int8_pretrained_model.pb ``` @@ -242,7 +242,7 @@ python launch_benchmark.py \ --framework tensorflow \ --accuracy-only \ --batch-size 100 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph /home//inception_resnet_v2_fp32_pretrained_model.pb \ --data-location /home//datasets/ImageNet_TFRecords ``` @@ -259,7 +259,7 @@ python launch_benchmark.py \ --batch-size 1 \ --socket-id 0 \ --in-graph /home//inception_resnet_v2_fp32_pretrained_model.pb \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 ``` For batch inference (using `--benchmark-only`, `--socket-id 0` and `--batch-size 128`): @@ -274,7 +274,7 @@ python 
launch_benchmark.py \ --batch-size 128 \ --socket-id 0 \ --in-graph /home//inception_resnet_v2_fp32_pretrained_model.pb \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 ``` Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands diff --git a/benchmarks/image_recognition/tensorflow/inceptionv3/README.md b/benchmarks/image_recognition/tensorflow/inceptionv3/README.md index 64f0209ef..adaedef36 100644 --- a/benchmarks/image_recognition/tensorflow/inceptionv3/README.md +++ b/benchmarks/image_recognition/tensorflow/inceptionv3/README.md @@ -97,7 +97,7 @@ python launch_benchmark.py \ --framework tensorflow \ --accuracy-only \ --batch-size 100 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph /home//inceptionv3_int8_pretrained_model.pb \ --data-location /home//datasets/ImageNet_TFRecords ``` @@ -118,7 +118,7 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph /home//inceptionv3_int8_pretrained_model.pb \ --data-location /home//datasets/ImageNet_TFRecords \ -- warmup_steps=50 steps=500 @@ -135,7 +135,7 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph /home//inceptionv3_int8_pretrained_model.pb \ -- warmup_steps=50 steps=500 ``` @@ -151,7 +151,7 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 128 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph /home//inceptionv3_int8_pretrained_model.pb \ --data-location /home//datasets/ImageNet_TFRecords \ -- warmup_steps=50 steps=500 @@ -168,7 +168,7 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 128 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph /home//inceptionv3_int8_pretrained_model.pb \ -- warmup_steps=50 steps=500 ``` @@ -258,7 +258,7 @@ python launch_benchmark.py \ --framework tensorflow \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph /home//inceptionv3_fp32_pretrained_model.pb ``` Example log tail when running for online inference: @@ -289,7 +289,7 @@ python launch_benchmark.py \ --framework tensorflow \ --batch-size 128 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph /home//inceptionv3_fp32_pretrained_model.pb ``` Example log tail when running for batch inference: @@ -321,7 +321,7 @@ python launch_benchmark.py \ --accuracy-only \ --batch-size 100 \ --data-location /dataset/Imagenet_Validation \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph /home//inceptionv3_fp32_pretrained_model.pb ``` Example log tail when running for accuracy: diff --git a/benchmarks/image_recognition/tensorflow/inceptionv4/README.md b/benchmarks/image_recognition/tensorflow/inceptionv4/README.md index 
7084202c0..31b3ba91b 100644 --- a/benchmarks/image_recognition/tensorflow/inceptionv4/README.md +++ b/benchmarks/image_recognition/tensorflow/inceptionv4/README.md @@ -56,7 +56,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. --accuracy-only \ --batch-size 100 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph /home//inceptionv4_int8_pretrained_model.pb \ --data-location /home//ImageNet_TFRecords ``` @@ -71,7 +71,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. --benchmark-only \ --batch-size 240 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph /home//inceptionv4_int8_pretrained_model.pb ``` @@ -85,7 +85,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph /home//inceptionv4_int8_pretrained_model.pb ``` @@ -185,7 +185,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. --accuracy-only \ --batch-size 100 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph /home//inceptionv4_fp32_pretrained_model.pb \ --data-location /home//ImageNet_TFRecords ``` @@ -200,7 +200,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. --benchmark-only \ --batch-size 240 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph /home//inceptionv4_fp32_pretrained_model.pb ``` @@ -214,7 +214,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. 
--benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph /home//inceptionv4_fp32_pretrained_model.pb ``` diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md index 694a3f575..0c7295244 100644 --- a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md +++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md @@ -67,7 +67,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/mobilene --batch-size 240 \ --socket-id 0 \ --in-graph /home//mobilenetv1_int8_pretrained_model.pb \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ -- input_height=224 input_width=224 warmup_steps=10 steps=50 \ input_layer="input" output_layer="MobilenetV1/Predictions/Reshape_1" ``` @@ -83,7 +83,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/mobilene --batch-size 1 \ --socket-id 0 \ --in-graph /home//mobilenetv1_int8_pretrained_model.pb \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ -- input_height=224 input_width=224 warmup_steps=10 steps=50 \ input_layer="input" output_layer="MobilenetV1/Predictions/Reshape_1" ``` @@ -100,7 +100,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/mobilene --batch-size 100 \ --socket-id 0 \ --in-graph /home//mobilenetv1_int8_pretrained_model.pb \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --data-location /home//imagenet_validation_dataset \ -- input_height=224 input_width=224 \ input_layer="input" output_layer="MobilenetV1/Predictions/Reshape_1" @@ -216,7 +216,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/mobilene --model-name mobilenet_v1 \ --mode inference \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --model-source-dir /home//tensorflow/models \ --batch-size 1 \ --socket-id 0 \ @@ -234,7 +234,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/mobilene --model-name mobilenet_v1 \ --mode inference \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --model-source-dir /home//tensorflow/models \ --batch-size 100 \ --socket-id 0 \ @@ -248,7 +248,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/mobilene --model-name mobilenet_v1 \ --mode inference \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --model-source-dir /home//tensorflow/models \ --batch-size 100 \ --accuracy-only \ diff --git a/benchmarks/image_recognition/tensorflow/resnet101/README.md b/benchmarks/image_recognition/tensorflow/resnet101/README.md index 9ab36ebc7..a39daaf70 100644 --- a/benchmarks/image_recognition/tensorflow/resnet101/README.md +++ b/benchmarks/image_recognition/tensorflow/resnet101/README.md @@ -85,7 +85,7 @@ $ python launch_benchmark.py \ --framework tensorflow \ --accuracy-only \ --batch-size 100 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image 
intelaipg/intel-optimized-tensorflow:1.14.0 \ --data-location /home//dataset/FullImageNetData_directory \ --in-graph=/home//resnet101_int8_pretrained_model.pb ``` @@ -106,7 +106,7 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph=/home//resnet101_int8_pretrained_model.pb \ -- warmup_steps=50 steps=500 ``` @@ -123,7 +123,7 @@ python launch_benchmark.py \ --batch-size 1 \ --socket-id 0 \ --data-location /home//dataset/FullImageNetData_directory \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph=/home//resnet101_int8_pretrained_model.pb \ -- warmup_steps=50 steps=500 ``` @@ -139,7 +139,7 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 128 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph=/home//resnet101_int8_pretrained_model.pb \ -- warmup_steps=50 steps=500 ``` @@ -156,7 +156,7 @@ python launch_benchmark.py \ --batch-size 128 \ --data-location /home//dataset/FullImageNetData_directory \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph=/home//resnet101_int8_pretrained_model.pb \ -- warmup_steps=50 steps=500 ``` @@ -250,7 +250,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/resnet10 --mode inference \ --model-name resnet101 \ --batch-size 128 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph /home//trained_models/resnet101_fp32_pretrained_model.pb \ --socket-id 0 ``` @@ -277,7 +277,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/resnet10 --mode inference \ --model-name resnet101 \ --batch-size 100 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph /home//trained_models/resnet101_fp32_pretrained_model.pb \ --data-location /home//imagenet_validation_dataset \ --accuracy-only \ diff --git a/benchmarks/image_recognition/tensorflow/resnet50/README.md b/benchmarks/image_recognition/tensorflow/resnet50/README.md index db5ca2f58..751f15e98 100644 --- a/benchmarks/image_recognition/tensorflow/resnet50/README.md +++ b/benchmarks/image_recognition/tensorflow/resnet50/README.md @@ -58,7 +58,7 @@ $ python launch_benchmark.py \ --mode inference \ --batch-size=100 \ --accuracy-only \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 ``` The log file is saved to the value of `--output-dir`. @@ -99,7 +99,7 @@ $ python launch_benchmark.py \ --mode inference \ --batch-size=128 \ --benchmark-only \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 -- warmup_steps=50 steps=500 ``` The tail of the log output when the script completes should look @@ -160,7 +160,7 @@ $ python launch_benchmark.py \ --mode inference \ --batch-size=1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 ``` The log file is saved to the value of `--output-dir`. 
@@ -195,7 +195,7 @@ $ python launch_benchmark.py \ --mode inference \ --batch-size=128 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 ``` The log file is saved to the value of `--output-dir`. @@ -233,7 +233,7 @@ $ python launch_benchmark.py \ --batch-size 100 \ --socket-id 0 \ --data-location /home//dataset/ImageNetData_directory \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 ``` The log file is saved to the value of `--output-dir`. @@ -267,7 +267,7 @@ $ python launch_benchmark.py \ --batch-size 100 \ --socket-id 0 \ --data-location /home//dataset/ImageNetData_directory \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 ``` The results file will be written to the `models/benchmarks/common/tensorflow/logs` directory, unless another diff --git a/benchmarks/image_recognition/tensorflow/resnet50v1_5/README.md b/benchmarks/image_recognition/tensorflow/resnet50v1_5/README.md index 2a13913d9..314a65dcc 100644 --- a/benchmarks/image_recognition/tensorflow/resnet50v1_5/README.md +++ b/benchmarks/image_recognition/tensorflow/resnet50v1_5/README.md @@ -42,7 +42,7 @@ $ git clone https://github.com/IntelAI/models.git The optimized ResNet50v1.5 model files are attached to the [intelai/models](https://github.com/intelai/models) repo and located at `models/models/image_recognition/tensorflow/resnet50v1_5/`. - The docker image (`intelaipg/intel-optimized-tensorflow:1.14`) + The docker image (`intelaipg/intel-optimized-tensorflow:1.14.0`) used in the commands above were built using [TensorFlow](git@github.com:tensorflow/tensorflow.git) master for TensorFlow version 1.14. @@ -61,7 +61,7 @@ $ python launch_benchmark.py \ --mode inference \ --batch-size=100 \ --accuracy-only \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 ``` The log file is saved to the value of `--output-dir`. @@ -100,7 +100,7 @@ $ python launch_benchmark.py \ --mode inference \ --batch-size=128 \ --benchmark-only \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 -- warmup_steps=50 steps=500 ``` The tail of the log output when the benchmarking completes should look @@ -159,7 +159,7 @@ $ python launch_benchmark.py \ --mode inference \ --batch-size=1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 ``` The log file is saved to the value of `--output-dir`. @@ -197,7 +197,7 @@ $ python launch_benchmark.py \ --mode inference \ --batch-size=128 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 ``` The log file is saved to the value of `--output-dir`. @@ -238,7 +238,7 @@ $ python launch_benchmark.py \ --batch-size 100 \ --socket-id 0 \ --data-location /home//dataset/ImageNetData_directory \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 ``` The log file is saved to the value of `--output-dir`. 
@@ -275,7 +275,7 @@ $ python launch_benchmark.py \ --batch-size 100 \ --socket-id 0 \ --data-location /home//dataset/ImageNetData_directory \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 ``` The results file will be written to the `models/benchmarks/common/tensorflow/logs` directory, unless another diff --git a/benchmarks/image_recognition/tensorflow/squeezenet/README.md b/benchmarks/image_recognition/tensorflow/squeezenet/README.md index c6118e89d..2c3a245f9 100644 --- a/benchmarks/image_recognition/tensorflow/squeezenet/README.md +++ b/benchmarks/image_recognition/tensorflow/squeezenet/README.md @@ -79,7 +79,7 @@ $ python launch_benchmark.py \ --framework tensorflow \ --socket-id 0 \ --batch-size 64 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --checkpoint /home//squeezenet_checkpoints \ --data-location /home//datasets/ImageNet_TFRecords ``` @@ -94,7 +94,7 @@ $ python launch_benchmark.py \ --framework tensorflow \ --socket-id 0 \ --batch-size 1 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --checkpoint /home//squeezenet_checkpoints \ --data-location /home//datasets/ImageNet_TFRecords ``` diff --git a/benchmarks/image_segmentation/tensorflow/maskrcnn/README.md b/benchmarks/image_segmentation/tensorflow/maskrcnn/README.md index f7058bc90..938dcb634 100644 --- a/benchmarks/image_segmentation/tensorflow/maskrcnn/README.md +++ b/benchmarks/image_segmentation/tensorflow/maskrcnn/README.md @@ -61,7 +61,7 @@ $ python launch_benchmark.py \ --batch-size 1 \ --socket-id 0 \ --data-location /home//COCO2014 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14-py3 + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3 ``` 5. Log files are located at the value of `--output-dir`. 
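Several of the README updates above pass `warmup_steps` and `steps` to the benchmarking scripts. The hedged sketch below illustrates the underlying pattern: run some untimed warmup iterations, then average images/sec over the timed ones. Here `run_step` is a hypothetical stand-in for a real `sess.run` call, not code from any of these models.

```python
import time

def measure_throughput(run_step, batch_size, steps=500, warmup_steps=50):
    """Run warmup_steps untimed iterations, then report images/sec over `steps`."""
    for _ in range(warmup_steps):
        run_step()
    start_time = time.time()
    for _ in range(steps):
        run_step()
    elapsed_time = time.time() - start_time
    return steps * batch_size / elapsed_time

# Example with a dummy step standing in for a real inference call.
print("%.1f images/sec" % measure_throughput(lambda: time.sleep(0.001), batch_size=128))
```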
diff --git a/benchmarks/image_segmentation/tensorflow/unet/README.md b/benchmarks/image_segmentation/tensorflow/unet/README.md index 9d7f9dbdb..e91d2af2a 100644 --- a/benchmarks/image_segmentation/tensorflow/unet/README.md +++ b/benchmarks/image_segmentation/tensorflow/unet/README.md @@ -57,7 +57,7 @@ modes/precisions: --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --checkpoint /home//unet_trained \ --model-source-dir /home//tf_unet \ -- checkpoint_name=model.cpkt diff --git a/benchmarks/language_modeling/tensorflow/lm-1b/README.md b/benchmarks/language_modeling/tensorflow/lm-1b/README.md index ec4bdcf47..871660d67 100644 --- a/benchmarks/language_modeling/tensorflow/lm-1b/README.md +++ b/benchmarks/language_modeling/tensorflow/lm-1b/README.md @@ -53,7 +53,7 @@ python launch_benchmark.py \ --framework tensorflow \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --model-source-dir /inference/cloud/language_modeling ``` @@ -68,7 +68,7 @@ python launch_benchmark.py \ --framework tensorflow \ --batch-size 1024 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --model-source-dir /inference/cloud/language_modeling \ -- steps=4 \ ``` diff --git a/benchmarks/language_translation/tensorflow/gnmt/README.md b/benchmarks/language_translation/tensorflow/gnmt/README.md index 143daf45d..523965917 100644 --- a/benchmarks/language_translation/tensorflow/gnmt/README.md +++ b/benchmarks/language_translation/tensorflow/gnmt/README.md @@ -82,7 +82,7 @@ python launch_benchmark.py \ --socket-id 0 \ --checkpoint /home//gnmt_checkpoints \ --data-location /home//wmt16 \ ---docker-image intelaipg/intel-optimized-tensorflow:1.14 \ +--docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ -- infer_mode=beam_search ``` @@ -99,7 +99,7 @@ python launch_benchmark.py \ --socket-id 0 \ --checkpoint /home//gnmt_checkpoints \ --data-location /home//wmt16 \ ---docker-image intelaipg/intel-optimized-tensorflow:1.14 \ +--docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ -- infer_mode=beam_search ``` diff --git a/benchmarks/language_translation/tensorflow/transformer_language/README.md b/benchmarks/language_translation/tensorflow/transformer_language/README.md index 93bf84700..f4997711f 100644 --- a/benchmarks/language_translation/tensorflow/transformer_language/README.md +++ b/benchmarks/language_translation/tensorflow/transformer_language/README.md @@ -82,7 +82,7 @@ python launch_benchmark.py \ --framework tensorflow \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --checkpoint /home//transformer_lt_fp32_pretrained_model \ --data-location /home//t2t_data \ --model-source-dir /home//tensor2tensor/ \ @@ -99,7 +99,7 @@ python launch_benchmark.py \ --framework tensorflow \ --batch-size 32 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --checkpoint /home//transformer_lt_fp32_pretrained_model \ --data-location /home//t2t_data \ --model-source-dir /home//tensor2tensor/ \ diff --git a/benchmarks/language_translation/tensorflow/transformer_lt_official/README.md 
b/benchmarks/language_translation/tensorflow/transformer_lt_official/README.md index f0d79e4e3..f592cf832 100644 --- a/benchmarks/language_translation/tensorflow/transformer_lt_official/README.md +++ b/benchmarks/language_translation/tensorflow/transformer_lt_official/README.md @@ -65,7 +65,7 @@ python launch_benchmark.py \ --framework tensorflow \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --model-source-dir /home//tensorflow-models/models \ --in-graph /home//transformer_lt_official_fp32_pretrained_model/graph/fp32_graphdef.pb \ --data-location /home//transformer_lt_official_fp32_pretrained_model/data \ @@ -85,7 +85,7 @@ python launch_benchmark.py \ --framework tensorflow \ --batch-size 64 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --model-source-dir /home//tensorflow-models/models \ --in-graph /home//transformer_lt_official_fp32_pretrained_model/graph/fp32_graphdef.pb \ --data-location /home//transformer_lt_official_fp32_pretrained_model/data \ diff --git a/benchmarks/object_detection/tensorflow/faster_rcnn/README.md b/benchmarks/object_detection/tensorflow/faster_rcnn/README.md index 9528f4808..cd57419f1 100644 --- a/benchmarks/object_detection/tensorflow/faster_rcnn/README.md +++ b/benchmarks/object_detection/tensorflow/faster_rcnn/README.md @@ -156,7 +156,7 @@ $ python launch_benchmark.py \ --mode inference \ --socket-id 0 \ --checkpoint /home//faster_rcnn_resnet50_fp32_coco \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ -- config_file=pipeline.config ``` @@ -169,7 +169,7 @@ python launch_benchmark.py \ --mode inference \ --precision fp32 \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --model-source-dir /home//tensorflow/models \ --data-location /home//coco/output \ --in-graph /home//faster_rcnn_resnet50_fp32_coco/frozen_inference_graph.pb \ @@ -270,7 +270,7 @@ $ python launch_benchmark.py \ --mode inference \ --socket-id 0 \ --in-graph /home//faster_rcnn_int8_pretrained_model.pb \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --benchmark-only \ -- number_of_steps=5000 ``` @@ -285,7 +285,7 @@ python launch_benchmark.py \ --precision int8 \ --framework tensorflow \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --model-source-dir /home//tensorflow/models \ --data-location /home//output/coco_val.record \ --in-graph /home//faster_rcnn_int8_pretrained_model.pb \ diff --git a/benchmarks/object_detection/tensorflow/rfcn/README.md b/benchmarks/object_detection/tensorflow/rfcn/README.md index 85f2e2fbc..42c488ca4 100644 --- a/benchmarks/object_detection/tensorflow/rfcn/README.md +++ b/benchmarks/object_detection/tensorflow/rfcn/README.md @@ -139,7 +139,7 @@ python launch_benchmark.py \ --mode inference \ --precision int8 \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --model-source-dir /home//tensorflow/models \ --data-location /home//val/val2017 \ --in-graph /home//rfcn_resnet101_int8_coco_pretrained_model.pb \ @@ -156,7 +156,7 
+156,7 @@ python launch_benchmark.py \ --mode inference \ --precision int8 \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --model-source-dir /home//tensorflow/models \ --data-location /home//coco/output/coco_val.record-00000-of-00001 \ --in-graph /home//rfcn_resnet101_int8_coco_pretrained_model.pb \ @@ -331,7 +331,7 @@ $ python launch_benchmark.py \ --mode inference \ --socket-id 0 \ --checkpoint /home//rfcn_resnet101_fp32_coco \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ -- config_file=rfcn_pipeline.config ``` @@ -344,7 +344,7 @@ python launch_benchmark.py \ --mode inference \ --precision fp32 \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --model-source-dir /home//tensorflow/models \ --data-location /home//coco/output/coco_val.record \ --in-graph /home//rfcn_resnet101_fp32_coco/frozen_inference_graph.pb \ diff --git a/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md b/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md index fa395d5f9..35db5d14d 100644 --- a/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md +++ b/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md @@ -121,7 +121,7 @@ python launch_benchmark.py \ --precision int8 \ --framework tensorflow \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --model-source-dir /home//tensorflow/models \ --data-location /home//val/val2017 \ --in-graph /home//ssdmobilenet_int8_pretrained_model.pb \ @@ -138,7 +138,7 @@ python launch_benchmark.py \ --precision int8 \ --framework tensorflow \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --model-source-dir /home//tensorflow/models \ --data-location /home//coco/output/coco_val.record \ --in-graph /home//ssdmobilenet_int8_pretrained_model.pb \ @@ -349,7 +349,7 @@ $ python launch_benchmark.py \ --precision fp32 \ --mode inference \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --benchmark-only ``` @@ -368,7 +368,7 @@ $ python launch_benchmark.py \ --precision fp32 \ --mode inference \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --accuracy-only ``` diff --git a/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md b/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md index 3f2623389..2b5364ff9 100644 --- a/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md +++ b/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md @@ -134,7 +134,7 @@ $ python launch_benchmark.py \ --mode inference \ --socket-id 0 \ --batch-size=1 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14-py3 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3 \ --benchmark-only ``` @@ -154,7 +154,7 @@ $ python launch_benchmark.py \ --mode inference \ --socket-id 0 \ --batch-size=1 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14-py3 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3 \ --accuracy-only ``` @@ -310,7 +310,7 @@ $ python launch_benchmark.py \ --mode
inference \ --socket-id 0 \ --batch-size=1 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-prs-b5d67b7-devel-mkl-py3 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3 \ --benchmark-only ``` @@ -330,7 +330,7 @@ $ python launch_benchmark.py \ --mode inference \ --socket-id 0 \ --batch-size=1 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-prs-b5d67b7-devel-mkl-py3 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3 \ --accuracy-only ``` diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md b/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md index 8036419ba..6ecd4b646 100644 --- a/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md +++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md @@ -103,7 +103,7 @@ python launch_benchmark.py \ --mode inference \ --precision int8 \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14-py3 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3 \ --model-source-dir /home//SSD.TensorFlow \ --data-location /home//tf_records \ --in-graph /home//ssdvgg16_int8_pretrained_model.pb \ @@ -133,7 +133,7 @@ python launch_benchmark.py \ --mode inference \ --precision int8 \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14-py3 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3 \ --model-source-dir /home//SSD.TensorFlow \ --data-location /home//tf_records \ --in-graph /home//ssdvgg16_int8_pretrained_model.pb \ @@ -210,7 +210,7 @@ $ python launch_benchmark.py \ --framework tensorflow \ --precision fp32 \ --mode inference \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14-py3 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3 \ --batch-size 1 \ --socket-id 0 \ --num-inter-threads 11 \ @@ -237,7 +237,7 @@ python launch_benchmark.py \ --mode inference \ --precision fp32 \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14-py3 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3 \ --model-source-dir /home//SSD.TensorFlow \ --data-location /home//tf_records \ --in-graph /home//ssdvgg16_fp32_pretrained_model.pb \ diff --git a/benchmarks/recommendation/tensorflow/ncf/README.md b/benchmarks/recommendation/tensorflow/ncf/README.md index 53a21a3b3..2eccb84e7 100644 --- a/benchmarks/recommendation/tensorflow/ncf/README.md +++ b/benchmarks/recommendation/tensorflow/ncf/README.md @@ -53,7 +53,7 @@ $ python launch_benchmark.py \ --framework tensorflow \ --precision fp32 \ --mode inference \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 ``` The tail of the batch inference log looks as below. @@ -83,7 +83,7 @@ $ python launch_benchmark.py \ --framework tensorflow \ --precision fp32 \ --mode inference \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 ``` The tail of the online inference log looks as below. @@ -115,7 +115,7 @@ $ python launch_benchmark.py \ --framework tensorflow \ --precision fp32 \ --mode inference \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 ``` The tail of the accuracy log looks as below.
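Since this patch pins every `--docker-image` to an explicit `1.14.0` tag rather than a floating `latest`-style tag, a small pre-flight check can confirm the pinned tag has been pulled before a benchmark is launched. This is an illustrative sketch only, not part of the patch; `docker images -q` prints the image ID when the tag exists locally and nothing otherwise.

```python
import subprocess

def image_available(tag):
    """True if `tag` is already present in the local Docker image cache."""
    result = subprocess.run(["docker", "images", "-q", tag],
                            capture_output=True, text=True)
    return bool(result.stdout.strip())

if __name__ == "__main__":
    tag = "intelaipg/intel-optimized-tensorflow:1.14.0"
    print("%s: %s" % (tag, "present" if image_available(tag) else "not pulled yet"))
```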
diff --git a/benchmarks/recommendation/tensorflow/wide_deep/README.md b/benchmarks/recommendation/tensorflow/wide_deep/README.md index d092b8f22..8f28c0607 100644 --- a/benchmarks/recommendation/tensorflow/wide_deep/README.md +++ b/benchmarks/recommendation/tensorflow/wide_deep/README.md @@ -56,7 +56,7 @@ use in the next step. --batch-size 1 \ --data-location /home//widedeep_dataset \ --checkpoint /home//path/to/wide_deep_fp32_pretrained_model \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --verbose ``` * Running the model in batch inference mode, set `--batch-size` = `1024` @@ -72,7 +72,7 @@ use in the next step. --batch-size 1024 \ --data-location /home//path/to/dataset \ --checkpoint /home//path/to/wide_deep_fp32_pretrained_model \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --verbose ``` 6. The log file is saved to the value of `--output-dir`. diff --git a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/README.md b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/README.md index 61ff4cd48..19880cd2d 100755 --- a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/README.md +++ b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/README.md @@ -163,7 +163,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. ``` cd /home//models/benchmarks - python launch_benchmark.py + python launch_benchmark.py \ --model-name wide_deep_large_ds \ --precision fp32 \ --mode inference \ @@ -171,7 +171,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. --batch-size 1000 \ --socket-id 0 \ --accuracy-only \ - --docker-image docker.io/intelaipg/intel-optimized-tensorflow:nightly-latestprs-bdw \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph /root/user/wide_deep_files/wide_deep_fp32_pretrained_model.pb \ --data-location /root/user/wide_deep_files/dataset_preprocessed_eval.tfrecords ``` @@ -183,7 +183,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. ``` cd /home//models/benchmarks - python launch_benchmark.py + python launch_benchmark.py \ --model-name wide_deep_large_ds \ --precision fp32 \ --mode inference \ @@ -191,7 +191,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --docker-image docker.io/intelaipg/intel-optimized-tensorflow:nightly-latestprs-bdw \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph /root/user/wide_deep_files/wide_deep_fp32_pretrained_model.pb \ --data-location /root/user/wide_deep_files/dataset_preprocessed_test.tfrecords \ -- num_parallel_batches=1 @@ -200,7 +200,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. ``` cd /home//models/benchmarks - python launch_benchmark.py + python launch_benchmark.py \ --model-name wide_deep_large_ds \ --precision fp32 \ --mode inference \ @@ -208,7 +208,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc. 
--benchmark-only \ --batch-size 512 \ --socket-id 0 \ - --docker-image docker.io/intelaipg/intel-optimized-tensorflow:nightly-latestprs-bdw \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --in-graph /root/user/wide_deep_files/wide_deep_fp32_pretrained_model.pb \ --data-location /root/user/wide_deep_files/dataset_preprocessed_test.tfrecords ``` diff --git a/benchmarks/text_to_speech/tensorflow/wavenet/README.md b/benchmarks/text_to_speech/tensorflow/wavenet/README.md index 512cabd95..d51cdfa72 100644 --- a/benchmarks/text_to_speech/tensorflow/wavenet/README.md +++ b/benchmarks/text_to_speech/tensorflow/wavenet/README.md @@ -71,7 +71,7 @@ python launch_benchmark.py \ --framework tensorflow \ --socket-id 0 \ --num-cores 1 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --model-source-dir /home//wavenet/tensorflow-wavenet \ --checkpoint /home//wavenet_checkpoints \ -- checkpoint_name=model.ckpt-99 sample=8510 diff --git a/docs/general/tensorflow/LaunchBenchmark.md b/docs/general/tensorflow/LaunchBenchmark.md index 4ee5d6a0a..08c6c999a 100644 --- a/docs/general/tensorflow/LaunchBenchmark.md +++ b/docs/general/tensorflow/LaunchBenchmark.md @@ -164,7 +164,7 @@ $ python launch_benchmark.py \ --batch-size 1 \ --socket-id 0 \ --data-location /home//Imagenet_Validation \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --volume /home//custom_folder_1:/custom_folder_1 \ --volume /home//custom_folder_2:/custom_folder_2 ``` @@ -201,7 +201,7 @@ Below is an example showing how to use the `--debug` flag: --batch-size=1 \ --socket-id 0 \ --data-location /home//Imagenet_Validation \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14 \ + --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ --debug # ls From 51baf076d2c98fafeb8fbe9088ad89275782c636 Mon Sep 17 00:00:00 2001 From: Dina Suehiro Jones Date: Thu, 27 Jun 2019 09:41:53 -0700 Subject: [PATCH 59/62] Update lm-1b README due to branch and path changes (#343) * Update lm-1b README due to branch and path changes * Added specific SHA (just the current one from master as of 6/26) --- .../tensorflow/lm-1b/README.md | 25 +++++++++++++------ 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/benchmarks/language_modeling/tensorflow/lm-1b/README.md b/benchmarks/language_modeling/tensorflow/lm-1b/README.md index 871660d67..85b4f1fc7 100644 --- a/benchmarks/language_modeling/tensorflow/lm-1b/README.md +++ b/benchmarks/language_modeling/tensorflow/lm-1b/README.md @@ -9,17 +9,26 @@ other platforms are coming later. ## FP32 Inference Instructions -1. Clone [mlperf/inference](https://github.com/mlperf/inference.git) and -checkout `setInter` branch. +1. Clone [mlperf/inference](https://github.com/mlperf/inference.git) +with the current SHA from master of the repo on 6/26/2019: ``` git clone https://github.com/mlperf/inference.git -cd mlperf -git checkout setInter +cd inference +git checkout 41eb3e489233e83e544cd25148aca177b95d7bea ``` -To prepare the checkpoint and dataset, run: +To prepare the checkpoint and dataset, run the `benchmark.py` script +from the mlperf inference repo. 
Since this requires python3 and +TensorFlow to be installed, the following instructions show how to run +a docker container with your cloned mlperf inference repo mounted as a +volume: ``` -python inference/cloud/language_modeling/benchmark.py +docker run --volume /home//inference:/inference -it intelaipg/intel-optimized-tensorflow:1.14.0-py3 /bin/bash +``` +In the docker container, run: +``` +cd /inference/others/cloud/language_modeling/ +python3 benchmark.py ``` 2. Clone this [intelai/models](https://github.com/IntelAI/models) @@ -54,7 +63,7 @@ python launch_benchmark.py \ --batch-size 1 \ --socket-id 0 \ --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ - --model-source-dir /inference/cloud/language_modeling + --model-source-dir /inference/others/cloud/language_modeling ``` @@ -69,7 +78,7 @@ python launch_benchmark.py \ --batch-size 1024 \ --socket-id 0 \ --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \ - --model-source-dir /inference/cloud/language_modeling \ + --model-source-dir /inference/others/cloud/language_modeling \ -- steps=4 \ ``` From 2aa62041cd2ea2c2eda0d464dad5eb61b4e13f0e Mon Sep 17 00:00:00 2001 From: Dina Suehiro Jones Date: Tue, 2 Jul 2019 13:22:01 -0700 Subject: [PATCH 60/62] Update README files to use tf-cpu.1-14 docker image (#346) * Update README files to use tf-cpu docker image for 1.14 * Peg older version of pytest --- .../content_creation/tensorflow/draw/README.md | 4 ++-- .../tensorflow/densenet169/README.md | 6 +++--- .../tensorflow/inception_resnet_v2/README.md | 8 ++++---- .../tensorflow/inceptionv3/README.md | 16 ++++++++-------- .../tensorflow/inceptionv4/README.md | 12 ++++++------ .../tensorflow/mobilenet_v1/README.md | 12 ++++++------ .../tensorflow/resnet101/README.md | 14 +++++++------- .../tensorflow/squeezenet/README.md | 4 ++-- .../tensorflow/maskrcnn/README.md | 2 +- .../image_segmentation/tensorflow/unet/README.md | 2 +- .../language_modeling/tensorflow/lm-1b/README.md | 6 +++--- .../tensorflow/gnmt/README.md | 4 ++-- .../tensorflow/transformer_language/README.md | 4 ++-- .../tensorflow/transformer_lt_official/README.md | 4 ++-- .../tensorflow/faster_rcnn/README.md | 8 ++++---- .../object_detection/tensorflow/rfcn/README.md | 8 ++++---- .../tensorflow/ssd-mobilenet/README.md | 8 ++++---- .../tensorflow/ssd-resnet34/README.md | 8 ++++---- .../tensorflow/ssd_vgg16/README.md | 8 ++++---- .../tensorflow/wide_deep/README.md | 4 ++-- .../tensorflow/wide_deep_large_ds/README.md | 6 +++--- .../text_to_speech/tensorflow/wavenet/README.md | 2 +- requirements-test.txt | 2 +- 23 files changed, 76 insertions(+), 76 deletions(-) diff --git a/benchmarks/content_creation/tensorflow/draw/README.md b/benchmarks/content_creation/tensorflow/draw/README.md index 069dfd7a8..a918d1a5a 100644 --- a/benchmarks/content_creation/tensorflow/draw/README.md +++ b/benchmarks/content_creation/tensorflow/draw/README.md @@ -48,7 +48,7 @@ modes/precisions: --model-name draw \ --mode inference \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3 \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --checkpoint /home//draw_fp32_pretrained_model \ --data-location /home//mnist \ --batch-size 1 \ @@ -61,7 +61,7 @@ modes/precisions: --model-name draw \ --mode inference \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3 \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --checkpoint /home//draw_fp32_pretrained_model \ --data-location /home//mnist \ 
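The data-prep flow in the patch above (start a container, `cd`, run `benchmark.py`) can also be collapsed into a single non-interactive command. The following is a minimal sketch under the same assumptions as the README (repo cloned under the home directory, image tag taken from the patch); it is not a command the patch itself adds:

```bash
# One-shot variant of the lm-1b data prep step: mount the cloned mlperf
# inference repo and run benchmark.py directly instead of opening an
# interactive shell in the container.
MLPERF_DIR=$HOME/inference   # assumed clone location

docker run --rm \
  --volume ${MLPERF_DIR}:/inference \
  intelaipg/intel-optimized-tensorflow:1.14.0-py3 \
  /bin/bash -c "cd /inference/others/cloud/language_modeling && python3 benchmark.py"
```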
From 2aa62041cd2ea2c2eda0d464dad5eb61b4e13f0e Mon Sep 17 00:00:00 2001
From: Dina Suehiro Jones
Date: Tue, 2 Jul 2019 13:22:01 -0700
Subject: [PATCH 60/62] Update README files to use tf-cpu.1-14 docker image
 (#346)

* Update README files to use tf-cpu docker image for 1.14

* Peg older version of pytest
---
 .../content_creation/tensorflow/draw/README.md   |  4 ++--
 .../tensorflow/densenet169/README.md             |  6 +++---
 .../tensorflow/inception_resnet_v2/README.md     |  8 ++++----
 .../tensorflow/inceptionv3/README.md             | 16 ++++++++--------
 .../tensorflow/inceptionv4/README.md             | 12 ++++++------
 .../tensorflow/mobilenet_v1/README.md            | 12 ++++++------
 .../tensorflow/resnet101/README.md               | 14 +++++++-------
 .../tensorflow/squeezenet/README.md              |  4 ++--
 .../tensorflow/maskrcnn/README.md                |  2 +-
 .../image_segmentation/tensorflow/unet/README.md |  2 +-
 .../language_modeling/tensorflow/lm-1b/README.md |  6 +++---
 .../tensorflow/gnmt/README.md                    |  4 ++--
 .../tensorflow/transformer_language/README.md    |  4 ++--
 .../tensorflow/transformer_lt_official/README.md |  4 ++--
 .../tensorflow/faster_rcnn/README.md             |  8 ++++----
 .../object_detection/tensorflow/rfcn/README.md   |  8 ++++----
 .../tensorflow/ssd-mobilenet/README.md           |  8 ++++----
 .../tensorflow/ssd-resnet34/README.md            |  8 ++++----
 .../tensorflow/ssd_vgg16/README.md               |  8 ++++----
 .../tensorflow/wide_deep/README.md               |  4 ++--
 .../tensorflow/wide_deep_large_ds/README.md      |  6 +++---
 .../text_to_speech/tensorflow/wavenet/README.md  |  2 +-
 requirements-test.txt                            |  2 +-
 23 files changed, 76 insertions(+), 76 deletions(-)

diff --git a/benchmarks/content_creation/tensorflow/draw/README.md b/benchmarks/content_creation/tensorflow/draw/README.md
index 069dfd7a8..a918d1a5a 100644
--- a/benchmarks/content_creation/tensorflow/draw/README.md
+++ b/benchmarks/content_creation/tensorflow/draw/README.md
@@ -48,7 +48,7 @@ modes/precisions:
     --model-name draw \
     --mode inference \
     --framework tensorflow \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --checkpoint /home/<user>/draw_fp32_pretrained_model \
     --data-location /home/<user>/mnist \
     --batch-size 1 \
@@ -61,7 +61,7 @@ modes/precisions:
     --model-name draw \
     --mode inference \
     --framework tensorflow \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --checkpoint /home/<user>/draw_fp32_pretrained_model \
     --data-location /home/<user>/mnist \
     --batch-size 100 \
diff --git a/benchmarks/image_recognition/tensorflow/densenet169/README.md b/benchmarks/image_recognition/tensorflow/densenet169/README.md
index 8f4a0b3f2..aaf2fd9e2 100644
--- a/benchmarks/image_recognition/tensorflow/densenet169/README.md
+++ b/benchmarks/image_recognition/tensorflow/densenet169/README.md
@@ -58,7 +58,7 @@ following modes/precisions:
     --batch-size 100 \
     --socket-id 0 \
     --in-graph /home/<user>/densenet169_fp32_pretrained_model.pb \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     -- input_height=224 input_width=224 warmup_steps=20 steps=100 \
     input_layer="input" output_layer="densenet169/predictions/Reshape_1"
 ```
@@ -74,7 +74,7 @@ following modes/precisions:
     --batch-size 1 \
     --socket-id 0 \
     --in-graph /home/<user>/densenet169_fp32_pretrained_model.pb \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     -- input_height=224 input_width=224 warmup_steps=20 steps=100 \
     input_layer="input" output_layer="densenet169/predictions/Reshape_1"
 ```
@@ -91,7 +91,7 @@ following modes/precisions:
     --batch-size 100 \
     --socket-id 0 \
     --in-graph /home/<user>/densenet169_fp32_pretrained_model.pb \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --data-location /home/<user>/imagenet_validation_dataset \
     -- input_height=224 input_width=224 \
     input_layer="input" output_layer="densenet169/predictions/Reshape_1"
diff --git a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md
index 034651a7f..cd38e364f 100644
--- a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md
+++ b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md
@@ -84,7 +84,7 @@ python launch_benchmark.py \
     --framework tensorflow \
     --accuracy-only \
     --batch-size 100 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph /home/<user>/inception_resnet_v2_int8_pretrained_model.pb \
     --data-location /home/<user>/datasets/ImageNet_TFRecords
 ```
@@ -100,7 +100,7 @@ python launch_benchmark.py \
     --benchmark-only \
     --batch-size 1 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph /home/<user>/inception_resnet_v2_int8_pretrained_model.pb
 ```
@@ -115,7 +115,7 @@ python launch_benchmark.py \
     --benchmark-only \
     --batch-size 128 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph /home/<user>/inception_resnet_v2_int8_pretrained_model.pb
 ```
@@ -242,7 +242,7 @@ python launch_benchmark.py \
     --framework tensorflow \
     --accuracy-only \
     --batch-size 100 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph /home/<user>/inception_resnet_v2_fp32_pretrained_model.pb \
     --data-location /home/<user>/datasets/ImageNet_TFRecords
 ```
diff --git a/benchmarks/image_recognition/tensorflow/inceptionv3/README.md b/benchmarks/image_recognition/tensorflow/inceptionv3/README.md
index adaedef36..0a9223914 100644
--- a/benchmarks/image_recognition/tensorflow/inceptionv3/README.md
+++ b/benchmarks/image_recognition/tensorflow/inceptionv3/README.md
@@ -97,7 +97,7 @@ python launch_benchmark.py \
     --framework tensorflow \
     --accuracy-only \
     --batch-size 100 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph /home/<user>/inceptionv3_int8_pretrained_model.pb \
     --data-location /home/<user>/datasets/ImageNet_TFRecords
 ```
@@ -118,7 +118,7 @@ python launch_benchmark.py \
     --benchmark-only \
     --batch-size 1 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph /home/<user>/inceptionv3_int8_pretrained_model.pb \
     --data-location /home/<user>/datasets/ImageNet_TFRecords \
     -- warmup_steps=50 steps=500
@@ -135,7 +135,7 @@ python launch_benchmark.py \
     --benchmark-only \
     --batch-size 1 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph /home/<user>/inceptionv3_int8_pretrained_model.pb \
     -- warmup_steps=50 steps=500
 ```
@@ -151,7 +151,7 @@ python launch_benchmark.py \
     --benchmark-only \
     --batch-size 128 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph /home/<user>/inceptionv3_int8_pretrained_model.pb \
     --data-location /home/<user>/datasets/ImageNet_TFRecords \
     -- warmup_steps=50 steps=500
@@ -168,7 +168,7 @@ python launch_benchmark.py \
     --benchmark-only \
     --batch-size 128 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph /home/<user>/inceptionv3_int8_pretrained_model.pb \
     -- warmup_steps=50 steps=500
 ```
@@ -258,7 +258,7 @@ python launch_benchmark.py \
     --framework tensorflow \
     --batch-size 1 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph /home/<user>/inceptionv3_fp32_pretrained_model.pb
 ```
 Example log tail when running for online inference:
@@ -289,7 +289,7 @@ python launch_benchmark.py \
     --framework tensorflow \
     --batch-size 128 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph /home/<user>/inceptionv3_fp32_pretrained_model.pb
 ```
 Example log tail when running for batch inference:
@@ -321,7 +321,7 @@ python launch_benchmark.py \
     --accuracy-only \
     --batch-size 100 \
     --data-location /dataset/Imagenet_Validation \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph /home/<user>/inceptionv3_fp32_pretrained_model.pb
 ```
 Example log tail when running for accuracy:
diff --git a/benchmarks/image_recognition/tensorflow/inceptionv4/README.md b/benchmarks/image_recognition/tensorflow/inceptionv4/README.md
index 31b3ba91b..560de9ef5 100644
--- a/benchmarks/image_recognition/tensorflow/inceptionv4/README.md
+++ b/benchmarks/image_recognition/tensorflow/inceptionv4/README.md
@@ -56,7 +56,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc.
     --accuracy-only \
     --batch-size 100 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph /home/<user>/inceptionv4_int8_pretrained_model.pb \
     --data-location /home/<user>/ImageNet_TFRecords
 ```
@@ -71,7 +71,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc.
     --benchmark-only \
     --batch-size 240 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph /home/<user>/inceptionv4_int8_pretrained_model.pb
 ```
@@ -85,7 +85,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc.
     --benchmark-only \
     --batch-size 1 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph /home/<user>/inceptionv4_int8_pretrained_model.pb
 ```
@@ -185,7 +185,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc.
     --accuracy-only \
     --batch-size 100 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph /home/<user>/inceptionv4_fp32_pretrained_model.pb \
     --data-location /home/<user>/ImageNet_TFRecords
 ```
@@ -200,7 +200,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc.
     --benchmark-only \
     --batch-size 240 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph /home/<user>/inceptionv4_fp32_pretrained_model.pb
 ```
@@ -214,7 +214,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc.
     --benchmark-only \
     --batch-size 1 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph /home/<user>/inceptionv4_fp32_pretrained_model.pb
 ```
diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md
index 0c7295244..e7d0d6f5d 100644
--- a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md
+++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md
@@ -67,7 +67,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/mobilene
     --batch-size 240 \
     --socket-id 0 \
     --in-graph /home/<user>/mobilenetv1_int8_pretrained_model.pb \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     -- input_height=224 input_width=224 warmup_steps=10 steps=50 \
     input_layer="input" output_layer="MobilenetV1/Predictions/Reshape_1"
 ```
@@ -83,7 +83,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/mobilene
     --batch-size 1 \
     --socket-id 0 \
     --in-graph /home/<user>/mobilenetv1_int8_pretrained_model.pb \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     -- input_height=224 input_width=224 warmup_steps=10 steps=50 \
     input_layer="input" output_layer="MobilenetV1/Predictions/Reshape_1"
 ```
@@ -100,7 +100,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/mobilene
     --batch-size 100 \
     --socket-id 0 \
     --in-graph /home/<user>/mobilenetv1_int8_pretrained_model.pb \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --data-location /home/<user>/imagenet_validation_dataset \
     -- input_height=224 input_width=224 \
     input_layer="input" output_layer="MobilenetV1/Predictions/Reshape_1"
@@ -216,7 +216,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/mobilene
     --model-name mobilenet_v1 \
     --mode inference \
     --framework tensorflow \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --model-source-dir /home/<user>/tensorflow/models \
     --batch-size 1 \
     --socket-id 0 \
@@ -234,7 +234,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/mobilene
     --model-name mobilenet_v1 \
     --mode inference \
     --framework tensorflow \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --model-source-dir /home/<user>/tensorflow/models \
     --batch-size 100 \
     --socket-id 0 \
@@ -248,7 +248,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/mobilene
     --model-name mobilenet_v1 \
     --mode inference \
     --framework tensorflow \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --model-source-dir /home/<user>/tensorflow/models \
     --batch-size 100 \
     --accuracy-only \
diff --git a/benchmarks/image_recognition/tensorflow/resnet101/README.md b/benchmarks/image_recognition/tensorflow/resnet101/README.md
index a39daaf70..7fb3566eb 100644
--- a/benchmarks/image_recognition/tensorflow/resnet101/README.md
+++ b/benchmarks/image_recognition/tensorflow/resnet101/README.md
@@ -85,7 +85,7 @@ $ python launch_benchmark.py \
     --framework tensorflow \
     --accuracy-only \
     --batch-size 100 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --data-location /home/<user>/dataset/FullImageNetData_directory \
     --in-graph=/home/<user>/resnet101_int8_pretrained_model.pb
 ```
@@ -106,7 +106,7 @@ python launch_benchmark.py \
     --benchmark-only \
     --batch-size 1 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph=/home/<user>/resnet101_int8_pretrained_model.pb \
     -- warmup_steps=50 steps=500
 ```
@@ -123,7 +123,7 @@ python launch_benchmark.py \
     --batch-size 1 \
     --socket-id 0 \
     --data-location /home/<user>/dataset/FullImageNetData_directory \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph=/home/<user>/resnet101_int8_pretrained_model.pb \
     -- warmup_steps=50 steps=500
 ```
@@ -139,7 +139,7 @@ python launch_benchmark.py \
     --benchmark-only \
     --batch-size 128 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph=/home/<user>/resnet101_int8_pretrained_model.pb \
     -- warmup_steps=50 steps=500
 ```
@@ -156,7 +156,7 @@ python launch_benchmark.py \
     --batch-size 128 \
     --data-location /home/<user>/dataset/FullImageNetData_directory \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph=/home/<user>/resnet101_int8_pretrained_model.pb \
     -- warmup_steps=50 steps=500
 ```
@@ -250,7 +250,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/resnet10
     --mode inference \
     --model-name resnet101 \
     --batch-size 128 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph /home/<user>/trained_models/resnet101_fp32_pretrained_model.pb \
     --socket-id 0
 ```
@@ -277,7 +277,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/resnet10
     --mode inference \
     --model-name resnet101 \
     --batch-size 100 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph /home/<user>/trained_models/resnet101_fp32_pretrained_model.pb \
     --data-location /home/<user>/imagenet_validation_dataset \
     --accuracy-only \
diff --git a/benchmarks/image_recognition/tensorflow/squeezenet/README.md b/benchmarks/image_recognition/tensorflow/squeezenet/README.md
index 2c3a245f9..feaba492a 100644
--- a/benchmarks/image_recognition/tensorflow/squeezenet/README.md
+++ b/benchmarks/image_recognition/tensorflow/squeezenet/README.md
@@ -79,7 +79,7 @@ $ python launch_benchmark.py \
     --framework tensorflow \
     --socket-id 0 \
     --batch-size 64 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --checkpoint /home/<user>/squeezenet_checkpoints \
     --data-location /home/<user>/datasets/ImageNet_TFRecords
 ```
@@ -94,7 +94,7 @@ $ python launch_benchmark.py \
     --framework tensorflow \
     --socket-id 0 \
     --batch-size 1 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --checkpoint /home/<user>/squeezenet_checkpoints \
     --data-location /home/<user>/datasets/ImageNet_TFRecords
 ```
diff --git a/benchmarks/image_segmentation/tensorflow/maskrcnn/README.md b/benchmarks/image_segmentation/tensorflow/maskrcnn/README.md
index 938dcb634..218fd7e2f 100644
--- a/benchmarks/image_segmentation/tensorflow/maskrcnn/README.md
+++ b/benchmarks/image_segmentation/tensorflow/maskrcnn/README.md
@@ -61,7 +61,7 @@ $ python launch_benchmark.py \
     --batch-size 1 \
     --socket-id 0 \
     --data-location /home/<user>/COCO2014 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
 ```
 
 5. Log files are located at the value of `--output-dir`.
diff --git a/benchmarks/image_segmentation/tensorflow/unet/README.md b/benchmarks/image_segmentation/tensorflow/unet/README.md
index e91d2af2a..d86505a69 100644
--- a/benchmarks/image_segmentation/tensorflow/unet/README.md
+++ b/benchmarks/image_segmentation/tensorflow/unet/README.md
@@ -57,7 +57,7 @@ modes/precisions:
     --benchmark-only \
     --batch-size 1 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --checkpoint /home/<user>/unet_trained \
     --model-source-dir /home/<user>/tf_unet \
     -- checkpoint_name=model.cpkt
diff --git a/benchmarks/language_modeling/tensorflow/lm-1b/README.md b/benchmarks/language_modeling/tensorflow/lm-1b/README.md
index 85b4f1fc7..fa05e8b3b 100644
--- a/benchmarks/language_modeling/tensorflow/lm-1b/README.md
+++ b/benchmarks/language_modeling/tensorflow/lm-1b/README.md
@@ -23,7 +23,7 @@ TensorFlow to be installed, the following instructions show how to run
 a docker container with your cloned mlperf inference repo mounted as a
 volume:
 ```
-docker run --volume /home/<user>/inference:/inference -it intelaipg/intel-optimized-tensorflow:1.14.0-py3 /bin/bash
+docker run --volume /home/<user>/inference:/inference -it gcr.io/deeplearning-platform-release/tf-cpu.1-14 /bin/bash
 ```
 In the docker container, run:
 ```
@@ -62,7 +62,7 @@ python launch_benchmark.py \
     --framework tensorflow \
     --batch-size 1 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --model-source-dir /inference/others/cloud/language_modeling
 ```
@@ -77,7 +77,7 @@ python launch_benchmark.py \
     --framework tensorflow \
     --batch-size 1024 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --model-source-dir /inference/others/cloud/language_modeling \
     -- steps=4 \
 ```
diff --git a/benchmarks/language_translation/tensorflow/gnmt/README.md b/benchmarks/language_translation/tensorflow/gnmt/README.md
index 523965917..987be7075 100644
--- a/benchmarks/language_translation/tensorflow/gnmt/README.md
+++ b/benchmarks/language_translation/tensorflow/gnmt/README.md
@@ -82,7 +82,7 @@ python launch_benchmark.py \
 --socket-id 0 \
 --checkpoint /home/<user>/gnmt_checkpoints \
 --data-location /home/<user>/wmt16 \
---docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+--docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
 -- infer_mode=beam_search
 ```
@@ -99,7 +99,7 @@ python launch_benchmark.py \
 --socket-id 0 \
 --checkpoint /home/<user>/gnmt_checkpoints \
 --data-location /home/<user>/wmt16 \
---docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+--docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
 -- infer_mode=beam_search
 ```
diff --git a/benchmarks/language_translation/tensorflow/transformer_language/README.md b/benchmarks/language_translation/tensorflow/transformer_language/README.md
index f4997711f..2c0b700f2 100644
--- a/benchmarks/language_translation/tensorflow/transformer_language/README.md
+++ b/benchmarks/language_translation/tensorflow/transformer_language/README.md
@@ -82,7 +82,7 @@ python launch_benchmark.py \
     --framework tensorflow \
     --batch-size 1 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --checkpoint /home/<user>/transformer_lt_fp32_pretrained_model \
     --data-location /home/<user>/t2t_data \
     --model-source-dir /home/<user>/tensor2tensor/ \
@@ -99,7 +99,7 @@ python launch_benchmark.py \
     --framework tensorflow \
     --batch-size 32 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --checkpoint /home/<user>/transformer_lt_fp32_pretrained_model \
     --data-location /home/<user>/t2t_data \
     --model-source-dir /home/<user>/tensor2tensor/ \
diff --git a/benchmarks/language_translation/tensorflow/transformer_lt_official/README.md b/benchmarks/language_translation/tensorflow/transformer_lt_official/README.md
index f592cf832..87cc6b472 100644
--- a/benchmarks/language_translation/tensorflow/transformer_lt_official/README.md
+++ b/benchmarks/language_translation/tensorflow/transformer_lt_official/README.md
@@ -65,7 +65,7 @@ python launch_benchmark.py \
     --framework tensorflow \
     --batch-size 1 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --model-source-dir /home/<user>/tensorflow-models/models \
     --in-graph /home/<user>/transformer_lt_official_fp32_pretrained_model/graph/fp32_graphdef.pb \
     --data-location /home/<user>/transformer_lt_official_fp32_pretrained_model/data \
@@ -85,7 +85,7 @@ python launch_benchmark.py \
     --framework tensorflow \
     --batch-size 64 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --model-source-dir /home/<user>/tensorflow-models/models \
     --in-graph /home/<user>/transformer_lt_official_fp32_pretrained_model/graph/fp32_graphdef.pb \
     --data-location /home/<user>/transformer_lt_official_fp32_pretrained_model/data \
diff --git a/benchmarks/object_detection/tensorflow/faster_rcnn/README.md b/benchmarks/object_detection/tensorflow/faster_rcnn/README.md
index cd57419f1..ff3dfce3f 100644
--- a/benchmarks/object_detection/tensorflow/faster_rcnn/README.md
+++ b/benchmarks/object_detection/tensorflow/faster_rcnn/README.md
@@ -156,7 +156,7 @@ $ python launch_benchmark.py \
     --mode inference \
     --socket-id 0 \
     --checkpoint /home/<user>/faster_rcnn_resnet50_fp32_coco \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     -- config_file=pipeline.config
 ```
@@ -169,7 +169,7 @@ python launch_benchmark.py \
     --mode inference \
     --precision fp32 \
     --framework tensorflow \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --model-source-dir /home/<user>/tensorflow/models \
     --data-location /home/<user>/coco/output \
     --in-graph /home/<user>/faster_rcnn_resnet50_fp32_coco/frozen_inference_graph.pb \
@@ -270,7 +270,7 @@ $ python launch_benchmark.py \
     --mode inference \
     --socket-id 0 \
     --in-graph /home/<user>/faster_rcnn_int8_pretrained_model.pb \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --benchmark-only \
     -- number_of_steps=5000
 ```
@@ -285,7 +285,7 @@ python launch_benchmark.py \
     --precision int8 \
     --framework tensorflow \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --model-source-dir /home/<user>/tensorflow/models \
     --data-location /home/<user>/output/coco_val.record \
     --in-graph /home/<user>/faster_rcnn_int8_pretrained_model.pb \
diff --git a/benchmarks/object_detection/tensorflow/rfcn/README.md b/benchmarks/object_detection/tensorflow/rfcn/README.md
index 42c488ca4..6e4a519df 100644
--- a/benchmarks/object_detection/tensorflow/rfcn/README.md
+++ b/benchmarks/object_detection/tensorflow/rfcn/README.md
@@ -139,7 +139,7 @@ python launch_benchmark.py \
     --mode inference \
     --precision int8 \
     --framework tensorflow \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --model-source-dir /home/<user>/tensorflow/models \
     --data-location /home/<user>/val/val2017 \
     --in-graph /home/<user>/rfcn_resnet101_int8_coco_pretrained_model.pb \
@@ -156,7 +156,7 @@ python launch_benchmark.py \
     --mode inference \
     --precision int8 \
     --framework tensorflow \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --model-source-dir /home/<user>/tensorflow/models \
     --data-location /home/<user>/coco/output/coco_val.record-00000-of-00001 \
     --in-graph /home/<user>/rfcn_resnet101_int8_coco_pretrained_model.pb \
@@ -331,7 +331,7 @@ $ python launch_benchmark.py \
     --mode inference \
     --socket-id 0 \
     --checkpoint /home/<user>/rfcn_resnet101_fp32_coco \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     -- config_file=rfcn_pipeline.config
 ```
@@ -344,7 +344,7 @@ python launch_benchmark.py \
     --mode inference \
     --precision fp32 \
     --framework tensorflow \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --model-source-dir /home/<user>/tensorflow/models \
     --data-location /home/<user>/coco/output/coco_val.record \
     --in-graph /home/<user>/rfcn_resnet101_fp32_coco/frozen_inference_graph.pb \
diff --git a/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md b/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md
index 35db5d14d..c6400197c 100644
--- a/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md
+++ b/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md
@@ -121,7 +121,7 @@ python launch_benchmark.py \
     --precision int8 \
     --framework tensorflow \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --model-source-dir /home/<user>/tensorflow/models \
     --data-location /home/<user>/val/val2017 \
     --in-graph /home/<user>/ssdmobilenet_int8_pretrained_model.pb \
@@ -138,7 +138,7 @@ python launch_benchmark.py \
     --precision int8 \
     --framework tensorflow \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --model-source-dir /home/<user>/tensorflow/models \
     --data-location /home/<user>/coco/output/coco_val.record \
     --in-graph /home/<user>/ssdmobilenet_int8_pretrained_model.pb \
@@ -349,7 +349,7 @@ $ python launch_benchmark.py \
     --precision fp32 \
     --mode inference \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --benchmark-only
 ```
@@ -368,7 +368,7 @@ $ python launch_benchmark.py \
     --precision fp32 \
     --mode inference \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --accuracy-only
 ```
diff --git a/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md b/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md
index 2b5364ff9..e7b3528fb 100644
--- a/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md
+++ b/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md
@@ -134,7 +134,7 @@ $ python launch_benchmark.py \
     --mode inference \
     --socket-id 0 \
     --batch-size=1 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --benchmark-only
 ```
@@ -154,7 +154,7 @@ $ python launch_benchmark.py \
     --mode inference \
     --socket-id 0 \
     --batch-size=1 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --accuracy-only
 ```
@@ -310,7 +310,7 @@ $ python launch_benchmark.py \
     --mode inference \
     --socket-id 0 \
     --batch-size=1 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --benchmark-only
 ```
@@ -330,7 +330,7 @@ $ python launch_benchmark.py \
     --mode inference \
     --socket-id 0 \
     --batch-size=1 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --accuracy-only
 ```
diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md b/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md
index 6ecd4b646..971311f75 100644
--- a/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md
+++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md
@@ -103,7 +103,7 @@ python launch_benchmark.py \
     --mode inference \
     --precision int8 \
     --framework tensorflow \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --model-source-dir /home/<user>/SSD.TensorFlow \
     --data-location /home/<user>/tf_records \
     --in-graph /home/<user>/ssdvgg16_int8_pretrained_model.pb \
@@ -133,7 +133,7 @@ python launch_benchmark.py \
     --mode inference \
     --precision int8 \
     --framework tensorflow \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --model-source-dir /home/<user>/SSD.TensorFlow \
     --data-location /home/<user>/tf_records \
     --in-graph /home/<user>/ssdvgg16_int8_pretrained_model.pb \
@@ -210,7 +210,7 @@ $ python launch_benchmark.py \
     --framework tensorflow \
     --precision fp32 \
     --mode inference \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --batch-size 1 \
     --socket-id 0 \
     --num-inter-threads 11 \
@@ -237,7 +237,7 @@ python launch_benchmark.py \
     --mode inference \
     --precision fp32 \
     --framework tensorflow \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0-py3 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --model-source-dir /home/<user>/SSD.TensorFlow \
     --data-location /home/<user>/tf_records \
     --in-graph /home/<user>/ssdvgg16_fp32_pretrained_model.pb \
diff --git a/benchmarks/recommendation/tensorflow/wide_deep/README.md b/benchmarks/recommendation/tensorflow/wide_deep/README.md
index 8f28c0607..8ace58237 100644
--- a/benchmarks/recommendation/tensorflow/wide_deep/README.md
+++ b/benchmarks/recommendation/tensorflow/wide_deep/README.md
@@ -56,7 +56,7 @@ use in the next step.
     --batch-size 1 \
     --data-location /home/<user>/widedeep_dataset \
     --checkpoint /home/<user>/path/to/wide_deep_fp32_pretrained_model \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --verbose
 ```
 * Running the model in batch inference mode, set `--batch-size` = `1024`
@@ -72,7 +72,7 @@ use in the next step.
     --batch-size 1024 \
     --data-location /home/<user>/path/to/dataset \
     --checkpoint /home/<user>/path/to/wide_deep_fp32_pretrained_model \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --verbose
 ```
 6. The log file is saved to the value of `--output-dir`.
diff --git a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/README.md b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/README.md
index 19880cd2d..e2467d45f 100755
--- a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/README.md
+++ b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/README.md
@@ -171,7 +171,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc.
     --batch-size 1000 \
     --socket-id 0 \
     --accuracy-only \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph /root/user/wide_deep_files/wide_deep_fp32_pretrained_model.pb \
     --data-location /root/user/wide_deep_files/dataset_preprocessed_eval.tfrecords
 ```
@@ -191,7 +191,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc.
     --benchmark-only \
     --batch-size 1 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph /root/user/wide_deep_files/wide_deep_fp32_pretrained_model.pb \
     --data-location /root/user/wide_deep_files/dataset_preprocessed_test.tfrecords \
     -- num_parallel_batches=1
@@ -208,7 +208,7 @@ when calling `launch_benchmark.py` and the script will run without TCMalloc.
     --benchmark-only \
     --batch-size 512 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --in-graph /root/user/wide_deep_files/wide_deep_fp32_pretrained_model.pb \
     --data-location /root/user/wide_deep_files/dataset_preprocessed_test.tfrecords
 ```
diff --git a/benchmarks/text_to_speech/tensorflow/wavenet/README.md b/benchmarks/text_to_speech/tensorflow/wavenet/README.md
index d51cdfa72..963d892d3 100644
--- a/benchmarks/text_to_speech/tensorflow/wavenet/README.md
+++ b/benchmarks/text_to_speech/tensorflow/wavenet/README.md
@@ -71,7 +71,7 @@ python launch_benchmark.py \
     --framework tensorflow \
     --socket-id 0 \
     --num-cores 1 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --model-source-dir /home/<user>/wavenet/tensorflow-wavenet \
     --checkpoint /home/<user>/wavenet_checkpoints \
     -- checkpoint_name=model.ckpt-99 sample=8510
diff --git a/requirements-test.txt b/requirements-test.txt
index 5102c19b3..fe0bf31ab 100644
--- a/requirements-test.txt
+++ b/requirements-test.txt
@@ -1,6 +1,6 @@
 conditional
 flake8==3.7.5
-pytest
+pytest==4.6.3
 pytest-cov
 pytest-xdist
 mock
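The image swap above applies the same mechanical substitution to 23 files. A sweep of that shape can be scripted; the following is a sketch assuming GNU `sed` and the two tag spellings seen in the diffs above, and is not necessarily how this patch was actually produced:

```bash
# Swap every intelaipg/intel-optimized-tensorflow:1.14.0[-py3] reference for
# the tf-cpu.1-14 image across the trees this patch touches, then report
# anything left over for manual review.
NEW_IMAGE="gcr.io/deeplearning-platform-release/tf-cpu.1-14"

grep -rlE 'intelaipg/intel-optimized-tensorflow:1\.14\.0(-py3)?' benchmarks/ docs/ \
  | xargs -r sed -i -E "s|intelaipg/intel-optimized-tensorflow:1\.14\.0(-py3)?|${NEW_IMAGE}|g"

# Any remaining references to the old repository need a manual look.
grep -rn 'intelaipg/intel-optimized-tensorflow' benchmarks/ docs/ || echo "no stale tags left"
```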
From f2cc76dd503f2dbdf674c5e8956fbb92f9bceb9d Mon Sep 17 00:00:00 2001
From: Dina Suehiro Jones
Date: Tue, 2 Jul 2019 13:54:56 -0700
Subject: [PATCH 61/62] Update Pillow version and py3 fix (#351)
---
 benchmarks/common/tensorflow/start.sh                 | 7 ++++++-
 .../object_detection/tensorflow/rfcn/requirements.txt | 2 +-
 2 files changed, 7 insertions(+), 2 deletions(-)

diff --git a/benchmarks/common/tensorflow/start.sh b/benchmarks/common/tensorflow/start.sh
index 4eeda5648..9ea5f9f02 100755
--- a/benchmarks/common/tensorflow/start.sh
+++ b/benchmarks/common/tensorflow/start.sh
@@ -449,7 +449,7 @@ function maskrcnn() {
   if [ ${NOINSTALL} != "True" ]; then
     # install dependencies
     pip3 install -r ${MOUNT_EXTERNAL_MODELS_SOURCE}/requirements.txt
-    pip3 install --force-reinstall scipy==1.2.1
+    pip3 install --force-reinstall scipy==1.2.1 Pillow==5.3.0
 
     # install cocoapi
     get_cocoapi ${MOUNT_EXTERNAL_MODELS_SOURCE}/coco ${MOUNT_EXTERNAL_MODELS_SOURCE}/samples/coco
@@ -551,6 +551,7 @@ function rfcn() {
   if [ ${NOINSTALL} != "True" ]; then
     # install dependencies
     pip install -r "${MOUNT_BENCHMARK}/object_detection/tensorflow/rfcn/requirements.txt"
+    original_dir=$(pwd)
 
     cd "${MOUNT_EXTERNAL_MODELS_SOURCE}/research"
@@ -561,6 +562,10 @@ function rfcn() {
       get_cocoapi ${MOUNT_EXTERNAL_MODELS_SOURCE}/cocoapi ${MOUNT_EXTERNAL_MODELS_SOURCE}/research/
     fi
 
+    # Fix the object_detection_evaluation.py file to change unicode() to str() so that it works in py3
+    chmod -R 777 ${MOUNT_EXTERNAL_MODELS_SOURCE}/research/object_detection/utils/object_detection_evaluation.py
+    sed -i.bak "s/unicode(/str(/g" ${MOUNT_EXTERNAL_MODELS_SOURCE}/research/object_detection/utils/object_detection_evaluation.py
+
     split_arg=""
     if [ -n "${split}" ] && [ ${ACCURACY_ONLY} == "True" ]; then
       split_arg="--split=${split}"
diff --git a/benchmarks/object_detection/tensorflow/rfcn/requirements.txt b/benchmarks/object_detection/tensorflow/rfcn/requirements.txt
index 92d9e0ba5..3ebb25335 100644
--- a/benchmarks/object_detection/tensorflow/rfcn/requirements.txt
+++ b/benchmarks/object_detection/tensorflow/rfcn/requirements.txt
@@ -1,6 +1,6 @@
 Cython
 contextlib2
-pillow
+pillow==5.3.0
 lxml
 jupyter
 matplotlib
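The `sed` line added to `rfcn()` above is a stock Python 2 to 3 shim: `unicode()` does not exist in Python 3, where `str` is already the text type, so rewriting the calls keeps the pinned object detection code importable under `python3`. A throwaway demonstration of the same rewrite, with the file name and contents invented for illustration:

```bash
# Show what the sed rewrite in rfcn() does, on a scratch file.
cat > /tmp/od_eval_demo.py <<'EOF'
category_name = unicode(category_name, 'utf-8')
label = unicode('background')
EOF

# Same substitution as the patch; the .bak suffix keeps the pre-rewrite copy.
sed -i.bak "s/unicode(/str(/g" /tmp/od_eval_demo.py
cat /tmp/od_eval_demo.py
# category_name = str(category_name, 'utf-8')
# label = str('background')
```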
From 53e25d01ca88ad99bfc92e6655f9ab14ffdde463 Mon Sep 17 00:00:00 2001
From: Dina Suehiro Jones
Date: Wed, 3 Jul 2019 12:54:19 -0700
Subject: [PATCH 62/62] Updating docker images that were missed earlier (#352)
---
 .../tensorflow/dcgan/README.md                     |  2 +-
 .../tensorflow/facenet/README.md                   |  6 +++---
 .../tensorflow/mtcc/README.md                      |  2 +-
 .../tensorflow/inception_resnet_v2/README.md       |  4 ++--
 .../tensorflow/resnet50/README.md                  | 12 ++++++------
 .../tensorflow/resnet50v1_5/README.md              | 14 +++++++-------
 benchmarks/recommendation/tensorflow/ncf/README.md |  6 +++---
 docs/general/tensorflow/LaunchBenchmark.md         |  4 ++--
 8 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/benchmarks/adversarial_networks/tensorflow/dcgan/README.md b/benchmarks/adversarial_networks/tensorflow/dcgan/README.md
index e6f572916..4950d0f63 100644
--- a/benchmarks/adversarial_networks/tensorflow/dcgan/README.md
+++ b/benchmarks/adversarial_networks/tensorflow/dcgan/README.md
@@ -60,7 +60,7 @@ $ python launch_benchmark.py \
     --socket-id 0 \
     --checkpoint /home/<user>/dcgan_fp32_unconditional_cifar10_pretrained_model \
     --data-location /home/<user>/cifar10 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
 ```
 
 5. Log files are located at the value of `--output-dir`.
diff --git a/benchmarks/face_detection_and_alignment/tensorflow/facenet/README.md b/benchmarks/face_detection_and_alignment/tensorflow/facenet/README.md
index 4a5322bad..fd27ffa2b 100644
--- a/benchmarks/face_detection_and_alignment/tensorflow/facenet/README.md
+++ b/benchmarks/face_detection_and_alignment/tensorflow/facenet/README.md
@@ -59,7 +59,7 @@ python launch_benchmark.py \
     --checkpoint /home/<user>/checkpoints \
     --data-location /home/<user>/dataset \
     --model-source-dir /home/<user>/facenet/ \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
 ```
 Example log tail for online inference:
 ```
@@ -94,7 +94,7 @@ python launch_benchmark.py \
     --checkpoint /home/<user>/checkpoints \
     --data-location /home/<user>/dataset \
     --model-source-dir /home/<user>/facenet/ \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
 ```
 Example log tail for batch inference:
 ```
@@ -126,7 +126,7 @@ python launch_benchmark.py \
     --checkpoint /home/<user>/checkpoints \
     --data-location /home/<user>/dataset \
     --model-source-dir /home/<user>/facenet/ \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
 ```
 Example log tail for accuracy:
 ```
diff --git a/benchmarks/face_detection_and_alignment/tensorflow/mtcc/README.md b/benchmarks/face_detection_and_alignment/tensorflow/mtcc/README.md
index a79584c36..36cad0fe3 100644
--- a/benchmarks/face_detection_and_alignment/tensorflow/mtcc/README.md
+++ b/benchmarks/face_detection_and_alignment/tensorflow/mtcc/README.md
@@ -55,7 +55,7 @@ Run:
     --mode inference \
     --socket-id 0 \
     --checkpoint /home/<user>/MTCNN_model \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
 ```
 
 6. The log file is saved to the value of `--output-dir`.
diff --git a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md
index cd38e364f..c3a44d2d2 100644
--- a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md
+++ b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md
@@ -259,7 +259,7 @@ python launch_benchmark.py \
     --batch-size 1 \
     --socket-id 0 \
     --in-graph /home/<user>/inception_resnet_v2_fp32_pretrained_model.pb \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
 ```
 
 For batch inference (using `--benchmark-only`, `--socket-id 0` and `--batch-size 128`):
@@ -274,7 +274,7 @@ python launch_benchmark.py \
     --batch-size 128 \
     --socket-id 0 \
     --in-graph /home/<user>/inception_resnet_v2_fp32_pretrained_model.pb \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
 ```
 
 Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands
diff --git a/benchmarks/image_recognition/tensorflow/resnet50/README.md b/benchmarks/image_recognition/tensorflow/resnet50/README.md
index 751f15e98..71bbdf7cc 100644
--- a/benchmarks/image_recognition/tensorflow/resnet50/README.md
+++ b/benchmarks/image_recognition/tensorflow/resnet50/README.md
@@ -58,7 +58,7 @@ $ python launch_benchmark.py \
     --mode inference \
     --batch-size=100 \
     --accuracy-only \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
 ```
 
 The log file is saved to the value of `--output-dir`.
@@ -99,7 +99,7 @@ $ python launch_benchmark.py \
     --mode inference \
     --batch-size=128 \
     --benchmark-only \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
     -- warmup_steps=50 steps=500
 ```
 The tail of the log output when the script completes should look
@@ -160,7 +160,7 @@ $ python launch_benchmark.py \
     --mode inference \
     --batch-size=1 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
 ```
 
 The log file is saved to the value of `--output-dir`.
@@ -195,7 +195,7 @@ $ python launch_benchmark.py \
     --mode inference \
     --batch-size=128 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
 ```
 
 The log file is saved to the value of `--output-dir`.
@@ -233,7 +233,7 @@ $ python launch_benchmark.py \
     --batch-size 100 \
     --socket-id 0 \
     --data-location /home/<user>/dataset/ImageNetData_directory \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
 ```
 
 The log file is saved to the value of `--output-dir`.
@@ -267,7 +267,7 @@ $ python launch_benchmark.py \
     --batch-size 100 \
     --socket-id 0 \
     --data-location /home/<user>/dataset/ImageNetData_directory \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
 ```
 
 The results file will be written to the
 `models/benchmarks/common/tensorflow/logs` directory, unless another
diff --git a/benchmarks/image_recognition/tensorflow/resnet50v1_5/README.md b/benchmarks/image_recognition/tensorflow/resnet50v1_5/README.md
index 314a65dcc..18889005a 100644
--- a/benchmarks/image_recognition/tensorflow/resnet50v1_5/README.md
+++ b/benchmarks/image_recognition/tensorflow/resnet50v1_5/README.md
@@ -42,7 +42,7 @@ $ git clone https://github.com/IntelAI/models.git
    The optimized ResNet50v1.5 model files are attached to the
    [intelai/models](https://github.com/intelai/models) repo and
    located at `models/models/image_recognition/tensorflow/resnet50v1_5/`.
-   The docker image (`intelaipg/intel-optimized-tensorflow:1.14.0`)
+   The docker image (`gcr.io/deeplearning-platform-release/tf-cpu.1-14`)
    used in the commands above were built using
    [TensorFlow](git@github.com:tensorflow/tensorflow.git) master for
    TensorFlow version 1.14.
@@ -61,7 +61,7 @@ $ python launch_benchmark.py \
     --mode inference \
     --batch-size=100 \
     --accuracy-only \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
 ```
 
 The log file is saved to the value of `--output-dir`.
@@ -100,7 +100,7 @@ $ python launch_benchmark.py \
     --mode inference \
     --batch-size=128 \
     --benchmark-only \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
     -- warmup_steps=50 steps=500
 ```
 The tail of the log output when the benchmarking completes should look
@@ -159,7 +159,7 @@ $ python launch_benchmark.py \
     --mode inference \
     --batch-size=1 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
 ```
 
 The log file is saved to the value of `--output-dir`.
@@ -197,7 +197,7 @@ $ python launch_benchmark.py \
     --mode inference \
     --batch-size=128 \
     --socket-id 0 \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
 ```
 
 The log file is saved to the value of `--output-dir`.
@@ -238,7 +238,7 @@ $ python launch_benchmark.py \
     --batch-size 100 \
     --socket-id 0 \
     --data-location /home/<user>/dataset/ImageNetData_directory \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
 ```
 
 The log file is saved to the value of `--output-dir`.
@@ -275,7 +275,7 @@ $ python launch_benchmark.py \
     --batch-size 100 \
     --socket-id 0 \
     --data-location /home/<user>/dataset/ImageNetData_directory \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
 ```
 
 The results file will be written to the
 `models/benchmarks/common/tensorflow/logs` directory, unless another
diff --git a/benchmarks/recommendation/tensorflow/ncf/README.md b/benchmarks/recommendation/tensorflow/ncf/README.md
index 2eccb84e7..a86a56b1f 100644
--- a/benchmarks/recommendation/tensorflow/ncf/README.md
+++ b/benchmarks/recommendation/tensorflow/ncf/README.md
@@ -53,7 +53,7 @@ $ python launch_benchmark.py \
     --framework tensorflow \
     --precision fp32 \
     --mode inference \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
 ```
 
 The tail of batch inference log, looks as below.
@@ -83,7 +83,7 @@ $ python launch_benchmark.py \
     --framework tensorflow \
     --precision fp32 \
     --mode inference \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
 ```
 
 The tail of online inference log, looks as below.
@@ -115,7 +115,7 @@ $ python launch_benchmark.py \
     --framework tensorflow \
     --precision fp32 \
     --mode inference \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
 ```
 
 The tail of accuracy log, looks as below.
diff --git a/docs/general/tensorflow/LaunchBenchmark.md b/docs/general/tensorflow/LaunchBenchmark.md
index 08c6c999a..14e38385e 100644
--- a/docs/general/tensorflow/LaunchBenchmark.md
+++ b/docs/general/tensorflow/LaunchBenchmark.md
@@ -164,7 +164,7 @@ $ python launch_benchmark.py \
     --batch-size 1 \
     --socket-id 0 \
     --data-location /home/<user>/Imagenet_Validation \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --volume /home/<user>/custom_folder_1:/custom_folder_1 \
     --volume /home/<user>/custom_folder_2:/custom_folder_2
 ```
@@ -201,7 +201,7 @@ Below is an example showing how to use the `--debug` flag:
     --batch-size=1 \
     --socket-id 0 \
     --data-location /home/<user>/Imagenet_Validation \
-    --docker-image intelaipg/intel-optimized-tensorflow:1.14.0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
     --debug
 
 # ls
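With patches 60 and 62 applied, every benchmark README and doc should point at the `tf-cpu.1-14` image, so a quick pull-and-import check is a cheap way to confirm the new tag is usable before running any of the updated commands. A sketch; the `python -c` probe assumes the image exposes TensorFlow on its default `python`, which may differ:

```bash
# Confirm the replacement image pulls and actually contains TensorFlow 1.14.
IMAGE=gcr.io/deeplearning-platform-release/tf-cpu.1-14

docker pull ${IMAGE}
docker run --rm ${IMAGE} python -c "import tensorflow as tf; print(tf.__version__)"
```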