diff --git a/.gitignore b/.gitignore index 82da18448..b3e91143c 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,4 @@ .coverage .tox test_data/ +*.bak diff --git a/Contribute.md b/Contribute.md new file mode 100644 index 000000000..4a869c931 --- /dev/null +++ b/Contribute.md @@ -0,0 +1,191 @@ +# Contributing to the Model Zoo for Intel® Architecture + +## Adding scripts for a new TensorFlow model + +### Code updates + +In order to add a new model to the zoo, there are a few things that are +required: + +1. Set up the directory structure to allow the + [launch script](/docs/general/tensorflow/LaunchBenchmark.md) to find + your model. This involves creating folders for: + `/benchmarks/<use case>/<framework>/<model name>/<mode>/<precision>`. + Note that you will need to add `__init__.py` files in each new + directory that you add, in order for Python to find the code. + + ![Directory Structure](benchmarks_directory_structure.png) + +2. Next, in the leaf folder that was created in the previous step, you + will need to create `config.json` and `model_init.py` files: + + ![Add model init](add_model_init_and_config.png) + + The `config.json` file contains the best known KMP environment variable + settings to get optimal performance for the model. The default settings below are recommended for most of + the models in the Model Zoo. + + ``` + { + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } + } + ``` + + The `model_init.py` file is used to initialize the best known configuration for the + model, and then start executing inference or training. When the + [launch script](/docs/general/tensorflow/LaunchBenchmark.md) is run, + it will look for the appropriate `model_init.py` file to use + according to the model name, framework, mode, and precision that are + specified by the user. + + The contents of the `model_init.py` file will vary by framework. For + TensorFlow models, we typically use the + [base model init class](/benchmarks/common/base_model_init.py) that + includes functions for doing common tasks such as setting up the best + known environment variables (like `KMP_BLOCKTIME`, `KMP_SETTINGS`, and + `KMP_AFFINITY`, which are loaded from **config.json**, as well as `OMP_NUM_THREADS`), the number of intra threads, and the number of + inter threads. The `model_init.py` file also sets up the string that + will ultimately be used to run inference or model training, which + normally includes the use of `numactl` and sending all of the + appropriate arguments to the model's script. Also, if your model + requires any non-standard arguments (arguments that are not part of + the [launch script flags](/docs/general/tensorflow/LaunchBenchmark.md#launch_benchmarkpy-flags)), + the `model_init.py` file is where you would define and parse those + args. + +3. [start.sh](/benchmarks/common/tensorflow/start.sh) is a shell script + that is called by the `launch_benchmark.py` script in the docker + container. This script installs dependencies that are required by + the model, sets up the `PYTHONPATH` environment variable, and then + calls the [run_tf_benchmark.py](/benchmarks/common/tensorflow/run_tf_benchmark.py) + script with the appropriate args. That run script will end up calling + the `model_init.py` file that you have defined in the previous step. + + To add support for a new model in the `start.sh` script, you will + need to add a function with the same name as your model. Note that + this function name should match the `<model name>` folder from the + first step where you set up the directories for your model.
In this + function, add commands to install any third-party dependencies within + an `if [ ${NOINSTALL} != "True" ]; then` conditional block. The + purpose of the `NOINSTALL` flag is to be able to skip the installs + for quicker iteration when running on bare metal or debugging. If + your model requires the `PYTHONPATH` environment variable to be set up + to find model code or dependencies, that should be done in the + model's function. Next, set up the command that will be run. The + standard launch script args are already added to the `CMD` variable, + so your model function will only need to add on more args if you have + model-specific args defined in your `model_init.py`. Lastly, call the + `run_model` function with the `PYTHONPATH` and the `CMD` string. + + Below is a sample template of a `start.sh` model function that + installs dependencies from a `requirements.txt` file, sets up the + `PYTHONPATH` to find model source files, adds on a custom steps flag + to the run command, and then runs the model: + ```bash + function <model_name>() { + if [ ${PRECISION} == "fp32" ]; then + if [ ${NOINSTALL} != "True" ]; then + pip install -r ${MOUNT_EXTERNAL_MODELS_SOURCE}/requirements.txt + fi + + export PYTHONPATH=${PYTHONPATH}:${MOUNT_EXTERNAL_MODELS_SOURCE} + CMD="${CMD} $(add_steps_args)" + PYTHONPATH=${PYTHONPATH} CMD=${CMD} run_model + else + echo "PRECISION=${PRECISION} is not supported for ${MODEL_NAME}" + exit 1 + fi + } + ``` + +Optional step: +* If there is CPU-optimized model code that has not been upstreamed to + the original repository, then it can be added to the + [models](/models) directory in the zoo repo. As with the first step + in the previous section, the directory structure should be set up like: + `/models/<use case>/<framework>/<model name>/<mode>/<precision>`. + + ![Models Directory Structure](models_directory_structure.png) + + If there are model files that can be shared by multiple modes or + precisions, they can be placed in the higher-level directory. For + example, if a file could be shared by both `FP32` and `Int8` + precisions, then it could be placed in the directory at: + `/models/<use case>/<framework>/<model name>/<mode>` (omitting the + `<precision>` directory). Note that if this is being done, you need to + ensure that the license that is associated with the original model + repository is compatible with the license of the model zoo. + +### Debugging + +There are a couple of options for debugging and quicker iteration when +developing new scripts: +* Use the `--debug` flag in the launch_benchmark.py script, which will + give you a shell into the docker container. See the + [debugging section](/docs/general/tensorflow/LaunchBenchmark.md#debugging) + of the launch script documentation for more information on using this + flag. +* Run the launch script on bare metal (without a docker container). The + launch script documentation also has a + [section](/docs/general/tensorflow/LaunchBenchmark.md#alpha-feature-running-on-bare-metal) + with instructions on how to do this. Note that when running without + docker, you are responsible for installing all dependencies on your + system before running the launch script. If you are using this option + during development, be sure to also test _with_ a docker container to + ensure that the `start.sh` script dependency installation is working + properly for your model. + +### Documentation updates + +1.
Create a `README.md` file in the + `/benchmarks/<use case>/<framework>/<model name>` directory: + + ![Add README file](add_readme.png) + + This README file should describe all of the steps necessary to run + the model, including downloading and preprocessing the dataset, + downloading the pretrained model, cloning repositories, and running + the model script with the appropriate arguments. Most models + have best known settings for batch and online inference performance + testing, as well as for accuracy testing. The README file should specify + how to set these configs using the `launch_benchmark.py` script. + +2. Update the table in the [main `benchmarks` README](/benchmarks/README.md) + with a link to the model that you are adding. Note that the models + in this table are ordered alphabetically by use case, framework, and + model name. The model name should link to the original paper for the + model. The instructions column should link to the README + file that you created in the previous step. + +### Testing + +1. After you've completed the above steps, run the model according to + the instructions in the README file for the new model. Ensure that the + performance and accuracy metrics are on par with what you would + expect. + +2. Add unit tests to cover the new model. + * For TensorFlow models, there is a + [parameterized test](/tests/unit/common/tensorflow/test_run_tf_benchmarks.py#L80) + that checks the flow running from `run_tf_benchmarks.py` to the + inference command that is executed by the `model_init.py` file. The + test ensures that the inference command has all of the expected + arguments. + + To add a new parameterized instance of the test for your + new model, add a new JSON file `tf_<model name>_args.json` to the [tf_model_args](/tests/unit/common/tensorflow/tf_model_args) + directory. Each file has a list of dictionaries, and each dictionary has three + items: (1) `_comment`, a comment that describes the command, + (2) `input`, the `run_tf_benchmarks.py` command with the appropriate + flags to run the model, and (3) `output`, the expected inference or training + command that should get run by the `model_init.py` file. + * If any launch script or base class files were changed, then + additional unit tests should be added. + * Unit tests and style checks are run when you post a GitHub PR, and + the tests must pass before the PR is merged. + * For information on how to run the unit tests and style checks + locally, see the [tests documentation](/tests/README.md).
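To make the pieces above concrete, here is a minimal, hypothetical sketch of a new model's `model_init.py`, assembled from the patterns used elsewhere in this patch (`set_kmp_vars()` reading `config.json`, `get_command_prefix()` handling the TCMalloc `LD_PRELOAD` and `numactl` binding, and `run_command()` executing the final string). The inference script name `example_inference.py`, its `--batch-size` flag, and the `run()` method body are illustrative assumptions rather than part of this change; follow an existing `model_init.py` in the repo as the authoritative reference.

```python
# Hypothetical /benchmarks/<use case>/tensorflow/<model name>/inference/fp32/model_init.py
# Sketch only -- names marked as assumptions in the text above are not real files.
import os

from common.base_model_init import BaseModelInitializer
from common.base_model_init import set_env_var


class ModelInitializer(BaseModelInitializer):
    """Initializes the best known configuration and builds the run command."""

    def __init__(self, args, custom_args=[], platform_util=None):
        super(ModelInitializer, self).__init__(args, custom_args, platform_util)

        # Pick default inter/intra op thread counts for the platform
        self.set_num_inter_intra_threads()

        # Load the best known KMP_* settings from the config.json next to this file
        config_file_path = os.path.join(
            os.path.dirname(os.path.realpath(__file__)), "config.json")
        self.set_kmp_vars(config_file_path)

        set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads)

        # get_command_prefix() adds LD_PRELOAD for TCMalloc (when enabled) and numactl binding
        script = os.path.join(self.args.intelai_models, self.args.mode,
                              self.args.precision, "example_inference.py")
        self.run_cmd = self.get_command_prefix(self.args.socket_id) + \
            "{} {} --batch-size={}".format(self.python_exe, script, self.args.batch_size)

    def run(self):
        # Execute the command string that was built above
        self.run_command(self.run_cmd)
```

Any model-specific flags would be parsed from `custom_args` in the constructor and appended to the command string.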
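For the parameterized test described in the Testing section, a hypothetical `tf_<model name>_args.json` entry could look like the sketch below; the model name, flags, and expected command are illustrative only and are not taken from the real files in `tf_model_args`.

```json
[
  {
    "_comment": "<model name> FP32 online inference (illustrative)",
    "input": "run_tf_benchmarks.py --framework=tensorflow --model-name=<model name> --precision=fp32 --mode=inference --batch-size=1 --socket-id=0 --in-graph=/in_graph/frozen_graph.pb --benchmark-only",
    "output": "numactl --cpunodebind=0 --membind=0 python example_inference.py --batch-size=1 --in-graph=/in_graph/frozen_graph.pb"
  }
]
```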
diff --git a/Jenkinsfile b/Jenkinsfile index eac6e7fc9..0eb363206 100644 --- a/Jenkinsfile +++ b/Jenkinsfile @@ -16,8 +16,8 @@ node('skx') { sudo apt-get install -y python3-dev || sudo yum install -y python36-devel.x86_64 # virtualenv 16.3.0 is broken do not use it - python2 -m pip install --force-reinstall --user --upgrade pip virtualenv!=16.3.0 tox - python3 -m pip install --force-reinstall --user --upgrade pip virtualenv!=16.3.0 tox + python2 -m pip install --no-cache-dir --user --upgrade pip==19.0.3 virtualenv!=16.3.0 tox + python3 -m pip install --no-cache-dir --user --upgrade pip==19.0.3 virtualenv!=16.3.0 tox """ } stage('Style tests') { diff --git a/README.md b/README.md index 54b69df95..eb326584b 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,8 @@ This repository contains **links to pre-trained models, sample scripts, best pra - Show how to efficiently execute, train, and deploy Intel-optimized models - Make it easy to get started running Intel-optimized models on Intel hardware in the cloud or on bare metal -***DISCLAIMER: These scripts are not intended for benchmarking Intel platforms. For any performance and/or benchmarking information on specific Intel platforms, visit [https://www.intel.ai/blog](https://www.intel.ai/blog).*** +***DISCLAIMER: These scripts are not intended for benchmarking Intel platforms. +For any performance and/or benchmarking information on specific Intel platforms, visit [https://www.intel.ai/blog](https://www.intel.ai/blog).*** ## How to Use the Model Zoo @@ -31,3 +32,6 @@ We hope this structure is intuitive and helps you find what you are looking for; ![Repo Structure](repo_structure.png) *Note: For model quantization and optimization tools, see [https://github.com/IntelAI/tools](https://github.com/IntelAI/tools)*. + +## How to Contribute +If you would like to add a new benchmarking script, please use [this guide](/Contribute.md). 
diff --git a/add_model_init_and_config.png b/add_model_init_and_config.png new file mode 100644 index 000000000..ef9b88290 Binary files /dev/null and b/add_model_init_and_config.png differ diff --git a/add_readme.png b/add_readme.png new file mode 100644 index 000000000..f28783bad Binary files /dev/null and b/add_readme.png differ diff --git a/benchmarks/README.md b/benchmarks/README.md index 787949b75..a1bac907b 100644 --- a/benchmarks/README.md +++ b/benchmarks/README.md @@ -11,7 +11,7 @@ dependencies to be installed: * [git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) * `wget` for downloading pre-trained models -## Use Cases +## TensorFlow Use Cases | Use Case | Framework | Model | Mode | Instructions | | -----------------------| --------------| ------------------- | --------- |------------------------------| @@ -19,23 +19,36 @@ dependencies to be installed: | Content Creation | TensorFlow | [DRAW](https://arxiv.org/pdf/1502.04623.pdf) | Inference | [FP32](content_creation/tensorflow/draw/README.md#fp32-inference-instructions) | | Face Detection and Alignment | Tensorflow | [FaceNet](https://arxiv.org/pdf/1503.03832.pdf) | Inference | [FP32](face_detection_and_alignment/tensorflow/facenet/README.md#fp32-inference-instructions) | | Face Detection and Alignment | TensorFlow | [MTCC](https://arxiv.org/pdf/1604.02878.pdf) | Inference | [FP32](face_detection_and_alignment/tensorflow/mtcc/README.md#fp32-inference-instructions) | +| Image Recognition | TensorFlow | [DenseNet169](https://arxiv.org/pdf/1608.06993.pdf) | Inference | [FP32](image_recognition/tensorflow/densenet169/README.md#fp32-inference-instructions) | | Image Recognition | TensorFlow | [Inception ResNet V2](https://arxiv.org/pdf/1602.07261.pdf) | Inference | [Int8](image_recognition/tensorflow/inception_resnet_v2/README.md#int8-inference-instructions) [FP32](image_recognition/tensorflow/inception_resnet_v2/README.md#fp32-inference-instructions) | | Image Recognition | TensorFlow | [Inception V3](https://arxiv.org/pdf/1512.00567.pdf) | Inference | [Int8](image_recognition/tensorflow/inceptionv3/README.md#int8-inference-instructions) [FP32](image_recognition/tensorflow/inceptionv3/README.md#fp32-inference-instructions) | | Image Recognition | TensorFlow | [Inception V4](https://arxiv.org/pdf/1602.07261.pdf) | Inference | [Int8](image_recognition/tensorflow/inceptionv4/README.md#int8-inference-instructions) [FP32](image_recognition/tensorflow/inceptionv4/README.md#fp32-inference-instructions) | -| Image Recognition | TensorFlow | [MobileNet V1](https://arxiv.org/pdf/1704.04861.pdf) | Inference | [FP32](image_recognition/tensorflow/mobilenet_v1/README.md#fp32-inference-instructions) | +| Image Recognition | TensorFlow | [MobileNet V1](https://arxiv.org/pdf/1704.04861.pdf) | Inference | [Int8](image_recognition/tensorflow/mobilenet_v1/README.md#int8-inference-instructions) [FP32](image_recognition/tensorflow/mobilenet_v1/README.md#fp32-inference-instructions) | | Image Recognition | TensorFlow | [ResNet 101](https://arxiv.org/pdf/1512.03385.pdf) | Inference | [Int8](image_recognition/tensorflow/resnet101/README.md#int8-inference-instructions) [FP32](image_recognition/tensorflow/resnet101/README.md#fp32-inference-instructions) | | Image Recognition | TensorFlow | [ResNet 50](https://arxiv.org/pdf/1512.03385.pdf) | Inference | [Int8](image_recognition/tensorflow/resnet50/README.md#int8-inference-instructions) [FP32](image_recognition/tensorflow/resnet50/README.md#fp32-inference-instructions) | +| Image Recognition | 
TensorFlow | [ResNet 50v1.5](https://github.com/tensorflow/models/tree/master/official/resnet) | Inference | [Int8](image_recognition/tensorflow/resnet50v1_5/README.md#int8-inference-instructions) [FP32](image_recognition/tensorflow/resnet50v1_5/README.md#fp32-inference-instructions) | | Image Recognition | TensorFlow | [SqueezeNet](https://arxiv.org/pdf/1602.07360.pdf) | Inference | [FP32](image_recognition/tensorflow/squeezenet/README.md#fp32-inference-instructions) | | Image Segmentation | TensorFlow | [Mask R-CNN](https://arxiv.org/pdf/1703.06870.pdf) | Inference | [FP32](image_segmentation/tensorflow/maskrcnn/README.md#fp32-inference-instructions) | | Image Segmentation | TensorFlow | [UNet](https://arxiv.org/pdf/1505.04597.pdf) | Inference | [FP32](image_segmentation/tensorflow/unet/README.md#fp32-inference-instructions) | +| Language Modeling | TensorFlow | [LM-1B](https://arxiv.org/pdf/1602.02410.pdf) | Inference | [FP32](language_modeling/tensorflow/lm-1b/README.md#fp32-inference-instructions) | | Language Translation | TensorFlow | [GNMT](https://arxiv.org/pdf/1609.08144.pdf) | Inference | [FP32](language_translation/tensorflow/gnmt/README.md#fp32-inference-instructions) | | Language Translation | TensorFlow | [Transformer Language](https://arxiv.org/pdf/1706.03762.pdf)| Inference | [FP32](language_translation/tensorflow/transformer_language/README.md#fp32-inference-instructions) | | Language Translation | TensorFlow | [Transformer_LT_Official ](https://arxiv.org/pdf/1706.03762.pdf)| Inference | [FP32](language_translation/tensorflow/transformer_lt_official/README.md#fp32-inference-instructions) | -| Object Detection | TensorFlow | [R-FCN](https://arxiv.org/pdf/1605.06409.pdf) | Inference | [FP32](object_detection/tensorflow/rfcn/README.md#fp32-inference-instructions) | +| Object Detection | TensorFlow | [R-FCN](https://arxiv.org/pdf/1605.06409.pdf) | Inference | [Int8](object_detection/tensorflow/rfcn/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/rfcn/README.md#fp32-inference-instructions) | | Object Detection | TensorFlow | [Faster R-CNN](https://arxiv.org/pdf/1506.01497.pdf) | Inference | [Int8](object_detection/tensorflow/faster_rcnn/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/faster_rcnn/README.md#fp32-inference-instructions) | -| Object Detection | TensorFlow | [SSD-MobileNet](https://arxiv.org/pdf/1704.04861.pdf) | Inference | [FP32](object_detection/tensorflow/ssd-mobilenet/README.md#fp32-inference-instructions) | -| Object Detection | TensorFlow | [SSD-ResNet34](https://arxiv.org/pdf/1512.02325.pdf) | Inference | [FP32](object_detection/tensorflow/ssd-resnet34/README.md#fp32-inference-instructions) | +| Object Detection | TensorFlow | [SSD-MobileNet](https://arxiv.org/pdf/1704.04861.pdf) | Inference | [Int8](object_detection/tensorflow/ssd-mobilenet/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/ssd-mobilenet/README.md#fp32-inference-instructions) | +| Object Detection | TensorFlow | [SSD-ResNet34](https://arxiv.org/pdf/1512.02325.pdf) | Inference | [Int8](object_detection/tensorflow/ssd-resnet34/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/ssd-resnet34/README.md#fp32-inference-instructions) | +| Object Detection | TensorFlow | [SSD-VGG16](https://arxiv.org/pdf/1512.02325.pdf) | Inference | [Int8](object_detection/tensorflow/ssd_vgg16/README.md#int8-inference-instructions) [FP32](object_detection/tensorflow/ssd_vgg16/README.md#fp32-inference-instructions) | | 
Recommendation | TensorFlow | [NCF](https://arxiv.org/pdf/1708.05031.pdf) | Inference | [FP32](recommendation/tensorflow/ncf/README.md#fp32-inference-instructions) | | Recommendation | TensorFlow | [Wide & Deep Large Dataset](https://arxiv.org/pdf/1606.07792.pdf) | Inference | [Int8](recommendation/tensorflow/wide_deep_large_ds/README.md#int8-inference-instructions) [FP32](recommendation/tensorflow/wide_deep_large_ds/README.md#fp32-inference-instructions) | | Recommendation | TensorFlow | [Wide & Deep](https://arxiv.org/pdf/1606.07792.pdf) | Inference | [FP32](recommendation/tensorflow/wide_deep/README.md#fp32-inference-instructions) | | Text-to-Speech | TensorFlow | [WaveNet](https://arxiv.org/pdf/1609.03499.pdf) | Inference | [FP32](text_to_speech/tensorflow/wavenet/README.md#fp32-inference-instructions) | + + +## TensorFlow Serving Use Cases + + +| Use Case | Framework | Model | Mode | Instructions | +| -----------------------| --------------| ------------------- | --------- |------------------------------| +| Image Recognition | TensorFlow Serving | [Inception V3](https://arxiv.org/pdf/1512.00567.pdf) | Inference | [FP32](image_recognition/tensorflow_serving/inceptionv3/README.md#fp32-inference-instructions) | + diff --git a/benchmarks/adversarial_networks/tensorflow/dcgan/README.md b/benchmarks/adversarial_networks/tensorflow/dcgan/README.md index e23fc9c6a..4950d0f63 100644 --- a/benchmarks/adversarial_networks/tensorflow/dcgan/README.md +++ b/benchmarks/adversarial_networks/tensorflow/dcgan/README.md @@ -60,7 +60,7 @@ $ python launch_benchmark.py \ --socket-id 0 \ --checkpoint /home//dcgan_fp32_unconditional_cifar10_pretrained_model \ --data-location /home//cifar10 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 ``` 5. Log files are located at the value of `--output-dir`. 
@@ -71,8 +71,6 @@ Batch size: 100 Batches number: 500 Time spent per BATCH: 35.8268 ms Total samples/sec: 2791.2030 samples/s -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 100 Log location outside container: {--output-dir value}/benchmark_dcgan_inference_fp32_20190117_220342.log ``` \ No newline at end of file diff --git a/benchmarks/adversarial_networks/tensorflow/dcgan/inference/fp32/config.json b/benchmarks/adversarial_networks/tensorflow/dcgan/inference/fp32/config.json new file mode 100644 index 000000000..dfac18793 --- /dev/null +++ b/benchmarks/adversarial_networks/tensorflow/dcgan/inference/fp32/config.json @@ -0,0 +1,8 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1, + "KMP_HW_SUBSET": "1T" + } +} diff --git a/benchmarks/adversarial_networks/tensorflow/dcgan/inference/fp32/model_init.py b/benchmarks/adversarial_networks/tensorflow/dcgan/inference/fp32/model_init.py index aed323e94..2e2f88104 100644 --- a/benchmarks/adversarial_networks/tensorflow/dcgan/inference/fp32/model_init.py +++ b/benchmarks/adversarial_networks/tensorflow/dcgan/inference/fp32/model_init.py @@ -37,13 +37,13 @@ def __init__(self, args, custom_args=[], platform_util=None): self.set_num_inter_intra_threads() # Set KMP env vars, if they haven't already been set - self.set_kmp_vars() - set_env_var("KMP_HW_SUBSET", "1T") + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) benchmark_script = os.path.join( self.args.intelai_models, args.mode, args.precision, "inference_bench.py") - self.benchmark_command = self.get_numactl_command(args.socket_id) + \ + self.benchmark_command = self.get_command_prefix(args.socket_id) + \ self.python_exe + " " + benchmark_script set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) diff --git a/benchmarks/common/base_benchmark_util.py b/benchmarks/common/base_benchmark_util.py index adb102c3c..1aefdebd0 100644 --- a/benchmarks/common/base_benchmark_util.py +++ b/benchmarks/common/base_benchmark_util.py @@ -23,6 +23,7 @@ from __future__ import print_function import os +import sys from argparse import ArgumentParser from common import platform_util @@ -47,6 +48,9 @@ def _define_args(self): """define args for the benchmark interface shared by FP32 and int8 models""" + # only require the arg, if we aren't just printing out --help + required_arg = "--help" not in sys.argv + self._common_arg_parser = ArgumentParser( add_help=False, description="Parse args for base benchmark " "interface") @@ -54,7 +58,7 @@ def _define_args(self): self._common_arg_parser.add_argument( "-f", "--framework", help="Specify the name of the deep learning framework to use.", - dest="framework", default=None, required=True) + dest="framework", default=None, required=required_arg) self._common_arg_parser.add_argument( "-r", "--model-source-dir", @@ -64,15 +68,15 @@ def _define_args(self): self._common_arg_parser.add_argument( "-p", "--precision", help="Specify the model precision to use: fp32, int8, or bfloat16", - required=True, choices=["fp32", "int8", "bfloat16"], + required=required_arg, choices=["fp32", "int8", "bfloat16"], dest="precision") self._common_arg_parser.add_argument( "-mo", "--mode", help="Specify the type training or inference ", - required=True, choices=["training", "inference"], dest="mode") + required=required_arg, choices=["training", "inference"], dest="mode") 
self._common_arg_parser.add_argument( - "-m", "--model-name", required=True, + "-m", "--model-name", required=required_arg, help="model name to run benchmarks for", dest="model_name") self._common_arg_parser.add_argument( @@ -128,7 +132,9 @@ def _define_args(self): help="Specify the location of trained model checkpoint directory. " "If mode=training model/weights will be written to this " "location. If mode=inference assumes that the location points" - " to a model that has already been trained.", + " to a model that has already been trained. Note that using " + "checkpoint files for inference is being deprecated, in favor " + "of using frozen graphs.", dest="checkpoint", default=None, type=check_valid_folder) self._common_arg_parser.add_argument( @@ -155,6 +161,30 @@ def _define_args(self): "with --accuracy-only and --mode=inference.", dest="output_results", action="store_true") + # Note this can't be a normal boolean flag, because we need to know when the user + # does not explicitly set the arg value so that we can apply the appropriate + # default value, depending on the precision. + self._common_arg_parser.add_argument( + "--disable-tcmalloc", + help="When TCMalloc is enabled, the google-perftools are installed (if running " + "using docker) and the LD_PRELOAD environment variable is set to point to " + "the TCMalloc library file. The TCMalloc memory allocator produces better " + "performance results with smaller batch sizes. This flag disables the use of " + "TCMalloc when set to True. For int8 benchmarking, TCMalloc is enabled by " + "default (--disable-tcmalloc=False). For other precisions, the flag is " + "--disable-tcmalloc=True by default.", + dest="disable_tcmalloc", choices=["True", "False"], + default=None + ) + + self._common_arg_parser.add_argument( + "--tcmalloc-large-alloc-report-threshold", + help="Sets the TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD environment variable to " + "the specified value. The environment variable sets the threshold (in bytes) " + "for when large memory allocation messages will be displayed.", + dest="tcmalloc_large_alloc_report_threshold", default=2147483648, type=int + ) + self._common_arg_parser.add_argument( "-v", "--verbose", help="Print verbose information.", dest="verbose", action="store_true") @@ -198,7 +228,8 @@ def _validate_args(self): raise ValueError("Number of cores exceeds system core number: {}".
format(system_num_cores)) - if args.output_results and (args.model_name != "resnet50" or args.precision != "fp32"): + if args.output_results and ((args.model_name != "resnet50" and + args.model_name != "resnet50v1_5") or args.precision != "fp32"): raise ValueError("--output-results is currently only supported for resnet50 FP32 inference.") elif args.output_results and (args.mode != "inference" or not args.data_location): raise ValueError("--output-results can only be used when running inference with a dataset.") diff --git a/benchmarks/common/base_model_init.py b/benchmarks/common/base_model_init.py index 6294190d8..4a334ca65 100644 --- a/benchmarks/common/base_model_init.py +++ b/benchmarks/common/base_model_init.py @@ -18,6 +18,8 @@ # SPDX-License-Identifier: EPL-2.0 # +import glob +import json import os @@ -42,6 +44,13 @@ def __init__(self, args, custom_args=[], platform_util=None): self.custom_args = custom_args self.platform_util = platform_util + # Set default values for TCMalloc and convert string value to a boolean + if self.args.disable_tcmalloc is None: + # Set to False for int8 and True for other precisions + self.args.disable_tcmalloc = self.args.precision != "int8" + elif isinstance(self.args.disable_tcmalloc, str): + self.args.disable_tcmalloc = self.args.disable_tcmalloc == "True" + # Ensure that we are using the proper version of python to run the benchmarking script self.python_exe = os.environ["PYTHON_EXE"] @@ -61,15 +70,32 @@ def run_command(self, cmd): os.system(cmd) - def get_numactl_command(self, socket_id): + def get_command_prefix(self, socket_id, numactl=True): """ - Returns the numactl command with --cpunodebind and --membind set to the - specified socket_id. If socket_id is set to -1 (undefined) then an - empty string is returned. + Returns the command prefix with: + - LD_PRELOAD for int8 models (if tcmalloc is not disabled) + - The numactl command with --cpunodebind and --membind set to the specified socket_id (if numactl=True) """ - return "" if socket_id == -1 else \ - "numactl --cpunodebind={0} --membind={0} ".format( - str(socket_id)) + command = "" + + if not self.args.disable_tcmalloc: + # Try to find the TCMalloc library file + matches = glob.glob("/usr/lib/libtcmalloc.so*") + + if len(matches) == 0: + matches = glob.glob("/usr/lib64/libtcmalloc.so*") + + if len(matches) > 0: + command += "LD_PRELOAD={} ".format(matches[0]) + else: + # Unable to find the TCMalloc library file + print("Warning: Unable to find the TCMalloc library file (libtcmalloc.so) in /usr/lib or /usr/lib64, " + "so the LD_PRELOAD environment variable will not be set.") + + if socket_id != -1 and numactl: + command += "numactl --cpunodebind={0} --membind={0} ".format(str(socket_id)) + + return command def add_args_to_command(self, command, arg_list): """ @@ -135,14 +161,28 @@ def set_num_inter_intra_threads(self, num_inter_threads=None, num_intra_threads= print("num_inter_threads: {}\nnum_intra_threads: {}".format( self.args.num_inter_threads, self.args.num_intra_threads)) - def set_kmp_vars(self, kmp_settings="1", kmp_blocktime="1", kmp_affinity="granularity=fine,verbose,compact,1,0"): + def set_kmp_vars(self, config_file_path, kmp_settings=None, kmp_blocktime=None, kmp_affinity=None): """ Sets KMP_* environment variables to the specified value, if the environment variable has not already been set. - The default values for this function's args are the most common values that we have seen in the model zoo. + The default values in the json file are the best known settings for the model. 
""" + if os.path.exists(config_file_path): + with open(config_file_path, 'r') as config: + config_object = json.load(config) + + # First sets default from config file + for param in config_object.keys(): + for env in config_object[param].keys(): + set_env_var(env, config_object[param][env]) + + else: + print("Warning: File {} does not exist and \ + cannot be used to set KMP environment variables".format(config_file_path)) + + # Override user provided envs if kmp_settings: - set_env_var("KMP_SETTINGS", kmp_settings) + set_env_var("KMP_SETTINGS", kmp_settings, overwrite_existing=True) if kmp_blocktime: - set_env_var("KMP_BLOCKTIME", kmp_blocktime) + set_env_var("KMP_BLOCKTIME", kmp_blocktime, overwrite_existing=True) if kmp_affinity: - set_env_var("KMP_AFFINITY", kmp_affinity) + set_env_var("KMP_AFFINITY", kmp_affinity, overwrite_existing=True) diff --git a/benchmarks/common/tensorflow/start.sh b/benchmarks/common/tensorflow/start.sh index bc7fd699c..9ea5f9f02 100755 --- a/benchmarks/common/tensorflow/start.sh +++ b/benchmarks/common/tensorflow/start.sh @@ -45,6 +45,8 @@ echo " NUM_CORES: ${NUM_CORES}" echo " BENCHMARK_ONLY: ${BENCHMARK_ONLY}" echo " ACCURACY_ONLY: ${ACCURACY_ONLY}" echo " OUTPUT_RESULTS: ${OUTPUT_RESULTS}" +echo " DISABLE_TCMALLOC: ${DISABLE_TCMALLOC}" +echo " TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD: ${TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD}" echo " NOINSTALL: ${NOINSTALL}" echo " OUTPUT_DIR: ${OUTPUT_DIR}" @@ -58,10 +60,23 @@ if [[ ${NOINSTALL} != "True" ]]; then ## install common dependencies apt update apt full-upgrade -y + # Set env var before installs so that user interaction is not required + export DEBIAN_FRONTEND=noninteractive apt-get install python-tk numactl -y apt install -y libsm6 libxext6 pip install --upgrade pip pip install requests + + # install libgoogle-perftools-dev for tcmalloc + if [[ ${DISABLE_TCMALLOC} != "True" ]]; then + apt-get install --no-install-recommends --fix-missing google-perftools -y + if [ ! -f /usr/lib/libtcmalloc.so ]; then + apt-get install --no-install-recommends --fix-missing libgoogle-perftools-dev -y + if [ ! -f /usr/lib/libtcmalloc.so ]; then + ln -sf /usr/lib/x86_64-linux-gnu/libtcmalloc.so /usr/lib/libtcmalloc.so + fi + fi + fi fi verbose_arg="" @@ -170,6 +185,10 @@ if [ ${DATA_NUM_INTRA_THREADS} != "None" ]; then CMD="${CMD} --data-num-intra-threads=${DATA_NUM_INTRA_THREADS}" fi +if [ ${DISABLE_TCMALLOC} != "None" ]; then + CMD="${CMD} --disable-tcmalloc=${DISABLE_TCMALLOC}" +fi + function install_protoc() { pushd "${MOUNT_EXTERNAL_MODELS_SOURCE}/research" @@ -177,7 +196,7 @@ function install_protoc() { if [ ! 
-f "bin/protoc" ]; then install_location=$1 echo "protoc not found, installing protoc from ${install_location}" - apt-get -y install wget + apt-get -y install wget unzip wget -O protobuf.zip ${install_location} unzip -o protobuf.zip rm protobuf.zip @@ -278,6 +297,19 @@ function dcgan() { fi } +# DenseNet 169 model +function densenet169() { + if [ ${PRECISION} == "fp32" ]; then + CMD="${CMD} $(add_arg "--input_height" ${input_height}) $(add_arg "--input_width" ${input_width}) \ + $(add_arg "--warmup_steps" ${warmup_steps}) $(add_arg "--steps" ${steps}) $(add_arg "--input_layer" ${input_layer}) \ + $(add_arg "--output_layer" ${output_layer})" + PYTHONPATH=${PYTHONPATH} CMD=${CMD} run_model + else + echo "PRECISION=${PRECISION} is not supported for ${MODEL_NAME}" + exit 1 + fi +} + # DRAW model function draw() { if [ ${PRECISION} == "fp32" ]; then @@ -397,6 +429,18 @@ function inception_resnet_v2() { fi } +# language modeling lm-1b +function lm-1b() { + if [ ${PRECISION} == "fp32" ]; then + CMD="${CMD} $(add_steps_args)" + + PYTHONPATH=${PYTHONPATH} CMD=${CMD} run_model + else + echo "PRECISION=${PRECISION} is not supported for ${MODEL_NAME}" + exit 1 + fi +} + # Mask R-CNN model function maskrcnn() { if [ ${PRECISION} == "fp32" ]; then @@ -405,6 +449,7 @@ function maskrcnn() { if [ ${NOINSTALL} != "True" ]; then # install dependencies pip3 install -r ${MOUNT_EXTERNAL_MODELS_SOURCE}/requirements.txt + pip3 install --force-reinstall scipy==1.2.1 Pillow==5.3.0 # install cocoapi get_cocoapi ${MOUNT_EXTERNAL_MODELS_SOURCE}/coco ${MOUNT_EXTERNAL_MODELS_SOURCE}/samples/coco @@ -423,6 +468,11 @@ function mobilenet_v1() { if [ ${PRECISION} == "fp32" ]; then export PYTHONPATH=${PYTHONPATH}:${MOUNT_EXTERNAL_MODELS_SOURCE}:${MOUNT_EXTERNAL_MODELS_SOURCE}/research:${MOUNT_EXTERNAL_MODELS_SOURCE}/research/slim PYTHONPATH=${PYTHONPATH} CMD=${CMD} run_model + elif [ ${PRECISION} == "int8" ]; then + CMD="${CMD} $(add_arg "--input_height" ${input_height}) $(add_arg "--input_width" ${input_width}) \ + $(add_arg "--warmup_steps" ${warmup_steps}) $(add_arg "--steps" ${steps}) $(add_arg "--input_layer" ${input_layer}) \ + $(add_arg "--output_layer" ${output_layer})" + PYTHONPATH=${PYTHONPATH} CMD=${CMD} run_model else echo "PRECISION=${PRECISION} is not supported for ${MODEL_NAME}" exit 1 @@ -501,6 +551,7 @@ function rfcn() { if [ ${NOINSTALL} != "True" ]; then # install dependencies pip install -r "${MOUNT_BENCHMARK}/object_detection/tensorflow/rfcn/requirements.txt" + original_dir=$(pwd) cd "${MOUNT_EXTERNAL_MODELS_SOURCE}/research" @@ -511,6 +562,10 @@ function rfcn() { get_cocoapi ${MOUNT_EXTERNAL_MODELS_SOURCE}/cocoapi ${MOUNT_EXTERNAL_MODELS_SOURCE}/research/ fi + # Fix the object_detection_evaluation.py file to change unicode() to str() so that it works in py3 + chmod -R 777 ${MOUNT_EXTERNAL_MODELS_SOURCE}/research/object_detection/utils/object_detection_evaluation.py + sed -i.bak "s/unicode(/str(/g" ${MOUNT_EXTERNAL_MODELS_SOURCE}/research/object_detection/utils/object_detection_evaluation.py + split_arg="" if [ -n "${split}" ] && [ ${ACCURACY_ONLY} == "True" ]; then split_arg="--split=${split}" @@ -586,12 +641,19 @@ function ssd_mobilenet() { # SSD-ResNet34 model function ssd-resnet34() { - if [ ${PRECISION} == "fp32" ]; then + if [ ${PRECISION} == "fp32" ] || [ ${PRECISION} == "int8" ]; then if [ ${NOINSTALL} != "True" ]; then for line in $(cat ${MOUNT_BENCHMARK}/object_detection/tensorflow/ssd-resnet34/requirements.txt) do pip install $line done + apt install -y git-all + old_dir=${PWD} + cd /tmp + git 
clone --single-branch https://github.com/tensorflow/benchmarks.git + cd benchmarks + git checkout 1e7d788042dfc6d5e5cd87410c57d5eccee5c664 + cd ${old_dir} fi CMD=${CMD} run_model @@ -601,6 +663,32 @@ function ssd-resnet34() { fi } +# SSD-VGG16 model +function ssd_vgg16() { + + if [ ${NOINSTALL} != "True" ]; then + pip install opencv-python Cython + + if [ ${ACCURACY_ONLY} == "True" ]; then + # get the python cocoapi + get_cocoapi ${MOUNT_EXTERNAL_MODELS_SOURCE}/coco ${MOUNT_INTELAI_MODELS_SOURCE}/inference + fi + fi + + cp ${MOUNT_INTELAI_MODELS_SOURCE}/__init__.py ${MOUNT_EXTERNAL_MODELS_SOURCE}/dataset + cp ${MOUNT_INTELAI_MODELS_SOURCE}/__init__.py ${MOUNT_EXTERNAL_MODELS_SOURCE}/preprocessing + cp ${MOUNT_INTELAI_MODELS_SOURCE}/__init__.py ${MOUNT_EXTERNAL_MODELS_SOURCE}/utility + export PYTHONPATH=${PYTHONPATH}:${MOUNT_EXTERNAL_MODELS_SOURCE} + + if [ ${PRECISION} == "int8" ] || [ ${PRECISION} == "fp32" ]; then + CMD="${CMD} $(add_steps_args)" + PYTHONPATH=${PYTHONPATH} CMD=${CMD} run_model + else + echo "PRECISION=${PRECISION} is not supported for ${MODEL_NAME}" + exit 1 + fi +} + # UNet model function unet() { if [ ${PRECISION} == "fp32" ]; then @@ -629,10 +717,6 @@ function transformer_language() { echo "transformer-language requires -- decode_from_file arg to be defined" exit 1 fi - if [[ -z "${reference}" ]]; then - echo "transformer-language requires -- reference arg to be defined" - exit 1 - fi if [[ -z "${CHECKPOINT_DIRECTORY}" ]]; then echo "transformer-language requires --checkpoint arg to be defined" exit 1 @@ -650,8 +734,11 @@ function transformer_language() { cp ${MOUNT_INTELAI_MODELS_SOURCE}/${MODE}/${PRECISION}/decoding.py ${MOUNT_EXTERNAL_MODELS_SOURCE}/tensor2tensor/utils/decoding.py - CMD="${CMD} --decode_from_file=${CHECKPOINT_DIRECTORY}/${decode_from_file} \ - --reference=${CHECKPOINT_DIRECTORY}/${reference}" + CMD="${CMD} --decode_from_file=${CHECKPOINT_DIRECTORY}/${decode_from_file}" + + if [[ -n "${reference}" ]]; then + CMD="${CMD} --reference=${CHECKPOINT_DIRECTORY}/${reference}" + fi PYTHONPATH=${PYTHONPATH} CMD=${CMD} run_model else @@ -681,6 +768,10 @@ function transformer_lt_official() { exit 1 fi + if [ ${NOINSTALL} != "True" ]; then + pip install pandas + fi + cp ${MOUNT_INTELAI_MODELS_SOURCE}/${MODE}/${PRECISION}/infer_ab.py \ ${MOUNT_EXTERNAL_MODELS_SOURCE}/official/transformer/infer_ab.py @@ -752,7 +843,13 @@ function wide_deep_large_ds() { if [[ -z "${LIBTCMALLOC}" ]]; then echo "libtcmalloc.so.4 not found, trying to install" apt-get update - apt-get install google-perftools --fix-missing -y + apt-get install --no-install-recommends --fix-missing google-perftools -y + if [ ! -f /usr/lib/libtcmalloc.so ]; then + apt-get install --no-install-recommends --fix-missing libgoogle-perftools-dev -y + if [ ! 
-f /usr/lib/libtcmalloc.so ]; then + ln -sf /usr/lib/x86_64-linux-gnu/libtcmalloc.so /usr/lib/libtcmalloc.so + fi + fi fi LIBTCMALLOC="$(ldconfig -p | grep $TCMALLOC_LIB | tr ' ' '\n' | grep /)" @@ -789,6 +886,8 @@ echo "Log output location: ${LOGFILE}" MODEL_NAME=$(echo ${MODEL_NAME} | tr 'A-Z' 'a-z') if [ ${MODEL_NAME} == "dcgan" ]; then dcgan +elif [ ${MODEL_NAME} == "densenet169" ]; then + densenet169 elif [ ${MODEL_NAME} == "draw" ]; then draw elif [ ${MODEL_NAME} == "facenet" ]; then @@ -803,6 +902,8 @@ elif [ ${MODEL_NAME} == "inceptionv4" ]; then inceptionv4 elif [ ${MODEL_NAME} == "inception_resnet_v2" ]; then inception_resnet_v2 +elif [ ${MODEL_NAME} == "lm-1b" ]; then + lm-1b elif [ ${MODEL_NAME} == "maskrcnn" ]; then maskrcnn elif [ ${MODEL_NAME} == "mobilenet_v1" ]; then @@ -815,6 +916,8 @@ elif [ ${MODEL_NAME} == "resnet101" ]; then resnet50_101_inceptionv3 elif [ ${MODEL_NAME} == "resnet50" ]; then resnet50_101_inceptionv3 +elif [ ${MODEL_NAME} == "resnet50v1_5" ]; then + resnet50_101_inceptionv3 elif [ ${MODEL_NAME} == "rfcn" ]; then rfcn elif [ ${MODEL_NAME} == "squeezenet" ]; then @@ -823,6 +926,8 @@ elif [ ${MODEL_NAME} == "ssd-mobilenet" ]; then ssd_mobilenet elif [ ${MODEL_NAME} == "ssd-resnet34" ]; then ssd-resnet34 +elif [ ${MODEL_NAME} == "ssd_vgg16" ]; then + ssd_vgg16 elif [ ${MODEL_NAME} == "unet" ]; then unet elif [ ${MODEL_NAME} == "transformer_language" ]; then diff --git a/benchmarks/common/tensorflow_serving/__init__.py b/benchmarks/common/tensorflow_serving/__init__.py new file mode 100644 index 000000000..cf793ec6a --- /dev/null +++ b/benchmarks/common/tensorflow_serving/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/common/tensorflow_serving/build_tfserving_image.sh b/benchmarks/common/tensorflow_serving/build_tfserving_image.sh new file mode 100644 index 000000000..a47505f88 --- /dev/null +++ b/benchmarks/common/tensorflow_serving/build_tfserving_image.sh @@ -0,0 +1,73 @@ +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +# Bash script to build tensorflow serving image +# Setup proxy on your terminal before running the script. 
+ +# To build image separately +# TF_SERVING_VERSION=1.13.0 MKL_IMAGE_TAG=tensorflow/serving:latest-mkl bash build_tfserving_image.sh + +#!/usr/bin/env bash +set -e +set -x + +WORKDIR=serving_workspace + +if [ -d ${WORKDIR} ]; then + rm -rf ${WORKDIR} +fi + +pushd $(pwd) + +mkdir -p ${WORKDIR} +cd ${WORKDIR} + +# Build Tensorflow Serving image +TF_SERVING_VERSION=${TF_SERVING_VERSION:-"1.13.0"} +echo "Using TF_SERVING_VERSION=${TF_SERVING_VERSION} to build docker image" + +# Clone official tensorflow serving repo +git clone https://github.com/tensorflow/serving.git + +TF_SERVING_ROOT=$(pwd)/serving +cd ${TF_SERVING_ROOT}/tensorflow_serving/tools/docker/ + +# Build Dockerfile.devel-mkl +docker build \ + --build-arg TF_SERVING_BAZEL_OPTIONS="--incompatible_disallow_data_transition=false --incompatible_disallow_filetype=false" \ + --build-arg TF_SERVING_VERSION_GIT_BRANCH=${TF_SERVING_VERSION} \ + --build-arg HTTP_PROXY=${HTTP_PROXY} \ + --build-arg HTTPS_PROXY=${HTTPS_PROXY} \ + --build-arg http_proxy=${http_proxy} \ + --build-arg https_proxy=${https_proxy} \ + -f Dockerfile.devel-mkl -t tensorflow/serving:latest-devel-mkl . + +# Build Dockerfile.mkl, which uses above image as base_image +docker build \ + --build-arg TF_SERVING_VERSION_GIT_BRANCH=${TF_SERVING_VERSION} \ + --build-arg HTTP_PROXY=${HTTP_PROXY} \ + --build-arg HTTPS_PROXY=${HTTPS_PROXY} \ + --build-arg http_proxy=${http_proxy} \ + --build-arg https_proxy=${https_proxy} \ + -f Dockerfile.mkl -t ${MKL_IMAGE_TAG} . + +popd + +rm -rf ${WORKDIR} + +echo "Image built with tag: ${MKL_IMAGE_TAG}" diff --git a/benchmarks/common/tensorflow_serving/start.sh b/benchmarks/common/tensorflow_serving/start.sh new file mode 100644 index 000000000..b27e40ed5 --- /dev/null +++ b/benchmarks/common/tensorflow_serving/start.sh @@ -0,0 +1,162 @@ +#!/usr/bin/env bash +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# +#!/usr/bin/env bash +set -e +set -x + +echo 'Running with parameters:' +echo " USE_CASE: ${USE_CASE}" +echo " FRAMEWORK: ${FRAMEWORK}" +echo " WORKSPACE: ${WORKSPACE}" +echo " IN_GRAPH: ${IN_GRAPH}" +echo " MODEL_NAME: ${MODEL_NAME}" +echo " MODE: ${MODE}" +echo " PRECISION: ${PRECISION}" +echo " BATCH_SIZE: ${BATCH_SIZE}" +echo " BENCHMARK_ONLY: ${BENCHMARK_ONLY}" +echo " ACCURACY_ONLY: ${ACCURACY_ONLY}" +echo " OMP_NUM_THREADS: ${OMP_NUM_THREADS}" +echo " NUM_INTRA_THREADS: ${NUM_INTRA_THREADS}" +echo " NUM_INTER_THREADS: ${NUM_INTER_THREADS}" +echo " OUTPUT_DIR: ${OUTPUT_DIR}" +echo " TF_SERVING_VERSION: ${TF_SERVING_VERSION}" + + +if [ ${ACCURACY_ONLY} == "True" ]; then + echo "Accuracy is not supported with Tensorflow Serving" + exit 1 +fi + +WORKDIR=workspace + +if [ -d ${WORKDIR} ]; then + rm -rf ${WORKDIR} +fi + +pushd $(pwd) + +mkdir -p ${WORKDIR} +cd ${WORKDIR} + +# Check docker +if ! [[ $(which docker) && $(docker --version) ]]; then + echo "Docker not found, please install docker to proceed." 
+ exit 1 +fi + +# Check for pip +if ! [[ $(which pip) && $(pip --version) ]]; then + echo "pip not found, please install pip to proceed." + exit 1 +fi + +timestamp=`date +%Y%m%d_%H%M%S` +LOG_FILENAME="benchmark_${MODEL_NAME}_${MODE}_${PRECISION}_${timestamp}.log" +if [ ! -d "${OUTPUT_DIR}" ]; then + mkdir ${OUTPUT_DIR} +fi + +MKL_IMAGE_TAG=tensorflow/serving:latest-mkl + +# Build Tensorflow Serving docker image +echo "Building tensorflow serving image..." +echo "First time it takes few minutes to build images, consecutive builds are much faster" + +TF_SERVING_VERSION=${TF_SERVING_VERSION} MKL_IMAGE_TAG=${MKL_IMAGE_TAG} bash ${WORKSPACE}/build_tfserving_image.sh + +function docker_run(){ + docker run \ + --name=${CONTAINER_NAME} \ + --rm \ + -d \ + -p 8500:8500 \ + -v /tmp:/models/${MODEL_NAME} \ + -e MODEL_NAME=${MODEL_NAME} \ + -e OMP_NUM_THREADS=${OMP_NUM_THREADS} \ + -e TENSORFLOW_INTER_OP_PARALLELISM=${NUM_INTER_THREADS} \ + -e TENSORFLOW_INTRA_OP_PARALLELISM=${NUM_INTRA_THREADS} \ + ${MKL_IMAGE_TAG} +} + + +function resnet50_or_inceptionv3(){ + # Setup virtual env + pip install virtualenv + virtualenv venv + + source venv/bin/activate + # Make sure intel-tensorflow is after tensorflow-serving-api, so that + # tensorflow from intel-tensorflow get installed effectively. + pip install grpc \ + requests \ + tensorflow-serving-api \ + intel-tensorflow + # cd to image recognition tfserving scripts + cd ${WORKSPACE}/../../${USE_CASE}/${FRAMEWORK}/${MODEL_NAME}/${MODE}/${PRECISION} + + # by default converted model is saved at /tmp/1 + rm -rf /tmp/1 + + # convert pretrained model to savedmodel + python model_graph_to_saved_model.py --import_path ${IN_GRAPH} + + RUNNING=$(docker ps --filter="expose=8501/tcp" -q | xargs) + if [[ -n ${RUNNING} ]]; then + docker rm -f ${RUNNING} + fi + + CONTAINER_NAME=tfserving_${RANDOM} + + # Run container + MKL_IMAGE_TAG=${MKL_IMAGE_TAG} CONTAINER_NAME=${CONTAINER_NAME} docker_run + + # Test + python image_recognition_client.py --model ${MODEL_NAME} + + + if [ ${BATCH_SIZE} == 1 ];then + # Test Average latency + python image_recognition_benchmark.py --batch_size ${BATCH_SIZE} --model ${MODEL_NAME} + else + # Test max throughput + python image_recognition_benchmark.py --batch_size ${BATCH_SIZE} --model ${MODEL_NAME} + fi + + # Clean up + docker rm -f ${CONTAINER_NAME} +} + +LOGFILE=${OUTPUT_DIR}/${LOG_FILENAME} + +MODEL_NAME=$(echo ${MODEL_NAME} | tr 'A-Z' 'a-z') +if [ ${MODEL_NAME} == "inceptionv3" ] || [ ${MODEL_NAME} == "resnet50" ] && [ ${PRECISION} == "fp32" ]; then + resnet50_or_inceptionv3 | tee -a ${LOGFILE} +else + echo "Unsupported Model: ${MODEL_NAME} or Precision: ${PRECISION}" + exit 1 +fi + +popd + +# Clean up work directory +rm -rf ${WORKDIR} + +echo "Log output location: ${LOGFILE}" | tee -a ${LOGFILE} diff --git a/benchmarks/common/utils/validators.py b/benchmarks/common/utils/validators.py index 54f280dfd..16ec18aba 100644 --- a/benchmarks/common/utils/validators.py +++ b/benchmarks/common/utils/validators.py @@ -88,3 +88,23 @@ def check_valid_file_or_dir(value): raise ArgumentTypeError("{} does not exist.".format(value)) check_for_link(value) return value + + +def check_volume_mount(value): + """ + Verifies that the value is a valid docker volume mount, where there should be + at least two fields separated by a : (for the local directory to mount and the + path to the where the directory will be mounted in the container. The third + optional field is for extra options like read only. 
+ """ + if value: + # Check that we have at least 2 fields and at most 3 fields + if not 3 > value.count(":") > 0: + raise ArgumentTypeError( + "{} is not a valid volume mount string where ':' is used to separate the fields. " + "See https://docs.docker.com/storage/volumes for information on formatting the volume " + "mount string".format(value)) + + # Check that the local directory specified is a valid folder and not a link + check_valid_folder(value.split(':')[0]) + return value diff --git a/benchmarks/content_creation/tensorflow/draw/README.md b/benchmarks/content_creation/tensorflow/draw/README.md index f3ea0732f..a918d1a5a 100644 --- a/benchmarks/content_creation/tensorflow/draw/README.md +++ b/benchmarks/content_creation/tensorflow/draw/README.md @@ -48,7 +48,7 @@ modes/precisions: --model-name draw \ --mode inference \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl-py3 \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --checkpoint /home//draw_fp32_pretrained_model \ --data-location /home//mnist \ --batch-size 1 \ @@ -61,7 +61,7 @@ modes/precisions: --model-name draw \ --mode inference \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl-py3 \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --checkpoint /home//draw_fp32_pretrained_model \ --data-location /home//mnist \ --batch-size 100 \ @@ -82,8 +82,6 @@ modes/precisions: Time spent per BATCH: 6.6667 ms Total samples/sec: 149.9996 samples/s Outputs saved in file: /home//mnist/draw_data.npy - lscpu_path_cmd = command -v lscpu - lscpu located here: b'/usr/bin/lscpu' Ran inference with batch size 1 Log location outside container: {--output-dir value}/benchmark_draw_inference_fp32_20190123_012947.log ``` @@ -97,8 +95,6 @@ modes/precisions: Time spent per BATCH: 28.1952 ms Total samples/sec: 3546.7006 samples/s Outputs saved in file: /home//mnist/draw_data.npy - lscpu_path_cmd = command -v lscpu - lscpu located here: b'/usr/bin/lscpu' Ran inference with batch size 100 Log location outside container: {--output-dir value}/benchmark_draw_inference_fp32_20190123_013432.log ``` \ No newline at end of file diff --git a/benchmarks/content_creation/tensorflow/draw/inference/fp32/config.json b/benchmarks/content_creation/tensorflow/draw/inference/fp32/config.json new file mode 100644 index 000000000..dfac18793 --- /dev/null +++ b/benchmarks/content_creation/tensorflow/draw/inference/fp32/config.json @@ -0,0 +1,8 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1, + "KMP_HW_SUBSET": "1T" + } +} diff --git a/benchmarks/content_creation/tensorflow/draw/inference/fp32/model_init.py b/benchmarks/content_creation/tensorflow/draw/inference/fp32/model_init.py index 390bcae82..e306ecd55 100644 --- a/benchmarks/content_creation/tensorflow/draw/inference/fp32/model_init.py +++ b/benchmarks/content_creation/tensorflow/draw/inference/fp32/model_init.py @@ -22,7 +22,6 @@ import os import sys from common.base_model_init import BaseModelInitializer -from common.base_model_init import set_env_var class ModelInitializer(BaseModelInitializer): @@ -32,8 +31,8 @@ def __init__(self, args, custom_args=[], platform_util=None): super(ModelInitializer, self).__init__(args, custom_args, platform_util) # Set KMP env vars, if they haven't already been set - self.set_kmp_vars() - set_env_var("KMP_HW_SUBSET", "1T") + config_file_path = 
os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) if self.args.accuracy_only: print("Accuracy testing for DRAW inference is not supported yet.") @@ -45,7 +44,7 @@ def __init__(self, args, custom_args=[], platform_util=None): # Create the command prefix with numactl and executing the script script_path = os.path.join(self.args.intelai_models, self.args.mode, self.args.precision, "draw_inf.py") - self.command_prefix = self.get_numactl_command(args.socket_id) + \ + self.command_prefix = self.get_command_prefix(args.socket_id) + \ " {} {} ".format(self.python_exe, script_path) # Add additional args to the command diff --git a/benchmarks/face_detection_and_alignment/tensorflow/facenet/README.md b/benchmarks/face_detection_and_alignment/tensorflow/facenet/README.md index 0a3659d20..fd27ffa2b 100644 --- a/benchmarks/face_detection_and_alignment/tensorflow/facenet/README.md +++ b/benchmarks/face_detection_and_alignment/tensorflow/facenet/README.md @@ -59,7 +59,7 @@ python launch_benchmark.py \ --checkpoint /home//checkpoints \ --data-location /home//dataset \ --model-source-dir /home//facenet/ \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 ``` Example log tail for online inference: ``` @@ -77,8 +77,6 @@ Total samples/sec: 33.1608 samples/s 2019-03-28 21:00:02.725722: W tensorflow/core/kernels/queue_base.cc:277] _1_batch_join/fifo_queue: Skipping cancelled enqueue attempt with queue not closed 2019-03-28 21:00:02.725746: W tensorflow/core/kernels/queue_base.cc:277] _1_batch_join/fifo_queue: Skipping cancelled enqueue attempt with queue not closed 2019-03-28 21:00:02.725776: W tensorflow/core/kernels/queue_base.cc:277] _1_batch_join/fifo_queue: Skipping cancelled enqueue attempt with queue not closed -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 1 Log location outside container: {--output-dir value}/benchmark_facenet_inference_fp32_20190328_205911.log ``` @@ -96,7 +94,7 @@ python launch_benchmark.py \ --checkpoint /home//checkpoints \ --data-location /home//dataset \ --model-source-dir /home//facenet/ \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 ``` Example log tail for batch inference: ``` @@ -110,8 +108,6 @@ Accuracy: 0.98833+-0.00489 Validation rate: 0.96200+-0.01968 @ FAR=0.00100 Area Under Curve (AUC): 0.999 Equal Error Rate (EER): 0.011 -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 100 Log location outside container: {--output-dir value}/benchmark_facenet_inference_fp32_20190329_002623.log ``` @@ -130,7 +126,7 @@ python launch_benchmark.py \ --checkpoint /home//checkpoints \ --data-location /home//dataset \ --model-source-dir /home//facenet/ \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 ``` Example log tail for accuracy: ``` @@ -144,8 +140,6 @@ Accuracy: 0.98833+-0.00489 Validation rate: 0.96200+-0.01968 @ FAR=0.00100 Area Under Curve (AUC): 0.999 Equal Error Rate (EER): 0.011 -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 100 Log location outside container: {--output-dir value}/benchmark_facenet_inference_fp32_20190328_214145.log ``` diff --git 
a/benchmarks/face_detection_and_alignment/tensorflow/facenet/inference/fp32/config.json b/benchmarks/face_detection_and_alignment/tensorflow/facenet/inference/fp32/config.json new file mode 100644 index 000000000..273b45b40 --- /dev/null +++ b/benchmarks/face_detection_and_alignment/tensorflow/facenet/inference/fp32/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/face_detection_and_alignment/tensorflow/facenet/inference/fp32/model_init.py b/benchmarks/face_detection_and_alignment/tensorflow/facenet/inference/fp32/model_init.py index 9bd9c6243..e00bf70f7 100644 --- a/benchmarks/face_detection_and_alignment/tensorflow/facenet/inference/fp32/model_init.py +++ b/benchmarks/face_detection_and_alignment/tensorflow/facenet/inference/fp32/model_init.py @@ -30,11 +30,12 @@ class ModelInitializer(BaseModelInitializer): def __init__(self, args, custom_args=[], platform_util=None): super(ModelInitializer, self).__init__(args, custom_args, platform_util) - self.cmd = self.get_numactl_command(self.args.socket_id) + \ + self.cmd = self.get_command_prefix(self.args.socket_id) + \ self.python_exe + " " # Set KMP env vars, if they haven't already been set - self.set_kmp_vars() + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) pairs_file = os.path.join(self.args.model_source_dir, "data/pairs.txt") diff --git a/benchmarks/face_detection_and_alignment/tensorflow/mtcc/README.md b/benchmarks/face_detection_and_alignment/tensorflow/mtcc/README.md index 1963f9cbc..36cad0fe3 100644 --- a/benchmarks/face_detection_and_alignment/tensorflow/mtcc/README.md +++ b/benchmarks/face_detection_and_alignment/tensorflow/mtcc/README.md @@ -55,7 +55,7 @@ Run: --mode inference \ --socket-id 0 \ --checkpoint /home//MTCNN_model \ - --docker-image intelaipg/intel-optimized-tensorflow:nightly-latestprs-bdw + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 ``` 6. The log file is saved to the value of `--output-dir`. 
diff --git a/benchmarks/face_detection_and_alignment/tensorflow/mtcc/inference/fp32/config.json b/benchmarks/face_detection_and_alignment/tensorflow/mtcc/inference/fp32/config.json new file mode 100644 index 000000000..273b45b40 --- /dev/null +++ b/benchmarks/face_detection_and_alignment/tensorflow/mtcc/inference/fp32/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/face_detection_and_alignment/tensorflow/mtcc/inference/fp32/model_init.py b/benchmarks/face_detection_and_alignment/tensorflow/mtcc/inference/fp32/model_init.py index 34409b702..5d1983139 100644 --- a/benchmarks/face_detection_and_alignment/tensorflow/mtcc/inference/fp32/model_init.py +++ b/benchmarks/face_detection_and_alignment/tensorflow/mtcc/inference/fp32/model_init.py @@ -33,7 +33,8 @@ def __init__(self, args, custom_args, platform_util=None): self.set_num_inter_intra_threads() # Set KMP env vars, if they haven't already been set - self.set_kmp_vars() + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) @@ -41,7 +42,7 @@ def __init__(self, args, custom_args, platform_util=None): self.args.intelai_models, self.args.mode, self.args.precision, "one_image_test.py") self.command_prefix = \ - self.get_numactl_command(self.args.socket_id) + \ + self.get_command_prefix(self.args.socket_id) + \ "{} ".format(self.python_exe) + benchmark_script self.run_cmd = \ diff --git a/benchmarks/image_recognition/tensorflow/densenet169/README.md b/benchmarks/image_recognition/tensorflow/densenet169/README.md new file mode 100644 index 000000000..aaf2fd9e2 --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/densenet169/README.md @@ -0,0 +1,143 @@ +# DenseNet 169 + +This document has instructions for how to run DenseNet 169 for the +following modes/precisions: +* [FP32 inference](#fp32-inference-instructions) + +## FP32 Inference Instructions + +1. Download ImageNet dataset. + + This step is required only for running accuracy, for running the model for performance we do not need to provide dataset. + + Register and download the ImageNet dataset. Once you have the raw ImageNet dataset downloaded, we need to convert + it to the TFRecord format. The TensorFlow models repo provides + [scripts and instructions](https://github.com/tensorflow/models/tree/master/research/slim#an-automated-script-for-processing-imagenet-data) + to download, process and convert the ImageNet dataset to the TF records format. After converting data, you should have a directory + with the sharded dataset something like below, we only need `validation-*` files, discard `train-*` files: + ``` + $ ll /home/myuser/datasets/ImageNet_TFRecords + -rw-r--r--. 1 user 143009929 Jun 20 14:53 train-00000-of-01024 + -rw-r--r--. 1 user 144699468 Jun 20 14:53 train-00001-of-01024 + -rw-r--r--. 1 user 138428833 Jun 20 14:53 train-00002-of-01024 + ... + -rw-r--r--. 1 user 143137777 Jun 20 15:08 train-01022-of-01024 + -rw-r--r--. 1 user 143315487 Jun 20 15:08 train-01023-of-01024 + -rw-r--r--. 1 user 52223858 Jun 20 15:08 validation-00000-of-00128 + -rw-r--r--. 1 user 51019711 Jun 20 15:08 validation-00001-of-00128 + -rw-r--r--. 1 user 51520046 Jun 20 15:08 validation-00002-of-00128 + ... + -rw-r--r--. 1 user 52508270 Jun 20 15:09 validation-00126-of-00128 + -rw-r--r--. 
1 user 55292089 Jun 20 15:09 validation-00127-of-00128 + ``` + +2. Download the pretrained model: + ``` + $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/densenet169_fp32_pretrained_model.pb + ``` + +3. Clone the [intelai/models](https://github.com/intelai/models) repo + and then run the model scripts for either online or batch inference or accuracy. For --dataset-location in accuracy run, please use the ImageNet validation data path from step 1. + Each model run has user configurable arguments separated from regular arguments by '--' at the end of the command. + Unless configured, these arguments will run with default values. Below are the example codes for each use case: + + ``` + $ git clone https://github.com/IntelAI/models.git + + $ cd benchmarks + ``` + + For throughput (using `--benchmark-only`, `--socket-id 0` and `--batch-size 100`): + ``` + python launch_benchmark.py \ + --model-name densenet169 \ + --precision fp32 \ + --mode inference \ + --framework tensorflow \ + --benchmark-only \ + --batch-size 100 \ + --socket-id 0 \ + --in-graph /home//densenet169_fp32_pretrained_model.pb \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ + -- input_height=224 input_width=224 warmup_steps=20 steps=100 \ + input_layer="input" output_layer="densenet169/predictions/Reshape_1" + ``` + + For latency (using `--benchmark-only`, `--socket-id 0` and `--batch-size 1`) + ``` + python launch_benchmark.py \ + --model-name densenet169 \ + --precision fp32 \ + --mode inference \ + --framework tensorflow \ + --benchmark-only \ + --batch-size 1 \ + --socket-id 0 \ + --in-graph /home//densenet169_fp32_pretrained_model.pb \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ + -- input_height=224 input_width=224 warmup_steps=20 steps=100 \ + input_layer="input" output_layer="densenet169/predictions/Reshape_1" + ``` + + For accuracy (using your `--data-location`, `--socket-id 0`, `--accuracy-only` and + `--batch-size 100`): + ``` + python launch_benchmark.py \ + --model-name densenet169 \ + --precision fp32 \ + --mode inference \ + --framework tensorflow \ + --accuracy-only \ + --batch-size 100 \ + --socket-id 0 \ + --in-graph /home//densenet169_fp32_pretrained_model.pb \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ + --data-location /home//imagenet_validation_dataset \ + -- input_height=224 input_width=224 \ + input_layer="input" output_layer="densenet169/predictions/Reshape_1" + ``` + + Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands + to get additional debug output or change the default output location. + +4. The log file is saved to the `models/benchmarks/common/tensorflow/logs` directory, + or the directory specified by the `--output-dir` arg. Below are examples of + what the tail of your log file should look like for the different configs. 
+ + Example log tail when running for batch inference: + ``` + steps = 80, 159.83471377 images/sec + Latency: 625.646317005 ms + steps = 90, 159.852789241 images/sec + Latency: 625.57557159 ms + steps = 100, 159.853966416 images/sec + Latency: 625.570964813 ms + Ran inference with batch size 100 + Log location outside container: {--output-dir value}/benchmark_densenet169_inference_fp32_20190412_023940.log + ``` + + Example log tail when running for online inference: + ``` + steps = 80, 34.9948442873 images/sec + Latency: 28.5756379366 ms + steps = 90, 34.9644341907 images/sec + Latency: 28.6004914178 ms + steps = 100, 34.9655988121 images/sec + Latency: 28.5995388031 ms + Ran inference with batch size 1 + Log location outside container: {--output-dir value}/benchmark_densenet169_inference_fp32_20190412_024505.log + ``` + + Example log tail when running for accuracy: + ``` + Iteration time: 581.6446 ms + 0.757505030181 + Iteration time: 581.5755 ms + 0.757489959839 + Iteration time: 581.5709 ms + 0.75749498998 + Iteration time: 581.1705 ms + 0.75748 + Ran inference with batch size 100 + Log location outside container: {--output-dir value}/benchmark_densenet169_inference_fp32_20190412_021545.log + ``` diff --git a/benchmarks/image_recognition/tensorflow/densenet169/__init__.py b/benchmarks/image_recognition/tensorflow/densenet169/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/densenet169/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/image_recognition/tensorflow/densenet169/inference/__init__.py b/benchmarks/image_recognition/tensorflow/densenet169/inference/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/densenet169/inference/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/__init__.py b/benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/config.json b/benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/config.json new file mode 100644 index 000000000..812311847 --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters":{ + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/model_init.py b/benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/model_init.py new file mode 100644 index 000000000..3e4a376af --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/densenet169/inference/fp32/model_init.py @@ -0,0 +1,107 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: EPL-2.0 +# + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import os + +from common.base_model_init import BaseModelInitializer +from common.base_model_init import set_env_var + + +class ModelInitializer(BaseModelInitializer): + """Model initializer for Densenet169 FP32 inference""" + + def __init__(self, args, custom_args=[], platform_util=None): + super(ModelInitializer, self).__init__(args, custom_args, platform_util) + self.cmd = self.get_command_prefix(self.args.socket_id) + "{} ".format(self.python_exe) + + # Set KMP env vars, if they haven't already been set + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) + + if self.args.batch_size == -1: + self.args.batch_size = 100 + + # set num_inter_threads and num_intra_threads + self.set_num_inter_intra_threads() + + set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) + + self.parse_args() + + if self.args.benchmark_only: + run_script = os.path.join(self.args.intelai_models, + self.args.mode, self.args.precision, + "benchmark.py") + + script_args_list = [ + "input_graph", "input_height", "input_width", "batch_size", + "input_layer", "output_layer", "num_inter_threads", + "num_intra_threads", "warmup_steps", "steps"] + + elif self.args.accuracy_only: + run_script = os.path.join(self.args.intelai_models, + self.args.mode, self.args.precision, + "accuracy.py") + + script_args_list = [ + "input_graph", "data_location", "input_height", "input_width", + "batch_size", "input_layer", "output_layer", + "num_inter_threads", "num_intra_threads"] + + self.cmd = self.add_args_to_command(self.cmd + run_script, + script_args_list) + + def parse_args(self): + if self.custom_args: + parser = argparse.ArgumentParser() + parser.add_argument( + "--input_height", default=224, + dest='input_height', type=int, help="input height") + parser.add_argument( + "--input_width", default=224, + dest='input_width', type=int, help="input width") + parser.add_argument( + '--warmup_steps', dest='warmup_steps', + help='number of warmup steps', + type=int, default=20) + parser.add_argument( + '--steps', dest='steps', + help='number of steps', + type=int, default=100) + parser.add_argument( + '--input_layer', dest='input_layer', + help='name of input layer', + type=str, default="input") + parser.add_argument( + '--output_layer', dest='output_layer', + help='name of output layer', + type=str, default="densenet169/predictions/Reshape_1") + + self.args = parser.parse_args(self.custom_args, + namespace=self.args) + + def run(self): + if self.cmd: + self.run_command(self.cmd) diff --git a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md index 3cc4fdccb..c3a44d2d2 100644 --- a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md +++ b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/README.md @@ -7,6 +7,11 @@ following modes/precisions: ## Int8 Inference Instructions +These instructions use the TCMalloc memory allocator, which produces +better performance results for Int8 precision models with smaller batch sizes. +If you want to disable the use of TCMalloc, set `--disable-tcmalloc=True` +when calling `launch_benchmark.py` and the script will run without TCMalloc. + 1. 
Clone this [intelai/models](https://github.com/IntelAI/models) repository: @@ -79,7 +84,7 @@ python launch_benchmark.py \ --framework tensorflow \ --accuracy-only \ --batch-size 100 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-prs-b5d67b7-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph /home//inception_resnet_v2_int8_pretrained_model.pb \ --data-location /home//datasets/ImageNet_TFRecords ``` @@ -95,7 +100,7 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-prs-b5d67b7-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph /home//inception_resnet_v2_int8_pretrained_model.pb ``` @@ -110,7 +115,7 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 128 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-prs-b5d67b7-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph /home//inception_resnet_v2_int8_pretrained_model.pb ``` @@ -136,30 +141,30 @@ Log location outside container: /benchmark_inception_resnet_v2 Example log tail when running for online inference: ``` ... -Iteration 37: 0.046 sec -Iteration 38: 0.046 sec -Iteration 39: 0.046 sec -Iteration 40: 0.046 sec -Average time: 0.045 sec +Iteration 37: 0.043 sec +Iteration 38: 0.042 sec +Iteration 39: 0.043 sec +Iteration 40: 0.043 sec +Average time: 0.043 sec Batch size = 1 -Latency: 45.441 ms -Throughput: 22.007 images/sec +Latency: 42.793 ms +Throughput: 23.368 images/sec Ran inference with batch size 1 -Log location outside container: /benchmark_inception_resnet_v2_inference_int8_20190330_012557.log +Log location outside container: /benchmark_inception_resnet_v2_inference_int8_20190415_231020.log ``` Example log tail when running for batch inference: ``` ... -Iteration 37: 0.975 sec -Iteration 38: 0.975 sec -Iteration 39: 0.987 sec -Iteration 40: 0.974 sec -Average time: 0.976 sec +Iteration 37: 0.932 sec +Iteration 38: 0.928 sec +Iteration 39: 0.927 sec +Iteration 40: 0.928 sec +Average time: 0.928 sec Batch size = 128 -Throughput: 131.178 images/sec +Throughput: 137.978 images/sec Ran inference with batch size 128 -Log location outside container: /benchmark_inception_resnet_v2_inference_int8_20190330_012719.log +Log location outside container: /benchmark_inception_resnet_v2_inference_int8_20190415_225215.log ``` @@ -174,21 +179,12 @@ $ git clone git@github.com:IntelAI/models.git This repository includes launch scripts for running an optimized version of the Inception ResNet V2 model code. -2. Download the pre-trained Inception ResNet V2 model files: - -For accuracy: +2. Download the pre-trained Inception ResNet V2 model: ``` $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/inception_resnet_v2_fp32_pretrained_model.pb ``` -For batch and online inference: - -``` -$ wget http://download.tensorflow.org/models/inception_resnet_v2_2016_08_30.tar.gz -$ mkdir -p checkpoints && tar -C ./checkpoints/ -zxf inception_resnet_v2_2016_08_30.tar.gz -``` - 3. If you would like to run Inception ResNet V2 inference and test for accuracy, you will need the full ImageNet dataset. Running for online and batch inference do not require the ImageNet dataset. @@ -230,7 +226,7 @@ precision, and docker image to use, along with your path to the ImageNet TF Records that you generated in step 3. 
Substitute in your own `--data-location` (from step 3, for accuracy -only), `--checkpoint` pre-trained model checkpoint file path (from step 2). +only), `--in-graph` frozen graph file path (from step 2). Inception ResNet V2 can be run for accuracy, online inference, or batch inference. Use one of the following examples below, depending on your use case. @@ -246,8 +242,8 @@ python launch_benchmark.py \ --framework tensorflow \ --accuracy-only \ --batch-size 100 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ - --in-graph /home//inception_resnet_v2_int8_pretrained_model.pb \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ + --in-graph /home//inception_resnet_v2_fp32_pretrained_model.pb \ --data-location /home//datasets/ImageNet_TFRecords ``` @@ -262,9 +258,8 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --checkpoint /home//checkpoints \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ - --data-location /home//datasets/ImageNet_TFRecords + --in-graph /home//inception_resnet_v2_fp32_pretrained_model.pb \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 ``` For batch inference (using `--benchmark-only`, `--socket-id 0` and `--batch-size 128`): @@ -278,9 +273,8 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 128 \ --socket-id 0 \ - --checkpoint /home//checkpoints \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ - --data-location /home//datasets/ImageNet_TFRecords + --in-graph /home//inception_resnet_v2_fp32_pretrained_model.pb \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 ``` Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands @@ -297,36 +291,31 @@ Example log tail when running for accuracy: Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.8036, 0.9526) Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.8036, 0.9525) Processed 50000 images. 
(Top1 accuracy, Top5 accuracy) = (0.8037, 0.9525) -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 100 Log location outside container: {--output-dir value}/benchmark_inception_resnet_v2_inference_fp32_20190109_081637.log ``` Example log tail when running for online inference: ``` -eval/Accuracy[0] -eval/Recall_5[0.01] -INFO:tensorflow:Finished evaluation at 2019-01-08-01:51:28 -self._total_images_per_sec = 69.7 -self._displayed_steps = 10 -Total images/sec = 7.0 -Latency ms/step = 143.4 -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu +Iteration 38: 0.052 sec +Iteration 39: 0.051 sec +Iteration 40: 0.051 sec +Average time: 0.050 sec +Batch size = 1 +Latency: 50.094 ms +Throughput: 19.963 images/sec Ran inference with batch size 1 -Log location outside container: {--output-dir value}/benchmark_inception_resnet_v2_inference_fp32_20190108_015057.log +Log location outside container: {--output-dir value}/benchmark_inception_resnet_v2_inference_fp32_20190410_205213.log ``` Example log tail when running for batch inference: ``` -eval/Accuracy[0.00078125] -eval/Recall_5[0.00375] -INFO:tensorflow:Finished evaluation at 2019-01-08-01:59:37 -self._total_images_per_sec = 457.0 -self._displayed_steps = 10 -Total images/sec = 45.7 -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu +Iteration 38: 1.848 sec +Iteration 39: 1.799 sec +Iteration 40: 1.850 sec +Average time: 1.818 sec +Batch size = 128 +Throughput: 70.402 images/sec Ran inference with batch size 128 -Log location outside container: {--output-dir value}/benchmark_inception_resnet_v2_inference_fp32_20190108_015440.log +Log location outside container: {--output-dir value}/benchmark_inception_resnet_v2_inference_fp32_20190410_205628.log +``` diff --git a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/fp32/config.json b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/fp32/config.json new file mode 100644 index 000000000..273b45b40 --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/fp32/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/fp32/model_init.py b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/fp32/model_init.py index 045921acd..13fd8a79f 100644 --- a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/fp32/model_init.py +++ b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/fp32/model_init.py @@ -29,10 +29,11 @@ class ModelInitializer(BaseModelInitializer): def __init__(self, args, custom_args=[], platform_util=None): super(ModelInitializer, self).__init__(args, custom_args, platform_util) - self.cmd = self.get_numactl_command(self.args.socket_id) + self.python_exe + " " + self.cmd = self.get_command_prefix(self.args.socket_id) + self.python_exe + " " # Set KMP env vars, if they haven't already been set - self.set_kmp_vars() + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) # use default batch size if -1 if self.args.batch_size == -1: @@ -45,20 +46,14 @@ def __init__(self, args, custom_args=[], platform_util=None): if self.args.benchmark_only: run_script = os.path.join(self.args.intelai_models, - "eval_image_classifier.py") 
+ "eval_image_classifier_benchmark.py") - cmd_args = " --dataset_name=imagenet" + \ - " --checkpoint_path=" + self.args.checkpoint + \ - " --eval_dir=" + self.args.checkpoint + \ - " --dataset_dir=" + self.args.data_location + \ - " --dataset_split_name=validation" + \ - " --clone_on_cpu=True" + \ - " --model_name=" + str(self.args.model_name) + \ - " --inter_op_parallelism_threads=" + \ - str(self.args.num_inter_threads) + \ - " --intra_op_parallelism_threads=" + \ - str(self.args.num_intra_threads) + \ - " --batch_size=" + str(self.args.batch_size) + cmd_args = " --input-graph=" + self.args.input_graph + \ + " --inter-op-parallelism-threads=" + \ + str(self.args.num_inter_threads) + \ + " --intra-op-parallelism-threads=" + \ + str(self.args.num_intra_threads) + \ + " --batch-size=" + str(self.args.batch_size) elif self.args.accuracy_only: run_script = os.path.join(self.args.intelai_models, "eval_image_classifier_accuracy.py") diff --git a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/int8/config.json b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/int8/config.json new file mode 100644 index 000000000..273b45b40 --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/int8/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/int8/model_init.py b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/int8/model_init.py index f2e2e1469..90ce7bcb2 100644 --- a/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/int8/model_init.py +++ b/benchmarks/image_recognition/tensorflow/inception_resnet_v2/inference/int8/model_init.py @@ -31,8 +31,12 @@ class ModelInitializer(BaseModelInitializer): def __init__(self, args, custom_args=[], platform_util=None): super(ModelInitializer, self).__init__(args, custom_args, platform_util) - self.set_kmp_vars() - self.cmd = self.get_numactl_command(self.args.socket_id) + "{} ".format(self.python_exe) + + # Set KMP env vars, if they haven't already been set + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) + + self.cmd = self.get_command_prefix(self.args.socket_id) + "{} ".format(self.python_exe) # use default batch size if -1 if self.args.batch_size == -1: diff --git a/benchmarks/image_recognition/tensorflow/inceptionv3/README.md b/benchmarks/image_recognition/tensorflow/inceptionv3/README.md index 9de17c994..0a9223914 100644 --- a/benchmarks/image_recognition/tensorflow/inceptionv3/README.md +++ b/benchmarks/image_recognition/tensorflow/inceptionv3/README.md @@ -9,6 +9,11 @@ Instructions for model training and inference for other precisions are coming la ## Int8 Inference Instructions +These instructions use the TCMalloc memory allocator, which produces +better performance results for Int8 precision models with smaller batch sizes. +If you want to disable the use of TCMalloc, set `--disable-tcmalloc=True` +when calling `launch_benchmark.py` and the script will run without TCMalloc. + 1. 
Clone this [intelai/models](https://github.com/IntelAI/models) repository: @@ -92,7 +97,7 @@ python launch_benchmark.py \ --framework tensorflow \ --accuracy-only \ --batch-size 100 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph /home//inceptionv3_int8_pretrained_model.pb \ --data-location /home//datasets/ImageNet_TFRecords ``` @@ -113,7 +118,7 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph /home//inceptionv3_int8_pretrained_model.pb \ --data-location /home//datasets/ImageNet_TFRecords \ -- warmup_steps=50 steps=500 @@ -130,7 +135,7 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph /home//inceptionv3_int8_pretrained_model.pb \ -- warmup_steps=50 steps=500 ``` @@ -146,7 +151,7 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 128 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph /home//inceptionv3_int8_pretrained_model.pb \ --data-location /home//datasets/ImageNet_TFRecords \ -- warmup_steps=50 steps=500 @@ -163,17 +168,11 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 128 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph /home//inceptionv3_int8_pretrained_model.pb \ -- warmup_steps=50 steps=500 ``` -The docker image (`intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl`) -used in the commands above were built using -[TensorFlow](git@github.com:tensorflow/tensorflow.git) master -([e889ea1](https://github.com/tensorflow/tensorflow/commit/e889ea1dd965c31c391106aa3518fc23d2689954)) and -[PR #25765](https://github.com/tensorflow/tensorflow/pull/25765). - Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands to get additional debug output or change the default output location.. @@ -185,9 +184,8 @@ different configs. Example log tail when running for accuracy: ``` +Iteration time: 357.3781 ms Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7666, 0.9333) -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Executing command: python /workspace/intelai_models/int8/accuracy.py --input_height=299 --input_width=299 --num_intra_threads=56 --num_inter_threads=2 --batch_size=100 --input_graph=/in_graph/inceptionv3_int8_pretrained_model.pb --data_location=/dataset Ran inference with batch size 100 Log location outside container: {--output-dir value}/benchmark_inceptionv3_inference_int8_20190104_013246.log @@ -196,27 +194,25 @@ Log location outside container: {--output-dir value}/benchmark_inceptionv3_infer Example log tail when running for online inference: ``` ... 
-steps = 470, 53.7256017113 images/sec -steps = 480, 52.5430812016 images/sec -steps = 490, 52.9076139058 images/sec -steps = 500, 53.5021876395 images/sec -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu +steps = 470, 134.912798739 images/sec +steps = 480, 132.379245045 images/sec +steps = 490, 133.977640069 images/sec +steps = 500, 132.083262478 images/sec +Average throughput for batch size 1: 133.440858806 images/sec Ran inference with batch size 1 -Log location outside container: {--output-dir value}/benchmark_inceptionv3_inference_int8_20190223_194002.log +Log location outside container: {--output-dir value}/benchmark_inceptionv3_inference_int8_20190415_220455.log ``` Example log tail when running for batch inference: ``` ... -steps = 470, 370.435654276 images/sec -steps = 480, 369.710160177 images/sec -steps = 490, 369.083388904 images/sec -steps = 500, 370.287978128 images/sec -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu +steps = 470, 369.151656047 images/sec +steps = 480, 373.174541014 images/sec +steps = 490, 372.402638382 images/sec +steps = 500, 371.836748659 images/sec +Average throughput for batch size 128: 371.269087408 images/sec Ran inference with batch size 128 -Log location outside container: {--output-dir value}/benchmark_inceptionv3_inference_int8_20190223_194314.log +Log location outside container: {--output-dir value}/benchmark_inceptionv3_inference_int8_20190416_162155.log ``` ## FP32 Inference Instructions @@ -262,7 +258,7 @@ python launch_benchmark.py \ --framework tensorflow \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph /home//inceptionv3_fp32_pretrained_model.pb ``` Example log tail when running for online inference: @@ -279,8 +275,6 @@ Average time: 0.014 sec Batch size = 1 Latency: 14.442 ms Throughput: 69.243 images/sec -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 1 Log location outside container: {--output-dir value}/benchmark_inceptionv3_inference_fp32_20190104_025220.log ``` @@ -295,7 +289,7 @@ python launch_benchmark.py \ --framework tensorflow \ --batch-size 128 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph /home//inceptionv3_fp32_pretrained_model.pb ``` Example log tail when running for batch inference: @@ -311,8 +305,6 @@ Iteration 40: 0.757 sec Average time: 0.760 sec Batch size = 128 Throughput: 168.431 images/sec -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 128 Log location outside container: {--output-dir value}/benchmark_inceptionv3_inference_fp32_20190104_024842.log ``` @@ -329,19 +321,20 @@ python launch_benchmark.py \ --accuracy-only \ --batch-size 100 \ --data-location /dataset/Imagenet_Validation \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph /home//inceptionv3_fp32_pretrained_model.pb ``` Example log tail when running for accuracy: ``` +Iteration time: 756.7571 ms Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7673, 0.9341) +Iteration time: 757.3781 ms Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7674, 0.9341) +Iteration time: 760.3024 ms Processed 50000 images. 
(Top1 accuracy, Top5 accuracy) = (0.7675, 0.9342) -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 100 Log location outside container: {--output-dir value}/benchmark_inceptionv3_inference_fp32_20190104_023816.log ``` Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands -to get additional debug output or change the default output location.. \ No newline at end of file +to get additional debug output or change the default output location.. diff --git a/benchmarks/image_recognition/tensorflow/inceptionv3/inference/fp32/config.json b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/fp32/config.json new file mode 100644 index 000000000..273b45b40 --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/fp32/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/image_recognition/tensorflow/inceptionv3/inference/fp32/model_init.py b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/fp32/model_init.py index dd504259e..f550765f4 100644 --- a/benchmarks/image_recognition/tensorflow/inceptionv3/inference/fp32/model_init.py +++ b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/fp32/model_init.py @@ -60,15 +60,17 @@ def __init__(self, args, custom_args=[], platform_util=None): self.args = arg_parser.parse_args(self.custom_args, namespace=self.args) - # Use default KMP variable values, but override the default KMP_BLOCKTIME value - self.set_kmp_vars(kmp_blocktime=str(self.args.kmp_blocktime)) + # Set KMP env vars, if they haven't already been set, but override the default KMP_BLOCKTIME value + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path, kmp_blocktime=str(self.args.kmp_blocktime)) + set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) benchmark_script = os.path.join( self.args.intelai_models, self.args.precision, "eval_image_classifier_inference.py") - self.benchmark_command = self.get_numactl_command(args.socket_id) + \ + self.benchmark_command = self.get_command_prefix(args.socket_id) + \ self.python_exe + " " + benchmark_script num_cores = self.platform_util.num_cores_per_socket if self.args.num_cores == -1 \ diff --git a/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/__init__.py b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/__init__.py index 87301fd64..139d705c0 100644 --- a/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/__init__.py +++ b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/__init__.py @@ -1,7 +1,7 @@ #!/usr/bin/env python # -*- coding: utf-8 -*- # -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/config.json b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/config.json new file mode 100644 index 000000000..273b45b40 --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/model_init.py b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/model_init.py index 6d586ea80..645f2f92e 100644 --- a/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/model_init.py +++ b/benchmarks/image_recognition/tensorflow/inceptionv3/inference/int8/model_init.py @@ -60,8 +60,9 @@ def parse_args(self): self.args = parser.parse_args(self.custom_args, namespace=self.args) - # Use default KMP variable values, but override the default KMP_BLOCKTIME value - self.set_kmp_vars(kmp_blocktime=str(self.args.kmp_blocktime)) + # Set KMP env vars, if they haven't already been set, but override the default KMP_BLOCKTIME value + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path, kmp_blocktime=str(self.args.kmp_blocktime)) def run_benchmark(self): benchmark_script = os.path.join(self.args.intelai_models, @@ -73,7 +74,7 @@ def run_benchmark(self): "data_num_inter_threads", "data_num_intra_threads", "warmup_steps", "steps"] - cmd_prefix = self.get_numactl_command(self.args.socket_id) + \ + cmd_prefix = self.get_command_prefix(self.args.socket_id) + \ self.python_exe + " " + benchmark_script cmd = self.add_args_to_command(cmd_prefix, script_args_list) # add num_cores @@ -93,7 +94,7 @@ def run_accuracy(self): "batch_size", "num_inter_threads", "num_intra_threads"] - cmd_prefix = self.get_numactl_command(self.args.socket_id) + \ + cmd_prefix = self.get_command_prefix(self.args.socket_id) + \ self.python_exe + " " + accuracy_script cmd = self.add_args_to_command(cmd_prefix, script_args_list) self.run_command(cmd) @@ -105,7 +106,7 @@ def run_calibration(self): "input_graph", "data_location", "batch_size", "num_inter_threads", "num_intra_threads"] - cmd_prefix = self.get_numactl_command(self.args.socket_id) + \ + cmd_prefix = self.get_command_prefix(self.args.socket_id) + \ self.python_exe + " " + calibration_script cmd = self.add_args_to_command(cmd_prefix, script_args_list) self.run_command(cmd) diff --git a/benchmarks/image_recognition/tensorflow/inceptionv4/README.md b/benchmarks/image_recognition/tensorflow/inceptionv4/README.md index a1228c3ad..560de9ef5 100644 --- a/benchmarks/image_recognition/tensorflow/inceptionv4/README.md +++ b/benchmarks/image_recognition/tensorflow/inceptionv4/README.md @@ -10,6 +10,11 @@ other precisions are coming later. ## Int8 Inference Instructions +These instructions use the TCMalloc memory allocator, which produces +better performance results for Int8 precision models with smaller batch sizes. +If you want to disable the use of TCMalloc, set `--disable-tcmalloc=True` +when calling `launch_benchmark.py` and the script will run without TCMalloc. + 1. Clone this [intelai/models](https://github.com/IntelAI/models) repository: ``` @@ -51,7 +56,7 @@ other precisions are coming later. 
--accuracy-only \ --batch-size 100 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph /home//inceptionv4_int8_pretrained_model.pb \ --data-location /home//ImageNet_TFRecords ``` @@ -66,7 +71,7 @@ other precisions are coming later. --benchmark-only \ --batch-size 240 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph /home//inceptionv4_int8_pretrained_model.pb ``` @@ -80,16 +85,10 @@ other precisions are coming later. --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph /home//inceptionv4_int8_pretrained_model.pb ``` - The docker image (`intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl`) - used in the commands above were built using - [TensorFlow](git@github.com:tensorflow/tensorflow.git) master - ([e889ea1](https://github.com/tensorflow/tensorflow/commit/e889ea1dd965c31c391106aa3518fc23d2689954)) and - [PR #25765](https://github.com/tensorflow/tensorflow/pull/25765). - Note that the `--verbose` flag can be added to any of the above commands to get additional debug output. @@ -101,47 +100,45 @@ other precisions are coming later. Example log tail when running for accuracy: ``` ... + Iteration time: 685.1976 ms Processed 49700 images. (Top1 accuracy, Top5 accuracy) = (0.7985, 0.9504) + Iteration time: 686.3845 ms Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7983, 0.9504) + Iteration time: 686.7021 ms Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7984, 0.9504) + Iteration time: 685.8914 ms Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7984, 0.9504) - lscpu_path_cmd = command -v lscpu - lscpu located here: /usr/bin/lscpu Ran inference with batch size 100 Log location outside container: /benchmark_inceptionv4_inference_int8_20190306_221608.log ``` Example log tail when running for batch inference: ``` - [Running warmup steps...] - steps = 10, 185.108768528 images/sec - [Running benchmark steps...] - steps = 10, 184.482999017 images/sec - steps = 20, 184.561572444 images/sec - steps = 30, 184.620504126 images/sec - steps = 40, 183.900309054 images/sec - steps = 50, 184.110358713 images/sec - lscpu_path_cmd = command -v lscpu - lscpu located here: /usr/bin/lscpu - Ran inference with batch size 240 - Log location outside container: /benchmark_inceptionv4_inference_int8_20190306_215858.log + [Running warmup steps...] + steps = 10, 184.497605972 images/sec + [Running benchmark steps...] + steps = 10, 184.664702184 images/sec + steps = 20, 184.938455688 images/sec + steps = 30, 184.454197634 images/sec + steps = 40, 184.491891402 images/sec + steps = 50, 184.390001575 images/sec + Ran inference with batch size 240 + Log location outside container: /benchmark_inceptionv4_inference_int8_20190415_233517.log ``` Example log tail when running for online inference: ``` - [Running warmup steps...] - steps = 10, 30.8738415788 images/sec - [Running benchmark steps...] 
- steps = 10, 31.8633787623 images/sec - steps = 20, 31.1129375635 images/sec - steps = 30, 31.2716048462 images/sec - steps = 40, 31.9682931663 images/sec - steps = 50, 31.6665962009 images/sec - Latency: 31.936 ms - lscpu_path_cmd = command -v lscpu - lscpu located here: /usr/bin/lscpu - Ran inference with batch size 1 - Log location outside container: /benchmark_inceptionv4_inference_int8_20190306_215702.log + [Running warmup steps...] + steps = 10, 32.6095380262 images/sec + [Running benchmark steps...] + steps = 10, 32.9024373024 images/sec + steps = 20, 32.5328989723 images/sec + steps = 30, 32.5988932413 images/sec + steps = 40, 31.3991914957 images/sec + steps = 50, 32.7053998207 images/sec + Latency: 30.598 ms + Ran inference with batch size 1 + Log location outside container: /benchmark_inceptionv4_inference_int8_20190415_232441.log ``` ## FP32 Inference Instructions @@ -188,7 +185,7 @@ other precisions are coming later. --accuracy-only \ --batch-size 100 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph /home//inceptionv4_fp32_pretrained_model.pb \ --data-location /home//ImageNet_TFRecords ``` @@ -203,7 +200,7 @@ other precisions are coming later. --benchmark-only \ --batch-size 240 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph /home//inceptionv4_fp32_pretrained_model.pb ``` @@ -217,7 +214,7 @@ other precisions are coming later. --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph /home//inceptionv4_fp32_pretrained_model.pb ``` @@ -232,10 +229,15 @@ other precisions are coming later. Example log tail when running for accuracy: ``` ... + Iteration time: 1337.8728 ms Processed 49600 images. (Top1 accuracy, Top5 accuracy) = (0.8015, 0.9517) + Iteration time: 1331.8253 ms Processed 49700 images. (Top1 accuracy, Top5 accuracy) = (0.8017, 0.9518) + Iteration time: 1339.1553 ms Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.8017, 0.9518) + Iteration time: 1334.5991 ms Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.8018, 0.9519) + Iteration time: 1336.1905 ms Processed 50000 images. 
(Top1 accuracy, Top5 accuracy) = (0.8018, 0.9519) Ran inference with batch size 100 Log location outside container: /benchmark_inceptionv4_inference_fp32_20190308_182729.log diff --git a/benchmarks/image_recognition/tensorflow/inceptionv4/inference/config.json b/benchmarks/image_recognition/tensorflow/inceptionv4/inference/config.json new file mode 100644 index 000000000..6f1228ba7 --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/inceptionv4/inference/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 0, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/image_recognition/tensorflow/inceptionv4/inference/inceptionv4_model_init.py b/benchmarks/image_recognition/tensorflow/inceptionv4/inference/inceptionv4_model_init.py index c7d546477..74da197fd 100644 --- a/benchmarks/image_recognition/tensorflow/inceptionv4/inference/inceptionv4_model_init.py +++ b/benchmarks/image_recognition/tensorflow/inceptionv4/inference/inceptionv4_model_init.py @@ -38,7 +38,11 @@ def __init__(self, args, custom_args=[], platform_util=None): # Environment variables set_env_var("OMP_NUM_THREADS", platform_util.num_cores_per_socket if self.args.num_cores == -1 else self.args.num_cores) - self.set_kmp_vars(kmp_blocktime="0") + + # Set KMP env vars, if they haven't already been set + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) + self.set_num_inter_intra_threads(num_inter_threads=platform_util.num_threads_per_core, num_intra_threads=platform_util.num_cores_per_socket) @@ -69,7 +73,7 @@ def parse_args(self): def add_command_prefix(self, script_path): """ Uses the specified script path and adds on the command prefix """ - return self.get_numactl_command(self.args.socket_id) + self.python_exe + " " + \ + return self.get_command_prefix(self.args.socket_id) + self.python_exe + " " + \ script_path def run_benchmark(self): diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md index 17e274aa9..e7d0d6f5d 100644 --- a/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md +++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/README.md @@ -2,14 +2,164 @@ This document has instructions for how to run MobileNet V1 for the following modes/precisions: +* [Int8 inference](#int8-inference-instructions) * [FP32 inference](#fp32-inference-instructions) Instructions and scripts for model training are coming later. + +## Int8 Inference Instructions + +These instructions use the TCMalloc memory allocator, which produces +better performance results for Int8 precision models with smaller batch sizes. +If you want to disable the use of TCMalloc, set `--disable-tcmalloc=True` +when calling `launch_benchmark.py` and the script will run without TCMalloc. + +1. Download ImageNet dataset. + + This step is required only for running accuracy, for running benchmark we do not need to provide dataset. + + Register and download the ImageNet dataset. Once you have the raw ImageNet dataset downloaded, we need to convert + it to the TFRecord format. The TensorFlow models repo provides + [scripts and instructions](https://github.com/tensorflow/models/tree/master/research/slim#an-automated-script-for-processing-imagenet-data) + to download, process and convert the ImageNet dataset to the TF records format. 
After converting data, you should have a directory + with the sharded dataset something like below, we only need `validation-*` files, discard `train-*` files: + ``` + $ ll /home/myuser/datasets/ImageNet_TFRecords + -rw-r--r--. 1 user 143009929 Jun 20 14:53 train-00000-of-01024 + -rw-r--r--. 1 user 144699468 Jun 20 14:53 train-00001-of-01024 + -rw-r--r--. 1 user 138428833 Jun 20 14:53 train-00002-of-01024 + ... + -rw-r--r--. 1 user 143137777 Jun 20 15:08 train-01022-of-01024 + -rw-r--r--. 1 user 143315487 Jun 20 15:08 train-01023-of-01024 + -rw-r--r--. 1 user 52223858 Jun 20 15:08 validation-00000-of-00128 + -rw-r--r--. 1 user 51019711 Jun 20 15:08 validation-00001-of-00128 + -rw-r--r--. 1 user 51520046 Jun 20 15:08 validation-00002-of-00128 + ... + -rw-r--r--. 1 user 52508270 Jun 20 15:09 validation-00126-of-00128 + -rw-r--r--. 1 user 55292089 Jun 20 15:09 validation-00127-of-00128 + ``` +2. Download the pre-trained model. +``` +$ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/mobilenetv1_int8_pretrained_model.pb +``` + +3. Clone the [intelai/models](https://github.com/intelai/models) repo + and then run the model scripts for either online or batch inference or accuracy. For --dataset-location in accuracy run, please use the ImageNet validation data path from step 1. + Each model run has user configurable arguments separated from regular arguments by '--' at the end of the command. + Unless configured, these arguments will run with default values. Below are the example codes for each use case: + + ``` + $ git clone https://github.com/IntelAI/models.git + + $ cd benchmarks + ``` + + For batch inference (using `--benchmark-only`, `--socket-id 0` and `--batch-size 240`): + ``` + python launch_benchmark.py \ + --model-name mobilenet_v1 \ + --precision int8 \ + --mode inference \ + --framework tensorflow \ + --benchmark-only \ + --batch-size 240 \ + --socket-id 0 \ + --in-graph /home//mobilenetv1_int8_pretrained_model.pb \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ + -- input_height=224 input_width=224 warmup_steps=10 steps=50 \ + input_layer="input" output_layer="MobilenetV1/Predictions/Reshape_1" + ``` + + For online inference (using `--benchmark-only`, `--socket-id 0` and `--batch-size 1`) + ``` + python launch_benchmark.py \ + --model-name mobilenet_v1 \ + --precision int8 \ + --mode inference \ + --framework tensorflow \ + --benchmark-only \ + --batch-size 1 \ + --socket-id 0 \ + --in-graph /home//mobilenetv1_int8_pretrained_model.pb \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ + -- input_height=224 input_width=224 warmup_steps=10 steps=50 \ + input_layer="input" output_layer="MobilenetV1/Predictions/Reshape_1" + ``` + + For accuracy (using your `--data-location`, `--accuracy-only` and + `--batch-size 100`): + ``` + python launch_benchmark.py \ + --model-name mobilenet_v1 \ + --precision int8 \ + --mode inference \ + --framework tensorflow \ + --accuracy-only \ + --batch-size 100 \ + --socket-id 0 \ + --in-graph /home//mobilenetv1_int8_pretrained_model.pb \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ + --data-location /home//imagenet_validation_dataset \ + -- input_height=224 input_width=224 \ + input_layer="input" output_layer="MobilenetV1/Predictions/Reshape_1" + ``` + + Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands + to get additional debug output or change the default output location. + +4. 
The log file is saved to the `models/benchmarks/common/tensorflow/logs` directory, + or the directory specified by the `--output-dir` arg. Below are examples of + what the tail of your log file should look like for the different configs. + + Example log tail when running for batch inference: + ``` + [Running warmup steps...] + steps = 10, 1865.30956528 images/sec + [Running benchmark steps...] + steps = 10, 1872.92398031 images/sec + steps = 20, 1862.64499512 images/sec + steps = 30, 1857.97283454 images/sec + steps = 40, 1864.70142784 images/sec + steps = 50, 1854.23896906 images/sec + Ran inference with batch size 240 + Log location outside container: {--output-dir value}/benchmark_mobilenet_v1_inference_int8_20190523_164626.log + ``` + + Example log tail when running for online inference: + ``` + [Running warmup steps...] + steps = 10, 197.082229114 images/sec + [Running benchmark steps...] + steps = 10, 195.201936054 images/sec + steps = 20, 195.693743293 images/sec + steps = 30, 198.999098543 images/sec + steps = 40, 189.256565292 images/sec + steps = 50, 201.252531069 images/sec + Ran inference with batch size 1 + Log location outside container: {--output-dir value}/benchmark_mobilenet_v1_inference_int8_20190523_164348.log + ``` + + Example log tail when running for accuracy: + ``` + Iteration time: 66.8541 ms + Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7014, 0.8935) + Iteration time: 66.7909 ms + Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7014, 0.8934) + Iteration time: 66.7001 ms + Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7014, 0.8934) + Ran inference with batch size 100 + Log location outside container: {--output-dir value}/benchmark_mobilenet_v1_inference_int8_20190523_164955.log + ``` + ## FP32 Inference Instructions -1. Download the ImageNet dataset and convert it to the TF records format +1. The ImageNet dataset is required for testing accuracy and can also be + used when running online or batch inference. If no dataset is provided when running + online or batch inference, synthetic data will be used. + + Download the ImageNet dataset and convert it to the TF records format using the instructions [here](https://github.com/tensorflow/models/tree/master/research/slim#an-automated-script-for-processing-imagenet-data). @@ -56,28 +206,35 @@ later. [tensorflow/models](https://github.com/tensorflow/models) repo that was cloned in step 3. 
- * Run for online inference (with `--batch-size 1` and `--checkpoint` with a path to the checkpoint file directory): + * Run for online inference (with `--batch-size 1`, `--checkpoint` + with a path to the checkpoint file directory, and the `--data-location` + is optional): + ``` python launch_benchmark.py \ --precision fp32 \ --model-name mobilenet_v1 \ --mode inference \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --model-source-dir /home//tensorflow/models \ --batch-size 1 \ --socket-id 0 \ --data-location /dataset/Imagenet_Validation \ --checkpoint /home//mobilenet_v1_fp32_pretrained_model ``` - * Run for batch inference (with `--batch-size 100` and `--checkpoint` with a path to the checkpoint file directory): + + * Run for batch inference (with `--batch-size 100`, + `--checkpoint` with a path to the checkpoint file directory, and + the `--data-location` is optional): + ``` python launch_benchmark.py \ --precision fp32 \ --model-name mobilenet_v1 \ --mode inference \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --model-source-dir /home//tensorflow/models \ --batch-size 100 \ --socket-id 0 \ @@ -91,7 +248,7 @@ later. --model-name mobilenet_v1 \ --mode inference \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --model-source-dir /home//tensorflow/models \ --batch-size 100 \ --accuracy-only \ @@ -115,8 +272,6 @@ later. self._displayed_steps = 10 Total images/sec = 81.0 Latency ms/step = 12.4 - lscpu_path_cmd = command -v lscpu - lscpu located here: /usr/bin/lscpu Ran inference with batch size 1 Log location outside container: {--output-dir value}/benchmark_mobilenet_v1_inference_fp32_20190104_200218.log ``` @@ -132,18 +287,17 @@ later. self._total_images_per_sec = 1810.2 self._displayed_steps = 10 Total images/sec = 181.0 - lscpu_path_cmd = command -v lscpu - lscpu located here: /usr/bin/lscpu Ran inference with batch size 100 Log location outside container: {--output-dir value}/benchmark_mobilenet_v1_inference_fp32_20190104_200512.log ``` * Below is a sample lof file snippet when testing accuracy: ``` + Iteration time: 119.1134 ms Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7104, 0.8999) + Iteration time: 118.8375 ms Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7103, 0.8999) + Iteration time: 119.9311 ms Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7102, 0.8999) - lscpu_path_cmd = command -v lscpu - lscpu located here: /usr/bin/lscpu Ran inference with batch size 100 Log location outside container: {--output-dir value}/benchmark_mobilenet_v1_inference_fp32_20190110_211648.log - ``` \ No newline at end of file + ``` diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/__init__.py b/benchmarks/image_recognition/tensorflow/mobilenet_v1/__init__.py index cf793ec6a..d9c4123de 100644 --- a/benchmarks/image_recognition/tensorflow/mobilenet_v1/__init__.py +++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/__init__.py @@ -1,7 +1,7 @@ # # -*- coding: utf-8 -*- # -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/__init__.py b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/__init__.py index cf793ec6a..d9c4123de 100644 --- a/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/__init__.py +++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/__init__.py @@ -1,7 +1,7 @@ # # -*- coding: utf-8 -*- # -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/fp32/config.json b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/fp32/config.json new file mode 100644 index 000000000..f0b327528 --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/fp32/config.json @@ -0,0 +1,6 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1 + } +} diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/fp32/model_init.py b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/fp32/model_init.py index e75c72194..8fa7391ae 100644 --- a/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/fp32/model_init.py +++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/fp32/model_init.py @@ -33,8 +33,9 @@ def __init__(self, args, custom_args=[], platform_util=None): if self.args.batch_size == -1: self.args.batch_size = 128 - # Set KMP env vars (except KMP_SETTINGS is not set) - self.set_kmp_vars(kmp_settings=None) + # Set KMP env vars, if they haven't already been set + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) # set num_inter_threads and num_intra_threads (override inter threads to 2) self.set_num_inter_intra_threads(num_inter_threads=2) @@ -56,7 +57,6 @@ def __init__(self, args, custom_args=[], platform_util=None): self.command_prefix = ("{prefix} " "--dataset_name imagenet " "--checkpoint_path {checkpoint} " - "--dataset_dir {dataset} " "--dataset_split_name=validation " "--clone_on_cpu=True " "--model_name {model} " @@ -64,9 +64,11 @@ def __init__(self, args, custom_args=[], platform_util=None): "--intra_op_parallelism_threads {intra} " "--batch_size {bz}").format( prefix=self.command_prefix, checkpoint=self.args.checkpoint, - dataset=self.args.data_location, model=self.args.model_name, - inter=self.args.num_inter_threads, + model=self.args.model_name, inter=self.args.num_inter_threads, intra=self.args.num_intra_threads, bz=self.args.batch_size) + + if self.args.data_location: + self.command_prefix += " --dataset_dir {}".format(self.args.data_location) else: # add args for the accuracy script script_args_list = [ diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/__init__.py b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/__init__.py new file mode 100644 index 000000000..cf793ec6a --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/config.json b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/config.json new file mode 100644 index 000000000..273b45b40 --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/model_init.py b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/model_init.py new file mode 100644 index 000000000..c693b055c --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/mobilenet_v1/inference/int8/model_init.py @@ -0,0 +1,100 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: EPL-2.0 +# + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import os + +from common.base_model_init import BaseModelInitializer +from common.base_model_init import set_env_var + + +class ModelInitializer(BaseModelInitializer): + """Model initializer for Mobilenet INT8 inference""" + + def __init__(self, args, custom_args=[], platform_util=None): + super(ModelInitializer, self).__init__(args, custom_args, platform_util) + self.cmd = self.get_command_prefix(self.args.socket_id) + "python " + + # Set KMP env vars, if they haven't already been set + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) + + # Set the num_inter_threads and num_intra_threads + self.set_num_inter_intra_threads() + # Set env vars, if they haven't already been set + set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) + + self.parse_args() + + if self.args.benchmark_only: + run_script = os.path.join( + self.args.intelai_models, self.args.mode, + self.args.precision, "benchmark.py") + script_args_list = [ + "input_graph", "input_height", "input_width", "batch_size", + "input_layer", "output_layer", "num_inter_threads", + "num_intra_threads", "warmup_steps", "steps"] + if self.args.accuracy_only: + run_script = os.path.join( + self.args.intelai_models, self.args.mode, + self.args.precision, "accuracy.py") + script_args_list = [ + "input_graph", "data_location", "input_height", "input_width", + "batch_size", "input_layer", "output_layer", + "num_inter_threads", "num_intra_threads"] + + self.cmd = self.add_args_to_command(self.cmd + run_script, script_args_list) + + def parse_args(self): + if self.custom_args: + parser = argparse.ArgumentParser() + parser.add_argument( + "--input_height", default=224, + dest='input_height', type=int, help="input height") + parser.add_argument( + "--input_width", default=224, + dest='input_width', type=int, help="input width") + parser.add_argument( + '--warmup_steps', dest='warmup_steps', + help='number of warmup steps', + type=int, default=10) + parser.add_argument( + '--steps', dest='steps', + help='number of steps', + type=int, default=50) + parser.add_argument( + '--input_layer', dest='input_layer', + help='name of input layer', + type=str, default="input") + parser.add_argument( + '--output_layer', dest='output_layer', + help='name of output layer', + type=str, default="MobilenetV1/Predictions/Reshape_1") + + self.args = parser.parse_args(self.custom_args, + namespace=self.args) + + def run(self): + if self.cmd: + self.run_command(self.cmd) diff --git a/benchmarks/image_recognition/tensorflow/resnet101/README.md b/benchmarks/image_recognition/tensorflow/resnet101/README.md index 442c9cb21..7fb3566eb 100644 --- a/benchmarks/image_recognition/tensorflow/resnet101/README.md +++ b/benchmarks/image_recognition/tensorflow/resnet101/README.md @@ -7,6 +7,11 @@ following modes/precisions: ## Int8 Inference Instructions +These instructions use the TCMalloc memory allocator, which produces +better performance results for Int8 precision models with smaller batch sizes. +If you want to disable the use of TCMalloc, set `--disable-tcmalloc=True` +when calling `launch_benchmark.py` and the script will run without TCMalloc. + 1. 
Clone this [intelai/models](https://github.com/IntelAI/models) repository: @@ -80,7 +85,7 @@ $ python launch_benchmark.py \ --framework tensorflow \ --accuracy-only \ --batch-size 100 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --data-location /home//dataset/FullImageNetData_directory \ --in-graph=/home//resnet101_int8_pretrained_model.pb ``` @@ -101,7 +106,7 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph=/home//resnet101_int8_pretrained_model.pb \ -- warmup_steps=50 steps=500 ``` @@ -118,7 +123,7 @@ python launch_benchmark.py \ --batch-size 1 \ --socket-id 0 \ --data-location /home//dataset/FullImageNetData_directory \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph=/home//resnet101_int8_pretrained_model.pb \ -- warmup_steps=50 steps=500 ``` @@ -134,7 +139,7 @@ python launch_benchmark.py \ --benchmark-only \ --batch-size 128 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph=/home//resnet101_int8_pretrained_model.pb \ -- warmup_steps=50 steps=500 ``` @@ -151,18 +156,11 @@ python launch_benchmark.py \ --batch-size 128 \ --data-location /home//dataset/FullImageNetData_directory \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph=/home//resnet101_int8_pretrained_model.pb \ -- warmup_steps=50 steps=500 ``` - -The docker image (`intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl`) -used in the commands above were built using -[TensorFlow](git@github.com:tensorflow/tensorflow.git) master -([e889ea1](https://github.com/tensorflow/tensorflow/commit/e889ea1dd965c31c391106aa3518fc23d2689954)) and -[PR #25765](https://github.com/tensorflow/tensorflow/pull/25765). - Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands to get additional debug output or change the default output location.. @@ -176,8 +174,6 @@ Example log tail when running for accuracy: Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7690, 0.9304) Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7691, 0.9305) Processed 50000 images. 
(Top1 accuracy, Top5 accuracy) = (0.7691, 0.9305) -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 100 Log location outside container: {--output-dir value}/benchmark_resnet101_inference_int8_20190104_205838.log ``` @@ -189,8 +185,6 @@ steps = 470, 48.3195530058 images/sec steps = 480, 47.2792312364 images/sec steps = 490, 46.3175214744 images/sec steps = 500, 45.4044245083 images/sec -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 1 Log location outside container: {--output-dir value}/benchmark_resnet101_inference_int8_20190223_191406.log ``` @@ -202,8 +196,6 @@ steps = 470, 328.906266308 images/sec steps = 480, 322.0451309 images/sec steps = 490, 315.455582114 images/sec steps = 500, 309.142758646 images/sec -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 128 Log location outside container: {--output-dir value}/benchmark_resnet101_inference_int8_20190223_192438.log ``` @@ -258,7 +250,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/resnet10 --mode inference \ --model-name resnet101 \ --batch-size 128 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph /home//trained_models/resnet101_fp32_pretrained_model.pb \ --socket-id 0 ``` @@ -272,8 +264,6 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/resnet10 steps = 80, 169.258177508 images/sec steps = 90, 150.457869027 images/sec steps = 100, 135.433960175 images/sec - lscpu_path_cmd = command -v lscpu - lscpu located here: /usr/bin/lscpu Ran inference with batch size 128 Log location outside container: {--output-dir value}/benchmark_resnet101_inference_fp32_20190104_204615.log ``` @@ -287,7 +277,7 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/resnet10 --mode inference \ --model-name resnet101 \ --batch-size 100 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph /home//trained_models/resnet101_fp32_pretrained_model.pb \ --data-location /home//imagenet_validation_dataset \ --accuracy-only \ @@ -304,8 +294,6 @@ $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/resnet10 Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7639, 0.9289) Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7641, 0.9289) Processed 50000 images. 
(Top1 accuracy, Top5 accuracy) = (0.7640, 0.9289) - lscpu_path_cmd = command -v lscpu - lscpu located here: /usr/bin/lscpu Ran inference with batch size 100 Log location outside container: {--output-dir value}/benchmark_resnet101_inference_fp32_20190104_201506.log ``` diff --git a/benchmarks/image_recognition/tensorflow/resnet101/inference/fp32/config.json b/benchmarks/image_recognition/tensorflow/resnet101/inference/fp32/config.json new file mode 100644 index 000000000..273b45b40 --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/resnet101/inference/fp32/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/image_recognition/tensorflow/resnet101/inference/fp32/model_init.py b/benchmarks/image_recognition/tensorflow/resnet101/inference/fp32/model_init.py index 5e35e462b..98962a670 100644 --- a/benchmarks/image_recognition/tensorflow/resnet101/inference/fp32/model_init.py +++ b/benchmarks/image_recognition/tensorflow/resnet101/inference/fp32/model_init.py @@ -60,15 +60,17 @@ def __init__(self, args, custom_args=[], platform_util=None): self.args = arg_parser.parse_args(self.custom_args, namespace=self.args) - # Use default KMP variable values, but override the default KMP_BLOCKTIME value - self.set_kmp_vars(kmp_blocktime=str(self.args.kmp_blocktime)) + # Set KMP env vars, if they haven't already been set, but override the default KMP_BLOCKTIME value + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path, kmp_blocktime=str(self.args.kmp_blocktime)) + set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) benchmark_script = os.path.join( self.args.intelai_models, self.args.mode, "eval_image_classifier_inference.py") - self.benchmark_command = self.get_numactl_command(args.socket_id) + \ + self.benchmark_command = self.get_command_prefix(args.socket_id) + \ self.python_exe + " " + benchmark_script self.benchmark_command = \ diff --git a/benchmarks/image_recognition/tensorflow/resnet101/inference/int8/config.json b/benchmarks/image_recognition/tensorflow/resnet101/inference/int8/config.json new file mode 100644 index 000000000..6f1228ba7 --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/resnet101/inference/int8/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 0, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/image_recognition/tensorflow/resnet101/inference/int8/model_init.py b/benchmarks/image_recognition/tensorflow/resnet101/inference/int8/model_init.py index 5e32d3e92..36a9f479a 100644 --- a/benchmarks/image_recognition/tensorflow/resnet101/inference/int8/model_init.py +++ b/benchmarks/image_recognition/tensorflow/resnet101/inference/int8/model_init.py @@ -41,8 +41,9 @@ def __init__(self, args, custom_args=[], platform_util=None): set_env_var("OMP_NUM_THREADS", platform_util.num_cores_per_socket if args.num_cores == -1 else args.num_cores) - # Set KMP env vars, but override default KMP_BLOCKTIME value - self.set_kmp_vars(kmp_blocktime="0") + # Set KMP env vars, if they haven't already been set + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) def parse_args(self): parser = argparse.ArgumentParser() @@ -77,7 +78,7 @@ def run_benchmark_or_accuracy(self): self.args.intelai_models, 
self.args.mode, "eval_image_classifier_inference.py") - cmd = self.get_numactl_command(self.args.socket_id) + self.python_exe + " " + cmd + cmd = self.get_command_prefix(self.args.socket_id) + self.python_exe + " " + cmd cmd += " --input-graph=" + self.args.input_graph + \ " --num-inter-threads=" + str(self.args.num_inter_threads) + \ @@ -100,13 +101,13 @@ def run_benchmark_or_accuracy(self): self.run_command(cmd) def run_calibration(self): - calibration_script = os.path.join(self.args.intelai_models, self.args.mode, + calibration_script = os.path.join(self.args.intelai_models, self.args.precision, "calibration.py") script_args_list = [ "input_graph", "data_location", "batch_size", "num_inter_threads", "num_intra_threads"] - cmd_prefix = self.get_numactl_command(self.args.socket_id) + \ + cmd_prefix = self.get_command_prefix(self.args.socket_id) + \ self.python_exe + " " + calibration_script cmd = self.add_args_to_command(cmd_prefix, script_args_list) self.run_command(cmd) diff --git a/benchmarks/image_recognition/tensorflow/resnet50/README.md b/benchmarks/image_recognition/tensorflow/resnet50/README.md index fec96a4f2..71bbdf7cc 100644 --- a/benchmarks/image_recognition/tensorflow/resnet50/README.md +++ b/benchmarks/image_recognition/tensorflow/resnet50/README.md @@ -10,6 +10,11 @@ precisions. ## Int8 Inference Instructions +These instructions use the TCMalloc memory allocator, which produces +better performance results for Int8 precision models with smaller batch sizes. +If you want to disable the use of TCMalloc, set `--disable-tcmalloc=True` +when calling `launch_benchmark.py` and the script will run without TCMalloc. + 1. Download the full ImageNet dataset and convert to the TF records format. * Clone the tensorflow/models repository: @@ -38,12 +43,6 @@ $ git clone https://github.com/IntelAI/models.git The optimized ResNet50 model files are attached to the [intelai/models](https://github.com/intelai/models) repo and located at `models/models/image_recognition/tensorflow/resnet50/`. - The docker image (`intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl`) - used in the commands above were built using - [TensorFlow](git@github.com:tensorflow/tensorflow.git) master - ([e889ea1](https://github.com/tensorflow/tensorflow/commit/e889ea1dd965c31c391106aa3518fc23d2689954)) and - [PR #25765](https://github.com/tensorflow/tensorflow/pull/25765). - * Calculate the model accuracy, the required parameters parameters include: the `ImageNet` dataset location (from step 1), the pre-trained `final_int8_resnet50.pb` input graph file (from step 2), and the `--accuracy-only` flag. @@ -59,20 +58,23 @@ $ python launch_benchmark.py \ --mode inference \ --batch-size=100 \ --accuracy-only \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 ``` The log file is saved to the value of `--output-dir`. The tail of the log output when the script completes should look something like this: ``` +Iteration time: 233.495 ms Processed 49600 images. (Top1 accuracy, Top5 accuracy) = (0.7361, 0.9155) +Iteration time: 233.231 ms Processed 49700 images. (Top1 accuracy, Top5 accuracy) = (0.7361, 0.9155) +Iteration time: 234.541 ms Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7360, 0.9154) +Iteration time: 233.033 ms Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7361, 0.9155) +Iteration time: 233.013 ms Processed 50000 images. 
(Top1 accuracy, Top5 accuracy) = (0.7360, 0.9154) -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 100 Log location outside container: {--output-dir value}/benchmark_resnet50_inference_int8_20190104_212224.log ``` @@ -97,21 +99,22 @@ $ python launch_benchmark.py \ --mode inference \ --batch-size=128 \ --benchmark-only \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 -- warmup_steps=50 steps=500 ``` The tail of the log output when the script completes should look something like this: ``` ... -steps = 470, 460.113806562 images/sec -steps = 480, 460.073982602 images/sec -steps = 490, 463.289831148 images/sec -steps = 500, 463.521427264 images/sec -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu +Iteration 497: 0.253495 sec +Iteration 498: 0.253033 sec +Iteration 499: 0.258083 sec +Iteration 500: 0.254541 sec +Average time: 0.254572 sec +Batch size = 128 +Throughput: 502.805 images/sec Ran inference with batch size 128 -Log location outside container: {--output-dir value}/benchmark_resnet50_inference_int8_20190223_180546.log +Log location outside container: {--output-dir value}/benchmark_resnet50_inference_int8_20190416_172735.log ``` Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands @@ -157,7 +160,7 @@ $ python launch_benchmark.py \ --mode inference \ --batch-size=1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 ``` The log file is saved to the value of `--output-dir`. @@ -176,8 +179,6 @@ Average time: 0.011 sec Batch size = 1 Latency: 10.924 ms Throughput: 91.541 images/sec -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 1 Log location outside container: {--output-dir value}/benchmark_resnet50_inference_fp32_20190104_215326.log ``` @@ -194,7 +195,7 @@ $ python launch_benchmark.py \ --mode inference \ --batch-size=128 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 ``` The log file is saved to the value of `--output-dir`. @@ -213,8 +214,6 @@ Iteration 40: 0.652 sec Average time: 0.653 sec Batch size = 128 Throughput: 196.065 images/sec -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 128 Log location outside container: {--output-dir value}/benchmark_resnet50_inference_fp32_20190104_215655.log ``` @@ -234,7 +233,7 @@ $ python launch_benchmark.py \ --batch-size 100 \ --socket-id 0 \ --data-location /home//dataset/ImageNetData_directory \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 ``` The log file is saved to the value of `--output-dir`. @@ -242,9 +241,8 @@ The tail of the log output when the accuracy run completes should look something like this: ``` ... +Iteration time: 649.252 ms Processed 50000 images. 
(Top1 accuracy, Top5 accuracy) = (0.7430, 0.9188) -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 100 Log location outside container: {--output-dir value}/benchmark_resnet50_inference_fp32_20190104_213452.log ``` @@ -269,7 +267,7 @@ $ python launch_benchmark.py \ --batch-size 100 \ --socket-id 0 \ --data-location /home//dataset/ImageNetData_directory \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 ``` The results file will be written to the `models/benchmarks/common/tensorflow/logs` directory, unless another diff --git a/benchmarks/image_recognition/tensorflow/resnet50/inference/fp32/config.json b/benchmarks/image_recognition/tensorflow/resnet50/inference/fp32/config.json new file mode 100644 index 000000000..273b45b40 --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/resnet50/inference/fp32/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/image_recognition/tensorflow/resnet50/inference/fp32/model_init.py b/benchmarks/image_recognition/tensorflow/resnet50/inference/fp32/model_init.py index a2e6be8a3..88520cbdd 100644 --- a/benchmarks/image_recognition/tensorflow/resnet50/inference/fp32/model_init.py +++ b/benchmarks/image_recognition/tensorflow/resnet50/inference/fp32/model_init.py @@ -61,15 +61,17 @@ def __init__(self, args, custom_args=[], platform_util=None): self.args = arg_parser.parse_args(self.custom_args, namespace=self.args) - # Use default KMP variable values, but override the default KMP_BLOCKTIME value - self.set_kmp_vars(kmp_blocktime=str(self.args.kmp_blocktime)) + # Set KMP env vars, if they haven't already been set, but override the default KMP_BLOCKTIME value + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path, kmp_blocktime=str(self.args.kmp_blocktime)) + set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) benchmark_script = os.path.join( self.args.intelai_models, self.args.mode, "eval_image_classifier_inference.py") - self.benchmark_command = self.get_numactl_command(args.socket_id) + \ + self.benchmark_command = self.get_command_prefix(args.socket_id) + \ self.python_exe + " " + benchmark_script num_cores = self.platform_util.num_cores_per_socket if self.args.num_cores == -1 \ diff --git a/benchmarks/image_recognition/tensorflow/resnet50/inference/int8/config.json b/benchmarks/image_recognition/tensorflow/resnet50/inference/int8/config.json new file mode 100644 index 000000000..273b45b40 --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/resnet50/inference/int8/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/image_recognition/tensorflow/resnet50/inference/int8/model_init.py b/benchmarks/image_recognition/tensorflow/resnet50/inference/int8/model_init.py index 07dfa5d2f..41571564c 100644 --- a/benchmarks/image_recognition/tensorflow/resnet50/inference/int8/model_init.py +++ b/benchmarks/image_recognition/tensorflow/resnet50/inference/int8/model_init.py @@ -65,15 +65,16 @@ def parse_args(self): self.args = parser.parse_args(self.custom_args, namespace=self.args) - # Use default KMP variable values, but override the default KMP_BLOCKTIME value - 
self.set_kmp_vars(kmp_blocktime=str(self.args.kmp_blocktime)) + # Set KMP env vars, if they haven't already been set, but override the default KMP_BLOCKTIME value + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path, kmp_blocktime=str(self.args.kmp_blocktime)) def run_benchmark_or_accuracy(self): cmd = os.path.join( self.args.intelai_models, self.args.mode, "eval_image_classifier_inference.py") - cmd = self.get_numactl_command(self.args.socket_id) + self.python_exe + " " + cmd + cmd = self.get_command_prefix(self.args.socket_id) + self.python_exe + " " + cmd cmd += " --input-graph=" + self.args.input_graph + \ " --num-inter-threads=" + str(self.args.num_inter_threads) + \ @@ -105,7 +106,7 @@ def run_calibration(self): "input_graph", "data_location", "batch_size", "num_inter_threads", "num_intra_threads"] - cmd_prefix = self.get_numactl_command(self.args.socket_id) + \ + cmd_prefix = self.get_command_prefix(self.args.socket_id) + \ self.python_exe + " " + calibration_script cmd = self.add_args_to_command(cmd_prefix, script_args_list) self.run_command(cmd) diff --git a/benchmarks/image_recognition/tensorflow/resnet50v1_5/README.md b/benchmarks/image_recognition/tensorflow/resnet50v1_5/README.md new file mode 100644 index 000000000..18889005a --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/resnet50v1_5/README.md @@ -0,0 +1,300 @@ +# ResNet50 + +This document has instructions for how to run ResNet50 (v1.5) for the +following precisions: +* [Int8 inference](#int8-inference-instructions) +* [FP32 inference](#fp32-inference-instructions) + +Original ResNet model has multiple versions which have shown better accuracy +and/or batch inference performance. As mentioned in TensorFlow's [official ResNet +model page](https://github.com/tensorflow/models/tree/master/official/resnet), 3 different +versions of the original ResNet model exists - ResNet50v1, ResNet50v1.5, and ResNet50v2. +As a side note, ResNet50v1.5 is also in MLPerf's [cloud inference benchmark for +image classification](https://github.com/mlperf/inference/tree/master/cloud/image_classification). + +## Int8 Inference Instructions + +1. Download the full ImageNet dataset and convert to the TF records format. + +* Clone the tensorflow/models repository: +``` +$ git clone https://github.com/tensorflow/models.git +``` +The TensorFlow models repo provides +[scripts and instructions](https://github.com/tensorflow/models/tree/master/research/slim#an-automated-script-for-processing-imagenet-data) +to download, process and convert the ImageNet dataset to the TF records format. + +* The ImageNet dataset directory location is only required to calculate the model accuracy. + +2. Download the pre-trained model. +``` +wget https://storage.googleapis.com/intel-optimized-tensorflow/models/resnet50v1_5_int8_pretrained_model.pb +``` + +3. Clone the +[intelai/models](https://github.com/intelai/models) +repository +``` +$ git clone https://github.com/IntelAI/models.git +``` + +4. Run the inference script `launch_benchmark.py` with the appropriate parameters to evaluate the model performance and/or calculate the accuracy. +The optimized ResNet50v1.5 model files are attached to the [intelai/models](https://github.com/intelai/models) repo and +located at `models/models/image_recognition/tensorflow/resnet50v1_5/`. 
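A note on the `config.json` hunks above: each `model_init.py` now passes a `config.json` path to the base initializer's `set_kmp_vars` helper, which applies the listed KMP settings only when they are not already set in the environment and accepts an optional `KMP_BLOCKTIME` override. The helper itself lives in `benchmarks/common/base_model_init.py` and is not part of this diff, so the following is only a rough sketch of that behavior under those assumptions, not the actual implementation:

```python
# Rough sketch only; the real helper is in benchmarks/common/base_model_init.py
# and may differ in details.
import json
import os

def set_kmp_vars(config_file_path, kmp_blocktime=None):
    with open(config_file_path) as config_file:
        params = json.load(config_file).get("optimization_parameters", {})

    if kmp_blocktime is not None:
        # Some model inits (e.g. resnet50/resnet101) override the blocktime from a flag.
        params["KMP_BLOCKTIME"] = kmp_blocktime

    for name, value in params.items():
        # Respect values the user has already exported.
        os.environ.setdefault(name, str(value))
```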
+
+    The docker image (`gcr.io/deeplearning-platform-release/tf-cpu.1-14`)
+    used in the commands below was built using
+    [TensorFlow](git@github.com:tensorflow/tensorflow.git) master for TensorFlow
+    version 1.14.
+
+* Calculate the model accuracy, the required parameters include: the `ImageNet` dataset location (from step 1),
+the pre-trained `resnet50v1_5_int8_pretrained_model.pb` input graph file (from step 2), and the `--accuracy-only` flag.
+```
+$ cd /home//models/benchmarks
+
+$ python launch_benchmark.py \
+    --data-location /home//dataset/FullImageNetData_directory \
+    --in-graph resnet50v1_5_int8_pretrained_model.pb \
+    --model-name resnet50v1_5 \
+    --framework tensorflow \
+    --precision int8 \
+    --mode inference \
+    --batch-size=100 \
+    --accuracy-only \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14
+```
+The log file is saved to the value of `--output-dir`.
+
+The tail of the log output when the benchmarking completes should look
+something like this:
+```
+Iteration time: 239.899 ms
+Processed 49700 images. (Top1 accuracy, Top5 accuracy) = (0.7622, 0.9296)
+Iteration time: 239.110 ms
+Processed 49800 images. (Top1 accuracy, Top5 accuracy) = (0.7621, 0.9295)
+Iteration time: 239.512 ms
+Processed 49900 images. (Top1 accuracy, Top5 accuracy) = (0.7622, 0.9296)
+Iteration time: 239.989 ms
+Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7623, 0.9296)
+Ran inference with batch size 100
+Log location outside container: {--output-dir value}/benchmark_resnet50_inference_int8_{timestamp}.log
+```
+
+* Evaluate the model performance: if you are only evaluating performance with dummy data, the
+`--data-location` argument is not needed. Otherwise, the `--data-location` argument needs to be specified.
+To calculate the batch inference performance in `images/sec`, the required parameters to run the inference script include:
+the pre-trained `resnet50v1_5_int8_pretrained_model.pb` input graph file (from step
+2), and the `--benchmark-only` flag. It is
+optional to specify the number of `warmup_steps` and `steps` as extra
+args, as shown in the command below. If these values are not specified,
+the script will default to use `warmup_steps=10` and `steps=50`.
+
+```
+$ cd /home//models/benchmarks
+
+$ python launch_benchmark.py \
+    --in-graph resnet50v1_5_int8_pretrained_model.pb \
+    --model-name resnet50v1_5 \
+    --framework tensorflow \
+    --precision int8 \
+    --mode inference \
+    --batch-size=128 \
+    --benchmark-only \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
+    -- warmup_steps=50 steps=500
+```
+The tail of the log output when the benchmarking completes should look
+something like this:
+```
+...
+Iteration 490: 0.249899 sec
+Iteration 500: 0.249110 sec
+Average time: 0.251280 sec
+Batch size = 128
+Throughput: 509.392 images/sec
+Ran inference with batch size 128
+Log location outside container: {--output-dir value}/benchmark_resnet50_inference_int8_{timestamp}.log
+```
+
+Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands
+to get additional debug output or change the default output location.
+
+## FP32 Inference Instructions
+
+1. Download the pre-trained model.
+
+If you would like to get a pre-trained model for ResNet50v1.5,
+```
+$ wget https://zenodo.org/record/2535873/files/resnet50_v1.pb
+```
+
+2. Clone the [intelai/models](https://github.com/intelai/models) repository
+```
+$ git clone https://github.com/IntelAI/models.git
+```
+
+3.
If running resnet50 for accuracy, the ImageNet dataset will be +required (if running the model for batch or online inference, then dummy +data will be used). + +The TensorFlow models repo provides +[scripts and instructions](https://github.com/tensorflow/models/tree/master/research/slim#an-automated-script-for-processing-imagenet-data) +to download, process, and convert the ImageNet dataset to the TF records format. + +4. Run the inference script `launch_benchmark.py` with the appropriate parameters to evaluate the model performance. +The optimized ResNet50v1.5 model files are attached to the [intelai/models](https://github.com/intelai/models) repo and +located at `models/models/image_recognition/tensorflow/resnet50v1_5/`. +If benchmarking uses dummy data for inference, `--data-location` flag is not required. Otherwise, +`--data-location` needs to point to point to ImageNet dataset location. + +* To measure online inference, set `--batch-size=1` and run the model script as shown: +``` +$ cd /home//models/benchmarks + +$ python launch_benchmark.py \ + --in-graph resnet50_v1.pb \ + --model-name resnet50v1_5 \ + --framework tensorflow \ + --precision fp32 \ + --mode inference \ + --batch-size=1 \ + --socket-id 0 \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 +``` + +The log file is saved to the value of `--output-dir`. + +The tail of the log output when the script completes should look +something like this: +``` +Inference with dummy data. +Iteration 1: 2.761204 sec +Iteration 2: 0.011155 sec +Iteration 3: 0.009289 sec +... +Iteration 48: 0.009315 sec +Iteration 49: 0.009343 sec +Iteration 50: 0.009278 sec +Average time: 0.009481 sec +Batch size = 1 +Latency: 9.481 ms +Throughput: 105.470 images/sec +lscpu_path_cmd = command -v lscpu +lscpu located here: /usr/bin/lscpu +Ran inference with batch size 1 +Log location outside container: {--output-dir value}/benchmark_resnet50_inference_fp32_{timestamp}.log +``` + +* To measure batch inference, set `--batch-size=128` and run the model script as shown: +``` +$ cd /home//models/benchmarks + +$ python launch_benchmark.py \ + --in-graph resnet50_v1.pb \ + --model-name resnet50v1_5 \ + --framework tensorflow \ + --precision fp32 \ + --mode inference \ + --batch-size=128 \ + --socket-id 0 \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 +``` + +The log file is saved to the value of `--output-dir`. + +The tail of the log output when the script completes should look +something like this: +``` +Inference with dummy data. +Iteration 1: 3.013918 sec +Iteration 2: 0.543498 sec +Iteration 3: 0.536187 sec +Iteration 4: 0.532568 sec +... 
+Iteration 46: 0.532444 sec +Iteration 47: 0.535652 sec +Iteration 48: 0.532158 sec +Iteration 49: 0.538117 sec +Iteration 50: 0.532411 sec +Average time: 0.534427 sec +Batch size = 128 +Throughput: 239.509 images/sec +Ran inference with batch size 128 +Log location outside container: {--output-dir value}/benchmark_resnet50_inference_fp32_{timestamp}.log +``` + +* To measure the model accuracy, use the `--accuracy-only` flag and pass +the ImageNet dataset directory from step 3 as the `--data-location`: +``` +$ cd /home//models/benchmarks + +$ python launch_benchmark.py \ + --in-graph resnet50_v1.pb \ + --model-name resnet50v1_5 \ + --framework tensorflow \ + --precision fp32 \ + --mode inference \ + --accuracy-only \ + --batch-size 100 \ + --socket-id 0 \ + --data-location /home//dataset/ImageNetData_directory \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 +``` + +The log file is saved to the value of `--output-dir`. +The tail of the log output when the accuracy run completes should look +something like this: +``` +... +Iteration time: 514.427 ms +Processed 50000 images. (Top1 accuracy, Top5 accuracy) = (0.7651, 0.9307) +lscpu_path_cmd = command -v lscpu +lscpu located here: /usr/bin/lscpu +Ran inference with batch size 100 +Log location outside container: {--output-dir value}/benchmark_resnet50_inference_fp32_{timestamp}.log +``` + +* The `--output-results` flag can be used along with above performance +or accuracy test, in order to also output a file with the inference +results (file name, actual label, and the predicted label). The results +output can only be used with real data. + +For example, the command below is the same as the accuracy test above, +except with the `--output-results` flag added: +``` +$ cd /home//models/benchmarks + +$ python launch_benchmark.py \ + --in-graph resnet50_v1.pb \ + --model-name resnet50v1_5 \ + --framework tensorflow \ + --precision fp32 \ + --mode inference \ + --accuracy-only \ + --output-results \ + --batch-size 100 \ + --socket-id 0 \ + --data-location /home//dataset/ImageNetData_directory \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 +``` +The results file will be written to the +`models/benchmarks/common/tensorflow/logs` directory, unless another +output directory is specified by the `--output-dir` arg. Below is an +example of what the inference results file will look like: +``` +filename,actual,prediction +ILSVRC2012_val_00033870.JPEG,592,592 +ILSVRC2012_val_00045598.JPEG,258,258 +ILSVRC2012_val_00047428.JPEG,736,736 +ILSVRC2012_val_00003341.JPEG,344,344 +ILSVRC2012_val_00037069.JPEG,192,192 +ILSVRC2012_val_00029701.JPEG,440,440 +ILSVRC2012_val_00016918.JPEG,286,737 +ILSVRC2012_val_00015545.JPEG,5,5 +ILSVRC2012_val_00016713.JPEG,274,274 +ILSVRC2012_val_00014735.JPEG,31,31 +... +``` + +Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands +to get additional debug output or change the default output location. diff --git a/benchmarks/image_recognition/tensorflow/resnet50v1_5/__init__.py b/benchmarks/image_recognition/tensorflow/resnet50v1_5/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/resnet50v1_5/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/__init__.py b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/fp32/__init__.py b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/fp32/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/fp32/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/fp32/config.json b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/fp32/config.json new file mode 100644 index 000000000..273b45b40 --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/fp32/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/fp32/model_init.py b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/fp32/model_init.py new file mode 100644 index 000000000..7231243b8 --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/fp32/model_init.py @@ -0,0 +1,115 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from common.base_model_init import BaseModelInitializer +from common.base_model_init import set_env_var + +import os +from argparse import ArgumentParser +import time + + +class ModelInitializer(BaseModelInitializer): + """initialize mode and run benchmark""" + + def __init__(self, args, custom_args=[], platform_util=None): + super(ModelInitializer, self).__init__(args, custom_args, platform_util) + + self.benchmark_command = "" + if not platform_util: + raise ValueError("Did not find any platform info.") + + # use default batch size if -1 + if self.args.batch_size == -1: + self.args.batch_size = 128 + + # set num_inter_threads and num_intra_threads + self.set_num_inter_intra_threads() + + arg_parser = ArgumentParser(description='Parse args') + + arg_parser.add_argument("--warmup-steps", dest='warmup_steps', + type=int, default=10, + help="number of warmup steps") + arg_parser.add_argument("--steps", dest='steps', + type=int, default=50, + help="number of steps") + arg_parser.add_argument( + '--kmp-blocktime', dest='kmp_blocktime', + help='number of kmp block time', + type=int, default=1) + + self.args = arg_parser.parse_args(self.custom_args, namespace=self.args) + + # Set KMP env vars, if they haven't already been set, but override the default KMP_BLOCKTIME value + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path, kmp_blocktime=str(self.args.kmp_blocktime)) + + set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) + + benchmark_script = os.path.join( + self.args.intelai_models, self.args.mode, + "eval_image_classifier_inference.py") + + self.benchmark_command = self.get_command_prefix(args.socket_id) + \ + self.python_exe + " " + benchmark_script + + num_cores = self.platform_util.num_cores_per_socket if self.args.num_cores == -1 \ + else self.args.num_cores + + self.benchmark_command = \ + self.benchmark_command + \ + " --input-graph=" + self.args.input_graph + \ + " --num-inter-threads=" + str(self.args.num_inter_threads) + \ + " --num-intra-threads=" + str(self.args.num_intra_threads) + \ + " --num-cores=" + str(num_cores) + \ + " --batch-size=" + str(self.args.batch_size) + \ + " --warmup-steps=" + str(self.args.warmup_steps) + \ + " --steps=" + str(self.args.steps) + + if self.args.data_num_inter_threads: + self.benchmark_command += " --data-num-inter-threads=" + str(self.args.data_num_inter_threads) + if self.args.data_num_intra_threads: + self.benchmark_command += " --data-num-intra-threads=" + str(self.args.data_num_intra_threads) + + # if the data location directory is not empty, then include the arg + if self.args.data_location and os.listdir(self.args.data_location): + self.benchmark_command += " --data-location=" + \ + self.args.data_location + if self.args.accuracy_only: + self.benchmark_command += " --accuracy-only" + + # if output results is enabled, generate a results file name and pass it to the inference script + if 
self.args.output_results: + self.results_filename = "{}_{}_{}_results_{}.txt".format( + self.args.model_name, self.args.precision, self.args.mode, + time.strftime("%Y%m%d_%H%M%S", time.gmtime())) + self.results_file_path = os.path.join(self.args.output_dir, self.results_filename) + self.benchmark_command += " --results-file-path {}".format(self.results_file_path) + + def run(self): + if self.benchmark_command: + self.run_command(self.benchmark_command) + + if self.args.output_results: + print("Inference results file in the output directory: {}".format(self.results_filename)) diff --git a/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/__init__.py b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/config.json b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/config.json new file mode 100644 index 000000000..273b45b40 --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/model_init.py b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/model_init.py new file mode 100644 index 000000000..03b523829 --- /dev/null +++ b/benchmarks/image_recognition/tensorflow/resnet50v1_5/inference/int8/model_init.py @@ -0,0 +1,123 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: EPL-2.0 +# + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +from common.base_model_init import BaseModelInitializer +from common.base_model_init import set_env_var + +import argparse +import os + + +class ModelInitializer(BaseModelInitializer): + """Model initializer for resnet50 int8 inference""" + + def __init__(self, args, custom_args=[], platform_util=None): + super(ModelInitializer, self).__init__(args, custom_args, platform_util) + + # Set the num_inter_threads and num_intra_threads + self.set_num_inter_intra_threads() + # Set env vars, if they haven't already been set + set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads, overwrite_existing=True) + + def parse_args(self): + parser = argparse.ArgumentParser() + parser.add_argument( + "--warmup-steps", dest="warmup_steps", + help="number of warmup steps", + type=int, default=10) + parser.add_argument( + "--steps", dest="steps", + help="number of steps", + type=int, default=50) + parser.add_argument( + '--kmp-blocktime', dest='kmp_blocktime', + help='number of kmp block time', + type=int, default=1) + parser.add_argument( + "--calibration-only", + help="Calibrate the accuracy.", + dest="calibration_only", action="store_true") + parser.add_argument( + "--calibrate", dest="calibrate", + help=" run accuracy with calibration data, " + "to generate min_max ranges, calibrate=[True/False]", + type=bool, default=False) + + self.args = parser.parse_args(self.custom_args, + namespace=self.args) + + # Set KMP env vars, if they haven't already been set, but override the default KMP_BLOCKTIME value + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path, kmp_blocktime=str(self.args.kmp_blocktime)) + + set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) + + def run_benchmark_or_accuracy(self): + cmd = os.path.join( + self.args.intelai_models, self.args.mode, + "eval_image_classifier_inference.py") + + cmd = self.get_command_prefix(self.args.socket_id) + self.python_exe + " " + cmd + + cmd += " --input-graph=" + self.args.input_graph + \ + " --num-inter-threads=" + str(self.args.num_inter_threads) + \ + " --num-intra-threads=" + str(self.args.num_intra_threads) + \ + " --batch-size=" + str(self.args.batch_size) + \ + " --warmup-steps=" + str(self.args.warmup_steps) + \ + " --steps=" + str(self.args.steps) + + if self.args.calibrate: + cmd += " --calibrate=" + str(self.args.calibrate) + if self.args.data_num_inter_threads: + cmd += " --data-num-inter-threads=" + str(self.args.data_num_inter_threads) + if self.args.data_num_intra_threads: + cmd += " --data-num-intra-threads=" + str(self.args.data_num_intra_threads) + + # if the data location directory is not empty, then include the arg + if self.args.data_location and os.listdir(self.args.data_location): + cmd += " --data-location=" + self.args.data_location + if self.args.accuracy_only: + cmd += " --accuracy-only" + + self.run_command(cmd) + + def run_calibration(self): + calibration_script = os.path.join(self.args.intelai_models, + self.args.precision, + "generate_calibration_data.py") + script_args_list = [ + "input_graph", "data_location", + "batch_size", + "num_inter_threads", "num_intra_threads"] + cmd_prefix = self.get_command_prefix(self.args.socket_id) + \ + self.python_exe + " " + calibration_script + cmd = self.add_args_to_command(cmd_prefix, script_args_list) + self.run_command(cmd) + + def run(self): + # 
Parse custom arguments and append to self.args + self.parse_args() + if self.args.accuracy_only and self.args.calibration_only: + self.run_calibration() + else: + self.run_benchmark_or_accuracy() diff --git a/benchmarks/image_recognition/tensorflow/squeezenet/README.md b/benchmarks/image_recognition/tensorflow/squeezenet/README.md index e5e9cc86c..feaba492a 100644 --- a/benchmarks/image_recognition/tensorflow/squeezenet/README.md +++ b/benchmarks/image_recognition/tensorflow/squeezenet/README.md @@ -79,7 +79,7 @@ $ python launch_benchmark.py \ --framework tensorflow \ --socket-id 0 \ --batch-size 64 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --checkpoint /home//squeezenet_checkpoints \ --data-location /home//datasets/ImageNet_TFRecords ``` @@ -94,7 +94,7 @@ $ python launch_benchmark.py \ --framework tensorflow \ --socket-id 0 \ --batch-size 1 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --checkpoint /home//squeezenet_checkpoints \ --data-location /home//datasets/ImageNet_TFRecords ``` @@ -114,8 +114,6 @@ SqueezeNet Inference Summary: throughput[med] = 837.1 image/sec latency[median] = 1.195 ms -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 64 Log location outside container: {--output-dir value}/benchmark_squeezenet_inference_fp32_20190104_220051.log ``` @@ -129,8 +127,6 @@ SqueezeNet Inference Summary: throughput[med] = 115.3 image/sec latency[median] = 8.67 ms -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 1 Log location outside container: {--output-dir value}/benchmark_squeezenet_inference_fp32_20190104_220712.log ``` diff --git a/benchmarks/image_recognition/tensorflow_serving/__init__.py b/benchmarks/image_recognition/tensorflow_serving/__init__.py new file mode 100644 index 000000000..cf793ec6a --- /dev/null +++ b/benchmarks/image_recognition/tensorflow_serving/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/image_recognition/tensorflow_serving/inceptionv3/README.md b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/README.md new file mode 100644 index 000000000..bef280f1d --- /dev/null +++ b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/README.md @@ -0,0 +1,90 @@ +# Inception V3 + +This document has instructions for how to run Inception V3 for the +following modes/precisions: +* [FP32 inference](#fp32-inference-instructions) + +## FP32 Inference Instructions + +1. Clone this [intelai/models](https://github.com/IntelAI/models) +repository: + +``` +$ git clone https://github.com/IntelAI/models.git +``` + +2. Download the pre-trained model. 
+``` +$ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/inceptionv3_fp32_pretrained_model.pb +``` + +3. Navigate to the `benchmarks` directory in your local clone of +the [intelai/models](https://github.com/IntelAI/models) repo from step 1. +The `launch_benchmark.py` script in the `benchmarks` directory is +used for starting a tensorflow serving run using optimized TensorFlow Serving docker +container. It has arguments to specify which model, framework, mode, +precision, and input graph. + +Substitute in your own `--in-graph` pretrained model file path (from step 2). + +4. Inception V3 can be run for measuring batch or online inference performance. Use one of the following examples below, +depending on your use case. + +* For online inference with dummy data (using `--batch-size 1`): + +``` +python launch_benchmark.py \ + --in-graph /home//inceptionv3_fp32_pretrained_model.pb \ + --model-name inceptionv3 \ + --framework tensorflow_serving \ + --precision fp32 \ + --mode inference \ + --batch-size=1 \ + --benchmark-only +``` +Example log tail when running for online inference: +``` +Iteration 35: 0.019 sec +Iteration 36: 0.020 sec +Iteration 37: 0.018 sec +Iteration 38: 0.018 sec +Iteration 39: 0.019 sec +Iteration 40: 0.018 sec +Average time: 0.019 sec +Batch size = 1 +Latency: 18.801 ms +Throughput: 53.189 images/sec +tfserving_3784 +Log output location: {--output-dir value}/benchmark_inceptionv3_inference_fp32_20190516_103531.log +``` + +* For batch inference with dummy data (using `--batch-size 128`): + +``` +python launch_benchmark.py \ + --in-graph /home//inceptionv3_fp32_pretrained_model.pb \ + --model-name inceptionv3 \ + --framework tensorflow_serving \ + --precision fp32 \ + --mode inference \ + --batch-size=128 \ + --benchmark-only +``` +Example log tail when running for batch inference: +``` +Iteration 34: 0.779 sec +Iteration 35: 0.916 sec +Iteration 36: 0.809 sec +Iteration 37: 0.793 sec +Iteration 38: 0.813 sec +Iteration 39: 0.796 sec +Iteration 40: 0.796 sec +Average time: 0.817 sec +Batch size = 128 +Throughput: 156.752 images/sec +tfserving_5299 +Log output location: {--output-dir value}/benchmark_inceptionv3_inference_fp32_20190516_103958.log +``` + +Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands +to get additional debug output or change the default output location. diff --git a/benchmarks/image_recognition/tensorflow_serving/inceptionv3/__init__.py b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/__init__.py new file mode 100644 index 000000000..cf793ec6a --- /dev/null +++ b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/__init__.py b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/__init__.py new file mode 100644 index 000000000..cf793ec6a --- /dev/null +++ b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/__init__.py b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/__init__.py new file mode 100644 index 000000000..cf793ec6a --- /dev/null +++ b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/image_recognition_benchmark.py b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/image_recognition_benchmark.py new file mode 100644 index 000000000..3178741db --- /dev/null +++ b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/image_recognition_benchmark.py @@ -0,0 +1,117 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +"""Send simulated image data to tensorflow_model_server loaded with ResNet50 or InceptionV3 model. 
+ +""" + +from __future__ import print_function + +import os +import random + +import grpc +import numpy as np +import sys +import tensorflow as tf +import time +from tensorflow_serving.apis import predict_pb2 +from tensorflow_serving.apis import prediction_service_pb2_grpc + +from util import preprocess_image, parse_example_proto + +tf.app.flags.DEFINE_string('server', 'localhost:8500', + 'PredictionService host:port') +tf.app.flags.DEFINE_integer('batch_size', 1, 'Batch size to use') +tf.app.flags.DEFINE_string('data_dir', '', 'path to images in TF records format') +tf.app.flags.DEFINE_string('model', 'resnet50', 'Name of model (resnet50 or inceptionv3).') +FLAGS = tf.app.flags.FLAGS + + +def sample_images(image_size): + """Pull a random batch of images from FLAGS.data_dir containing TF record formatted ImageNet validation set + Returns: + ndarray of float32 with shape [FLAGS.batch_size, image_size, image_size, 3] + """ + + sample_file = random.choice(os.listdir(FLAGS.data_dir)) + dataset = tf.data.TFRecordDataset(os.path.join(FLAGS.data_dir, sample_file)) + dataset = dataset.map(lambda x: parse_example_proto(x)).shuffle(True).batch(FLAGS.batch_size) + iterator = dataset.make_one_shot_iterator() + next_element = iterator.get_next() + with tf.Session() as sess: + images, labels = sess.run(next_element) + images = np.array([sess.run(preprocess_image(x, FLAGS.model, image_size)) for x in images]) + + return images + + +def main(_): + if FLAGS.model == 'resnet50': + image_size = 224 + elif FLAGS.model == 'inceptionv3': + image_size = 299 + else: + print('Please specify model as either resnet50 or inceptionv3.') + sys.exit(-1) + + channel = grpc.insecure_channel(FLAGS.server) + stub = prediction_service_pb2_grpc.PredictionServiceStub(channel) + i = 0 + num_iteration = 40 + warm_up_iteration = 10 + total_time = 0 + for _ in range(num_iteration): + i += 1 + if FLAGS.data_dir: + image_np = sample_images(image_size) + else: + image_np = np.random.rand(FLAGS.batch_size, image_size, image_size, 3).astype(np.float32) + if FLAGS.model == 'resnet50': + # For ResNet50, rescale to [0, 256] + image_np *= 256.0 + elif FLAGS.model == 'inceptionv3': + # For InceptionV3, rescale to [-1, 1] + image_np = (image_np - 0.5) * 2.0 + + request = predict_pb2.PredictRequest() + request.model_spec.name = FLAGS.model + request.model_spec.signature_name = 'serving_default' + request.inputs['input'].CopyFrom( + tf.contrib.util.make_tensor_proto(image_np, shape=[FLAGS.batch_size, image_size, image_size, 3])) + start_time = time.time() + stub.Predict(request, 10.0) # 10 secs timeout + time_consume = time.time() - start_time + print('Iteration %d: %.3f sec' % (i, time_consume)) + if i > warm_up_iteration: + total_time += time_consume + + time_average = total_time / (num_iteration - warm_up_iteration) + print('Average time: %.3f sec' % (time_average)) + + print('Batch size = %d' % FLAGS.batch_size) + if (FLAGS.batch_size == 1): + print('Latency: %.3f ms' % (time_average * 1000)) + + print('Throughput: %.3f images/sec' % (FLAGS.batch_size / time_average)) + + +if __name__ == '__main__': + tf.app.run() diff --git a/docs/image_recognition/tensorflow_serving/src/image_recognition_client.py b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/image_recognition_client.py similarity index 52% rename from docs/image_recognition/tensorflow_serving/src/image_recognition_client.py rename to benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/image_recognition_client.py index 
abdc77d05..2926f4621 100644 --- a/docs/image_recognition/tensorflow_serving/src/image_recognition_client.py +++ b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/image_recognition_client.py @@ -24,12 +24,11 @@ from __future__ import print_function -import sys import grpc -import requests import numpy as np +import requests +import sys import tensorflow as tf - from tensorflow_serving.apis import predict_pb2 from tensorflow_serving.apis import prediction_service_pb2_grpc @@ -41,45 +40,46 @@ tf.app.flags.DEFINE_string('server', 'localhost:8500', 'PredictionService host:port') tf.app.flags.DEFINE_string('image', '', 'path to image in JPEG format') -tf.app.flags.DEFINE_string('model', 'resnet50', 'Name of model (resnet50 or inceptionv3).') +tf.app.flags.DEFINE_string('model', 'resnet50', 'Name of model (resnet50 or Inceptionv3).') FLAGS = tf.app.flags.FLAGS def main(_): - if FLAGS.model == 'resnet50': - image_size = 224 - elif FLAGS.model == 'inceptionv3': - image_size = 299 - else: - print('Please specify model as either resnet50 or inceptionv3.') - sys.exit(-1) - - if FLAGS.image: - with open(FLAGS.image, 'rb') as f: - data = f.read() - else: - # Download the image URL if a path is not provided as input - dl_request = requests.get(IMAGE_URL, stream=True) - dl_request.raise_for_status() - data = dl_request.content - - channel = grpc.insecure_channel(FLAGS.server) - stub = prediction_service_pb2_grpc.PredictionServiceStub(channel) - request = predict_pb2.PredictRequest() - request.model_spec.name = FLAGS.model - request.model_spec.signature_name = 'serving_default' - image_data = tf.reshape(preprocess_image(data, FLAGS.model, image_size), [1, image_size, image_size, 3]) - - # Run the graph - with tf.Session() as sess: - sess.run(tf.global_variables_initializer()) - image_data = (sess.run(image_data)) - - request.inputs['input'].CopyFrom(tf.contrib.util.make_tensor_proto(image_data, shape=[1, image_size, image_size, 3])) - result = stub.Predict(request) - print(result) - print('Predicted class: ', str(np.argmax(result.outputs['predict'].float_val))) + if FLAGS.model == 'resnet50': + image_size = 224 + elif FLAGS.model == 'inceptionv3': + image_size = 299 + else: + print('Please specify model as either resnet50 or Inceptionv3.') + sys.exit(-1) + + if FLAGS.image: + with open(FLAGS.image, 'rb') as f: + data = f.read() + else: + # Download the image URL if a path is not provided as input + dl_request = requests.get(IMAGE_URL, stream=True) + dl_request.raise_for_status() + data = dl_request.content + + channel = grpc.insecure_channel(FLAGS.server) + stub = prediction_service_pb2_grpc.PredictionServiceStub(channel) + request = predict_pb2.PredictRequest() + request.model_spec.name = FLAGS.model + request.model_spec.signature_name = 'serving_default' + image_data = tf.reshape(preprocess_image(data, FLAGS.model, image_size), [1, image_size, image_size, 3]) + + # Run the graph + with tf.Session() as sess: + sess.run(tf.global_variables_initializer()) + image_data = (sess.run(image_data)) + + request.inputs['input'].CopyFrom( + tf.contrib.util.make_tensor_proto(image_data, shape=[1, image_size, image_size, 3])) + result = stub.Predict(request) + print(result) + print('Predicted class: ', str(np.argmax(result.outputs['predict'].float_val))) if __name__ == '__main__': - tf.app.run() + tf.app.run() diff --git a/docs/image_recognition/tensorflow_serving/src/model_graph_to_saved_model.py 
b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/model_graph_to_saved_model.py
similarity index 99%
rename from docs/image_recognition/tensorflow_serving/src/model_graph_to_saved_model.py
rename to benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/model_graph_to_saved_model.py
index ca4f8092c..a593539ca 100644
--- a/docs/image_recognition/tensorflow_serving/src/model_graph_to_saved_model.py
+++ b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/model_graph_to_saved_model.py
@@ -25,9 +25,7 @@
from __future__ import print_function
-import os
import sys
-
import tensorflow as tf
import tensorflow.tools.graph_transforms as graph_transforms
diff --git a/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/util.py b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/util.py
new file mode 100644
index 000000000..70eaba0de
--- /dev/null
+++ b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32/util.py
@@ -0,0 +1,61 @@
+#
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# SPDX-License-Identifier: EPL-2.0
+#
+
+from __future__ import print_function
+
+import tensorflow as tf
+
+
+def preprocess_image(image_buffer, model, image_size):
+    """Preprocess JPEG encoded bytes to 3D float Tensor."""
+
+    # Decode the string as an RGB JPEG of unknown height and width.
+    image = tf.image.decode_jpeg(image_buffer, channels=3)
+    # Convert pixels to [0, 1)
+    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
+    # Crop the central region to 87.5% of the original image.
+    image = tf.image.central_crop(image, central_fraction=0.875)
+    # Resize the image to image_size x image_size.
+    image = tf.expand_dims(image, 0)
+    image = tf.image.resize_bilinear(image, [image_size, image_size], align_corners=False)
+    image = tf.squeeze(image, [0])
+    if model == 'resnet50':
+        # For ResNet50, rescale to [0, 256]
+        image = tf.multiply(image, 256.0)
+    elif model == 'inceptionv3':
+        # For InceptionV3, rescale to [-1, 1]
+        # (compare against the lowercase model name, which is what the callers pass in)
+        image = tf.subtract(image, 0.5)
+        image = tf.multiply(image, 2.0)
+    return image
+
+
+def parse_example_proto(example_serialized):
+    # Dense features in Example proto.
+ feature_map = { + 'image/encoded': tf.FixedLenFeature([], dtype=tf.string, + default_value=''), + 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, + default_value=-1), + } + + features = tf.parse_single_example(example_serialized, feature_map) + label = tf.cast(features['image/class/label'], dtype=tf.int32) + + return features['image/encoded'], label diff --git a/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/int8/__init__.py b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/int8/__init__.py new file mode 100644 index 000000000..cf793ec6a --- /dev/null +++ b/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/int8/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2018 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/image_segmentation/tensorflow/maskrcnn/README.md b/benchmarks/image_segmentation/tensorflow/maskrcnn/README.md index 15edaebba..218fd7e2f 100644 --- a/benchmarks/image_segmentation/tensorflow/maskrcnn/README.md +++ b/benchmarks/image_segmentation/tensorflow/maskrcnn/README.md @@ -61,7 +61,7 @@ $ python launch_benchmark.py \ --batch-size 1 \ --socket-id 0 \ --data-location /home//COCO2014 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl-py3 + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 ``` 5. Log files are located at the value of `--output-dir`. 
@@ -90,7 +90,5 @@ Batch size: 1 Time spent per BATCH: 609.6943 ms Total samples/sec: 1.6402 samples/s Total time: 35.407243490219116 -lscpu_path_cmd = command -v lscpu -lscpu located here: b'/usr/bin/lscpu' Log location outside container: {--output-dir value}/benchmark_maskrcnn_inference_fp32_20190111_205935.log ``` \ No newline at end of file diff --git a/benchmarks/image_segmentation/tensorflow/maskrcnn/inference/fp32/config.json b/benchmarks/image_segmentation/tensorflow/maskrcnn/inference/fp32/config.json new file mode 100644 index 000000000..23d5de76e --- /dev/null +++ b/benchmarks/image_segmentation/tensorflow/maskrcnn/inference/fp32/config.json @@ -0,0 +1,8 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1, + "KMP_HW_SUBSET": "1T" + } +} diff --git a/benchmarks/image_segmentation/tensorflow/maskrcnn/inference/fp32/model_init.py b/benchmarks/image_segmentation/tensorflow/maskrcnn/inference/fp32/model_init.py index 1fe96fe2b..35412be2f 100644 --- a/benchmarks/image_segmentation/tensorflow/maskrcnn/inference/fp32/model_init.py +++ b/benchmarks/image_segmentation/tensorflow/maskrcnn/inference/fp32/model_init.py @@ -37,12 +37,12 @@ def __init__(self, args, custom_args=[], platform_util=None): self.set_num_inter_intra_threads() # Set KMP env vars, if they haven't already been set - self.set_kmp_vars(kmp_affinity="granularity=fine, compact, 1, 0") - set_env_var("KMP_HW_SUBSET", "1T") + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) benchmark_script = os.path.join( self.args.intelai_models, "coco.py") - self.benchmark_command = self.get_numactl_command(args.socket_id) + \ + self.benchmark_command = self.get_command_prefix(args.socket_id) + \ self.python_exe + " " + benchmark_script + " evaluate " set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) diff --git a/benchmarks/image_segmentation/tensorflow/unet/README.md b/benchmarks/image_segmentation/tensorflow/unet/README.md index e7d9693e4..d86505a69 100644 --- a/benchmarks/image_segmentation/tensorflow/unet/README.md +++ b/benchmarks/image_segmentation/tensorflow/unet/README.md @@ -57,7 +57,7 @@ modes/precisions: --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --checkpoint /home//unet_trained \ --model-source-dir /home//tf_unet \ -- checkpoint_name=model.cpkt @@ -73,8 +73,6 @@ modes/precisions: ``` Time spent per BATCH: 1.1043 ms Total samples/sec: 905.5344 samples/s - lscpu_path_cmd = command -v lscpu - lscpu located here: /usr/bin/lscpu Ran inference with batch size 1 Log location outside container: {--output-dir value}/benchmark_unet_inference_fp32_20190201_205601.log ``` \ No newline at end of file diff --git a/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/config.json b/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/config.json new file mode 100644 index 000000000..ca15cfe6d --- /dev/null +++ b/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine, compact", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/model_init.py b/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/model_init.py index cd4f5837d..3cdcf1701 
100644 --- a/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/model_init.py +++ b/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/model_init.py @@ -41,7 +41,8 @@ def __init__(self, args, custom_args=[], platform_util=None): self.set_num_inter_intra_threads() # Set KMP env vars, if they haven't already been set - self.set_kmp_vars(kmp_affinity="granularity=fine, compact") + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) # Get path to the inference script script_path = os.path.join( @@ -50,7 +51,7 @@ def __init__(self, args, custom_args=[], platform_util=None): "unet_infer.py") # Create the command prefix using numactl - self.command_prefix = self.get_numactl_command(self.args.socket_id) +\ + self.command_prefix = self.get_command_prefix(self.args.socket_id) +\ "{} {}".format(self.python_exe, script_path) # Add batch size arg diff --git a/benchmarks/language_modeling/__init__.py b/benchmarks/language_modeling/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/language_modeling/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/language_modeling/tensorflow/__init__.py b/benchmarks/language_modeling/tensorflow/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/language_modeling/tensorflow/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/language_modeling/tensorflow/lm-1b/README.md b/benchmarks/language_modeling/tensorflow/lm-1b/README.md new file mode 100644 index 000000000..fa05e8b3b --- /dev/null +++ b/benchmarks/language_modeling/tensorflow/lm-1b/README.md @@ -0,0 +1,100 @@ +# LM-1B + +This document has instructions for how to run LM-1B for the +following modes/platforms: +* [FP32 inference](#fp32-inference-instructions) + +Instructions and scripts for model training and inference for +other platforms are coming later. + +## FP32 Inference Instructions + +1. 
Clone the [mlperf/inference](https://github.com/mlperf/inference.git)
+repository and check out the SHA below (master as of 6/26/2019):
+```
+git clone https://github.com/mlperf/inference.git
+cd inference
+git checkout 41eb3e489233e83e544cd25148aca177b95d7bea
+```
+
+To prepare the checkpoint and dataset, run the `benchmark.py` script
+from the mlperf inference repo. Since this requires python3 and
+TensorFlow to be installed, the following instructions show how to run
+a docker container with your cloned mlperf inference repo mounted as a
+volume:
+```
+docker run --volume /home//inference:/inference -it gcr.io/deeplearning-platform-release/tf-cpu.1-14 /bin/bash
+```
+In the docker container, run:
+```
+cd /inference/others/cloud/language_modeling/
+python3 benchmark.py
+```
+
+2. Clone this [intelai/models](https://github.com/IntelAI/models)
+repository:
+
+```
+git clone https://github.com/IntelAI/models.git
+```
+
+3. Next, navigate to the `benchmarks` directory in your local clone of
+the [intelai/models](https://github.com/IntelAI/models) repo (from step 2).
+The `launch_benchmark.py` script in the `benchmarks` directory is
+used for starting a model run in an optimized TensorFlow docker
+container. It has arguments to specify which model, framework, mode,
+precision, and docker image to use, and the checkpoint directory.
+
+Set `--model-source-dir` to the `others/cloud/language_modeling` directory of the
+mlperf inference repo that you cloned in step 1, as shown in the commands below.
+Before running, ensure that you have run the script to prepare checkpoint files and the dataset
+from Step 1.
+
+LM-1B can run for online or batch inference. Use one of the examples below, depending on
+your use case.
+
+For online inference (using `--socket-id 0` and `--batch-size 1`):
+
+```
+python launch_benchmark.py \
+    --model-name lm-1b \
+    --precision fp32 \
+    --mode inference \
+    --framework tensorflow \
+    --batch-size 1 \
+    --socket-id 0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
+    --model-source-dir /inference/others/cloud/language_modeling
+
+```
+
+For batch inference (using `--socket-id 0` and `--batch-size 1024`):
+
+```
+python launch_benchmark.py \
+    --model-name lm-1b \
+    --precision fp32 \
+    --mode inference \
+    --framework tensorflow \
+    --batch-size 1024 \
+    --socket-id 0 \
+    --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \
+    --model-source-dir /inference/others/cloud/language_modeling \
+    -- steps=4
+```
+
+Note that the `--verbose` flag can be added to any of the above commands
+to get additional debug output.
+
+4. By default, the log file is saved to the
+`models/benchmarks/common/tensorflow/logs` directory. The user can specify a
+different directory using `--output-dir`.
+
+Example log tail when running for online or batch inference:
+```
+Running warmup...
+Running benchmark...
+Number samples: 4234
+Longest latency was: 2.9153692722320557 seconds. Average latency was:2.891982913017273
+Perplexity: 40.110043230980665, target is 40.209 .
+Ran inference with batch size 1024
+```
diff --git a/benchmarks/language_modeling/tensorflow/lm-1b/__init__.py b/benchmarks/language_modeling/tensorflow/lm-1b/__init__.py
new file mode 100644
index 000000000..d9c4123de
--- /dev/null
+++ b/benchmarks/language_modeling/tensorflow/lm-1b/__init__.py
@@ -0,0 +1,19 @@
+#
+# -*- coding: utf-8 -*-
+#
+# Copyright (c) 2019 Intel Corporation
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/language_modeling/tensorflow/lm-1b/inference/__init__.py b/benchmarks/language_modeling/tensorflow/lm-1b/inference/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/language_modeling/tensorflow/lm-1b/inference/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/__init__.py b/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/config.json b/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/config.json new file mode 100644 index 000000000..8ae78e72a --- /dev/null +++ b/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,compact,1,0", + "KMP_BLOCKTIME": 0, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/model_init.py b/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/model_init.py new file mode 100644 index 000000000..535f42416 --- /dev/null +++ b/benchmarks/language_modeling/tensorflow/lm-1b/inference/fp32/model_init.py @@ -0,0 +1,77 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +import os +from argparse import ArgumentParser + +from common.base_model_init import BaseModelInitializer +from common.base_model_init import set_env_var + + +class ModelInitializer(BaseModelInitializer): + """Model initializer for LM-1B FP32 inference""" + + def __init__(self, args, custom_args, platform_util=None): + super(ModelInitializer, self).__init__(args, custom_args, platform_util) + + self.cmd = self.get_command_prefix(self.args.socket_id) + + self.set_num_inter_intra_threads() + + # Set KMP env vars, if they haven't already been set + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) + + set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) + + if self.args.socket_id != -1: + if self.args.num_cores != -1: + self.cmd += "--physcpubind=0-" + \ + (str(self.args.num_cores - 1)) + " " + self.cmd += self.python_exe + " " + + run_script = os.path.join(self.args.model_source_dir, + "benchmark.py") + + # Model args + arg_parser = ArgumentParser(description='process custom_args') + + arg_parser.add_argument('-S', '--steps', help='Number of steps', + dest="steps", + default="100") + self.args = arg_parser.parse_args(self.custom_args, + namespace=self.args) + + # Model parameter control + cmd_args = " -b=" + str(self.args.batch_size) + \ + " -I=" + str(self.args.steps) + \ + " --inter=" + \ + str(self.args.num_inter_threads) + \ + " --intra=" + \ + str(self.args.num_intra_threads) + + self.cmd = self.cmd + run_script + cmd_args + + def run(self): + original_dir = os.getcwd() + os.chdir(self.args.model_source_dir) + self.run_command(self.cmd) + + os.chdir(original_dir) diff --git a/benchmarks/language_translation/tensorflow/gnmt/README.md b/benchmarks/language_translation/tensorflow/gnmt/README.md index fd92755b2..987be7075 100644 --- a/benchmarks/language_translation/tensorflow/gnmt/README.md +++ b/benchmarks/language_translation/tensorflow/gnmt/README.md @@ -82,7 +82,7 @@ python launch_benchmark.py \ --socket-id 0 \ --checkpoint /home//gnmt_checkpoints \ --data-location /home//wmt16 \ ---docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ +--docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ -- infer_mode=beam_search ``` @@ -99,7 +99,7 @@ python launch_benchmark.py \ --socket-id 0 \ --checkpoint /home//gnmt_checkpoints \ --data-location /home//wmt16 \ ---docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ +--docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ -- infer_mode=beam_search ``` @@ -118,8 +118,6 @@ Example log tail when running for online inference: done, num sentences 2169, num translations per input 1, time 1108s, Wed Feb 6 01:36:13 2019. 
The latency of the model is 511.2466 ms/sentences bleu: 29.2 -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 1 Log location outside container: {--output-dir value}/benchmark_gnmt_inference_fp32_20190206_011740.log ``` @@ -134,8 +132,6 @@ Example log tail when running for batch inference: done, num sentences 2169, num translations per input 1, time 302s, Wed Feb 6 01:48:30 2019. The throughput of the model is 7.1780 sentences/s bleu: 29.2 -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 32 Log location outside container: {--output-dir value}/benchmark_gnmt_inference_fp32_20190206_014324.log ``` diff --git a/benchmarks/language_translation/tensorflow/gnmt/inference/fp32/config.json b/benchmarks/language_translation/tensorflow/gnmt/inference/fp32/config.json new file mode 100644 index 000000000..4d0e2acf5 --- /dev/null +++ b/benchmarks/language_translation/tensorflow/gnmt/inference/fp32/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/language_translation/tensorflow/gnmt/inference/fp32/model_init.py b/benchmarks/language_translation/tensorflow/gnmt/inference/fp32/model_init.py index 61ef1bda6..6f46f2c80 100644 --- a/benchmarks/language_translation/tensorflow/gnmt/inference/fp32/model_init.py +++ b/benchmarks/language_translation/tensorflow/gnmt/inference/fp32/model_init.py @@ -30,15 +30,16 @@ class ModelInitializer(BaseModelInitializer): def __init__(self, args, custom_args=[], platform_util=None): super(ModelInitializer, self).__init__(args, custom_args, platform_util) - self.cmd = self.get_numactl_command(self.args.socket_id) + self.cmd = self.get_command_prefix(self.args.socket_id) if self.args.socket_id != -1 and self.args.num_cores != -1: self.cmd += "--physcpubind=0-" + \ (str(self.args.num_cores - 1)) + " " self.cmd += "{} ".format(self.python_exe) - # Set the KMP env vars - self.set_kmp_vars(kmp_affinity="granularity=fine,compact,1,0") + # Set KMP env vars, if they haven't already been set + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) # use default batch size if -1 if self.args.batch_size == -1: diff --git a/benchmarks/language_translation/tensorflow/transformer_language/README.md b/benchmarks/language_translation/tensorflow/transformer_language/README.md index 100b1e16d..2c0b700f2 100644 --- a/benchmarks/language_translation/tensorflow/transformer_language/README.md +++ b/benchmarks/language_translation/tensorflow/transformer_language/README.md @@ -67,8 +67,10 @@ Substitute the `--model-source-dir` for the location where you cloned the [tensorflow/tensor2tensor](https://github.com/tensorflow/tensor2tensor) repo (from step 1). -Transformer Language can run for online or batch inference. Use one of the following examples below, depending on -your use case. +Transformer Language can run for online or batch +inference. Use one of the following examples below, depending on +your use case. Note that if no `reference` file is provided in the +launch script parameters, then the BLEU score cannot be calculated. 
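To make that note concrete, here is a minimal sketch of the guard, not the verbatim `model_init.py` code, though it mirrors the conditional BLEU step added later in this change (which invokes `tensor2tensor/bin/t2t_bleu.py` with `--translation` and `--reference`):

```python
import os


def maybe_compute_bleu(model_source_dir, translation_file, reference_file=None):
    """Run the BLEU calculation only when a reference file was provided."""
    if not reference_file:
        # Without reference translations, a BLEU score cannot be calculated.
        return
    bleu_script = os.path.join(model_source_dir, "tensor2tensor/bin/t2t_bleu.py")
    os.system("python {} --translation={} --reference={}".format(
        bleu_script, translation_file, reference_file))
```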
For online inference (using `--socket-id 0` and `--batch-size 1`): @@ -80,7 +82,7 @@ python launch_benchmark.py \ --framework tensorflow \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --checkpoint /home//transformer_lt_fp32_pretrained_model \ --data-location /home//t2t_data \ --model-source-dir /home//tensor2tensor/ \ @@ -97,7 +99,7 @@ python launch_benchmark.py \ --framework tensorflow \ --batch-size 32 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --checkpoint /home//transformer_lt_fp32_pretrained_model \ --data-location /home//t2t_data \ --model-source-dir /home//tensor2tensor/ \ @@ -124,8 +126,6 @@ INFO:tensorflow:Writing decodes into /workspace/models/out_dir/output_infer Inference time 6094.9205, Latency = 2810.0141 ms/setences BLEU_uncased = 22.63 BLEU_cased = 22.20 -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 1 Log location outside container: {--output-dir value}/benchmark_transformer_language_inference_fp32_20190210_050451.log ``` @@ -140,8 +140,6 @@ INFO:tensorflow:Writing decodes into /workspace/models/out_dir/output_infer Inference time 1174.0522, Throughput = 1.8474 sentences/second BLEU_uncased = 22.63 BLEU_cased = 22.20 -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 32 Log location outside container: {--output-dir value}/benchmark_transformer_language_inference_fp32_20190210_072635.log ``` \ No newline at end of file diff --git a/benchmarks/language_translation/tensorflow/transformer_language/inference/fp32/config.json b/benchmarks/language_translation/tensorflow/transformer_language/inference/fp32/config.json new file mode 100644 index 000000000..8ae78e72a --- /dev/null +++ b/benchmarks/language_translation/tensorflow/transformer_language/inference/fp32/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,compact,1,0", + "KMP_BLOCKTIME": 0, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/language_translation/tensorflow/transformer_language/inference/fp32/model_init.py b/benchmarks/language_translation/tensorflow/transformer_language/inference/fp32/model_init.py index 20790b541..8d01493ae 100644 --- a/benchmarks/language_translation/tensorflow/transformer_language/inference/fp32/model_init.py +++ b/benchmarks/language_translation/tensorflow/transformer_language/inference/fp32/model_init.py @@ -32,13 +32,14 @@ class ModelInitializer(BaseModelInitializer): def __init__(self, args, custom_args, platform_util=None): super(ModelInitializer, self).__init__(args, custom_args, platform_util) - self.cmd = self.get_numactl_command(self.args.socket_id) + self.cmd = self.get_command_prefix(self.args.socket_id) self.bleu_params = "" self.set_num_inter_intra_threads() - # Set the KMP env vars - self.set_kmp_vars(kmp_blocktime="0", kmp_affinity="granularity=fine,compact,1,0") + # Set KMP env vars, if they haven't already been set + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) TEMP_DIR = str(self.args.model_source_dir) + "/out_dir" if os.path.exists(TEMP_DIR): @@ -97,14 +98,15 @@ def __init__(self, args, custom_args, platform_util=None): " --output_dir=" + self.args.checkpoint + \ " --decode_from_file=" + 
self.args.decode_from_file + \ " --decode_to_file=" + self.args.decode_to_file + \ - " --reference=" + self.args.reference + \ " --inter_op_parallelism_threads=" + \ str(self.args.num_inter_threads) + \ " --intra_op_parallelism_threads=" + \ str(self.args.num_intra_threads) - self.bleu_params += " --translation=" + self.args.decode_to_file + \ - " --reference=" + self.args.reference + # If a reference file was provided, also calculate the bleu file + if self.args.reference: + self.bleu_params += " --translation=" + self.args.decode_to_file + \ + " --reference=" + self.args.reference self.cmd = self.cmd + run_script + cmd_args @@ -113,10 +115,12 @@ def run(self): os.chdir(self.args.model_source_dir) self.run_command(self.cmd) - # calculate the bleu number after inference is done - bleucmd = "python " + \ - os.path.join(self.args.model_source_dir, - "tensor2tensor/bin/t2t_bleu.py") + \ - self.bleu_params - os.system(bleucmd) + # calculate the bleu number after inference is done (this is skipped if no reference file is provided) + if self.bleu_params: + bleucmd = "python " + \ + os.path.join(self.args.model_source_dir, + "tensor2tensor/bin/t2t_bleu.py") + \ + self.bleu_params + os.system(bleucmd) + os.chdir(original_dir) diff --git a/benchmarks/language_translation/tensorflow/transformer_lt_official/README.md b/benchmarks/language_translation/tensorflow/transformer_lt_official/README.md index f0d79e4e3..87cc6b472 100644 --- a/benchmarks/language_translation/tensorflow/transformer_lt_official/README.md +++ b/benchmarks/language_translation/tensorflow/transformer_lt_official/README.md @@ -65,7 +65,7 @@ python launch_benchmark.py \ --framework tensorflow \ --batch-size 1 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --model-source-dir /home//tensorflow-models/models \ --in-graph /home//transformer_lt_official_fp32_pretrained_model/graph/fp32_graphdef.pb \ --data-location /home//transformer_lt_official_fp32_pretrained_model/data \ @@ -85,7 +85,7 @@ python launch_benchmark.py \ --framework tensorflow \ --batch-size 64 \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --model-source-dir /home//tensorflow-models/models \ --in-graph /home//transformer_lt_official_fp32_pretrained_model/graph/fp32_graphdef.pb \ --data-location /home//transformer_lt_official_fp32_pretrained_model/data \ diff --git a/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/config.json b/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/config.json new file mode 100644 index 000000000..8ae78e72a --- /dev/null +++ b/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,compact,1,0", + "KMP_BLOCKTIME": 0, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/model_init.py b/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/model_init.py index b598191f0..a8b0b9432 100644 --- a/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/model_init.py +++ b/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/model_init.py @@ -31,13 +31,14 @@ class ModelInitializer(BaseModelInitializer): def 
__init__(self, args, custom_args, platform_util=None): super(ModelInitializer, self).__init__(args, custom_args, platform_util) - self.cmd = self.get_numactl_command(self.args.socket_id) + self.cmd = self.get_command_prefix(self.args.socket_id) self.bleu_params = "" self.set_num_inter_intra_threads() - # Set the KMP env vars - self.set_kmp_vars(kmp_blocktime="0", kmp_affinity="granularity=fine,compact,1,0") + # Set KMP env vars, if they haven't already been set + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) MODEL_EXEC_DIR = str(self.args.model_source_dir) + "/official/transformer/" @@ -91,7 +92,9 @@ def __init__(self, args, custom_args, platform_util=None): if self.args.batch_size != -1 else "1") + \ " --file=" + self.args.decode_from_file + \ " --file_out=" + translate_file + \ - " --vocab_file=" + self.args.vocab_file + " --vocab_file=" + self.args.vocab_file +\ + " --num_inter=" + str(self.args.num_inter_threads) +\ + " --num_intra=" + str(self.args.num_intra_threads) self.bleu_params += " --translation=" + translate_file + \ " --reference=" + self.args.reference diff --git a/benchmarks/launch_benchmark.py b/benchmarks/launch_benchmark.py old mode 100644 new mode 100755 index e3e982e70..6da9d7cb6 --- a/benchmarks/launch_benchmark.py +++ b/benchmarks/launch_benchmark.py @@ -29,7 +29,9 @@ import sys from argparse import ArgumentParser from common import base_benchmark_util -from common.utils.validators import check_no_spaces +from common import platform_util +from common.utils.validators import check_no_spaces, check_volume_mount +from common.base_model_init import BaseModelInitializer class LaunchBenchmark(base_benchmark_util.BaseBenchmarkUtil): @@ -67,6 +69,13 @@ def parse_args(self): "If no docker image is specified, then no docker container will be used.", dest="docker_image", default=None, type=check_no_spaces) + arg_parser.add_argument( + "--volume", + help="Specify a custom volume to mount in the container, which follows the same format as the " + "docker --volume flag (https://docs.docker.com/storage/volumes/). " + "This argument can only be used in conjunction with a --docker-image.", + action="append", dest="custom_volumes", type=check_volume_mount) + arg_parser.add_argument( "--debug", help="Launches debug mode which doesn't execute " "start.sh when running in a docker container.", action="store_true") @@ -86,6 +95,17 @@ def validate_args(self): if not self.args.benchmark_only and not self.args.accuracy_only: self.args.benchmark_only = True + # default disable_tcmalloc=False for int8 and disable_tcmalloc=True for other precisions + if not self.args.disable_tcmalloc: + self.args.disable_tcmalloc = str(self.args.precision != "int8") + + if self.args.custom_volumes and not self.args.docker_image: + raise ValueError("Volume mounts can only be used when running in a docker container " + "(a --docker-image must be specified when using --volume).") + + if self.args.mode == "inference" and self.args.checkpoint: + print("Warning: The --checkpoint argument is being deprecated in favor of using frozen graphs.") + def get_model_use_case(self, benchmark_scripts): """ Infers the use case based on the directory structure for the specified model. 
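As background for the `get_model_use_case` helper shown in the hunk above, the lookup it describes amounts to finding the model under `benchmarks/<use case>/<framework>/<model name>`. The sketch below only illustrates that idea; the function name and glob-based search are assumptions, not the actual implementation:

```python
import glob
import os


def infer_use_case(benchmark_scripts, framework, model_name):
    """Hypothetical sketch: derive <use case> from the benchmarks directory layout."""
    pattern = os.path.join(benchmark_scripts, "*", framework, model_name)
    matches = [path for path in glob.glob(pattern) if os.path.isdir(path)]
    if not matches:
        raise ValueError("No directory found for model: {}".format(model_name))
    # benchmarks/<use case>/<framework>/<model name> -> keep the <use case> segment
    return os.path.basename(os.path.dirname(os.path.dirname(matches[0])))
```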
@@ -161,6 +181,8 @@ def get_env_vars(self, benchmark_scripts, use_case, intelai_models): "BENCHMARK_ONLY": args.benchmark_only, "ACCURACY_ONLY": args.accuracy_only, "OUTPUT_RESULTS": args.output_results, + "DISABLE_TCMALLOC": args.disable_tcmalloc, + "TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD": args.tcmalloc_large_alloc_report_threshold, "DOCKER": str(args.docker_image is not None), "PYTHON_EXE": sys.executable if not args.docker_image else "python" } @@ -193,13 +215,66 @@ def run_bare_metal(self, benchmark_scripts, intelai_models, env_var_dict): # setup volume directories to be the local system directories, since we aren't # mounting volumes when running bare metal, but start.sh expects these args args = self.args - mount_benchmark = benchmark_scripts - mount_external_models_source = args.model_source_dir - mount_intelai_models = intelai_models workspace = os.path.join(benchmark_scripts, "common", args.framework) + mount_benchmark = benchmark_scripts in_graph_path = args.input_graph - dataset_path = args.data_location checkpoint_path = args.checkpoint + dataset_path = args.data_location + + # To Launch Tensorflow Serving benchmark we need only --in-graph arg. + # It does not support checkpoint files. + if args.framework == "tensorflow_serving": + if args.docker_image: + raise ValueError("--docker-image arg is not supported with tensorflow serving benchmarking, " + "as script automatically builds image and supplies it.") + + if checkpoint_path: + raise ValueError("--checkpoint-path arg is not supported with tensorflow serving benchmarking") + + if args.mode != "inference": + raise ValueError("--mode arg should be set to inference") + + if in_graph_path: + env_var_dict["IN_GRAPH"] = in_graph_path + else: + raise ValueError("--in-graph arg is required to run tensorflow serving benchmarking") + + for env_var_name in env_var_dict: + os.environ[env_var_name] = str(env_var_dict[env_var_name]) + + # We need this env to be set for the platform util + os.environ["PYTHON_EXE"] = str(sys.executable if not args.docker_image else "python") + + # Get Platformutil + platform_util_obj = None or platform_util.PlatformUtil(self.args) + + # Configure num_inter_threads and num_intra_threads + base_obj = BaseModelInitializer(args=self.args, custom_args=[], platform_util=platform_util_obj) + base_obj.set_num_inter_intra_threads() + + # Update num_inter_threads and num_intra_threads in env dictionary + env_var_dict["NUM_INTER_THREADS"] = self.args.num_inter_threads + env_var_dict["NUM_INTRA_THREADS"] = self.args.num_intra_threads + + # Set OMP_NUM_THREADS + env_var_dict["OMP_NUM_THREADS"] = self.args.num_intra_threads + + else: + mount_external_models_source = args.model_source_dir + mount_intelai_models = intelai_models + + # Add env vars with bare metal settings + env_var_dict["MOUNT_EXTERNAL_MODELS_SOURCE"] = mount_external_models_source + env_var_dict["MOUNT_INTELAI_MODELS_SOURCE"] = mount_intelai_models + + if in_graph_path: + env_var_dict["IN_GRAPH"] = in_graph_path + + if checkpoint_path: + env_var_dict["CHECKPOINT_DIRECTORY"] = checkpoint_path + + if dataset_path: + env_var_dict["DATASET_LOCATION"] = dataset_path # if using the default output directory, get the full path if args.output_dir == "/models/benchmarks/common/tensorflow/logs": @@ -208,19 +283,8 @@ def run_bare_metal(self, benchmark_scripts, intelai_models, env_var_dict): # Add env vars with bare metal settings env_var_dict["WORKSPACE"] = workspace env_var_dict["MOUNT_BENCHMARK"] = mount_benchmark - env_var_dict["MOUNT_EXTERNAL_MODELS_SOURCE"] = 
mount_external_models_source - env_var_dict["MOUNT_INTELAI_MODELS_SOURCE"] = mount_intelai_models env_var_dict["OUTPUT_DIR"] = args.output_dir - if in_graph_path: - env_var_dict["IN_GRAPH"] = in_graph_path - - if checkpoint_path: - env_var_dict["CHECKPOINT_DIRECTORY"] = checkpoint_path - - if dataset_path: - env_var_dict["DATASET_LOCATION"] = dataset_path - # Set env vars for bare metal for env_var_name in env_var_dict: os.environ[env_var_name] = str(env_var_dict[env_var_name]) @@ -307,6 +371,10 @@ def run_docker_container(self, benchmark_scripts, intelai_models, env_var_dict): volume_mounts.extend([ "--volume", "{}:{}".format(in_graph_dir, "/in_graph")]) + if args.custom_volumes: + for custom_volume in args.custom_volumes: + volume_mounts.extend(["--volume", custom_volume]) + docker_run_cmd = ["docker", "run"] # only use -it when debugging, otherwise we might get TTY error diff --git a/benchmarks/object_detection/tensorflow/__init__.py b/benchmarks/object_detection/tensorflow/__init__.py index cf793ec6a..d9c4123de 100644 --- a/benchmarks/object_detection/tensorflow/__init__.py +++ b/benchmarks/object_detection/tensorflow/__init__.py @@ -1,7 +1,7 @@ # # -*- coding: utf-8 -*- # -# Copyright (c) 2018 Intel Corporation +# Copyright (c) 2019 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. diff --git a/benchmarks/object_detection/tensorflow/faster_rcnn/README.md b/benchmarks/object_detection/tensorflow/faster_rcnn/README.md index c5a64aecf..ff3dfce3f 100644 --- a/benchmarks/object_detection/tensorflow/faster_rcnn/README.md +++ b/benchmarks/object_detection/tensorflow/faster_rcnn/README.md @@ -44,8 +44,8 @@ sed -i.bak 95s/input_config/input_config[0]/ offline_eval_map_corloc.py ``` -2. Download the 2017 validation -[COCO dataset](http://cocodataset.org/#home) and annotations: +2. Download and unzip the 2017 validation +[COCO dataset](http://cocodataset.org/#home) images: ``` $ mkdir val @@ -53,7 +53,10 @@ $ cd val $ wget http://images.cocodataset.org/zips/val2017.zip $ unzip val2017.zip $ cd .. +``` +3. Download and unzip the coco dataset annotations file: +``` $ mkdir annotations $ cd annotations $ wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip @@ -73,13 +76,15 @@ $ echo "{ \"images\": {}, \"categories\": {}}" > empty.json $ cd .. ``` -3. Now that you have the raw COCO dataset, we need to convert it to the +4. Now that you have the raw COCO dataset and annotations files, we need to convert it to the TF records format in order to use it with the inference script. We will do this by running the `create_coco_tf_record.py` file in the TensorFlow models repo. +Follow [instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md#dependencies) to install the required dependencies (`cocoapi` and `Protobuf 3.0.0`). Follow the steps below to navigate to the proper directory and point the -script to the raw COCO dataset files that you have downloaded in step 2. +script to the raw COCO dataset files that you have downloaded in step 2 +and the annotations files that you downloaded and created in step 3. The `--output_dir` is the location where the TF record files will be located after the script has completed. @@ -112,13 +117,13 @@ $ git checkout master The `coco_val.record` file is what we will use in this inference example. -4. Download and extract the pre-trained model. +5. Download and extract the pre-trained model. 
``` $ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/faster_rcnn_resnet50_fp32_coco_pretrained_model.tar.gz $ tar -xzvf faster_rcnn_resnet50_fp32_coco_pretrained_model.tar.gz ``` -5. Clone the [intelai/models](https://github.com/intelai/models) repo. +6. Clone the [intelai/models](https://github.com/intelai/models) repo. This repo has the launch script for running the model. ``` @@ -132,10 +137,10 @@ Receiving objects: 100% (11/11), done. Resolving deltas: 100% (3/3), done. ``` -6. Run the `launch_benchmark.py` script from the intelai/models repo +7. Run the `launch_benchmark.py` script from the intelai/models repo , with the appropriate parameters including: the -`coco_val.record` data location (from step 3), the pre-trained model -`pipeline.config` file and the checkpoint location (from step 4, and the +`coco_val.record` data location (from step 4), the pre-trained model +`pipeline.config` file and the checkpoint location (from step 5), and the location of your `tensorflow/models` clone (from step 1). Run for batch and online inference: @@ -151,27 +156,27 @@ $ python launch_benchmark.py \ --mode inference \ --socket-id 0 \ --checkpoint /home//faster_rcnn_resnet50_fp32_coco \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ -- config_file=pipeline.config ``` Or for accuracy where the `--data-location` is the path the directory where your `coco_val.record` file is located and the `--in-graph` is -the pre-trained graph located in the pre-trained model directory (from step 4): +the pre-trained graph located in the pre-trained model directory (from step 5): ``` python launch_benchmark.py \ --model-name faster_rcnn \ --mode inference \ --precision fp32 \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --model-source-dir /home//tensorflow/models \ --data-location /home//coco/output \ --in-graph /home//faster_rcnn_resnet50_fp32_coco/frozen_inference_graph.pb \ --accuracy-only ``` -7. The log file is saved to the value of `--output-dir`. +8. The log file is saved to the value of `--output-dir`. Below is a sample log file tail when running for batch and online inference: @@ -179,8 +184,6 @@ and online inference: ``` Time spent : 167.353 seconds. Time spent per BATCH: 0.167 seconds. -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Received these standard args: Namespace(accuracy_only=False, batch_size=1, benchmark_only=False, checkpoint='/checkpoints', data_location='/dataset', framework='tensorflow', input_graph=None, intelai_models='/workspace/intelai_models', mode='inference', model_args=[], model_name='faster_rcnn', model_source_dir='/workspace/models', num_cores=-1, num_inter_threads=2, num_intra_threads=56, precision='fp32', socket_id=0, use_case='object_detection', verbose=True) Received these custom args: ['--config_file=pipeline.config'] Run model here. @@ -208,15 +211,24 @@ DONE (t=1.35s). 
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.383 Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = -1.000 Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = -1.000 -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 1 Log location outside container: {--output-dir value}/benchmark_faster_rcnn_inference_fp32_20190114_205714.log ``` ## Int8 Inference Instructions -1. Please follow step 1, 2 and 3 of Faster R-CNN FP32 instructions written above. +These instructions use the TCMalloc memory allocator, which produces +better performance results for Int8 precision models with smaller batch sizes. +If you want to disable the use of TCMalloc, set `--disable-tcmalloc=True` +when calling `launch_benchmark.py` and the script will run without TCMalloc. + +1. Please follow the steps from the +[Faster R-CNN FP32 instructions](#fp32-inference-instructions) written +above for cloning dependency repositories and getting the coco dataset: +* Performance benchmarking uses the raw coco dataset images. Follow steps +1 and 2 from the FP32 instructions. +* Accuracy testing requires the coco dataset to be in the TF records +format. Follow steps 1, 2, 3, and 4 from the FP32 instructions. 2. Download the pre-trained model. ``` @@ -242,12 +254,15 @@ with the appropriate parameters. To run on single socket use `--socket_id` switch; by default it will be using all available sockets. Optional parameter `number_of_steps` (default value = 5000) can be added at the end of command after `--` as shown below: -Run for batch and online inference: +Run batch and online inference using the following command. +The `--data-location` is the path to the directory that contains the raw coco dataset +validation images which you downloaded and unzipped: + ``` $ cd /home//models/benchmarks $ python launch_benchmark.py \ - --data-location /home//coco/output/ \ + --data-location /home//val2017 \ --model-source-dir /home//tensorflow/models \ --model-name faster_rcnn \ --framework tensorflow \ @@ -255,7 +270,7 @@ $ python launch_benchmark.py \ --mode inference \ --socket-id 0 \ --in-graph /home//faster_rcnn_int8_pretrained_model.pb \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --benchmark-only \ -- number_of_steps=5000 ``` @@ -270,19 +285,13 @@ python launch_benchmark.py \ --precision int8 \ --framework tensorflow \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --model-source-dir /home//tensorflow/models \ - --data-location /home//coco_dataset/coco_val.record \ + --data-location /home//output/coco_val.record \ --in-graph /home//faster_rcnn_int8_pretrained_model.pb \ --accuracy-only ``` -The docker image (`intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl`) -used in the commands above were built using -[TensorFlow](git@github.com:tensorflow/tensorflow.git) master -([e889ea1](https://github.com/tensorflow/tensorflow/commit/e889ea1dd965c31c391106aa3518fc23d2689954)) and -[PR #25765](https://github.com/tensorflow/tensorflow/pull/25765). - 5. The log file is saved to the value of `--output-dir`. Below is a sample log file tail when running for batch @@ -295,8 +304,6 @@ Step 4970: 0.070191860199 seconds Step 4980: 0.0755469799042 seconds Step 4990: 0.0742928981781 seconds Avg.
Duration per Step:0.0760930150986 -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size -1 Log location outside container: {--output-dir value}/benchmark_faster_rcnn_inference_int8_20190117_232539.log ``` @@ -317,8 +324,6 @@ DONE (t=1.34s). Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.375 Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = -1.000 Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = -1.000 -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size -1 Log location outside container: {--output-dir value}/benchmark_faster_rcnn_inference_int8_20190117_231937.log ``` diff --git a/benchmarks/object_detection/tensorflow/faster_rcnn/inference/fp32/config.json b/benchmarks/object_detection/tensorflow/faster_rcnn/inference/fp32/config.json new file mode 100644 index 000000000..273b45b40 --- /dev/null +++ b/benchmarks/object_detection/tensorflow/faster_rcnn/inference/fp32/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/object_detection/tensorflow/faster_rcnn/inference/fp32/model_init.py b/benchmarks/object_detection/tensorflow/faster_rcnn/inference/fp32/model_init.py index 3e0167f75..c30f39ada 100644 --- a/benchmarks/object_detection/tensorflow/faster_rcnn/inference/fp32/model_init.py +++ b/benchmarks/object_detection/tensorflow/faster_rcnn/inference/fp32/model_init.py @@ -43,7 +43,8 @@ def __init__(self, args, custom_args, platform_util=None): self.set_num_inter_intra_threads() # Set KMP env vars, if they haven't already been set - self.set_kmp_vars() + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) @@ -64,7 +65,7 @@ def __init__(self, args, custom_args, platform_util=None): self.args.intelai_models, self.args.mode, self.args.precision, "eval.py") self.command_prefix = \ - self.get_numactl_command(self.args.socket_id) + self.python_exe + " " + \ + self.get_command_prefix(self.args.socket_id) + self.python_exe + " " + \ benchmark_script config_file_path = os.path.join(self.args.checkpoint, diff --git a/benchmarks/object_detection/tensorflow/faster_rcnn/inference/int8/config.json b/benchmarks/object_detection/tensorflow/faster_rcnn/inference/int8/config.json new file mode 100644 index 000000000..6f1228ba7 --- /dev/null +++ b/benchmarks/object_detection/tensorflow/faster_rcnn/inference/int8/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 0, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/object_detection/tensorflow/faster_rcnn/inference/int8/model_init.py b/benchmarks/object_detection/tensorflow/faster_rcnn/inference/int8/model_init.py index 749026f3c..37eaf2722 100644 --- a/benchmarks/object_detection/tensorflow/faster_rcnn/inference/int8/model_init.py +++ b/benchmarks/object_detection/tensorflow/faster_rcnn/inference/int8/model_init.py @@ -41,8 +41,9 @@ def __init__(self, args, custom_args=[], platform_util=None): self.args.intelai_models, self.args.mode, self.args.precision, self.RFCN_ACCURACY_SCRIPT) - # Set KMP env vars, except override the default KMP_BLOCKTIME value - self.set_kmp_vars(kmp_blocktime="0") + # Set KMP env vars, if they haven't already been set + 
config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) self.validate_args() @@ -82,7 +83,7 @@ def parse_args(self): def run_perf_command(self): set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) self.parse_args() - command = self.get_numactl_command(self.args.socket_id) + command = self.get_command_prefix(self.args.socket_id) command += " {} ".format(self.python_exe) + self.perf_script_path command += " -g " + self.args.input_graph if self.custom_args: diff --git a/benchmarks/object_detection/tensorflow/rfcn/README.md b/benchmarks/object_detection/tensorflow/rfcn/README.md index 02db3210a..6e4a519df 100644 --- a/benchmarks/object_detection/tensorflow/rfcn/README.md +++ b/benchmarks/object_detection/tensorflow/rfcn/README.md @@ -10,6 +10,11 @@ for other precisions are coming later. ## Int8 Inference Instructions +These instructions use the TCMalloc memory allocator, which produces +better performance results for Int8 precision models with smaller batch sizes. +If you want to disable the use of TCMalloc, set `--disable-tcmalloc=True` +when calling `launch_benchmark.py` and the script will run without TCMalloc. + 1. Clone the [tensorflow/models](https://github.com/tensorflow/models) and [cocodataset/cocoapi](https://github.com/cocodataset/cocoapi) repositories: ``` @@ -44,7 +49,7 @@ sed -i.bak 95s/input_config/input_config[0]/ offline_eval_map_corloc.py ``` -2. Download the 2017 validation +2. Download the 2017 validation [COCO dataset](http://cocodataset.org/#home) and annotations: ``` @@ -78,6 +83,7 @@ TF records format in order to use it with the inference script. We will do this by running the `create_coco_tf_record.py` file in the TensorFlow models repo. +Follow [instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md#dependencies) to install the required dependencies (`cocoapi` and `Protobuf 3.0.0`). Follow the steps below to navigate to the proper directory and point the script to the raw COCO dataset files that you have downloaded in step 2. The `--output_dir` is the location where the TF record files will be @@ -133,7 +139,7 @@ python launch_benchmark.py \ --mode inference \ --precision int8 \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --model-source-dir /home//tensorflow/models \ --data-location /home//val/val2017 \ --in-graph /home//rfcn_resnet101_int8_coco_pretrained_model.pb \ @@ -150,7 +156,7 @@ python launch_benchmark.py \ --mode inference \ --precision int8 \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --model-source-dir /home//tensorflow/models \ --data-location /home//coco/output/coco_val.record-00000-of-00001 \ --in-graph /home//rfcn_resnet101_int8_coco_pretrained_model.pb \ @@ -158,12 +164,6 @@ python launch_benchmark.py \ -- split="accuracy_message" ``` -The docker image (`intelaipg/intel-optimized-tensorflow:PR25765-devel-mkl`) -used in the commands above were built using -[TensorFlow](git@github.com:tensorflow/tensorflow.git) master -([e889ea1](https://github.com/tensorflow/tensorflow/commit/e889ea1dd965c31c391106aa3518fc23d2689954)) and -[PR #25765](https://github.com/tensorflow/tensorflow/pull/25765). 
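Once the conversion command shown below has completed, it can be worth sanity-checking the generated `coco_val.record` file before launching inference. The following is only an illustrative sketch (it is not part of the model zoo scripts); it assumes TensorFlow 1.x is installed and uses the example record path from the accuracy command in these instructions:

```python
# Sketch: count the serialized examples in the generated TF record file.
# The path is an example; point it at your own --output_dir location.
import tensorflow as tf

record_file = "/home//coco/output/coco_val.record-00000-of-00001"
num_examples = sum(1 for _ in tf.python_io.tf_record_iterator(record_file))
print("{} examples found in {}".format(num_examples, record_file))
```

A complete conversion of the COCO 2017 validation set should report 5000 examples.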
- Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands to get additional debug output or change the default output location. @@ -173,18 +173,16 @@ to get additional debug output or change the default output location. Below is a sample log file tail when running for batch and online inference: ``` -Step 0: 10.6923000813 seconds -Step 10: 0.168856859207 seconds +Step 0: 11.4450089931 seconds +Step 10: 0.25656080246 seconds ... -Step 460: 0.181148052216 seconds -Step 470: 0.202737092972 seconds -Step 480: 0.117042064667 seconds -Step 490: 0.103501081467 seconds -Avg. Duration per Step:0.169812122345 -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu +Step 460: 0.256786823273 seconds +Step 470: 0.267828941345 seconds +Step 480: 0.141321897507 seconds +Step 490: 0.127830982208 seconds +Avg. Duration per Step:0.195356227875 Ran inference with batch size -1 -Log location outside container: {--output-dir}/benchmark_rfcn_inference_int8_20190227_191959.log +Log location outside container: {--output-dir}/benchmark_rfcn_inference_int8_20190416_182445.log ``` And here is a sample log file tail when running for accuracy: @@ -204,8 +202,6 @@ DONE (t=1.03s). Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.150 Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = -1.000 Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = -1.000 -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size -1 Log location outside container: {--output-dir}/benchmark_rfcn_inference_int8_20190227_194752.log ``` @@ -225,7 +221,7 @@ $ git clone https://github.com/cocodataset/cocoapi.git The TensorFlow models repo will be used for running inference as well as converting the coco dataset to the TF records format. -2. Download the 2017 validation +2. Download the 2017 validation [COCO dataset](http://cocodataset.org/#home) and annotations: ``` @@ -259,6 +255,7 @@ TF records format in order to use it with the inference script. We will do this by running the `create_coco_tf_record.py` file in the TensorFlow models repo. +Follow [instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md#dependencies) to install the required dependencies (`cocoapi` and `Protobuf 3.0.0`). Follow the steps below to navigate to the proper directory and point the script to the raw COCO dataset files that you have downloaded in step 2. 
The `--output_dir` is the location where the TF record files will be @@ -334,7 +331,7 @@ $ python launch_benchmark.py \ --mode inference \ --socket-id 0 \ --checkpoint /home//rfcn_resnet101_fp32_coco \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ -- config_file=rfcn_pipeline.config ``` @@ -347,7 +344,7 @@ python launch_benchmark.py \ --mode inference \ --precision fp32 \ --framework tensorflow \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --model-source-dir /home//tensorflow/models \ --data-location /home//coco/output/coco_val.record \ --in-graph /home//rfcn_resnet101_fp32_coco/frozen_inference_graph.pb \ @@ -363,8 +360,6 @@ online inference: ``` Average time per step: 0.262 sec -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Received these standard args: Namespace(accuracy_only=False, batch_size=1, benchmark_only=False, checkpoint='/checkpoints', data_location='/dataset', framework='tensorflow', input_graph=None, intelai_models='/workspace/intelai_models', mode='inference', model_args=[], model_name='rfcn', model_source_dir='/workspace/models', num_cores=-1, num_inter_threads=2, num_intra_threads=56, precision='fp32, socket_id=0, use_case='object_detection', verbose=True) Received these custom args: ['--config_file=rfcn_pipeline.config'] Run model here. @@ -391,8 +386,7 @@ DONE (t=1.19s). Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.400 Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.400 Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = -1.000 - Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = -1.000lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu -Ran inference with batch size 1 + Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = -1.000 + Ran inference with batch size 1 Log location outside container: {--output-dir value}/benchmark_rfcn_inference_fp32_20181221_211905.log ``` diff --git a/benchmarks/object_detection/tensorflow/rfcn/inference/fp32/config.json b/benchmarks/object_detection/tensorflow/rfcn/inference/fp32/config.json new file mode 100644 index 000000000..d7f51a4c2 --- /dev/null +++ b/benchmarks/object_detection/tensorflow/rfcn/inference/fp32/config.json @@ -0,0 +1,6 @@ +{ + "optimization_parameters": { + "KMP_BLOCKTIME": 0, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/object_detection/tensorflow/rfcn/inference/fp32/model_init.py b/benchmarks/object_detection/tensorflow/rfcn/inference/fp32/model_init.py index 712da5777..031c0f2ca 100644 --- a/benchmarks/object_detection/tensorflow/rfcn/inference/fp32/model_init.py +++ b/benchmarks/object_detection/tensorflow/rfcn/inference/fp32/model_init.py @@ -45,8 +45,9 @@ def __init__(self, args, custom_args, platform_util): self.args.intelai_models, self.args.mode, self.args.precision, "eval.py") - # Set KMP env vars, except override the default KMP_BLOCKTIME and KMP_AFFINITY values - self.set_kmp_vars(kmp_blocktime="0", kmp_affinity=None) + # Set KMP env vars, if they haven't already been set + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) self.run_inference_sanity_checks(self.args, self.custom_args) self.parse_custom_args() @@ -54,7 +55,7 @@ def __init__(self, args, custom_args, platform_util): "research") def 
run_benchmark(self): - command_prefix = self.get_numactl_command(self.args.socket_id) + \ + command_prefix = self.get_command_prefix(self.args.socket_id) + \ self.python_exe + " " + self.benchmark_script # set num_inter_threads and num_intra_threads diff --git a/benchmarks/object_detection/tensorflow/rfcn/inference/int8/config.json b/benchmarks/object_detection/tensorflow/rfcn/inference/int8/config.json new file mode 100644 index 000000000..6f1228ba7 --- /dev/null +++ b/benchmarks/object_detection/tensorflow/rfcn/inference/int8/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 0, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/object_detection/tensorflow/rfcn/inference/int8/model_init.py b/benchmarks/object_detection/tensorflow/rfcn/inference/int8/model_init.py index eec69455d..f52eed9b4 100755 --- a/benchmarks/object_detection/tensorflow/rfcn/inference/int8/model_init.py +++ b/benchmarks/object_detection/tensorflow/rfcn/inference/int8/model_init.py @@ -54,8 +54,9 @@ def __init__(self, args, custom_args=[], platform_util=None): self.parse_args() - # Set KMP env vars with defaults, except for KMP_BLOCKTIME - self.set_kmp_vars(kmp_blocktime=0) + # Set KMP env vars, if they haven't already been set + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) # Set num_inter_threads and num_intra_threads self.set_num_inter_intra_threads() @@ -110,6 +111,8 @@ def validate_args(self): format(self.args.model_source_dir)) def run_perf_command(self): + # Get the command previx, but numactl is added later in run_perf_command() + self.command.append(self.get_command_prefix(self.args.socket_id, numactl=False)) num_cores = str(self.platform_util.num_cores_per_socket) if self.args.num_cores != -1: num_cores = str(self.args.num_cores) @@ -157,7 +160,8 @@ def run_perf_command(self): def run_accuracy_command(self): # already validated by parent - self.command = "FROZEN_GRAPH=" + self.args.input_graph + self.command = self.get_command_prefix(self.args.socket_id, numactl=False) + self.command += "FROZEN_GRAPH=" + self.args.input_graph if self.args.data_location and os.path.exists( self.args.data_location): diff --git a/benchmarks/object_detection/tensorflow/rfcn/requirements.txt b/benchmarks/object_detection/tensorflow/rfcn/requirements.txt index 92d9e0ba5..3ebb25335 100644 --- a/benchmarks/object_detection/tensorflow/rfcn/requirements.txt +++ b/benchmarks/object_detection/tensorflow/rfcn/requirements.txt @@ -1,6 +1,6 @@ Cython contextlib2 -pillow +pillow==5.3.0 lxml jupyter matplotlib diff --git a/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md b/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md index 8dc015d61..c6400197c 100644 --- a/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md +++ b/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md @@ -10,6 +10,11 @@ for other precisions are coming later. ## Int8 Inference Instructions +These instructions use the TCMalloc memory allocator, which produces +better performance results for Int8 precision models with smaller batch sizes. +If you want to disable the use of TCMalloc, set `--disable-tcmalloc=True` +when calling `launch_benchmark.py` and the script will run without TCMalloc. + 1. 
Clone the [tensorflow/models](https://github.com/tensorflow/models) repository at the specified SHA and clone the [cocoapi repo](git clone https://github.com/cocodataset/cocoapi.git) in @@ -61,6 +66,7 @@ TF records format in order to use it with the inference script. We will do this by running the `create_coco_tf_record.py` file in the TensorFlow models repo. +Follow [instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md#dependencies) to install the required dependencies (`cocoapi` and `Protobuf 3.0.0`). Follow the steps below to navigate to the proper directory and point the script to the raw COCO dataset files that you have downloaded in step 2. The `--output_dir` is the location where the TF record files will be @@ -115,7 +121,7 @@ python launch_benchmark.py \ --precision int8 \ --framework tensorflow \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-prs-b5d67b7-avx2-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --model-source-dir /home//tensorflow/models \ --data-location /home//val/val2017 \ --in-graph /home//ssdmobilenet_int8_pretrained_model.pb \ @@ -123,8 +129,8 @@ python launch_benchmark.py \ --batch-size 1 ``` -Or for accuracy where the `--data-location` is the path the directory -where your `coco_val.record` file is located: +Or for accuracy where the `--data-location` is the path to +the tf record file that you generated in step 2: ``` python launch_benchmark.py \ --model-name ssd-mobilenet \ @@ -132,19 +138,14 @@ python launch_benchmark.py \ --precision int8 \ --framework tensorflow \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-prs-b5d67b7-avx2-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --model-source-dir /home//tensorflow/models \ - --data-location /home//coco/output \ + --data-location /home//coco/output/coco_val.record \ --in-graph /home//ssdmobilenet_int8_pretrained_model.pb \ --accuracy-only \ --batch-size 1 ``` -Note that it is required to use the docker image specified in the -commands above (`intelaipg/intel-optimized-tensorflow:latest-prs-b5d67b7`) -to run SSD-MobileNet Int8, as it includes PRs that are required to run -this model. - Note that the `--verbose` or `--output-dir` flag can be added to any of the above commands to get additional debug output or change the default output location. @@ -154,15 +155,13 @@ Below is a sample log file tail when running for batch and online inference: ``` -Step 4970: 0.0340421199799 seconds -Step 4980: 0.0429329872131 seconds -Step 4990: 0.0358219146729 seconds -Avg. Duration per Step:0.0364457404137 -Avg. Duration per Step:0.0365921088491 -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu +Step 4970: 0.0305020809174 seconds +Step 4980: 0.0294089317322 seconds +Step 4990: 0.0301029682159 seconds +Avg. Duration per Step:0.0300041775227 +Avg. Duration per Step:0.0301246762276 Ran inference with batch size 1 -Log location outside container: /benchmark_ssd-mobilenet_inference_int8_20181203_232524.log +Log location outside container: /benchmark_ssd-mobilenet_inference_int8_20190417_175418.log ``` And here is a sample log file tail when running for accuracy: @@ -185,8 +184,6 @@ DONE (t=1.10s). 
Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.212 Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = -1.000 Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = -1.000 -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size 1 Log location outside container: /benchmark_ssd-mobilenet_inference_int8_20181204_185432.log ``` @@ -245,6 +242,7 @@ TF records format in order to use it with the inference script. We will do this by running the `create_coco_tf_record.py` file in the TensorFlow models repo. +Follow [instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md#dependencies) to install the required dependencies (`cocoapi` and `Protobuf 3.0.0`). Follow the steps below to navigate to the proper directory and point the script to the raw COCO dataset files that you have downloaded in step 2. The `--output_dir` is the location where the TF record files will be @@ -351,7 +349,7 @@ $ python launch_benchmark.py \ --precision fp32 \ --mode inference \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.12.0-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --benchmark-only ``` @@ -370,7 +368,7 @@ $ python launch_benchmark.py \ --precision fp32 \ --mode inference \ --socket-id 0 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.12.0-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --accuracy-only ``` @@ -382,8 +380,6 @@ Below is a sample log file tail when running for performance: INFO:tensorflow:Processed 5001 images... moving average latency 37 ms INFO:tensorflow:Finished processing records Latency: min = 33.8, max = 6635.9, mean= 38.4, median = 37.2 -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size -1 Log location outside container: {--output-dir value}/benchmark_ssd-mobilenet_inference_fp32_20190130_225108.log ``` @@ -403,8 +399,6 @@ Below is a sample log file tail when testing accuracy: Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.264 Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = -1.000 Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = -1.000 -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size -1 Log location outside container: {--output-dir value}/benchmark_ssd-mobilenet_inference_fp32_20190123_225145.log ``` diff --git a/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/config.json b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/config.json new file mode 100644 index 000000000..6f1228ba7 --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 0, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/model_init.py b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/model_init.py index 379e47c67..927f73048 100644 --- a/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/model_init.py +++ b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/model_init.py @@ -44,8 +44,9 @@ def __init__(self, args, custom_args, platform_util): self.run_inference_sanity_checks(self.args, self.custom_args) 
self.research_dir = os.path.join(args.model_source_dir, "research") - # Set KMP env vars, except override the default KMP_BLOCKTIME value - self.set_kmp_vars(kmp_blocktime="0") + # Set KMP env vars, if they haven't already been set + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) # set num_inter_threads and num_intra_threads (override inter threads to 2) self.set_num_inter_intra_threads(num_inter_threads=2) @@ -67,7 +68,7 @@ def __init__(self, args, custom_args, platform_util): self.args.precision, "infer_detections.py") # get command with numactl - self.run_cmd = self.get_numactl_command( + self.run_cmd = self.get_command_prefix( self.args.socket_id) + "{} {}".format(self.python_exe, benchmark_script) output_tf_record_path = os.path.join(os.path.dirname( diff --git a/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/config.json b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/config.json new file mode 100644 index 000000000..273b45b40 --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/model_init.py b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/model_init.py index 4fdfb3a06..28522ada4 100644 --- a/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/model_init.py +++ b/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/int8/model_init.py @@ -31,7 +31,10 @@ class ModelInitializer(BaseModelInitializer): def __init__(self, args, custom_args=[], platform_util=None): super(ModelInitializer, self).__init__(args, custom_args, platform_util) - self.set_kmp_vars(kmp_blocktime="0") + + # Set KMP env vars, if they haven't already been set + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) # set num_inter_threads and num_intra_threads (override inter threads to 2) self.set_num_inter_intra_threads(num_inter_threads=2) @@ -49,7 +52,7 @@ def __init__(self, args, custom_args=[], platform_util=None): benchmark_script = os.path.join( self.args.intelai_models, self.args.mode, self.args.precision, "run_frozen_graph_ssdmob.py") - self.command_prefix = self.get_numactl_command(self.args.socket_id) + \ + self.command_prefix = self.get_command_prefix(self.args.socket_id) + \ "{} {}".format(self.python_exe, benchmark_script) set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) @@ -64,7 +67,7 @@ def __init__(self, args, custom_args=[], platform_util=None): accuracy_script = os.path.join( self.args.intelai_models, self.args.mode, self.args.precision, "coco_int8.sh") - self.command_prefix = "sh {} {} {}/coco_val.record".format( + self.command_prefix = "sh {} {} {}".format( accuracy_script, self.args.input_graph, self.args.data_location) diff --git a/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md b/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md index 4171e1984..e7b3528fb 100644 --- a/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md +++ b/benchmarks/object_detection/tensorflow/ssd-resnet34/README.md @@ -3,6 +3,7 @@ This document has instructions for how to run SSD-ResNet34 for the following modes/precisions: * [FP32 
inference](#fp32-inference-instructions) +* [INT8 inference](#int8-inference-instructions) Instructions and scripts for model training and inference for other precisions are coming later. @@ -61,6 +62,7 @@ TF records format in order to use it with the inference script. We will do this by running the `create_coco_tf_record.py` file in the TensorFlow models repo. +Follow [instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md#dependencies) to install the required dependencies (`cocoapi` and `Protobuf 3.0.0`). Follow the steps below to navigate to the proper directory and point the script to the raw COCO dataset files that you have downloaded in step 2. The `--output_dir` is the location where the TF record files will be @@ -95,7 +97,11 @@ $ git checkout f505cecde2d8ebf6fe15f40fb8bc350b2b1ed5dc The `coco_val.record` file is what we will use in this inference example. -5. A link to download the pre-trained model is coming soon. +5. Download the pretrained model: + +``` +$ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/ssd_resnet34_fp32_bs1_pretrained_model.pb +``` 6. Clone the [intelai/models](https://github.com/intelai/models) repo. This repo has the launch script for running the model, which we will @@ -109,20 +115,18 @@ $ git clone https://github.com/IntelAI/models.git [intelai/models](https://github.com/intelai/models) repo that was just cloned in the previous step. SSD-ResNet34 can be run for batch and online inference, or accuracy. Note that we are running -SSD-ResNet34 with a TensorFlow 1.13 docker image. +SSD-ResNet34 with a TensorFlow 1.14 docker image. To run for batch and online inference, use the following command, -but replace in your path to the unzipped coco dataset images from step 3 -for the `--dataset-location`, the path to the frozen graph that you -downloaded in step 5 as the `--in-graph`, and use the `--benchmark-only` +the path to the frozen graph that you downloaded in step 5 as +the `--in-graph`, and use the `--benchmark-only` flag: ``` $ cd /home//models/benchmarks $ python launch_benchmark.py \ - --data-location /home//coco/output/ \ - --in-graph /home//ssd_resnet34_coco_pretained_model/ssd_resnet34_bs1.pb \ + --in-graph /home//ssd_resnet34_fp32_bs1_pretrained_model.pb \ --model-source-dir /home//tensorflow/models \ --model-name ssd-resnet34 \ --framework tensorflow \ @@ -130,7 +134,7 @@ $ python launch_benchmark.py \ --mode inference \ --socket-id 0 \ --batch-size=1 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.13.1-devel-mkl-py3 \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --benchmark-only ``` @@ -142,7 +146,7 @@ the path to the frozen graph that you downloaded in step 5 as the ``` $ python launch_benchmark.py \ --data-location /home//coco/output/ \ - --in-graph /home//ssd_resnet34_coco_pretained_model/ssd_resnet34_bs1.pb \ + --in-graph /home//ssd_resnet34_fp32_bs1_pretrained_model.pb \ --model-source-dir /home//tensorflow/models \ --model-name ssd-resnet34 \ --framework tensorflow \ @@ -150,7 +154,7 @@ $ python launch_benchmark.py \ --mode inference \ --socket-id 0 \ --batch-size=1 \ - --docker-image intelaipg/intel-optimized-tensorflow:1.13.1-devel-mkl-py3 \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --accuracy-only ``` @@ -180,8 +184,180 @@ Below is a sample log file tail when testing accuracy: Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.334 Average Recall (AR) @[ IoU=0.50:0.95 | area= large | 
maxDets=100 ] = 0.494 Current AP: 0.21082 -lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu -Ran inference with batch size -1 -Log location outside container: {--output-dir value}/benchmark_ssd-mobilenet_inference_fp32_20190123_225145.log +``` + +## INT8 Inference Instructions + +1. Clone the `tensorflow/models` repository with the specified SHA, +since we are using an older version of the models repo for +SSD-ResNet34. + +``` +$ git clone https://github.com/tensorflow/models.git +$ cd models +$ git checkout f505cecde2d8ebf6fe15f40fb8bc350b2b1ed5dc +$ git clone https://github.com/cocodataset/cocoapi.git +``` + +The TensorFlow models repo will be used for running inference as well as +converting the coco dataset to the TF records format. + +2. Follow the TensorFlow models object detection +[installation instructions](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md#installation) +to get your environment setup with the required dependencies. + +3. Download the 2017 validation +[COCO dataset](http://cocodataset.org/#home) and annotations: + +``` +$ mkdir val +$ cd val +$ wget http://images.cocodataset.org/zips/val2017.zip +$ unzip val2017.zip +$ cd .. + +$ mkdir annotations +$ cd annotations +$ wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip +$ unzip annotations_trainval2017.zip +$ cd .. +``` + +Since we are only using the validation dataset in this example, we will +create an empty directory and empty annotations json file to pass as the +train and test directories in the next step. + +``` +$ mkdir empty_dir + +$ cd annotations +$ echo "{ \"images\": {}, \"categories\": {}}" > empty.json +$ cd .. +``` + +4. Now that you have the raw COCO dataset, we need to convert it to the +TF records format in order to use it with the inference script. We will +do this by running the `create_coco_tf_record.py` file in the TensorFlow +models repo. + +Follow the steps below to navigate to the proper directory and point the +script to the raw COCO dataset files that you have downloaded in step 2. +The `--output_dir` is the location where the TF record files will be +located after the script has completed. + +``` + +# We are going to use an older version of the conversion script to checkout the git commit +$ cd models +$ git checkout 7a9934df2afdf95be9405b4e9f1f2480d748dc40 + +$ cd research/object_detection/dataset_tools/ +$ python create_coco_tf_record.py --logtostderr \ + --train_image_dir="/home//coco/empty_dir" \ + --val_image_dir="/home//coco/val/val2017" \ + --test_image_dir="/home//coco/empty_dir" \ + --train_annotations_file="/home//coco/annotations/empty.json" \ + --val_annotations_file="/home//coco/annotations/instances_val2017.json" \ + --testdev_annotations_file="/home//coco/annotations/empty.json" \ + --output_dir="/home//coco/output" + +$ ll /home//coco/output +total 1598276 +-rw-rw-r--. 1 0 Nov 2 21:46 coco_testdev.record +-rw-rw-r--. 1 0 Nov 2 21:46 coco_train.record +-rw-rw-r--. 1 818336740 Nov 2 21:46 coco_val.record + +# Go back to the main models directory and checkout the SHA that we are using for SSD-ResMet34 +$ cd /home//models +$ git checkout f505cecde2d8ebf6fe15f40fb8bc350b2b1ed5dc +``` + +The `coco_val.record` file is what we will use in this inference example. + +5. Download the pretrained model: + +``` +$ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/ssd_resnet34_int8_bs1_pretrained_model.pb +``` + +6. 
Clone the [intelai/models](https://github.com/intelai/models) repo. +This repo has the launch script for running the model, which we will +use in the next step. + +``` +$ git clone https://github.com/IntelAI/models.git +``` + +7. Next, navigate to the `benchmarks` directory of the +[intelai/models](https://github.com/intelai/models) repo that was just +cloned in the previous step. SSD-ResNet34 can be run for testing batch or online inference, or testing accuracy. Note that we are running +SSD-ResNet34 with a TensorFlow 1.14 docker image. + +To run for batch and online inference, use the following command, +the path to the frozen graph that you downloaded in step 5 as +the `--in-graph`, and use the `--benchmark-only` +flag: + +``` +$ cd /home//models/benchmarks + +$ python launch_benchmark.py \ + --in-graph /home//ssd_resnet34_int8_bs1_pretrained_model.pb \ + --model-source-dir /home//tensorflow/models \ + --model-name ssd-resnet34 \ + --framework tensorflow \ + --precision int8 \ + --mode inference \ + --socket-id 0 \ + --batch-size=1 \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ + --benchmark-only +``` + +To test accuracy, use the following command but replace in your path to +the tf record file that you generated in step 4 for the `--data-location`, +the path to the frozen graph that you downloaded in step 5 as the +`--in-graph`, and use the `--accuracy-only` flag: + +``` +$ python launch_benchmark.py \ + --data-location /home//coco/output/ \ + --in-graph /home//ssd_resnet34_int8_bs1_pretrained_model.pb \ + --model-source-dir /home//tensorflow/models \ + --model-name ssd-resnet34 \ + --framework tensorflow \ + --precision int8 \ + --mode inference \ + --socket-id 0 \ + --batch-size=1 \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ + --accuracy-only +``` + +8. The log file is saved to the value of `--output-dir`. 
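Since the results land in that log file, a short script can be handy for pulling the headline throughput number back out when comparing runs. This is only a sketch (not part of the model zoo); the log file name pattern and output directory are assumptions based on the sample log shown below:

```python
# Sketch: find the newest SSD-ResNet34 Int8 benchmark log and print its throughput.
import glob
import os
import re

output_dir = "/home//benchmark_output"  # example --output-dir value
pattern = os.path.join(output_dir, "benchmark_ssd-resnet34_inference_int8_*.log")
logs = glob.glob(pattern)

if logs:
    latest = max(logs, key=os.path.getmtime)  # most recently written log
    with open(latest) as log_file:
        for line in log_file:
            match = re.search(r"Total samples/sec:\s*([\d.]+)", line)
            if match:
                print("{}: {} samples/sec".format(os.path.basename(latest), match.group(1)))
else:
    print("No benchmark logs found matching {}".format(pattern))
```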
+ +Below is a sample log file tail when testing performance: + +``` +Batchsize: 1 +Time spent per BATCH: 12.0245 ms +Total samples/sec: 83.1635 samples/s +``` + +Below is a sample log file tail when testing accuracy: + +``` + Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.204 + Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.360 + Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.208 + Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.051 + Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.213 + Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.335 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.210 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.294 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.301 + Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.083 + Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.327 + Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.484 +Current AP: 0.20408 ``` diff --git a/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/fp32/config.json b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/fp32/config.json new file mode 100644 index 000000000..273b45b40 --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/fp32/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/fp32/model_init.py b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/fp32/model_init.py index 0e6657a11..0b53a0112 100644 --- a/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/fp32/model_init.py +++ b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/fp32/model_init.py @@ -42,7 +42,11 @@ def __init__(self, args, custom_args, platform_util): super(ModelInitializer, self).__init__(args, custom_args, platform_util) self.run_inference_sanity_checks(self.args, self.custom_args) - self.set_kmp_vars() + + # Set KMP env vars, if they haven't already been set + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) + self.set_num_inter_intra_threads() set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) @@ -53,7 +57,7 @@ def __init__(self, args, custom_args, platform_util): benchmark_script = os.path.join(self.model_dir, "infer_detections.py") # get command with numactl - self.run_cmd = self.get_numactl_command(self.args.socket_id) + self.run_cmd = self.get_command_prefix(self.args.socket_id) self.run_cmd += "{0} {1}".format(self.python_exe, benchmark_script) self.run_cmd += " --input-graph {0}".format(self.args.input_graph) self.run_cmd += " --batch-size {0}".format(args.batch_size) @@ -65,8 +69,8 @@ def __init__(self, args, custom_args, platform_util): self.run_cmd += " --data-location {0}".format(self.args.data_location) def run(self): - print(self.run_cmd) old_python_path = os.environ["PYTHONPATH"] os.environ["PYTHONPATH"] = os.path.join(self.args.model_source_dir, "research") + os.environ["PYTHONPATH"] += ":/tmp/benchmarks/scripts/tf_cnn_benchmarks/" self.run_command(self.run_cmd) os.environ["PYTHONPATH"] = old_python_path diff --git 
a/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/int8/__init__.py b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/int8/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/int8/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/int8/config.json b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/int8/config.json new file mode 100644 index 000000000..273b45b40 --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/int8/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/int8/model_init.py b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/int8/model_init.py new file mode 100644 index 000000000..0b53a0112 --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd-resnet34/inference/int8/model_init.py @@ -0,0 +1,76 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +import os +import sys + +from common.base_model_init import BaseModelInitializer +from common.base_model_init import set_env_var + + +class ModelInitializer(BaseModelInitializer): + def run_inference_sanity_checks(self, args, custom_args): + if not args.input_graph: + sys.exit("Please provide a path to the frozen graph directory" + " via the '--in-graph' flag.") + if not args.data_location and self.args.accuracy_only: + sys.exit("Please provide a path to the data directory via the " + "'--data-location' flag.") + if args.socket_id == -1 and args.num_cores == -1: + print("***Warning***: Running inference on all cores could degrade" + " performance. 
Pass a '--socket-id' to specify running on a" + " single socket instead.\n") + + def __init__(self, args, custom_args, platform_util): + super(ModelInitializer, self).__init__(args, custom_args, platform_util) + + self.run_inference_sanity_checks(self.args, self.custom_args) + + # Set KMP env vars, if they haven't already been set + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) + + self.set_num_inter_intra_threads() + + set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) + + self.model_dir = os.path.join(self.args.intelai_models, self.args.mode, self.args.precision) + + # get benchmark command + benchmark_script = os.path.join(self.model_dir, "infer_detections.py") + + # get command with numactl + self.run_cmd = self.get_command_prefix(self.args.socket_id) + self.run_cmd += "{0} {1}".format(self.python_exe, benchmark_script) + self.run_cmd += " --input-graph {0}".format(self.args.input_graph) + self.run_cmd += " --batch-size {0}".format(args.batch_size) + self.run_cmd += " --inter-op-parallelism-threads {0}".format(self.args.num_inter_threads) + self.run_cmd += " --intra-op-parallelism-threads {0}".format(self.args.num_intra_threads) + + if self.args.accuracy_only: + self.run_cmd += " --accuracy-only " + self.run_cmd += " --data-location {0}".format(self.args.data_location) + + def run(self): + old_python_path = os.environ["PYTHONPATH"] + os.environ["PYTHONPATH"] = os.path.join(self.args.model_source_dir, "research") + os.environ["PYTHONPATH"] += ":/tmp/benchmarks/scripts/tf_cnn_benchmarks/" + self.run_command(self.run_cmd) + os.environ["PYTHONPATH"] = old_python_path diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md b/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md new file mode 100644 index 000000000..971311f75 --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/README.md @@ -0,0 +1,285 @@ +# SSD-VGG16 + +This document has instructions for how to run SSD-VGG16 for the +following modes/precisions: +* [Int8 inference](#int8-inference-instructions) +* [FP32 inference](#fp32-inference-instructions) + +Instructions and scripts for model training and inference +other precisions are coming later. + +## Int8 Inference Instructions + +These instructions use the TCMalloc memory allocator, which produces +better performance results for Int8 precision models with smaller batch sizes. +If you want to disable the use of TCMalloc, set `--disable-tcmalloc=True` +when calling `launch_benchmark.py` and the script will run without TCMalloc. + +1. Clone the [original model](https://github.com/HiKapok/SSD.TensorFlow) repository: +``` +$ git clone https://github.com/HiKapok/SSD.TensorFlow.git +$ cd SSD.TensorFlow +$ git checkout 2d8b0cb9b2e70281bf9dce438ff17ffa5e59075c +``` + +2. Clone the [intelai/models](https://github.com/intelai/models) repository. +It will be used to run the SSD-VGG16 model accuracy and inference performance tests. + +3. Download the 2017 validation images file: +[COCO dataset](http://cocodataset.org/#home) and annotations: +This is required if you would like to run the accuracy test, +or batch/online inference with real data. + +``` +$ wget http://images.cocodataset.org/zips/val2017.zip +$ unzip val2017.zip +``` + +Download the validation annotations file: +``` +$ wget http://images.cocodataset.org/annotations/annotations_trainval2017.zip +$ unzip annotations_trainval2017.zip +``` + +4. 
Convert the COCO dataset to TF records format: + +We provide a script `generate_coco_records.py` to convert the raw dataset to the required TF records format. +* The script requires `python3`, `TensorFlow`, and `tqdm` to be installed, along with the `SSD.TensorFlow/dataset` code from the original model directory (from step 1). + +Follow the steps below to get the COCO TF records: + +* Copy the `generate_coco_records.py` script from `models/object_detection/tensorflow/ssd_vgg16/inference/generate_coco_records.py` +from the `models` directory (step 2) to `SSD.TensorFlow/dataset` in the original model directory (step 1). + +``` +$ cp /home//models/models/object_detection/tensorflow/ssd_vgg16/inference/generate_coco_records.py /home//SSD.TensorFlow/dataset +``` + +* Create a directory for the output TF records: +``` +$ mkdir tf_records +``` + +* Run the script to generate the TF records with the required prefix `val`, pointing to the raw COCO dataset and annotation file (step 3): +``` +$ cd /home//SSD.TensorFlow/dataset +$ python generate_coco_records.py \ +--image_path /home//val2017/ \ +--annotations_file /home//annotations/instances_val2017.json \ +--output_prefix val \ +--output_path /home//tf_records/ +``` + +Now you can use `/home//tf_records/` as the dataset location to run inference with real data and to test the model accuracy. +``` +$ ls -l /home//tf_records +total 792084 +-rw-r--r--. 1 170038836 Mar 17 21:35 val-00000-of-00005 +-rw-r--r--. 1 167260232 Mar 17 21:35 val-00001-of-00005 +-rw-r--r--. 1 167326957 Mar 17 21:35 val-00002-of-00005 +-rw-r--r--. 1 166289231 Mar 17 21:35 val-00003-of-00005 +-rw-r--r--. 1 140168531 Mar 17 21:35 val-00004-of-00005 +``` + +5. Download the pretrained model: + +``` +$ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/ssdvgg16_int8_pretrained_model.pb +``` + +6. Navigate to the `benchmarks` directory (step 2), and run the model scripts for either batch or online +inference or accuracy. +``` +$ cd models/benchmarks +``` + +* Run the model for batch or online inference where the `--model-source-dir` is the model source directory from step 1, +and the `--in-graph` is the pretrained model graph from step 5. +If you specify the `--data-location`, which is the path to the tf record file that you generated in step 4, +the model will run with real data; otherwise dummy data will be used: +``` +python launch_benchmark.py \ + --model-name ssd_vgg16 \ + --mode inference \ + --precision int8 \ + --framework tensorflow \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ + --model-source-dir /home//SSD.TensorFlow \ + --data-location /home//tf_records \ + --in-graph /home//ssdvgg16_int8_pretrained_model.pb \ + --batch-size 1 \ + --socket-id 0 \ + --num-inter-threads 11 \ + --num-intra-threads 21 \ + --data-num-inter-threads 21 \ + --data-num-intra-threads 28 \ + -- warmup-steps=100 steps=500 +``` + +* For the accuracy test: + + * Clone the customized [cocoapi repo](https://github.com/waleedka/coco) in +the model directory `SSD.TensorFlow` from step 1. + ``` + $ git clone https://github.com/waleedka/coco.git + + ``` + * The `--data-location` is required, which is the path to the tf record file that you generated in step 4. + * Copy the annotation file `instances_val2017.json` (from step 3) to the dataset directory `/home//tf_records/`.
+ * Use the `--accuracy-only` flag: +``` +python launch_benchmark.py \ + --model-name ssd_vgg16 \ + --mode inference \ + --precision int8 \ + --framework tensorflow \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ + --model-source-dir /home//SSD.TensorFlow \ + --data-location /home//tf_records \ + --in-graph /home//ssdvgg16_int8_pretrained_model.pb \ + --accuracy-only \ + --batch-size 1 +``` + +>Notes: +>* For batch and online inference, we recommend the provided values for the arguments: `--num-inter-threads=11`, `--num-intra-threads=21`, `--data-num-inter-threads=21`, + `--data-num-intra-threads=28` for optimized performance on `28-cores Cascade Lake (CLX)` machine. + +>* SSD-VGG16 model accuracy test works only with the `Python3` based docker images. + +>* The `--verbose` or `--output-dir` flag can be added to any of the above commands +to get additional debug output or change the default output location. + +7. The log file is saved to the value of `--output-dir`. + +Below is a sample log file tail when running the model for batch +and online inference, the following results are based on CLX 28-cores with hyper-threading enabled: + +``` +Batch size = 1 +Throughput: 30.382 images/sec +Latency: 32.915 ms +Ran inference with batch size 1 +Log location outside container: {--output-dir value}/benchmark_ssd_vgg16_inference_int8_20190417_231832.log +``` + +And here is a sample log file tail when running for accuracy: + +``` + Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.231 + Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.386 + Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.243 + Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.058 + Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.265 + Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.391 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.224 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.330 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.355 + Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.091 + Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.420 + Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.558 +``` + +## FP32 Inference Instructions + +Use the steps 1, 2,3 and 4 as above. + +5. Download the pretrained model: +``` +$ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/ssdvgg16_fp32_pretrained_model.pb +``` + +6. Navigate to the `benchmarks` directory (step 2), and run the model scripts for either batch +and online inference or accuracy. 
+``` +$ cd models/benchmarks +``` + +* Run the model for batch and online inference where the `--model-source-dir` is the model source directory from step 1, +and the `--in-graph` is the pretrained model graph from step 5, +if you specify the `--data-location` which is the path to the tf record file that you generated in step 4, +the benchmark will run with real data, otherwise dummy data will be used: +``` +$ cd /home//models/benchmarks + +$ python launch_benchmark.py \ + --data-location /home//tf_records \ + --in-graph /home//ssdvgg16_fp32_pretrained_model.pb \ + --model-source-dir /home//SSD.TensorFlow \ + --model-name ssd_vgg16 \ + --framework tensorflow \ + --precision fp32 \ + --mode inference \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ + --batch-size 1 \ + --socket-id 0 \ + --num-inter-threads 11 \ + --num-intra-threads 21 \ + --data-num-inter-threads 21 \ + --data-num-intra-threads 28 \ + -- warmup-steps=100 steps=500 +``` + +* For the accuracy test: + + * Clone the customized [cocoapi repo](https://github.com/waleedka/coco) in +the model directory `SSD.TensorFlow` from step 1. + ``` + $ git clone https://github.com/waleedka/coco.git + + ``` + * The `--data-location` is required, which is the path to the tf record file that you generated in step 3. + * Copy the annotation file `instances_val2017.json` (from step 3) to the dataset directory `/home//tf_records/`. + * Use the `--accuracy-only` flag: +``` +python launch_benchmark.py \ + --model-name ssd_vgg16 \ + --mode inference \ + --precision fp32 \ + --framework tensorflow \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ + --model-source-dir /home//SSD.TensorFlow \ + --data-location /home//tf_records \ + --in-graph /home//ssdvgg16_fp32_pretrained_model.pb \ + --accuracy-only \ + --batch-size 1 +``` + +>Notes: +>* For batch and online inference, we recommend the provided values for the arguments: `--num-inter-threads=11`, `--num-intra-threads=21`, `--data-num-inter-threads=21`, + `--data-num-intra-threads=28` for optimized performance on `28-cores Cascade Lake (CLX)` machine. + +>* SSD-VGG16 model accuracy test works only with the `Python3` based docker images. + +>* The `--verbose` or `--output-dir` flag can be added to any of the above commands +to get additional debug output or change the default output location. + +7. The log file is saved to the value of `--output-dir`. 
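Regarding the `--num-inter-threads` and `--num-intra-threads` values recommended above: these flags ultimately control TensorFlow's session-level parallelism. The snippet below is purely illustrative (it is not the model zoo's implementation) and shows roughly how those two numbers map onto a TF 1.x session configuration:

```python
# Illustrative sketch of how inter/intra op thread counts are applied in TF 1.x.
import tensorflow as tf

config = tf.ConfigProto(
    inter_op_parallelism_threads=11,  # how many independent ops may run in parallel
    intra_op_parallelism_threads=21)  # threads available within a single op (e.g. conv)

with tf.Session(config=config) as sess:
    # ... load the frozen SSD-VGG16 graph and run inference with this session ...
    pass
```

Higher values are not always better; the numbers recommended above were tuned for a 28-core Cascade Lake socket, as noted earlier.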
+ +Below is a sample log file tail when running batch and online inference, +the following results are based on CLX 28-cores with hyper-threading enabled: + +``` +Batch size = 1 +Throughput: 15.662 images/sec +Latency: 63.848 ms +Ran inference with batch size 1 +Log location outside container: {--output-dir value}/benchmark_ssd_vgg16_inference_fp32_20190417_232130.log +``` + +Below is a sample log file tail when testing accuracy: + +``` + Average Precision (AP) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.236 + Average Precision (AP) @[ IoU=0.50 | area= all | maxDets=100 ] = 0.391 + Average Precision (AP) @[ IoU=0.75 | area= all | maxDets=100 ] = 0.248 + Average Precision (AP) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.058 + Average Precision (AP) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.264 + Average Precision (AP) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.399 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 1 ] = 0.227 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets= 10 ] = 0.334 + Average Recall (AR) @[ IoU=0.50:0.95 | area= all | maxDets=100 ] = 0.358 + Average Recall (AR) @[ IoU=0.50:0.95 | area= small | maxDets=100 ] = 0.091 + Average Recall (AR) @[ IoU=0.50:0.95 | area=medium | maxDets=100 ] = 0.423 + Average Recall (AR) @[ IoU=0.50:0.95 | area= large | maxDets=100 ] = 0.564 +``` diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/__init__.py b/benchmarks/object_detection/tensorflow/ssd_vgg16/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/__init__.py b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/config.json b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/config.json new file mode 100644 index 000000000..14d129748 --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/config.json @@ -0,0 +1,6 @@ +{ + "optimization_parameters": { + "KMP_SETTINGS": 1, + "TF_ENABLE_WINOGRAD_NONFUSED": 1 + } +} diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/fp32/__init__.py b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/fp32/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/fp32/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/fp32/model_init.py b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/fp32/model_init.py new file mode 100644 index 000000000..5698700f4 --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/fp32/model_init.py @@ -0,0 +1,28 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +from object_detection.tensorflow.ssd_vgg16.inference.ssd_vgg16_model_init import SSDVGG16ModelInitializer + + +class ModelInitializer(SSDVGG16ModelInitializer): + """Model initializer for SSD-VGG16 FP32 inference""" + + def __init__(self, args, custom_args=[], platform_util=None): + super(ModelInitializer, self).__init__(args, custom_args, platform_util) diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/int8/__init__.py b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/int8/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/int8/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/int8/model_init.py b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/int8/model_init.py new file mode 100644 index 000000000..01d1822ba --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/int8/model_init.py @@ -0,0 +1,28 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +from object_detection.tensorflow.ssd_vgg16.inference.ssd_vgg16_model_init import SSDVGG16ModelInitializer + + +class ModelInitializer(SSDVGG16ModelInitializer): + """Model initializer for SSD-VGG16 Int8 inference""" + + def __init__(self, args, custom_args=[], platform_util=None): + super(ModelInitializer, self).__init__(args, custom_args, platform_util) diff --git a/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/ssd_vgg16_model_init.py b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/ssd_vgg16_model_init.py new file mode 100644 index 000000000..c54994170 --- /dev/null +++ b/benchmarks/object_detection/tensorflow/ssd_vgg16/inference/ssd_vgg16_model_init.py @@ -0,0 +1,107 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: EPL-2.0 +# + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import sys +import argparse + +from common.base_model_init import BaseModelInitializer, set_env_var + + +class SSDVGG16ModelInitializer(BaseModelInitializer): + """Common model initializer for SSD-VGG16 inference""" + + def run_inference_sanity_checks(self, args, custom_args): + if not args.input_graph: + sys.exit("Please provide a path to the frozen graph directory" + " via the '--in-graph' flag.") + if not args.data_location and self.args.accuracy_only: + sys.exit("For accuracy test, please provide a path to the data directory via the " + "'--data-location' flag.") + if args.batch_size != -1 and args.batch_size != 1: + sys.exit("SSD-VGG16 inference supports 'batch-size=1' " + + "only, please modify via the '--batch_size' flag.") + + def __init__(self, args, custom_args, platform_util): + super(SSDVGG16ModelInitializer, self).__init__(args, custom_args, platform_util) + + self.parse_custom_args() + self.run_inference_sanity_checks(self.args, self.custom_args) + + # Set KMP env vars, if they haven't already been set + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) + + self.set_num_inter_intra_threads(num_inter_threads=self.args.num_inter_threads, + num_intra_threads=self.args.num_intra_threads) + + omp_num_threads = str(int(platform_util.num_cores_per_socket / 2))\ + if self.args.precision == "int8" else platform_util.num_cores_per_socket + + set_env_var("OMP_NUM_THREADS", omp_num_threads + if self.args.num_cores == -1 else self.args.num_cores) + + script_path = os.path.join( + self.args.intelai_models, self.args.mode, "eval_ssd.py") + + self.run_cmd = self.get_command_prefix( + self.args.socket_id) + "{} {}".format(self.python_exe, script_path) + + self.run_cmd += " --input-graph={} " \ + " --num-inter-threads={} --num-intra-threads={} ". \ + format(self.args.input_graph, self.args.num_inter_threads, + self.args.num_intra_threads) + + if self.args.data_num_inter_threads: + self.run_cmd += " --data-num-inter-threads={} ".format( + self.args.data_num_inter_threads) + + if self.args.data_num_intra_threads: + self.run_cmd += " --data-num-intra-threads={} ".format( + self.args.data_num_intra_threads) + + if self.args.benchmark_only: + self.run_cmd += " --warmup-steps={} --steps={} ". 
\ + format(self.args.warmup_steps, self.args.steps) + + # if the data location directory is not empty, then include the arg + if self.args.data_location and os.listdir(self.args.data_location): + self.run_cmd += " --data-location={} ".format(self.args.data_location) + + if self.args.accuracy_only: + self.run_cmd += "--accuracy-only " + + def parse_custom_args(self): + if self.custom_args: + parser = argparse.ArgumentParser() + parser.add_argument("--warmup-steps", type=int, default=10, + help="number of warmup steps") + parser.add_argument("--steps", type=int, default=50, + help="number of steps") + + self.args = parser.parse_args(self.custom_args, + namespace=self.args) + + def run(self): + self.run_command(self.run_cmd) diff --git a/benchmarks/recommendation/tensorflow/ncf/README.md b/benchmarks/recommendation/tensorflow/ncf/README.md index 2ee7b070b..a86a56b1f 100644 --- a/benchmarks/recommendation/tensorflow/ncf/README.md +++ b/benchmarks/recommendation/tensorflow/ncf/README.md @@ -14,13 +14,13 @@ This model uses official tensorflow models repo, where [ncf](https://github.com/ model automatically downloads movielens ml-1m dataset as default if the `--data-location` flag is not set. If you want to download movielens 1M dataset and provide that path to `--data-location`, check this [reference](https://grouplens.org/datasets/movielens/1m/) -2. Clone the official `tensorflow/models` repository with tag `v1.11` +2. Clone the official `tensorflow/models` repository with tag `v1.11` and make a small change to `data_async_generation.py`, commenting out a line that causes a crash in the model script. ``` $ git clone https://github.com/tensorflow/models.git $ cd models $ git checkout v1.11 -$ pwd +$ sed -i.bak 's/atexit.register/# atexit.register/g' official/recommendation/data_async_generation.py ``` 3. Now clone `IntelAI/models` repository and then navigate to the `benchmarks` folder: @@ -53,7 +53,7 @@ $ python launch_benchmark.py \ --framework tensorflow \ --precision fp32 \ --mode inference \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 ``` The tail of batch inference log, looks as below. @@ -83,7 +83,7 @@ $ python launch_benchmark.py \ --framework tensorflow \ --precision fp32 \ --mode inference \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 ``` The tail of online inference log, looks as below. @@ -115,7 +115,7 @@ $ python launch_benchmark.py \ --framework tensorflow \ --precision fp32 \ --mode inference \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 ``` The tail of accuracy log, looks as below. 
diff --git a/benchmarks/recommendation/tensorflow/ncf/inference/fp32/config.json b/benchmarks/recommendation/tensorflow/ncf/inference/fp32/config.json new file mode 100644 index 000000000..273b45b40 --- /dev/null +++ b/benchmarks/recommendation/tensorflow/ncf/inference/fp32/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/recommendation/tensorflow/ncf/inference/fp32/model_init.py b/benchmarks/recommendation/tensorflow/ncf/inference/fp32/model_init.py index 1b6eb1eda..960c2523a 100644 --- a/benchmarks/recommendation/tensorflow/ncf/inference/fp32/model_init.py +++ b/benchmarks/recommendation/tensorflow/ncf/inference/fp32/model_init.py @@ -40,7 +40,8 @@ def __init__(self, args, custom_args=[], platform_util=None): self.args.batch_size = 256 # Set KMP env vars, if they haven't already been set - self.set_kmp_vars() + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) # set num_inter_threads and num_intra_threads self.set_num_inter_intra_threads() @@ -49,7 +50,7 @@ def __init__(self, args, custom_args=[], platform_util=None): self.args.intelai_models, self.args.mode, self.args.precision, "ncf_main.py") - self.benchmark_command = self.get_numactl_command(args.socket_id) + \ + self.benchmark_command = self.get_command_prefix(args.socket_id) + \ self.python_exe + " " + benchmark_script set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) diff --git a/benchmarks/recommendation/tensorflow/wide_deep/README.md b/benchmarks/recommendation/tensorflow/wide_deep/README.md index e6698bd5d..8ace58237 100644 --- a/benchmarks/recommendation/tensorflow/wide_deep/README.md +++ b/benchmarks/recommendation/tensorflow/wide_deep/README.md @@ -56,7 +56,7 @@ use in the next step. --batch-size 1 \ --data-location /home//widedeep_dataset \ --checkpoint /home//path/to/wide_deep_fp32_pretrained_model \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --verbose ``` * Running the model in batch inference mode, set `--batch-size` = `1024` @@ -72,7 +72,7 @@ use in the next step. --batch-size 1024 \ --data-location /home//path/to/dataset \ --checkpoint /home//path/to/wide_deep_fp32_pretrained_model \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --verbose ``` 6. The log file is saved to the value of `--output-dir`. @@ -94,8 +94,6 @@ use in the next step. recall: 0.0 End-to-End duration is %s 36.5971579552 Latency is: %s 0.00224784460139 - lscpu_path_cmd = command -v lscpu - lscpu located here: /usr/bin/lscpu current path: /workspace/benchmarks search path: /workspace/benchmarks/*/tensorflow/wide_deep/inference/fp32/model_init.py Using model init: /workspace/benchmarks/classification/tensorflow/wide_deep/inference/fp32/model_init.py diff --git a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/README.md b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/README.md index d4fb5fef4..e2467d45f 100755 --- a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/README.md +++ b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/README.md @@ -55,6 +55,11 @@ Instructions and scripts for model training coming later. 
## INT8 Inference Instructions +These instructions use the TCMalloc memory allocator, which produces +better performance results for Int8 precision models with smaller batch sizes. +If you want to disable the use of TCMalloc, set `--disable-tcmalloc=True` +when calling `launch_benchmark.py` and the script will run without TCMalloc. + 1. Download and extract the pre-trained model. ``` wget https://storage.googleapis.com/intel-optimized-tensorflow/models/wide_deep_int8_pretrained_model.pb @@ -72,7 +77,7 @@ Instructions and scripts for model training coming later. ``` cd /home//models/benchmarks - python launch_benchmark.py + python launch_benchmark.py \ --model-name wide_deep_large_ds \ --precision int8 \ --mode inference \ @@ -92,7 +97,7 @@ Instructions and scripts for model training coming later. ``` cd /home//models/benchmarks - python launch_benchmark.py + python launch_benchmark.py \ --model-name wide_deep_large_ds \ --precision int8 \ --mode inference \ @@ -109,7 +114,7 @@ Instructions and scripts for model training coming later. ``` cd /home//models/benchmarks - python launch_benchmark.py + python launch_benchmark.py \ --model-name wide_deep_large_ds \ --precision int8 \ --mode inference \ @@ -158,7 +163,7 @@ Instructions and scripts for model training coming later. ``` cd /home//models/benchmarks - python launch_benchmark.py + python launch_benchmark.py \ --model-name wide_deep_large_ds \ --precision fp32 \ --mode inference \ @@ -166,7 +171,7 @@ Instructions and scripts for model training coming later. --batch-size 1000 \ --socket-id 0 \ --accuracy-only \ - --docker-image docker.io/intelaipg/intel-optimized-tensorflow:nightly-latestprs-bdw \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph /root/user/wide_deep_files/wide_deep_fp32_pretrained_model.pb \ --data-location /root/user/wide_deep_files/dataset_preprocessed_eval.tfrecords ``` @@ -178,7 +183,7 @@ Instructions and scripts for model training coming later. ``` cd /home//models/benchmarks - python launch_benchmark.py + python launch_benchmark.py \ --model-name wide_deep_large_ds \ --precision fp32 \ --mode inference \ @@ -186,7 +191,7 @@ Instructions and scripts for model training coming later. --benchmark-only \ --batch-size 1 \ --socket-id 0 \ - --docker-image docker.io/intelaipg/intel-optimized-tensorflow:nightly-latestprs-bdw \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph /root/user/wide_deep_files/wide_deep_fp32_pretrained_model.pb \ --data-location /root/user/wide_deep_files/dataset_preprocessed_test.tfrecords \ -- num_parallel_batches=1 @@ -195,7 +200,7 @@ Instructions and scripts for model training coming later. ``` cd /home//models/benchmarks - python launch_benchmark.py + python launch_benchmark.py \ --model-name wide_deep_large_ds \ --precision fp32 \ --mode inference \ @@ -203,7 +208,7 @@ Instructions and scripts for model training coming later. 
--benchmark-only \ --batch-size 512 \ --socket-id 0 \ - --docker-image docker.io/intelaipg/intel-optimized-tensorflow:nightly-latestprs-bdw \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --in-graph /root/user/wide_deep_files/wide_deep_fp32_pretrained_model.pb \ --data-location /root/user/wide_deep_files/dataset_preprocessed_test.tfrecords ``` diff --git a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/fp32/config.json b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/fp32/config.json new file mode 100644 index 000000000..4efe60b15 --- /dev/null +++ b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/fp32/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "noverbose,warnings,respect,granularity=core,none", + "KMP_BLOCKTIME": 0, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/fp32/model_init.py b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/fp32/model_init.py index 8f3e15359..6293b3d0c 100755 --- a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/fp32/model_init.py +++ b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/fp32/model_init.py @@ -36,9 +36,10 @@ def __init__(self, args, custom_args=[], platform_util=None): # Set the num_inter_threads and num_intra_threads self.set_num_inter_intra_threads(num_inter_threads=platform_util.num_cores_per_socket, num_intra_threads=1) - # Use default KMP AFFINITY values, override KMP_BLOCKTIME & enable KMP SETTINGS - self.set_kmp_vars(kmp_settings="1", kmp_blocktime="0", - kmp_affinity="noverbose,warnings,respect,granularity=core,none") + + # Set KMP env vars, if they haven't already been set + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) # Set env vars, if they haven't already been set set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) @@ -61,7 +62,7 @@ def run_benchmark(self): script_args_list = ["input_graph", "num_parallel_batches", "batch_size", "num_inter_threads", "num_intra_threads", "accuracy_only", "data_location"] - cmd_prefix = self.get_numactl_command(self.args.socket_id) + \ + cmd_prefix = self.get_command_prefix(self.args.socket_id) + \ self.python_exe + " " + benchmark_script cmd = self.add_args_to_command(cmd_prefix, script_args_list) self.run_command(cmd) diff --git a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/int8/config.json b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/int8/config.json new file mode 100644 index 000000000..4efe60b15 --- /dev/null +++ b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/int8/config.json @@ -0,0 +1,7 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "noverbose,warnings,respect,granularity=core,none", + "KMP_BLOCKTIME": 0, + "KMP_SETTINGS": 1 + } +} diff --git a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/int8/model_init.py b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/int8/model_init.py index 2bd55b5a5..c6a3b25fd 100755 --- a/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/int8/model_init.py +++ b/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/int8/model_init.py @@ -36,9 +36,10 @@ def __init__(self, args, custom_args=[], platform_util=None): # Set the num_inter_threads and num_intra_threads 
self.set_num_inter_intra_threads(num_inter_threads=platform_util.num_cores_per_socket, num_intra_threads=1) - # Use default KMP AFFINITY values, override KMP_BLOCKTIME & enable KMP SETTINGS - self.set_kmp_vars(kmp_settings="1", kmp_blocktime="0", - kmp_affinity="noverbose,warnings,respect,granularity=core,none") + + # Set KMP env vars, if they haven't already been set + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) # Set env vars, if they haven't already been set set_env_var("OMP_NUM_THREADS", self.args.num_intra_threads) @@ -61,7 +62,7 @@ def run_benchmark(self): script_args_list = ["input_graph", "num_parallel_batches", "batch_size", "num_inter_threads", "num_intra_threads", "accuracy_only", "data_location"] - cmd_prefix = self.get_numactl_command(self.args.socket_id) + \ + cmd_prefix = self.get_command_prefix(self.args.socket_id) + \ self.python_exe + " " + benchmark_script cmd = self.add_args_to_command(cmd_prefix, script_args_list) self.run_command(cmd) diff --git a/benchmarks/text_to_speech/tensorflow/wavenet/README.md b/benchmarks/text_to_speech/tensorflow/wavenet/README.md index fa193aa07..963d892d3 100644 --- a/benchmarks/text_to_speech/tensorflow/wavenet/README.md +++ b/benchmarks/text_to_speech/tensorflow/wavenet/README.md @@ -41,7 +41,7 @@ $ pwd 2. Clone this [intelai/models](https://github.com/intelai/models) repo. This repo has the launch script for running the model, as well as checkpoint files for a pre-trained model. After cloning the repo, -navigate to the benchmarks directory, which is where the launch script +navigate to the `benchmarks` directory, which is where the launch script is located. ``` @@ -71,7 +71,7 @@ python launch_benchmark.py \ --framework tensorflow \ --socket-id 0 \ --num-cores 1 \ - --docker-image intelaipg/intel-optimized-tensorflow:latest-devel-mkl \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ --model-source-dir /home//wavenet/tensorflow-wavenet \ --checkpoint /home//wavenet_checkpoints \ -- checkpoint_name=model.ckpt-99 sample=8510 @@ -99,8 +99,6 @@ Sample: 8500 Average Throughput of whole run: Samples / sec: 289.351783 Average Latency of whole run: msec / sample: 3.456001 Finished generating. The result can be viewed in TensorBoard. 
-lscpu_path_cmd = command -v lscpu -lscpu located here: /usr/bin/lscpu Ran inference with batch size -1 Log location outside container: {--output-dir value}/benchmark_wavenet_inference_fp32_20190105_015022.log ``` diff --git a/benchmarks/text_to_speech/tensorflow/wavenet/inference/fp32/config.json b/benchmarks/text_to_speech/tensorflow/wavenet/inference/fp32/config.json new file mode 100644 index 000000000..f0b327528 --- /dev/null +++ b/benchmarks/text_to_speech/tensorflow/wavenet/inference/fp32/config.json @@ -0,0 +1,6 @@ +{ + "optimization_parameters": { + "KMP_AFFINITY": "granularity=fine,verbose,compact,1,0", + "KMP_BLOCKTIME": 1 + } +} diff --git a/benchmarks/text_to_speech/tensorflow/wavenet/inference/fp32/model_init.py b/benchmarks/text_to_speech/tensorflow/wavenet/inference/fp32/model_init.py index 91ebe227c..1756e33ae 100644 --- a/benchmarks/text_to_speech/tensorflow/wavenet/inference/fp32/model_init.py +++ b/benchmarks/text_to_speech/tensorflow/wavenet/inference/fp32/model_init.py @@ -32,8 +32,9 @@ def __init__(self, args, custom_args, platform_util): self.command = "" command_prefix = "{} generate.py".format(self.python_exe) - # Set default KMP env vars, except for KMP_SETTINGS - self.set_kmp_vars(kmp_settings=None) + # Set KMP env vars, if they haven't already been set + config_file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "config.json") + self.set_kmp_vars(config_file_path) self.parse_custom_args() # Set the num_inter_threads and num_intra_threads (override inter threads to 1) diff --git a/benchmarks_directory_structure.png b/benchmarks_directory_structure.png new file mode 100644 index 000000000..1bf56d912 Binary files /dev/null and b/benchmarks_directory_structure.png differ diff --git a/docs/README.md b/docs/README.md index 7ade8475e..c5933030c 100644 --- a/docs/README.md +++ b/docs/README.md @@ -12,11 +12,13 @@ ## Tutorials by Use Case * Inference with IntelĀ® Optimization of Tensorflow: - * [Image Recognition](/docs/image_recognition/tensorflow/Tutorial.md) (ResNet50, ResNet101, and InceptionV3) + * [Image Recognition](/docs/image_recognition/tensorflow/Tutorial.md) (ResNet50, ResNet101, and InceptionV3) + * [Language Translation](/docs/language_translation/tensorflow/Tutorial.md) (Transformer-LT) * [Recommendation Systems](/docs/recommendation/tensorflow/Tutorial.md) (Wide and Deep) * Inference with IntelĀ® Optimization of Tensorflow Serving: * [Image Recognition](/docs/image_recognition/tensorflow_serving/Tutorial.md) (ResNet50 and InceptionV3) - * [Object Detection](/docs/object_detection/tensorflow_serving/Tutorial.md) (R-FCN) + * [Object Detection](/docs/object_detection/tensorflow_serving/Tutorial.md) (R-FCN and SSD-MobileNet) + * [Language Translation](/docs/language_translation/tensorflow_serving/Tutorial.md) (Transformer-LT) * Model Quantization and Optimization * [Image Recognition](/docs/image_recognition/quantization/Tutorial.md) (ResNet50) diff --git a/docs/general/tensorflow/LaunchBenchmark.md b/docs/general/tensorflow/LaunchBenchmark.md index ad358b6aa..14e38385e 100644 --- a/docs/general/tensorflow/LaunchBenchmark.md +++ b/docs/general/tensorflow/LaunchBenchmark.md @@ -23,14 +23,17 @@ Below the general description is an [index of links](#model-scripts-for-tensorfl * Image Recognition * ResNet50: [init](/benchmarks/image_recognition/tensorflow/resnet50/inference/fp32/model_init.py) | - [inference](/models/image_recognition/tensorflow/resnet50/fp32/eval_image_classifier_inference.py) | - 
[preprocessing](/models/image_recognition/tensorflow/resnet50/fp32/preprocessing.py) + [inference](/models/image_recognition/tensorflow/resnet50/inference/eval_image_classifier_inference.py) | + [preprocessing](/models/image_recognition/tensorflow/resnet50/inference/preprocessing.py) * ResNet101: [init](/benchmarks/image_recognition/tensorflow/resnet101/inference/fp32/model_init.py) | - [inference](/models/image_recognition/tensorflow/resnet101/fp32/benchmark.py) | - [preprocessing](/models/image_recognition/tensorflow/resnet101/fp32/preprocessing.py) + [inference](/models/image_recognition/tensorflow/resnet101/inference/eval_image_classifier_inference.py) | + [preprocessing](/models/image_recognition/tensorflow/resnet101/inference/preprocessing.py) * InceptionV3: [init](/benchmarks/image_recognition/tensorflow/inceptionv3/inference/fp32/model_init.py) | - [inference](/models/image_recognition/tensorflow/inceptionv3/fp32/eval_image_classifier_inference.py) | - [preprocessing](/models/image_recognition/tensorflow/inceptionv3/fp32/preprocessing.py) + [inference](/models/image_recognition/tensorflow/inceptionv3/fp32/eval_image_classifier_inference.py) | + [preprocessing](/models/image_recognition/tensorflow/inceptionv3/fp32/preprocessing.py) +* Language Translation + * Transformer-LT: [init](/benchmarks/language_translation/tensorflow/transformer_lt_official/inference/fp32/model_init.py) | + [inference](/models/language_translation/tensorflow/transformer_lt_official/inference/fp32/infer_ab.py) * Recommendation Systems * Wide and Deep: [init](/benchmarks/recommendation/tensorflow/wide_deep_large_ds/inference/fp32/model_init.py) | [inference](/models/recommendation/tensorflow/wide_deep_large_ds/inference/inference.py) | @@ -101,11 +104,170 @@ optional arguments: conjunction with --accuracy-only and --mode=inference. --output-dir OUTPUT_DIR Folder to dump output into. + --disable-tcmalloc {True,False} + When TCMalloc is enabled, the google-perftools are + installed (if running using docker) and the LD_PRELOAD + environment variable is set to point to the TCMalloc + library file. The TCMalloc memory allocator produces + better performance results with smaller batch sizes. + This flag disables the use of TCMalloc when set to + True. For int8 benchmarking, TCMalloc is enabled by + default (--disable-tcmalloc=False). For other + precisions, the flag is --disable-tcmalloc=True by + default. + --tcmalloc-large-alloc-report-threshold TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD + Sets the TCMALLOC_LARGE_ALLOC_REPORT_THRESHOLD + environment variable to the specified value. The + environment variable sets the threshold (in bytes) for + when large memory allocation messages will be + displayed. -g INPUT_GRAPH, --in-graph INPUT_GRAPH Full path to the input graph + --volume CUSTOM_VOLUMES + Specify a custom volume to mount in the container, + which follows the same format as the docker --volume + flag (https://docs.docker.com/storage/volumes/). This + argument can only be used in conjunction with a + --docker-image. 
--debug Launches debug mode which doesn't execute start.sh ``` +## Volume mounts + +When running the launch script using a docker image, volumes will +automatically get mounted in the container for the following +directories: + +| Directory | Mount location in the container | +|-----------|---------------------------------| +| Model zoo `/benchmarks` code | `/workspace/benchmarks` | +| Model zoo `/models` code | `/workspace/intelai_models` | +| `--model-source-dir` code | `/workspace/models` | +| `--checkpoints` directory | `/checkpoints` | +| `--in-graph` file | `/in_graph` | +| `--dataset-location` | `/dataset` | + +If you would like additional directories mounted in the docker +container, you can specify them by using the `--volume` flag using the +same `:` separated field format [as docker](https://docs.docker.com/storage/volumes/). +For example, the following command will mount `/home//custom_folder_1` +in the container at `custom_folder_1` and `/home//custom_folder_2` +in the container at `custom_folder_2`: + +``` +$ python launch_benchmark.py \ + --in-graph /home//resnet50_fp32_pretrained_model.pb \ + --model-name resnet50 \ + --framework tensorflow \ + --precision fp32 \ + --mode inference \ + --batch-size 1 \ + --socket-id 0 \ + --data-location /home//Imagenet_Validation \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ + --volume /home//custom_folder_1:/custom_folder_1 \ + --volume /home//custom_folder_2:/custom_folder_2 +``` + +Note that volume mounting only applies when running in a docker +container. When running on [bare metal](#alpha-feature-running-on-bare-metal), +files are accessed in their original location. + +## Debugging + +The `--debug` flag in the `launch_benchmarks.py` script gives you a +shell into the docker container with the [volumes mounted](#volume-mounts) +for any dataset, pretrained model, model source code, etc that has been +provided by the other flags. It does not execute the `start.sh` script, +and is intended as a way to setup an environment for quicker iteration +when debugging and doing development. From the shell, you can manually +execute the `start.sh` script and select to not re-install dependencies +each time that you re-run, so that the script takes less time to run. + +Below is an example showing how to use the `--debug` flag: + +1. Run the model using your model's `launch_benchmark.py` command, but + add on the `--debug` flag, which will take you to a shell. If you + list the files in the directory at that prompt, you will see the + `start.sh` file: + + ``` + $ python launch_benchmark.py \ + --in-graph /home//resnet50_fp32_pretrained_model.pb \ + --model-name resnet50 \ + --framework tensorflow \ + --precision fp32 \ + --mode inference \ + --batch-size=1 \ + --socket-id 0 \ + --data-location /home//Imagenet_Validation \ + --docker-image gcr.io/deeplearning-platform-release/tf-cpu.1-14 \ + --debug + + # ls + __init__.py logs run_tf_benchmark.py start.sh + ``` + +2. Flags that were passed to the launch script are set as environment + variables in the container: + + ``` + # env + EXTERNAL_MODELS_SOURCE_DIRECTORY=None + IN_GRAPH=/in_graph/resnet50_fp32_pretrained_model.pb + WORKSPACE=/workspace/benchmarks/common/tensorflow + MODEL_NAME=resnet50 + PRECISION=fp32 + BATCH_SIZE=1 + MOUNT_EXTERNAL_MODELS_SOURCE=/workspace/models + DATASET_LOCATION=/dataset + BENCHMARK_ONLY=True + ACCURACY_ONLY=False + ... + ``` +3. 
Run the `start.sh` script, which will setup the `PYTHONPATH`, install + dependencies, and then run the model: + ``` + # bash start.sh + ... + Iteration 48: 0.011513 sec + Iteration 49: 0.011664 sec + Iteration 50: 0.011802 sec + Average time: 0.011650 sec + Batch size = 1 + Latency: 11.650 ms + Throughput: 85.833 images/sec + Ran inference with batch size 1 + Log location outside container: /benchmark_resnet50_inference_fp32_20190403_212048.log + ``` + +4. Code changes that are made locally will also be made in the container + (and vice versa), since the directories are mounted in the docker + container. Once code changes are made, you can rerun the start + script, except set the `NOINSTALL` variable, since dependencies were + already installed in the previous run. You can also change the + environment variable values for other settings, like the batch size. + + ``` + # NOINSTALL=True + # BATCH_SIZE=128 + # bash start.sh + ... + Iteration 48: 0.631819 sec + Iteration 49: 0.625606 sec + Iteration 50: 0.618813 sec + Average time: 0.625285 sec + Batch size = 128 + Throughput: 204.707 images/sec + Ran inference with batch size 128 + Log location outside container: /benchmark_resnet50_inference_fp32_20190403_212310.log + ``` + +5. Once you are done with the session, exit out of the docker container: + ``` + # exit + ``` + ## Alpha feature: Running on bare metal We recommend using [Docker](https://www.docker.com) to run the @@ -173,3 +335,11 @@ the following command can be used: --batch-size=1 \ --socket-id 0 ``` + +> When running on bare metal, be aware of environment variables that you +have set on your system. The model zoo scripts intentionally do not +overwrite environment variables that have already been set, such as +`OMP_NUM_THREADS`. The same is true when running in a docker container, +but since a new docker container instance is started with each run, you +won't have previously set environment variables, like you may have on +bare metal. diff --git a/docs/general/tensorflow_serving/InstallationGuide.md b/docs/general/tensorflow_serving/InstallationGuide.md index 2ef0489eb..0aa6a03b7 100644 --- a/docs/general/tensorflow_serving/InstallationGuide.md +++ b/docs/general/tensorflow_serving/InstallationGuide.md @@ -36,7 +36,7 @@ We will break down the installation into 2 steps: * Step 1: Build the Intel Optimized TensorFlow Serving Docker image * Step 2: Verify the Docker image by serving a simple model - half_plus_two -### Step 1: Build TensorFlow Serving Docker image +### Step 1: Build TensorFlow Serving Docker image. The recommended way to use TensorFlow Serving is with Docker images. Letā€™s build a docker image with TensorFlow Serving optimized for IntelĀ® Processors. * Login into your machine via SSH and clone the [Tensorflow Serving](https://github.com/tensorflow/serving/) repository and save the path of this cloned directory (Also, adding it to `.bashrc` ) for ease of use for the remainder of this tutorial. @@ -45,7 +45,9 @@ The recommended way to use TensorFlow Serving is with Docker images. Letā€™s bui $ export TF_SERVING_ROOT=$(pwd)/serving $ echo "export TF_SERVING_ROOT=$(pwd)/serving" >> ~/.bashrc ``` - + +* You can also build image using [this](/benchmarks/common/tensorflow_serving/build_tfserving_image.sh) script, run as per comments mentioned. Or Continue manual steps as below. + * Using `Dockerfile.devel-mkl`, build an image with Intel optimized ModelServer. This creates an image with all the required development tools and builds from sources. 
The image size will be around 5GB and will take some time. On AWS c5.4xlarge instance (16 logical cores), it took about 25min. **NOTE**: It is recommended that you build an official release version using `--build-arg TF_SERVING_VERSION_GIT_BRANCH=""`, but if you wish to build the (unstable) head of master, omit the build argument and master will be used by default. @@ -54,6 +56,7 @@ The recommended way to use TensorFlow Serving is with Docker images. Letā€™s bui $ cd $TF_SERVING_ROOT/tensorflow_serving/tools/docker/ $ docker build \ -f Dockerfile.devel-mkl \ + --build-arg TF_SERVING_BAZEL_OPTIONS="--incompatible_disallow_data_transition=false --incompatible_disallow_filetype=false" \ --build-arg TF_SERVING_VERSION_GIT_BRANCH="1.13.0" \ -t tensorflow/serving:latest-devel-mkl . ``` @@ -257,7 +260,7 @@ $ curl -s http://download.tensorflow.org/models/official/20181001_resnet/savedmo $ cd ~ $ virtualenv tfserving_venv $ source tfserving_venv/bin/activate - (tfserving_venv)$ pip install grpc requests tensorflow tensorflow-serving-api + (tfserving_venv)$ pip install requests tensorflow tensorflow-serving-api ``` * Run the example `resnet_client_grpc.py` script from the TensorFlow Serving repository, which you cloned earlier. ``` diff --git a/docs/image_recognition/tensorflow/Tutorial.md b/docs/image_recognition/tensorflow/Tutorial.md index 235ea4109..7b24c9557 100644 --- a/docs/image_recognition/tensorflow/Tutorial.md +++ b/docs/image_recognition/tensorflow/Tutorial.md @@ -359,8 +359,6 @@ Note: As per the recommended settings `socket-id` is set to 0 for InceptionV3. T steps = 30, ... images/sec steps = 40, ... images/sec steps = 50, ... images/sec - lscpu_path_cmd = command -v lscpu - lscpu located here: /usr/bin/lscpu Ran inference with batch size 128 Log location outside container: {--output-dir value}/benchmark_resnet50 @@ -384,9 +382,7 @@ you can implement the same strategy on different use cases demoed in Step 3. --debug Example Output - - lscpu_path_cmd = command -v lscpu - lscpu located here: b'/usr/bin/lscpu' + root@a78677f56d69:/workspace/benchmarks/common/tensorflow# To rerun the bechmarking script, execute the ```start.sh``` bash script from your existing directory with additional or modified flags. For e.g to rerun with the best batch inference (batch size=128) settings run with ```BATCH_SIZE``` @@ -397,7 +393,7 @@ and to skip the run from reinstalling packages pass ```True``` to ```NOINSTALL`` NOINSTALL=True BATCH_SIZE=128 ./start.sh -All other flags will be defaulted to values passed in the first ```launch_benchmark.py``` that starts the container. [See here](google.com) to get the full list of flags. +All other flags will be defaulted to values passed in the first ```launch_benchmark.py``` that starts the container. [See here](/docs/general/tensorflow/LaunchBenchmark.md) to get the full list of flags. Example Output @@ -429,8 +425,6 @@ All other flags will be defaulted to values passed in the first ```launch_benchm . Batch size = 128 Throughput: ... 
images/sec - lscpu_path_cmd = command -v lscpu - lscpu located here: /usr/bin/lscpu Ran inference with batch size 128 Log location outside container: {--output-dir value}/benchmark_resnet50_inference_fp32_20190205_201632.log diff --git a/docs/image_recognition/tensorflow_serving/Tutorial.md b/docs/image_recognition/tensorflow_serving/Tutorial.md index 0c9ad527f..e5e9b0153 100644 --- a/docs/image_recognition/tensorflow_serving/Tutorial.md +++ b/docs/image_recognition/tensorflow_serving/Tutorial.md @@ -1,10 +1,12 @@ # Image Recognition with TensorFlow Serving on CPU + ### Online and Batch Inference -Models: ResNet50, InceptionV3 +Model and Precision: InceptionV3 FP32, ResNet50 FP32, and ResNet50 Int8 ## Goal -This tutorial will introduce you to the CPU performance considerations for image recognition deep learning models and how to use IntelĀ® Optimizations for [TensorFlow Serving](https://www.tensorflow.org/serving/) to improve inference time on CPUs. +This tutorial will introduce you to the CPU performance considerations for image recognition deep learning models with different precisions and +how to use IntelĀ® Optimizations for [TensorFlow Serving](https://www.tensorflow.org/serving/) to improve inference time on CPUs. It also provides sample code that you can use to get your optimized TensorFlow model server and GRPC client up and running quickly. ## Prerequisites @@ -22,26 +24,36 @@ This tutorial assumes you have already: Convolutional neural networks (CNNs) for image recognition are computationally expensive. The IntelĀ® Math Kernel Library for Deep Neural Networks (IntelĀ® MKL-DNN) offers significant performance improvements for convolution, pooling, normalization, activation, and other operations via efficient vectorization and multi-threading. Tuning TensorFlow Serving to take full advantage of your hardware for image recognition deep learning inference involves: -1. Working through this tutorial to set up servable versions of the well-known [ResNet50](https://arxiv.org/pdf/1512.03385.pdf) and [InceptionV3](https://arxiv.org/pdf/1512.00567v1.pdf) CNN models +1. Working through this tutorial to set up servable versions of the well-known [ResNet50](https://arxiv.org/pdf/1512.03385.pdf) and [InceptionV3](https://arxiv.org/pdf/1512.00567v1.pdf) CNN models with different precisions. 2. Running a TensorFlow Serving docker container configured for performance given your hardware resources 3. Running a client script to measure online and batch inference performance 4. Experimenting with the TensorFlow Serving settings on your own to further optimize for your model and use case -## Hands-on Tutorial - ResNet50 or InceptionV3 +## Hands-on Tutorial - InceptionV3 and Resnet50 + +This section shows a step-by-step example for how to serve one of the following Image Recognition models +`(ResNet50 FP32, ResNet50 Int8, and InceptionV3 FP32)` using TensorFlow Serving. +It also explains the possible ways to manage the available CPU resources and tune it for the optimal performance. 
+ +For steps 1 and 2, refer to the Intel Model Zoo READMEs: +* **FP32 precision:** use the Intel Model Zoo `FP32` README sections, + * [InceptionV3 FP32 README](/benchmarks/image_recognition/tensorflow/inceptionv3#fp32-inference-instructions), and + * [ResNet50 FP32 README](/benchmarks/image_recognition/tensorflow/resnet50#fp32-inference-instructions) + +* **Int8 precision:** use the Intel Model Zoo `Int8` README sections, + * [ResNet50 Int8 README](/benchmarks/image_recognition/tensorflow/resnet50#int8-inference-instructions) -For steps 1 and 2, refer to the Intel Model Zoo FP32 READMEs: -* [ResNet50 README](/benchmarks/image_recognition/tensorflow/resnet50#fp32-inference-instructions) -* [InceptionV3 README](/benchmarks/image_recognition/tensorflow/inceptionv3#fp32-inference-instructions) +>NOTE: The below example shows InceptionV3 (FP32). The same code snippets will work for ResNet50 (FP32 and Int8) by replacing the model name to `resnet50`. -1. **Download the Model**: Download and extract the ResNet50 or InceptionV3 pre-trained model (FP32), using the instructions in one of the READMEs above. +1. **Download the Model**: Download and extract the InceptionV3 pre-trained model, using the instructions in above README. 2. **(Optional) Download Data**: If you are interested only in testing performance, not accuracy, you can skip this step and use synthetic data. If you want to verify prediction accuracy by testing on real data, follow the instructions in one of the READMEs above to download the ImageNet dataset. -3. **Clone this repository**: Clone the [intelai/models](https://github.com/intelai/models) repository and `cd` into the `docs/image_recognition/tensorflow_serving/src` directory. +3. **Clone this repository**: Clone the [intelai/models](https://github.com/intelai/models) repository and `cd` into the `models/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32` directory. ``` $ git clone https://github.com/IntelAI/models.git - $ cd models/docs/image_recognition/tensorflow_serving/src + $ cd models/benchmarks/image_recognition/tensorflow_serving/inceptionv3/inference/fp32 ``` 4. **Set up your environment**: In this tutorial, we use a virtual environment to install a few required Python packages. @@ -51,15 +63,13 @@ For steps 1 and 2, refer to the Intel Model Zoo FP32 READMEs: $ pip install virtualenv $ virtualenv venv ``` - Then activate the virtual environment and install `grpc`, `requests`, `tensorflow`, and `tensorflow-serving-api` (at the time of this writing, the order of installation matters): + Then activate the virtual environment and install `requests`, `tensorflow`, and `tensorflow-serving-api`: ``` $ source venv/bin/activate - (venv)$ pip install grpc - (venv)$ pip install requests - (venv)$ pip install intel-tensorflow - (venv)$ pip install tensorflow-serving-api + (venv)$ pip install requests intel-tensorflow tensorflow-serving-api ``` 5. **Create a SavedModel**: Using the conversion script `model_graph_to_saved_model.py`, convert the pre-trained model graph to a SavedModel. + (For ResNet50, substitute the name of the ResNet50 FP32 or the ResNet50 Int8 pre-trained model.) Example: ``` @@ -118,13 +128,13 @@ For steps 1 and 2, refer to the Intel Model Zoo FP32 READMEs: To see average online inference performance (in ms), run the script `image_recognition_benchmark.py` using batch_size 1: ``` (venv)$ python image_recognition_benchmark.py --batch_size 1 --model inceptionv3 - Iteration 1: 0.017 sec + Iteration 1: ... sec ... 
- Iteration 40: 0.016 sec - Average time: 0.016 sec + Iteration 40: ... sec + Average time: ... sec Batch size = 1 - Latency: 16.496 ms - Throughput: 60.619 images/sec + Latency: ... ms + Throughput: ... images/sec ``` In some cases, it is desirable to constrain the inference server to a single core or socket. @@ -156,12 +166,12 @@ For steps 1 and 2, refer to the Intel Model Zoo FP32 READMEs: To see average batch inference performance (in images/sec), run the script `image_recognition_benchmark.py` using batch_size 128: ``` (venv)$ python image_recognition_benchmark.py --batch_size 128 --model inceptionv3 - Iteration 1: 1.706 sec + Iteration 1: ... sec ... - Iteration 40: 0.707 sec - Average time: 0.693 sec + Iteration 40: ... sec + Average time: ... sec Batch size = 128 - Throughput: 184.669 images/sec + Throughput: ... images/sec ``` 11. **Clean up**: @@ -171,7 +181,7 @@ For steps 1 and 2, refer to the Intel Model Zoo FP32 READMEs: ## Conclusion -You have now seen two end-to-end examples of serving an image recognition model for inference using TensorFlow Serving, and learned: +You have now seen three end-to-end examples of serving an image recognition model for inference using TensorFlow Serving, and learned: 1. How to create a SavedModel from a TensorFlow model graph 2. How to choose good values for the performance-related runtime parameters exposed by the `docker run` command 3. How to verify that the served model can correctly classify an image using a GRPC client diff --git a/docs/image_recognition/tensorflow_serving/src/image_recognition_benchmark.py b/docs/image_recognition/tensorflow_serving/src/image_recognition_benchmark.py deleted file mode 100644 index 658812cd9..000000000 --- a/docs/image_recognition/tensorflow_serving/src/image_recognition_benchmark.py +++ /dev/null @@ -1,117 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -# Copyright (c) 2019 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: EPL-2.0 -# - -"""Send simulated image data to tensorflow_model_server loaded with ResNet50 or InceptionV3 model. 
- -""" - -from __future__ import print_function - -import os -import sys -import random -import time -import grpc -import tensorflow as tf -import numpy as np - -from tensorflow_serving.apis import predict_pb2 -from tensorflow_serving.apis import prediction_service_pb2_grpc - -from util import preprocess_image, parse_example_proto - -tf.app.flags.DEFINE_string('server', 'localhost:8500', - 'PredictionService host:port') -tf.app.flags.DEFINE_integer('batch_size', 1, 'Batch size to use') -tf.app.flags.DEFINE_string('data_dir', '', 'path to images in TF records format') -tf.app.flags.DEFINE_string('model', 'resnet50', 'Name of model (resnet50 or inceptionv3).') -FLAGS = tf.app.flags.FLAGS - - -def sample_images(image_size): - """Pull a random batch of images from FLAGS.data_dir containing TF record formatted ImageNet validation set - - Returns: - ndarray of float32 with shape [FLAGS.batch_size, image_size, image_size, 3] - """ - - sample_file = random.choice(os.listdir(FLAGS.data_dir)) - dataset = tf.data.TFRecordDataset(os.path.join(FLAGS.data_dir, sample_file)) - dataset = dataset.map(lambda x: parse_example_proto(x)).shuffle(True).batch(FLAGS.batch_size) - iterator = dataset.make_one_shot_iterator() - next_element = iterator.get_next() - with tf.Session() as sess: - images, labels = sess.run(next_element) - images = np.array([sess.run(preprocess_image(x, FLAGS.model, image_size)) for x in images]) - - return images - -def main(_): - if FLAGS.model == 'resnet50': - image_size = 224 - elif FLAGS.model == 'inceptionv3': - image_size = 299 - else: - print('Please specify model as either resnet50 or inceptionv3.') - sys.exit(-1) - - channel = grpc.insecure_channel(FLAGS.server) - stub = prediction_service_pb2_grpc.PredictionServiceStub(channel) - i = 0 - num_iteration = 40 - warm_up_iteration = 10 - total_time = 0 - for _ in range(num_iteration): - i += 1 - if FLAGS.data_dir: - image_np = sample_images(image_size) - else: - image_np = np.random.rand(FLAGS.batch_size, image_size, image_size, 3).astype(np.float32) - if FLAGS.model == 'resnet50': - # For ResNet50, rescale to [0, 256] - image_np *= 256.0 - elif FLAGS.model == 'inceptionv3': - # For InceptionV3, rescale to [-1, 1] - image_np = (image_np - 0.5) * 2.0 - - request = predict_pb2.PredictRequest() - request.model_spec.name = FLAGS.model - request.model_spec.signature_name = 'serving_default' - request.inputs['input'].CopyFrom( - tf.contrib.util.make_tensor_proto(image_np, shape=[FLAGS.batch_size, image_size, image_size, 3])) - start_time = time.time() - result = stub.Predict(request, 10.0) # 10 secs timeout - time_consume = time.time() - start_time - print('Iteration %d: %.3f sec' % (i, time_consume)) - if i > warm_up_iteration: - total_time += time_consume - - time_average = total_time / (num_iteration - warm_up_iteration) - print('Average time: %.3f sec' % (time_average)) - - print('Batch size = %d' % FLAGS.batch_size) - if (FLAGS.batch_size == 1): - print('Latency: %.3f ms' % (time_average * 1000)) - - print('Throughput: %.3f images/sec' % (FLAGS.batch_size / time_average)) - - -if __name__ == '__main__': - tf.app.run() diff --git a/docs/image_recognition/tensorflow_serving/src/util.py b/docs/image_recognition/tensorflow_serving/src/util.py deleted file mode 100644 index 8877e932d..000000000 --- a/docs/image_recognition/tensorflow_serving/src/util.py +++ /dev/null @@ -1,61 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -# Copyright (c) 2019 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not 
use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: EPL-2.0 -# - -from __future__ import print_function - -import tensorflow as tf - -def preprocess_image(image_buffer, model, image_size): - """Preprocess JPEG encoded bytes to 3D float Tensor.""" - - # Decode the string as an RGB JPEG of unknown height and width. - image = tf.image.decode_jpeg(image_buffer, channels=3) - # Convert pixels to [0, 1) - image = tf.image.convert_image_dtype(image, dtype=tf.float32) - # Crop the central region to 87.5% of the original image. - image = tf.image.central_crop(image, central_fraction=0.875) - # Resize the image to image_size x image_size. - image = tf.expand_dims(image, 0) - image = tf.image.resize_bilinear(image, [image_size, image_size], align_corners=False) - image = tf.squeeze(image, [0]) - if model == 'resnet50': - # For ResNet50, rescale to [0, 256] - image = tf.multiply(image, 256.0) - elif model == 'inceptionv3': - # For InceptionV3, rescale to [-1, 1] - image = tf.subtract(image, 0.5) - image = tf.multiply(image, 2.0) - return image - -def parse_example_proto(example_serialized): - - # Dense features in Example proto. - feature_map = { - 'image/encoded': tf.FixedLenFeature([], dtype=tf.string, - default_value=''), - 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, - default_value=-1), - } - - features = tf.parse_single_example(example_serialized, feature_map) - label = tf.cast(features['image/class/label'], dtype=tf.int32) - - return features['image/encoded'], label - diff --git a/docs/language_translation/tensorflow/Tutorial.md b/docs/language_translation/tensorflow/Tutorial.md new file mode 100644 index 000000000..aee385c63 --- /dev/null +++ b/docs/language_translation/tensorflow/Tutorial.md @@ -0,0 +1,266 @@ +# Language Translation with Transformer-LT + + +## Goal +This tutorial will introduce CPU performance considerations of the deep learning Transformer-LT model for language translation and how to use IntelĀ® Optimizations for TensorFlow to improve inference time on CPUs. +This tutorial will also provide code examples to use Intel Model Zoo's pretrained English to German model that can be copy/pasted for quick off-the-ground implementation on real data. + +## Background +Language Translation with deep learning is a computationally expensive endeavor. This tutorial will show you how to reduce the inference runtime of your Transformer-LT network, a popular topology solution to translation. +It is based on an encoder-decoder architecture with an added attention mechanism. The encoder is used to encode the original sentence to a meaningful fixed-length vector, and the decoder is responsible for extracting the context data from the vector. +The encoder and decoder process the inputs and outputs, which are in the form of a time sequence. + +In a traditional encoder/decoder model, each element in the context vector is treated equally. This is typically not the ideal solution. 
+For instance, when you translate the phrase ā€œI travel by trainā€ from English into Chinese, the word ā€œIā€ has a greater influence than other words when producing its counterpart in Chinese. +Thus, the attention mechanism was introduced to differentiate contributions of each element in the source sequence to their counterpart in the destination sequence, through the use of a hidden matrix. +This matrix contains weights of each element in the source sequence when producing elements in the destination sequence. + + +## Recommended Settings +In addition to TensorFlow optimizations that use the IntelĀ® Math Kernel Library for Deep Neural Networks (IntelĀ® MKL-DNN) to utilize instruction sets appropriately, the runtime settings also significantly contribute to improved performance. +Tuning these options to optimize CPU workloads is vital to optimize performance of TensorFlow on IntelĀ® processors. +Below are the set of run-time options tested empirically on Transformer-LT and recommended by Intel: + + +| Run-time options | Recommendations | +| ------------- | ------------- | +| Batch Size | 64. Regardless of the hardware | +| Hyperthreading | Enabled. Turn on in BIOS. Requires a restart. | +|intra_op_parallelism_threads |# physical cores | +|inter_op_parallelism_threads | 1 | +|NUMA Controls| --cpunodebind=0 --membind=0 | +|KMP_AFFINITY| KMP_AFFINITY=granularity=fine,verbose,compact,1,0| +|KMP_BLOCKTIME| 1 | +|OMP_NUM_THREADS |physical cores| + +Note 1: Refer to this [link](https://software.intel.com/en-us/articles/maximize-tensorflow-performance-on-cpu-considerations-and-recommendations-for-inference) to learn more about the run time options. + +Note 2: You can remove `verbose` from `KMP_AFFINITY` setting to avoid verbose output at runtime. + +Run the following commands to get your processor information: + +a. #physical cores per socket : `lscpu | grep "Core(s) per socket" | cut -d':' -f2 | xargs` + +b. #all physical cores: `lscpu -b -p=Core,Socket | grep -v '^#' | sort -u | wc -l` + +Below is a code snippet you can incorporate into your existing TensorFlow application to set the best settings. +You can either set them in the CLI or in the Python script. Note that inter and intra_op_parallelism_threads settings can only be set +in the Python script. + +```bash +export OMP_NUM_THREADS=physical cores +export KMP_AFFINITY="granularity=fine,verbose,compact,1,0" +export KMP_BLOCKTIME=1 +export KMP_SETTINGS=1 +``` +(or) +``` +import os +os.environ["KMP_BLOCKTIME"] = "1" +os.environ["KMP_SETTINGS"] = "1" +os.environ["KMP_AFFINITY"]= "granularity=fine,verbose,compact,1,0" +os.environ["OMP_NUM_THREADS"]= <# physical cores> +config = tf.ConfigProto() +config.intra_op_parallelism_threads = <# physical cores> +config.inter_op_parallelism_threads = 1 +tf.Session(config=config) +``` + +## Hands-on Tutorial +This section shows how to measure inference performance on Intel's Model Zoo pretrained model (or your pretrained model) by setting the above-discussed run time flags. +### FP32 inference + +### Initial Setup + +1. The model source is based off a specific commit from the TensorFlow models repo. Follow the instructions below to clone an older commit into your home directory. + +``` +cd ~ +mkdir tensorflow-models +cd tensorflow-models +git clone https://github.com/tensorflow/models.git +cd models +git checkout 8367cf6dabe11adf7628541706b660821f397dce +``` + +2. Clone IntelAI models and download into your home directory, skip this step if you already have Intel AI models installed. 
+ +```bash +cd ~ +git clone https://github.com/IntelAI/models.git +``` + +3. Skip to step 4 if you already have a pretrained model or download the file `transformer_lt_official_fp32_pretrained_model.tar.gz` into your ~/transformer_LT_german location. +``` +mkdir ~/transformer_LT_german +cd ~/transformer_LT_german +wget https://storage.googleapis.com/intel-optimized-tensorflow/models/transformer_lt_official_fp32_pretrained_model.tar.gz +tar -xzvf transformer_lt_official_fp32_pretrained_model.tar.gz +``` + +4. After extraction, you should see the following folders and files in the `transformer_lt_official_fp32_pretrained_model` directory: +``` +$ ls -l transformer_lt_official_fp32_pretrained_model/* + +transformer_lt_official_fp32_pretrained_model/data: +total 1064 +-rw-r--r--. 1 359898 Feb 20 16:05 newstest2014.en +-rw-r--r--. 1 399406 Feb 20 16:05 newstest2014.de +-rw-r--r--. 1 324025 Mar 15 17:31 vocab.txt + +transformer_lt_official_fp32_pretrained_model/graph: +total 241540 +-rwx------. 1 247333269 Mar 15 17:29 fp32_graphdef.pb + +``` +`newstest2014.en`: Input file with English text
+`newstest2014.de`: German translation of the input file for measuring accuracy
+`vocab.txt`: A dictionary of vocabulary
+`fp32_graphdef.pb`: Pretrained model
+
+Or, if you have your own model and data, make sure the folder structure matches the layout depicted below so the pretrained model can be run with the Intel Model Zoo.
+
+```
+├── transformer_LT_german
+│   ├── transformer_pretrained_model
+│   ├── data
+│   │   ├── newstest2014.en (input file)
+│   │   ├── newstest2014.de (reference file, this is optional)
+│   │   └── vocab.txt
+│   └── graph
+│       └── pretrained_model.pb
+```
+5. Install [Docker](https://docs.docker.com/v17.09/engine/installation/) since the tutorial runs in a Docker container.
+
+### Run inference
+
+1. Pull the relevant Intel-optimized TensorFlow Docker image.
+   [Click here](https://software.intel.com/en-us/articles/intel-optimization-for-tensorflow-installation-guide) to find all the available Docker images.
+```bash
+docker pull docker.io/intelaipg/intel-optimized-tensorflow:latest
+```
+2. cd to the inference script directory in the local IntelAI repo
+```bash
+cd ~/models/benchmarks
+```
+3. Run the Python script ```launch_benchmark.py``` with the pretrained model.
+The ```launch_benchmark.py``` script can be treated as an entry point for conveniently running out-of-the-box, high-performance
+inference on pretrained models of popular topologies.
+The script will automatically set the recommended run-time options for supported topologies,
+but if you choose to set your own options, refer to the full list of available flags and a detailed
+explanation of the ```launch_benchmark.py``` script [here](/docs/general/tensorflow/LaunchBenchmark.md).
+This step will automatically launch a new container on every run and terminate it afterwards. Go to [Step 4](#step_4) to run the script interactively inside the container.
+
+Substitute `--model-source-dir` with the location where you cloned the
+[tensorflow/models](https://github.com/tensorflow/models.git) repo:
+
+```
+~/tensorflow-models/models
+```
+3.1. *Online inference* (using `--socket-id 0` and `--batch-size 1`)
+
+If you wish to calculate the [BLEU](https://en.wikipedia.org/wiki/BLEU) metric to measure machine-translation quality, pass the reference file through the `reference` flag.
+The `newstest2014.en` file must contain only one sentence per line.
+
+console in:
+```bash
+python launch_benchmark.py \
+    --model-name transformer_lt_official \
+    --precision fp32 \
+    --mode inference \
+    --framework tensorflow \
+    --batch-size 1 \
+    --socket-id 0 \
+    --docker-image intelaipg/intel-optimized-tensorflow:latest \
+    --model-source-dir ~/tensorflow-models/models \
+    --in-graph ~/transformer_LT_german/transformer_lt_official_fp32_pretrained_model/graph/fp32_graphdef.pb \
+    --data-location ~/transformer_LT_german/transformer_lt_official_fp32_pretrained_model/data \
+    -- file=newstest2014.en \
+    vocab_file=vocab.txt \
+    file_out=translate.txt \
+    reference=newstest2014.de
+```
+
+The translated German text will be in the file `translate.txt` located at `~/models/benchmarks/common/tensorflow/logs`.
+
+3.2.
*Batch inference* (using `--socket-id 0` and `--batch-size 64`) + +```bash +python launch_benchmark.py \ + --model-name transformer_lt_official \ + --precision fp32 \ + --mode inference \ + --framework tensorflow \ + --batch-size 64 \ + --socket-id 0 \ + --docker-image intelaipg/intel-optimized-tensorflow:latest \ + --model-source-dir ~/tensorflow-models/models \ + --in-graph ~/transformer_LT_german/transformer_lt_official_fp32_pretrained_model/graph/fp32_graphdef.pb \ + --data-location ~/transformer_LT_german/transformer_lt_official_fp32_pretrained_model/data \ + -- file=newstest2014.en \ + vocab_file=vocab.txt \ + file_out=translate.txt \ + reference=newstest2014.de +``` +console out: +``` +Graph parsed in ..... s +import_graph_def took .....s +tokenizer took ..... s +Translating 3003 sentences from English to German. +Total inferencing time:.... +Throughput:.... sentences/second +Total number of sentences translated:3003 +I0419 22:50:49.856748 140013257643776 compute_bleu.py:106] Case-insensitive results: 27.510020 +I0419 22:50:51.203501 140013257643776 compute_bleu.py:110] Case-sensitive results: 26.964748 +Ran inference with batch size 64 +Log location outside container: /~/models/benchmarks/common/tensorflow/logs/benchmark_transformer_lt_official_inference_fp32_20190419_224047.log +``` + +The logs are captured in a directory outside of the container.
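If you passed the `reference` flag as shown above, the log already reports case-insensitive and case-sensitive BLEU results. If you also want to score the generated file yourself, the sketch below is one way to do it. It is not part of the Model Zoo scripts and assumes the third-party `sacrebleu` package is installed in your environment (`pip install sacrebleu`) and that both files contain one sentence per line in the same order.

```python
# Independent BLEU check on the translated output (a minimal sketch, assuming
# the third-party sacrebleu package; the launch script's own BLEU numbers come
# from compute_bleu.py and may differ slightly due to tokenization).
import io
import os

import sacrebleu

log_dir = os.path.expanduser("~/models/benchmarks/common/tensorflow/logs")
hyp_path = os.path.join(log_dir, "translate.txt")  # file_out from the run above
ref_path = os.path.expanduser(
    "~/transformer_LT_german/transformer_lt_official_fp32_pretrained_model/data/newstest2014.de")

with io.open(hyp_path, encoding="utf-8") as f:
    hypotheses = [line.strip() for line in f if line.strip()]
with io.open(ref_path, encoding="utf-8") as f:
    references = [line.strip() for line in f if line.strip()]

# Score only as many reference sentences as there are hypotheses.
bleu = sacrebleu.corpus_bleu(hypotheses, [references[:len(hypotheses)]])
print("Sentences scored: %d" % len(hypotheses))
print("Case-sensitive BLEU: %.2f" % bleu.score)
```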
+4. If you want to run ```launch_benchmark.py``` interactively from within the docker container, add the flag ```--debug```. This will launch a docker container based on the ```--docker-image```,
+perform the necessary installs, run the ```launch_benchmark.py``` script, and leave the container process running. As an example, this step demonstrates batch inference (`--batch-size 64`), but you can use the same strategy for online inference (`--batch-size 1`).
+
+console in:
+```bash
+python launch_benchmark.py \
+    --model-name transformer_lt_official \
+    --precision fp32 \
+    --mode inference \
+    --framework tensorflow \
+    --batch-size 64 \
+    --socket-id 0 \
+    --docker-image intelaipg/intel-optimized-tensorflow:latest \
+    --model-source-dir ~/tensorflow-models/models \
+    --in-graph ~/transformer_LT_german/transformer_lt_official_fp32_pretrained_model/graph/fp32_graphdef.pb \
+    --data-location ~/transformer_LT_german/transformer_lt_official_fp32_pretrained_model/data \
+    --debug \
+    -- file=newstest2014.en \
+    vocab_file=vocab.txt \
+    file_out=translate.txt \
+    reference=newstest2014.de
+```
+console out:
+```bash
+    lscpu_path_cmd = command -v lscpu
+    lscpu located here: b'/usr/bin/lscpu'
+    root@a78677f56d69:/workspace/benchmarks/common/tensorflow#
+```
+
+To rerun the benchmarking script, execute the ```start.sh``` bash script from your existing directory with the available flags, which in turn will run ```launch_benchmark.py```. For example, to rerun with a different batch size (batch size=64), set ```BATCH_SIZE```,
+and to skip reinstalling packages, pass ```True``` to ```NOINSTALL```.
+
+```bash
+    chmod +x ./start.sh
+```
+```bash
+    NOINSTALL=True BATCH_SIZE=64 ./start.sh
+```
+
+All other flags will default to the values passed to the first ```launch_benchmark.py``` run that started the container. [See here](/docs/general/tensorflow/LaunchBenchmark.md) to get the full list of flags.
+
diff --git a/docs/language_translation/tensorflow_serving/Tutorial.md b/docs/language_translation/tensorflow_serving/Tutorial.md
new file mode 100644
index 000000000..c584495c1
--- /dev/null
+++ b/docs/language_translation/tensorflow_serving/Tutorial.md
@@ -0,0 +1,211 @@
+
+# Language Translation with TensorFlow Serving on CPU using Transformer-LT
+
+## Goal
+
+This tutorial will introduce you to the CPU performance considerations for language translation and how to use [Intel® Optimizations for TensorFlow Serving](https://www.tensorflow.org/serving/) to improve inference time on CPUs.
+This tutorial uses a pre-trained [Transformer-LT](https://arxiv.org/pdf/1706.03762.pdf) model for translating English to German and a sample of English news excerpts.
+We provide sample code that you can use to get your optimized TensorFlow model server and GRPC client up and running quickly.
+In this tutorial using Transformer-LT, you will measure inference performance in two situations:
+* **Online inference**, where batch_size=1. The metric of interest is latency, so a lower number means better runtime performance.
+* **Batch inference**, where batch_size>1. The metric of interest is throughput, so a higher number means better runtime performance.
+
+**NOTE about GRPC vs. REST**: It [has been suggested](https://medium.com/@avidaneran/tensorflow-serving-rest-vs-grpc-e8cef9d4ff62) that GRPC has faster client-side serialization and de-serialization than REST, especially if you are optimizing for batch inference.
+Please note, however, that this tutorial is focused on optimizing the model server, not the client that sends requests.
+We use GRPC in this tutorial for illustration, not as a best practice, and offer another [tutorial](/docs/object_detection/tensorflow_serving/Tutorial.md) that illustrates the use of the REST API with TensorFlow Serving, if you are interested in that protocol. + +## Prerequisites + +This tutorial assumes you have already: +* [Installed TensorFlow Serving](/docs/general/tensorflow_serving/InstallationGuide.md) +* Read and understood the [General Best Practices](/docs/general/tensorflow_serving/GeneralBestPractices.md), + especially these sections: + * [Performance Metrics](/docs/general/tensorflow_serving/GeneralBestPractices.md#performance-metrics) + * [TensorFlow Serving Configuration Settings](/docs/general/tensorflow_serving/GeneralBestPractices.md#tensorflow-serving-configuration-settings) +* Ran an example end-to-end using a GRPC client, such as the [one in the Installation Guide](/docs/general/tensorflow_serving/InstallationGuide.md#option-2-query-using-grpc) + +## Background + +The Transformer-LT model is a popular solution for language translation. +It is based on an encoder-decoder architecture with an added attention mechanism. +The encoder is used to encode the original sentence to a meaningful fixed-length vector, and the decoder is responsible for extracting the context data from the vector. +The encoder and decoder process the inputs and outputs, which are in the form of a time sequence. + +In a traditional encoder/decoder model, each element in the context vector is treated equally, but this is typically not the ideal solution. +For instance, when you translate the phrase ā€œI travel by trainā€ from English into Chinese, the word ā€œIā€ has a greater influence than other words when producing its counterpart in Chinese. +Thus, the attention mechanism was introduced to differentiate contributions of each element in the source sequence to their counterpart in the destination sequence, through the use of a hidden matrix. +This matrix contains weights of each element in the source sequence when producing elements in the destination sequence. + +[IntelĀ® Math Kernel Library for Deep Neural Networks (IntelĀ® MKL-DNN)](https://github.com/intel/mkl-dnn) offers significant performance improvements for many neural network operations. +Tuning TensorFlow Serving to take full advantage of your hardware for language translation inference involves: +1. Running a TensorFlow Serving docker container configured for performance given your hardware resources +2. Running a GRPC client to verify prediction accuracy and measure online and batch inference performance +3. Experimenting with the TensorFlow Serving settings on your own to further optimize for your model and use case + +## Hands-on Tutorial with pre-trained Transformer-LT (Official) model + +1. **Clone this repository**: Clone the [intelai/models](https://github.com/intelai/models) repository into your home directory. + + ``` + cd ~ + git clone https://github.com/IntelAI/models.git + ``` + +2. **Clone the tensorflow/models repository**: Tokenization of the input data requires utility functions in a specific commit of the tensorflow/models repository. + + ``` + cd ~ + mkdir tensorflow-models + cd tensorflow-models + git clone https://github.com/tensorflow/models.git + cd models + git checkout 8367cf6dabe11adf7628541706b660821f397dce + ``` + + Now add the required directory to the `PYTHONPATH` variable: + + ``` + export PYTHONPATH=$PYTHONPATH:$(pwd)/official/transformer + ``` + +3. 
**Set up the client environment**: We need to create a virtual environment for this tutorial. + + - We will use a virtual environment to install the required packages. If you do not have pip or virtualenv, you will need to get them first: + + ``` + sudo apt-get install -y python python-pip virtualenv + ``` + + - Create and activate the python virtual environment in your home directory and install the `tensorflow`, `pandas`, and `tensorflow-serving-api` packages. + + ``` + cd ~ + virtualenv lt_venv + source lt_venv/bin/activate + pip install intel-tensorflow pandas tensorflow-serving-api + ``` + +4. **Download the pre-trained model and test data**: Download and extract the packaged pre-trained model and dataset ```transformer_lt_official_fp32_pretrained_model.tar.gz``` + (refer to the [model README](/benchmarks/language_translation/tensorflow/transformer_lt_official) to get the latest location of this archive). + + ``` + wget https://storage.googleapis.com/intel-optimized-tensorflow/models/transformer_lt_official_fp32_pretrained_model.tar.gz + tar -xzvf transformer_lt_official_fp32_pretrained_model.tar.gz + ``` + + After extraction, you should see the following folders and files in the `transformer_lt_official_fp32_pretrained_model` directory: + + ``` + $ ls -l transformer_lt_official_fp32_pretrained_model/* + + transformer_lt_official_fp32_pretrained_model/data: + total 1064 + -rw-r--r--. 1 359898 Feb 20 16:05 newstest2014.en + -rw-r--r--. 1 399406 Feb 20 16:05 newstest2014.de + -rw-r--r--. 1 324025 Mar 15 17:31 vocab.txt + + transformer_lt_official_fp32_pretrained_model/graph: + total 241540 + -rwx------. 1 247333269 Mar 15 17:29 fp32_graphdef.pb + ``` + + - `newstest2014.en`: Input file with English text + - `newstest2014.de`: German translation of the input file for measuring accuracy + - `vocab.txt`: Dictionary of vocabulary + - `fp32_graphdef.pb`: Pre-trained model + +5. **Create a SavedModel**: Using the conversion script `transformer_graph_to_saved_model.py`, convert the pre-trained model graph to a SavedModel. + + ``` + cd ~/models/docs/language_translation/tensorflow_serving + python transformer_graph_to_saved_model.py --import_path ~/transformer_lt_official_fp32_pretrained_model/graph/fp32_graphdef.pb + ``` + + This will create a `/tmp/1/` directory with a `saved_model.pb` file in it. This is the file we will serve from TensorFlow Serving. + The [`transformer_graph_to_saved_model.py`](transformer_graph_to_saved_model.py) script attaches a signature definition to the model in order to make it compatible with TensorFlow Serving. + You can take a look at the script, its flags/options, and these resources for more information: + * [SavedModel](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/python/saved_model) + * [SignatureDefs](https://www.tensorflow.org/serving/signature_defs) + +6. **Discover the number of physical cores**: Compute *num_physical_cores* by executing the `lscpu` command and multiplying `Core(s) per socket` by `Socket(s)`. + For example, for a machine with `Core(s) per socket: 28` and `Socket(s): 2`, `num_physical_cores = 28 * 2 = 56`. + To compute *num_physical_cores* with bash commands: + ``` + cores_per_socket=`lscpu | grep "Core(s) per socket" | cut -d':' -f2 | xargs` + num_sockets=`lscpu | grep "Socket(s)" | cut -d':' -f2 | xargs` + num_physical_cores=$((cores_per_socket * num_sockets)) + echo $num_physical_cores + ``` + +7. 
**Recommended Settings**: To optimize overall performance, start with the following settings from the [General Best Practices](/docs/general/tensorflow_serving/GeneralBestPractices.md). + Playing around with these settings can improve performance even further, so you should experiment with your own hardware and model if you have strict performance requirements. + + | Options | Recommendations| + | ------------- | ------------- | + |TENSORFLOW_INTER_OP_PARALLELISM | 2 | + |TENSORFLOW_INTRA_OP_PARALLELISM| Number of physical cores | + |OMP_NUM_THREADS |Number of physical cores| + | Batch Size | 64 | + +8. **Start the server**: We can now start up the TensorFlow model server. Using `-d` (for "detached") runs the container as a background process. + + ``` + cd ~ + docker run \ + --name=tfserving \ + -d \ + -p 8500:8500 \ + -v "/tmp:/models/transformer" \ + -e MODEL_NAME=transformer \ + -e OMP_NUM_THREADS=$num_physical_cores \ + -e TENSORFLOW_INTER_OP_PARALLELISM=2 \ + -e TENSORFLOW_INTRA_OP_PARALLELISM=$num_physical_cores \ + tensorflow/serving:mkl + ``` + + You can make sure the container is running using the `docker ps` command. + +9. **Online and batch performance**: Run `transformer_benchmark.py` [python script](/docs/language_translation/tensorflow_serving/transformer_benchmark.py), which can measure both online and batch performance. + + If you are not already there, go to the tutorial directory: + ``` + cd ~/models/docs/language_translation/tensorflow_serving + ``` + + **Online Inference** (batch_size=1): + ``` + python transformer_benchmark.py \ + -d ~/transformer_lt_official_fp32_pretrained_model/data/newstest2014.en \ + -v ~/transformer_lt_official_fp32_pretrained_model/data/vocab.txt \ + -b 1 + ``` + + **Batch Inference** (batch_size=64): + ``` + python transformer_benchmark.py \ + -d ~/transformer_lt_official_fp32_pretrained_model/data/newstest2014.en \ + -v ~/transformer_lt_official_fp32_pretrained_model/data/vocab.txt \ + -b 64 + ``` + + Note: If you want an output file of translated sentences, set the `-o` flag to an output file name of your choice. + If this option is set, the script will take a significantly longer time to finish. + +10. **Clean up**: + * After you are finished sending requests to the server, you can stop the container running in the background. To restart the container with the same name, you need to stop and remove the container from the registry. To view your running containers run `docker ps`. + + ``` + docker rm -f tfserving + ``` + + * Deactivate your virtual environment with `deactivate`. + + +## Conclusion +You have now seen an end-to-end example of serving a language translation model for inference using TensorFlow Serving, and learned: +1. How to create a SavedModel from a Transformer-LT TensorFlow model graph +2. How to choose good values for the performance-related runtime parameters exposed by the `docker run` command +3. How to test online and batch inference metrics using a GRPC client + +With this knowledge and the example code provided, you should be able to get started serving your own custom language translation model with good performance. +If desired, you should also be able to investigate a variety of different settings combinations to see if further performance improvements are possible. 
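As an optional sanity check on the artifacts from this tutorial, you can load the SavedModel created in step 5 back into a TensorFlow 1.x session and print its signature definitions before (or after) serving it. This is a minimal sketch, not one of the tutorial scripts; it assumes the model was exported to `/tmp/1` as in step 5 and that the same Python environment used for the conversion is active.

```python
# Minimal sketch: confirm the exported SavedModel exposes the signature that
# the gRPC client expects. Assumes the export directory /tmp/1 from step 5.
import tensorflow as tf

export_dir = '/tmp/1'

with tf.Session(graph=tf.Graph()) as sess:
    meta_graph = tf.saved_model.loader.load(
        sess, [tf.saved_model.tag_constants.SERVING], export_dir)
    for name, signature in meta_graph.signature_def.items():
        print('signature: %s' % name)
        for key, info in signature.inputs.items():
            print('  input  %s -> %s' % (key, info.name))
        for key, info in signature.outputs.items():
            print('  output %s -> %s' % (key, info.name))
```

You should see a `serving_default` signature whose `input` and `output` entries point at the graph tensors named in `transformer_graph_to_saved_model.py`.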
diff --git a/docs/language_translation/tensorflow_serving/transformer_benchmark.py b/docs/language_translation/tensorflow_serving/transformer_benchmark.py new file mode 100644 index 000000000..a5cf43654 --- /dev/null +++ b/docs/language_translation/tensorflow_serving/transformer_benchmark.py @@ -0,0 +1,181 @@ +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + + +from __future__ import print_function + +import os +import sys +import time +import argparse +import grpc +import numpy as np +import pandas as pd +import tensorflow as tf + +from tensorflow_serving.apis import predict_pb2 +from tensorflow_serving.apis import prediction_service_pb2_grpc + +from utils import tokenizer +from utils.tokenizer import Subtokenizer + +def check_for_link(value): + """ + Throws an error if the specified path is a link. os.islink returns + True for sym links. For files, we also look at the number of links in + os.stat() to determine if it's a hard link. + """ + if os.path.islink(value) or \ + (os.path.isfile(value) and os.stat(value).st_nlink > 1): + raise argparse.ArgumentTypeError("{} cannot be a link.".format(value)) + +def check_valid_file_or_folder(value): + """verifies filename exists and isn't a link""" + if value is not None: + if not os.path.isfile(value) and not os.path.isdir(value): + raise argparse.ArgumentTypeError("{} does not exist or is not a file/folder.". + format(value)) + check_for_link(value) + return value + +def input_generator_ts(file_path, vocab_file): + """Read and sort lines based on token count from the file + sorted by decreasing length based on token sorting. + + Args: + file_path: String path of file to read + vocab_file: String path of vocab file + Returns: + Sorted list of inputs, and dictionary mapping original index->sorted index + of each element. 
+ """ + with tf.gfile.Open(file_path) as f: + records = f.read().split("\n") + inputs = [record.strip() for record in records] + if not inputs[-1]: + inputs.pop() + + subtokenizer = Subtokenizer(vocab_file) + + batch = [] + token_lens = [] + for i, line in enumerate(inputs): + enc = subtokenizer.encode(line, add_eos=True) + token_lens.append((i, len(enc))) + + sorted_by_token_input_lens = sorted(token_lens, key=lambda x: x[1], reverse=True) + sorted_inputs = [None] * len(sorted_by_token_input_lens) + sorted_keys = [0] * len(sorted_by_token_input_lens) + + for i, (index, _) in enumerate(sorted_by_token_input_lens): + sorted_inputs[i] = inputs[index] + sorted_keys[index] = i + enc = subtokenizer.encode(sorted_inputs[i], add_eos=True) + batch.append(enc) + + return batch, sorted_keys + +def _trim_and_decode(ids, vocab_file): + """Trim EOS and PAD tokens from ids, and decode to return a string.""" + subtokenizer = Subtokenizer(vocab_file) + try: + index = list(ids).index(tokenizer.EOS_ID) + return subtokenizer.decode(ids[:index]) + except ValueError: # No EOS found in sequence + return subtokenizer.decode(ids) + +def benchmark(batch_size=1, num_iteration=20, warm_up_iteration=10): + channel = grpc.insecure_channel(SERVER_URL) + stub = prediction_service_pb2_grpc.PredictionServiceStub(channel) + request = predict_pb2.PredictRequest() + request.model_spec.name = 'transformer' + request.model_spec.signature_name = 'serving_default' + + batches, sorted_keys = input_generator_ts(DATA_FILE, VOCAB_FILE) + + translations = [] + batch = [] + inference_time = 0.0 + sentences_to_translate = min(batch_size * num_iteration, len(batches)) + sentences_after_warmup = 0 + + for i, line in enumerate(batches[0:sentences_to_translate]): + batch.append(line) + if (i + 1) % batch_size == 0 or i == sentences_to_translate - 1: + batch_num = (i // batch_size) + 1 + request.inputs['input'].CopyFrom( + tf.contrib.util.make_tensor_proto(pd.DataFrame(batch).fillna(0).values.astype(np.int64))) + start_time = time.time() + result = stub.Predict(request) + duration = time.time() - start_time + shape = [int(dim.size) for dim in result.outputs['output'].tensor_shape.dim] + translations += np.reshape(result.outputs['output'].int_val, shape).tolist() + print('Iteration %d: %.3f sec' % (batch_num, duration)) + if batch_num > warm_up_iteration: + inference_time += duration + sentences_after_warmup += len(batch) + batch = [] + + average_time = inference_time / sentences_after_warmup + print('Inferencing time: %s' % (inference_time)) + print('Batch size = %d' % batch_size) + if batch_size == 1: + print('Latency: %.3f ms' % (average_time * 1000)) + print('Throughput: %.3f sentences/sec' % (sentences_after_warmup / inference_time)) + + if OUT_FILE: + print('Decoding and saving translations to {}...'.format(OUT_FILE)) + decoded_translations = [] + for i, tr in enumerate(translations): + decoded_translations.append(_trim_and_decode(tr, VOCAB_FILE)) + + with tf.gfile.Open(OUT_FILE, "w") as f: + for i in sorted_keys: + if i < len(decoded_translations): + f.write("%s\n" % decoded_translations[i]) + print('Done!') + +if __name__ == '__main__': + ap = argparse.ArgumentParser() + ap.add_argument("-d", "--data_file", type=check_valid_file_or_folder, required=True, + help="Path to English language input file") + ap.add_argument("-v", "--vocab_file", type=check_valid_file_or_folder, required=True, + help="Path to vocabulary file") + ap.add_argument("-o", "--out_file", type=str, required=False, default='', + help="Path to output file (optional") 
+ ap.add_argument("-b", "--batch_size", required=False, type=int, default=1, + help="Batch size to use") + ap.add_argument("-n", "--num_iteration", required=False, type=int, default=20, + help="Number of times to repeat") + ap.add_argument("-w", "--warm_up_iteration", required=False, type=int, default=10, + help="Number of initial iterations to ignore in benchmarking") + + args = vars(ap.parse_args()) + + SERVER_URL = 'localhost:8500' + DATA_FILE = args['data_file'] + VOCAB_FILE = args['vocab_file'] + OUT_FILE = args['out_file'] + BATCH_SIZE = args['batch_size'] + NUM_ITERATION = args['num_iteration'] + WARM_UP_ITERATION = args['warm_up_iteration'] + + tf.logging.set_verbosity(tf.logging.WARN) + + print('\n SERVER_URL: {} \n DATA_FILE: {}'.format(SERVER_URL, DATA_FILE)) + + print('\nStarting Transformer-LT (Official) model benchmarking for Latency with batch_size={}, num_iteration={}, warm_up_iteration={}'.format(BATCH_SIZE, NUM_ITERATION, WARM_UP_ITERATION)) + benchmark(batch_size=BATCH_SIZE, num_iteration=NUM_ITERATION, warm_up_iteration=WARM_UP_ITERATION) + diff --git a/docs/language_translation/tensorflow_serving/transformer_graph_to_saved_model.py b/docs/language_translation/tensorflow_serving/transformer_graph_to_saved_model.py new file mode 100644 index 000000000..c5cc250ce --- /dev/null +++ b/docs/language_translation/tensorflow_serving/transformer_graph_to_saved_model.py @@ -0,0 +1,87 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +"""Import a Transformer-LT model graph and export a SavedModel. + +Usage: transformer_graph_to_saved_model.py [--model_version=y] import_path export_dir +""" + +from __future__ import print_function + +import sys +import tensorflow as tf + +tf.app.flags.DEFINE_integer('model_version', 1, 'Version number of the model.') +tf.app.flags.DEFINE_string('import_path', '', 'Model import path.') +tf.app.flags.DEFINE_string('export_dir', '/tmp', 'Export directory.') +FLAGS = tf.app.flags.FLAGS + + +def main(_): + if len(sys.argv) < 2 or sys.argv[-1].startswith('-'): + print('Usage: transformer_graph_to_saved_model.py [--model_version=y] import_path export_dir') + sys.exit(-1) + if FLAGS.import_path == '': + print('Please specify the path to the model graph you want to convert to SavedModel format.') + sys.exit(-1) + if FLAGS.model_version <= 0: + print('Please specify a positive value for version number.') + sys.exit(-1) + + # Import model graph + with tf.Session() as sess: + graph_def = tf.GraphDef() + with tf.gfile.GFile(FLAGS.import_path, 'rb') as input_file: + input_graph_content = input_file.read() + graph_def.ParseFromString(input_graph_content) + + sess.graph.as_default() + tf.import_graph_def(graph_def, name='') + sess.run(tf.global_variables_initializer()) + + # Build the signature_def_map. 
+ in_data = sess.graph.get_tensor_by_name('input_tensor:0') + inputs = {'input': tf.saved_model.utils.build_tensor_info(in_data)} + + out_data = sess.graph.get_tensor_by_name('model/Transformer/strided_slice_19:0') + outputs = {'output': tf.saved_model.utils.build_tensor_info(out_data)} + + signature = tf.saved_model.signature_def_utils.build_signature_def( + inputs=inputs, + outputs=outputs, + method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME + ) + + # Save out the SavedModel + print('Exporting trained model to', FLAGS.export_dir + '/' + str(FLAGS.model_version)) + builder = tf.saved_model.builder.SavedModelBuilder(FLAGS.export_dir + '/' + str(FLAGS.model_version)) + builder.add_meta_graph_and_variables( + sess, [tf.saved_model.tag_constants.SERVING], + signature_def_map={ + tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature + } + ) + builder.save() + + print('Done!') + + +if __name__ == '__main__': + tf.app.run() diff --git a/docs/object_detection/tensorflow_serving/ObjectDetection.ipynb b/docs/object_detection/tensorflow_serving/ObjectDetection.ipynb new file mode 100644 index 000000000..5e975ae0c --- /dev/null +++ b/docs/object_detection/tensorflow_serving/ObjectDetection.ipynb @@ -0,0 +1,322 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Object Detection: R-FCN and SSD-MobileNet" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "from __future__ import print_function\n", + "\n", + "import os\n", + "import time\n", + "import random\n", + "import numpy as np\n", + "import tensorflow as tf\n", + "from PIL import Image\n", + "\n", + "from object_detection.utils.visualization_utils import visualize_boxes_and_labels_on_image_array\n", + "\n", + "%matplotlib inline\n", + "import matplotlib\n", + "from matplotlib import pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "MODEL = 'rfcn' # Use 'rfcn' for R-FCN or 'ssdmobilenet' for SSD-MobileNet\n", + "PROTOCOL = 'grpc' # Use 'grpc' for GRPC or 'rest' for REST\n", + "IMAGES_PATH = '/home//coco/val/val2017' # Edit this to your COCO validation directory" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "if PROTOCOL == 'grpc':\n", + " import grpc\n", + " import tensorflow as tf\n", + " from tensorflow_serving.apis import predict_pb2\n", + " from tensorflow_serving.apis import prediction_service_pb2_grpc\n", + " SERVER_URL = 'localhost:8500'\n", + "elif PROTOCOL == 'rest':\n", + " import requests\n", + " SERVER_URL = 'http://localhost:8501/v1/models/{}:predict'.format(MODEL)" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "def get_random_image(image_dir):\n", + " image_path = os.path.join(image_dir, random.choice(os.listdir(image_dir)))\n", + " image = Image.open(image_path)\n", + " (im_width, im_height) = image.size\n", + " return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)\n", + "\n", + "def visualize(output_dict, image_np):\n", + " new_dict = {}\n", + " if PROTOCOL == 'grpc':\n", + " new_dict['num_detections'] = int(output_dict['num_detections'].float_val[0])\n", + " new_dict['detection_classes'] = np.array(output_dict['detection_classes'].float_val).astype(np.uint8)\n", + " new_dict['detection_boxes'] = 
np.array(output_dict['detection_boxes'].float_val).reshape((-1,4))\n", + " new_dict['detection_scores'] = np.array(output_dict['detection_scores'].float_val)\n", + " new_dict['instance_masks'] = np.array(output_dict['instance_masks'].float_val)\n", + " elif PROTOCOL == 'rest':\n", + " new_dict['num_detections'] = int(output_dict['num_detections'])\n", + " new_dict['detection_classes'] = np.array(output_dict['detection_classes']).astype(np.uint8)\n", + " new_dict['detection_boxes'] = np.array(output_dict['detection_boxes'])\n", + " new_dict['detection_scores'] = np.array(output_dict['detection_scores'])\n", + "\n", + " # Visualize the results of a detection\n", + " visualize_boxes_and_labels_on_image_array(\n", + " image_np,\n", + " new_dict['detection_boxes'],\n", + " new_dict['detection_classes'],\n", + " new_dict['detection_scores'],\n", + " {1: {'id': 1, 'name': 'object'}}, # Empty category index\n", + " instance_masks=None,\n", + " use_normalized_coordinates=True,\n", + " line_thickness=8)\n", + " plt.figure()\n", + " plt.imshow(image_np)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Test Object Detection" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "batch_size = 1\n", + "np_image = get_random_image(IMAGES_PATH)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "WARNING: The TensorFlow contrib module will not be included in TensorFlow 2.0.\n", + "For more information, please see:\n", + " * https://github.com/tensorflow/community/blob/master/rfcs/20180907-contrib-sunset.md\n", + " * https://github.com/tensorflow/addons\n", + "If you depend on functionality not listed there, please file an issue.\n", + "\n" + ] + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAXEAAAD8CAYAAACB3pQWAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi40LCBodHRwOi8vbWF0cGxvdGxpYi5vcmcv7US4rQAAIABJREFUeJzsnHl8ZVWV7797n3PunJt5TqpS80DNFAVVzKKCiCD2U5DWBrUFRZyfA90tTT9xaMQBxQnFRgRUkGYSUOa5KGqghlRqTlKZ59yb3Pmcs/f749ybSlIptN/n+Wn9dNbncz83N/ucPZ19fnut31p7Ca01szIrszIrs/K3KfK/uwOzMiuzMiuz8v8usyA+K7MyK7PyNyyzID4rszIrs/I3LLMgPiuzMiuz8jcssyA+K7MyK7PyNyyzID4rszIrs/I3LH8xEBdCXCCEOCCEOCyE+PJfqp1ZmZVZmZX/ySL+EnHiQggDOAi8DegCtgLv11q3/H9vbFZmZVZm5X+w/KU08Q3AYa11q9Y6B/wGuOQv1NaszMqszMr/WDH/QvXWA52TfncBp57o4tLyCl1XVU1vTzcOGYoraxkc6sXJ5MCVmKbEdVwa586j2B+hf2yE+OggCpfqokp8GAw4KYpFgJLaKrpHBxnp66I0WEraSWAJiRUwse0sWgjsjMKVIDHQOPgMC6U0jnYwMJG45LRCaYFEInAIBn0oQ2JoSTqdAmEhtQuYmNIl62q0MBDaQRoWmZyN0OAzfQjtgJBorfH7BVlHEzADhKJhYiOjCO0SDlikchrHcXCVi9/nx1UKaZgoN4vAj0EO6TPQ0sAUBol0CgMTn0+ihcZQBtqAdDpLwOfDxsUyLaTSIF1y2SxCWPh8PlwXNArXsRGGhe3kkNrAMsHSkJXgOAIhFD6hwTLxC0E6qwgUmWghqK9bjOEKlHDJZbJoQxDyBXFsF4VGokAIEAZaaaRwAR9COijXQEiVXwECAWgBovAfIdBae7+FmGHVHC9CgGdZFq7XE3WBVybepK6p905tdsJiFaC1RAoNaASSjs5DYLpUVS/CJ6XXrAaNRiNBawxZ6M2b1I3Ij+HE13nzMmPvJ10/8xgnz8N/pexY+zPV+ee3o5U3J1K++fOcPB8q/+fUW6Y/x2PXT25S5zv9Zs98UqNo4bWntcjP4AnmQkz5QqApLOBCm96z1CAEYuqyOr7aE3TvjTd2DGmtK/9U1/9SIP4nRQhxNXA1gOEzaS0Z5oFr7+bLh69n5ZEFbMm209dYjN1tMn8xhMZsOsNZIiMua983j67Xklx1zef5jwf+wNHeI9x01sdY87n38b2Hv4zxoz1E6qPMr4+i+iM0VlfzxOs7qS2qpDMxxNsvPI19bUeJDyapLCnCHUsxt9yg+uTzuOv+31ASjlAXSuOqBlLxEc48dRE9yQHKfGW8urWFupI6Kiotxrr7GSgpZc68Ojr3tFAVCTGYylFh+hmWNgCheAqzsprGogDDoSwNqWJOXtzE84d3IMoWkk0MY8SGaE+MUzN3Lundh4hHq6mzNf6iEEkSVFaXosazXPf+D7Kv4wi72w6yIlDC0dEcc5Yt5MlXd6Pig6Srilm7ooG2ln4aIgs4Ot6N3dzDsrPmUp4JsSXRQWZAUFLiY8jNctG61WzvP8yenV2sK1lM2u4gMa+BkvEMi8uj7GrtpG00wKqGGi47+0x++dRvUEUBVtbMZXd/H41RxVnvuIKh7q0YfW2saHwvD776Pb7w4834ksOISANRDKIBH1m/JjAyhFtcjZWRZM00QlqAQko5AcCGYXhAJcB7B8TEJ79u0FofBxBC6knAbyCkBhw8Zm/KuptST0EKdTo2mJZESgmoKcCvtcbSBtoSCFdhWD5SMs233n8VsfmattYu7rvnFXLJLEILpGmQ1BLh5ghIgTSO9UEIPandfP1KTLTn9X/SZiYEoBDCu77wf6UUUvjQuAihJ+bK6//UMc807kIf/hSIK6XI5RwcW2FYJpZlYJkmQnh9mGmjnPycMmkHKSWWz0ApZ0r/JvexcI/WgpwL8WSOYCBEwHCQUiBQ+XkQM9wncNypa2RiPqc/b5VfU0LlfxukHIWtDBylERIsofAbXv1STN4MvHuU9uqU+WcppUQpBUiEEBjC9cap9IzrTYipz6ZQXuhvpChwdMaHMU3+Upz4RuBGrfX5+d/X5zv5jZmuX7f+ZP3ilu188oJLubPtIeZmSnBrSrno5A1owjz72gOsaZjL7v5hikcV6UgKpyrEaWddxMjrD2EmitjWNUj9KfMxxmEsMczOH7Zx6g1vR+w6zKF4J6tWreJg3wBSWmRNH4t9YcatNG5MMkaMIkY4d+NV/OLun+MPlxENO/T1x9m0cR52Ikf52pMoiSvSEYf/fOEVVkYtxu0QOFniAyaL60sZVaMMdmeJ1kUZGk5TZAYoWl7NIjmHR5/+PSvOOolaUnTGRgkV15EaGccyEwRkDd29RzjnsstZm6tml9XH5vseQ/mziJCJGShmfsik7cA4RbVhEsU+GMyx6awlbNu6i0QgzDJRzd7De1mwpomTfAkS8122vJTFdW0C/gjoJH43ylhKY5VAoifOvIYI80+qprW9FT2iORRzKS0xCIUM/NVlnF++gFv/sI+T1tQw3n6Y0YzLNe/bxDOvdtNQV0ePHGSx45A2BE5kEcmBFj771TtxVQ3rl5bQMZzAzDhYpo9E2OCHt/+EKilYEhjkkvffRDyXo/BCCgFS5DUZ6YFQAcTza2jy+vJeHimPLfzJIC70lGun/30iwPG+xQSAA7iuO7GxKKXIKYGWGQI6iOE3+MKt3yRq91LUUck//ODzlLohspkMhrTQAjIKcukcAb+FKZz8hiUm6vfazQOalvmxuBPADnnAkToP0npKf71+yYl5LNT/p8Y9Wf48C4U8WHs7kdeOBinQrppy7WTAKnxnMy4+v4m3sYqJjWh6OzoPjLajyTpgK03Q58cU9owgXpg7VymUAuUKtBRTxjrTeAtSWEO2bZB0QUgTUyiEdPEZgoB0yQ9+Upvet60Vri5sCAaGVFM2DaMA/HlzQjH92R3rkyho7JOkKBrcrrVef8IHM2UG/v/LVmCREGKeEMIHXA48cqKLk+kkH35HGYNWO1X+pdz78xd4+M7n6U6N8NiD9xNLCEazgr5ECjMUIlA0h9beDPsffYKWfcXs6SpmfqgMgY+O/v1ktObCG87h0LbNbNGdNNSH6dyzF5VIo4eGSQy1kfENkOjqIRfposxvkC1q5Jf3/Yyy0grMgCI2FqdpeRQRydE5HGP/71s4sHeE1jdaWF5ezMlzTqE/NoQVDKHCaU47eyVRsxrIEIsPUVUSoLuvnxXBRpqz7Xznh7eyZn4ZL/WPMSe8kFRuHHu0CxUPMpw4ynsuPIPh1+7nns0/p3h4iMGhEbKJJGosTHVpmN7OGEWLfYz0jpDrHmJuNMXOp3bTFRsjmskxcGQ7DQslpnJ4rM9m78N+ltcsZPmS04jv7eJn1/2GnrYeigKDrC5LsKZO8Mq+Tg4dSRKUId
RYkqqKesoyYUKVVaj+UTbTxdvOqyfXOwapEYZMk+/f0cwF5yymPpLj6MFetvYbZI0yWlo3Y5c7JHf0sXFpFa0v7sSXDSOkxatbnuT7N36C+p4nObM0yvYxg5TPf9w6cNHHNHA98ws4XXOZDFjHfk8F8sn3zQRWBfDzPkzcO12jBfBnFQFXIIXFDd/6AmW+l2neuZ21Jzcx+IttoASe8pXDtm1wXMAgkwFHH6ur0J5hGJgmGIZAGiqvgXtANVmT9MYn0NrIA7xECMMDD+N40JpJOTvR/98MwAtzYBgGhmEgJZimB+CuVriue1z9M4GmaQE4E5rq9P4UANx1NK6jyTngIohYBkGyJ+idRLl4HwVKaWzl1aOUQik1pf7COCc2oIJGrRQ5V+E4kLEVAoeAYROQDgLXW1MGHqcjPStRkbeg8lq2lJM3B4XW7gnnZMIyUHLio9zC9nTs8+fKX0QTz3f0QuB7gAH8Qmv9tRNdu2LFGn39927ikjkXcd3mb6PvPsKDXb/joqJVBFc0sH/ZCO23vYThOmRVBggRqSgmFetHa8kZ9dCSEaSNUkLCZCiRwjI0oZAJ6QwpR6KVSSw+TmVJFDUwgFMUQYkQrqlZsSzM9pY+wpbFSCqDTFmE/RaxdIZGK8CAP4nMCErCFr0ixxIZxq2WVNuaUr+PDlGJHHY5PNxKTWUF3cqm3lboMk2sbZSNazeQ1D6SRzsZDseJZV2WN9ZQUlJC2lZYmR62dic5ZUEjDeUluLlxlA2itJEnHnuG8rIyjsa7qSyOUhwoojWZozgbwLETlEcDZNwo/oifsXSMhfOraN98hHu/ez8f/e6lhP31pJ0cScNkWU2UHTtaqS6L0hvrY/3y82jr3U5kQS3ZrYOc/s7z2b99C7sPDLP07FIscox0pzlp7UKC/aXUn3s2j9z5S4zgEI4OsLA4QDxXDyWtLFv3Ds6b+y5uvu9KlkU28S9f/h6hhipe3L+TbOdmYjsFD3a8ym03fpaF5avoyzoY2tPKkAKfFcB1cvm1wzFKpUAPMAmEhacVGcbx4HMi0J8O9JOvMwxjosxUkqTIEjFMskqBE0AGYliqhJxKYRMircZIp5P09To8+YsvM/c9V/O/Np5NYjBOaagYW4+T0g4+oxThJrz2LAOZc9CGiWmaFF7TgiY4FcymjkFpIw9++X5LiZBgoEF7JrurDAQuhlSeDyJvqUgpcV13ipbu1XVsfqZbJDPRV4Xfk+89EXbMBOKF+44BKYDp1a09LdpFo/II5ih3whqTUiINd2KDLvhKtPZh5+vWAhxHYQgz305+Pl0bUxoImW8LlfdyuRPjdF2XhGshtMJvGN66lAVqjzwgyynPSCmFKwyU61EopsVE3yQCU0omk99aaxTGlLlR6AllRSh9zErLUzwlxaH/Vk0crfXjWuvFWusFbwbgAAc6D7A03sTHfnEd2/9wK6XZNCsCFTwee5b3Lj+f1js341OCUFM5q9esorjKRyqTIBgMsnBRI6+IMJFcGdlEjp6OTnTWpiQUJYnECpQyphx8aZsVdXVIQ1G8cTn+iET6ByGrkXYl/3zFVygtaaIyUkumVBMKGBTVhClriFLl87Fq1SLC9RWsX74GVVbM6vR6Xux2eH1/nCojiKX2U9nQSOPCjaQyDoGIIhnXXPv3/8SrrS10HO6DucX8/RkfpaRUIpwUpcF+GqMOrT1jvHtpBa9taaZ5Xx/DQzHsuEPz1j1c8L73MyIU8+uWoZWPpUsXo/oHCZcImhbNQ4b84E+SVmnqamrY27KfquWlfOYrl+C3olQGl9Ax1EG9E6Is7pCUGc582xI2zqmhM32IJlFGbGucBetqeeLxh0ngQwR6KXPmM9xRxNK6Zex5rQ1HZMgefo7c6CHGxhU+bWNkwgzk9tO4cjm5PS20x1t54jcdfO6zN1NeV88j9z/AC7f/gBd3Pcst993AurPfxne/9A2eP7wZVMRzfWqBbdtkc+nj1sV07XIysMzE7f6J9Tilzsnc8eSycTNHWFtkbPAbAXw+cOJ+tJNF5VKEUfzspzfw24d+xDN3/StDiX52PvN/+OnXP8aLse0kZJLX9u/FMB0EcfCZ2K4DSqH9Vv5Fd/OaooPr2m8K4F7HHDzQdzEM6TnKXSc/JwapLGRyCtvROK4HShNA47pT5muy5jx9nqfP+fT5KwDxTJvOTBz09Hon/1aqAI4KhddfR0PWVjgYWJaFaZpYlonlk5PGkN9MMEnbNlnbwXYV6awL2kRpjRAuEhdTanw+A9MSGCZIw3OqKtQERVbom9QultAYUmFIME1z0hqbajl4CoSR/+1tNp4/QyCliRAGjgJXT/2c6DkLIcCQx63NP1f+Ypr4f0WK68r0rc/9gc+dex6P/eO9zHnXmdSvLuEp2cwFlSup3LCQ8niWkYOduIEQkZoIQyNZfALQOcqNGkasMYzcOI4S+BvLEPt6Ka4uY9gnKHL9xJJxfMUhUqNx5hdXcWh0lLJwFSNHO6isK+eKBe8i9OFVHHy8he7mbVj+EI9ve4mFi+qROERdm1aVoc6JIizN/vEYV//Dlfzom7ezYpnBxrOvoKujk1cef57zTq/Hra2n49AAO1va+elPbuM7D36TDaqSe7e8QW19hNMXL+Do0BijXT3Mra6ju88mIi0ikR7GlcCsqCKlfMwzKhkeP4BKSXIyigoEWH9yE82799B6eIBofSnLVy8gfXSIjqM95IrKKVtcQ26wGydrctM1H+PiW/6de676Hnc9dieZpiSDL7cRWOVnqdvEWJfD0XgrYxWVnH/mQgb3D3K0p4uUEeBji2u4dUcLp69ax+YdHaxYF6K6yEdCmvS0xclmHIYHBrFCfj59xZV89FM3MTwwQm9I8ounbmX8988wEkyxqmoTpf5OXnjhAMOJLA+9vIOjnRAVWUrCRfgsg1QmjZn3/E04fOSkl4xjC1tO+CrVcXTHidbzZCA7EV8shMDQLklhYbqatC/Jbx9/kEs3nUOgZA4vvPoiW37zfQZkmgtWfYZHuq+iszVEuevyv7/4KBuWLSZrCN571RoevrOZ9IiLGXJwUIR8QWzXngKiM/WnoEFPHdMxjtzTpD2nmqM8zdVxJUpo/Ab4pMepT9emZxrz5L+nb3IzlU/nuU8kMznrZr7P06BdV+PYAheTrGPjNy38JoAHqh49MZlS0thK4LoCN7+xGIaF4zhYpsS0ND45jYoSeStCS5SaagFprXHyNJgh8NaaIScomfyopsyF4zg45Ok2LT0rwNQTfg2QuHlt35Qzc97TwdwotJHXxIv/mznx/5LML59Lx+92MNIxSvEnF3H/3h9xsC/NF77wOaqMYiJH+xjIDlNbVkdxNEQgYRKxDYxEhrAOk1R9DA/EsftM6mqbcPb2smLDWlR5hFT3MLG+fux0klwuh2EF6RtMsHbBAnQmzaZ3v5V5J51C9Yp6nvvJw9zwpX/hqV3b+fVLL4INUhj0xseQTQ2EQkUcTA+zNlDGHFnMM3c/zpqzTuHkDVeQiQ0Srhnmin+7hF7/GMX1lRRXudzx9a9x9ceuoyFWyfO+D
hbWluLTPrYd7CI2CivqFxAbdxgQaWRwnHo5l8aqOYynsownRglbSUb7BXVzRimLDJAa6aT5tRaCRaW4xWGqSiup0UUI0sxZUEtO5eje18XY0QQ6JPnRD+/l2tMu5adP/pqXXn6W2LYhPnf1v1B2YIg/dBymvmkR42GDRFecbU+20Dc4QElxkLllcX55aABfWrFv3zY2Lc4y1pdBp03mldZw+qoFiPEBRjNJdrzeydzAqVx56Wnc9cdfUfnkDi4ObmBO+ZnMzyznod88DOHzoFGzdtPJ9D25lcd+/WXKqstIpBMkk2nPETiNP5yu1Z0oCmKyTL5+pvKZNMfJZTlTYqYydA4d4atfv5rDjz9AVWQO0ayDb6CbT3z/Z6xespBfv3wl59V/mnef8Rly5QF+e++ZPLllCzfffQe/vbOZa7/8IVyjD6E1OhAkqx2kKjhjPa1NuRLlTrUECtTO1DEZeWD3AEgDWVeTsgUpx0QI8JkKn6kxTXdiEzgRFzuT/Kn5KlwznW75UzLZ0Tr5byEECoHtGqQzkHIEaTuv5ZoKhJ23QJgC4EoplAsSjYHGb0gClsAUNn4DAqbAb4BATXKM5+dYS1y8zc9xnCmWhVJeW64uAL4zaR16Y5nMswshkOKYti6PaRY4SpBzXBwtcDU4iin8/OQ5B689Yfy/Q/FfBYi3HGrmf3/xajoHsnz36AHUiy6XfOcaKl+NEa5JYwcMLDuIryxCtNKgqcwi6YwSj9hkHReVi/D377wYozRE62sH+NLdd3C4tY/OwQyma5KxFEsWNrC0pIgFJSEsK82wOUaTkry2bSvPPPsHVp61iY/ffBM//N//zqc/fz3FQcGZn3oPdjLNki6D3a/uZrArxfJVa7js8i/zL888gIomaVgeojd1lD4yWKkMrz/dwtLik7jnkccwiheyff8DfOSjZzFHJgkcEOTGRjnSN0p9OErncIxy3ceQM8Q7VxfTFYNnYoO0NR/klMY4NVYdA4E64tKhpdeHJUKETJNkyTh7WroJYNB2oJUntzbTY0eIlBaxoiYJzhjCDOPEJQtPO4WUo+hJdHLFB04lYaZ49olvM2aEqKSUbbnDXHfBRaw5fQFjqoNQFNoPdmEFyxkfGOXkt6xk2IbxbD2tiSF2dLRzsKONB198gVMuuJiTljUwsH0PA3WSeLSUy6+8hvAlb2Hzs0/gVsDp7z6f1Zs28ON7fkR99RlsO9TBF2/9JpedexVPP3gPlRVRtGmRzdq4wlvQQD7OXExxck4FZ49imAmIZzLtgeM2Aa01PtuHKSzGGUG4DrabIBWL0XVkmM7+ZirqanBN+PpdP+LJo//JrV+5mNQI1NYVsavjNUZGH2OJWUO44nS+8+138pWNV3P9F04BdYRgcS2WX/DOd78Nv6vA8OHoBGg/0hRkMmJivAX+evLfBYDQrkJKcFyXnGt6YGJLclmF5TMI+WyCJvlY9GObQmG80zXvN6NDJgPbTOXHomuOt26mfwrXFsY1tS+eVuxmFU7Wwc3/z2d6TlS06ZVrhRb55wwgDFR+jiZoEik8mkkqtNDYDrhKIgwT1xE4tspvlgJlu97GqiQ6v7YkJqbp8+Y+z1O79rH15yiFoyRZLUlrQVZLlPAjcEC5GBIM6Xobi1T4DAef5RJA4rqW93EktgZX52kwJbxoFTmJ6pLgCI2DwP4vECR/FXTKmrUn6+e2bMfvwulnrYfT5nJO43z+8JN7qHnHSqpbEgyGMljFmkT7KHtGBqiJVlGJy6GhToKEEfPnsy6RJGObYOTYPjpANJdhPFpKbkDSUC5omlNJ2jWpW1dKYn8Xw8UGQRVhoP8gpw7N42XZxpxT3sHvH3qY2mLB4uoNlOcilHzqQpa2ai5asokr7/8KJfE0zz39Iqlcire9bR2H2vdy5op1vNrRS12RRVurzYp5xdRWWXQG51CqD+J3MuzeWUxZVS+nrb+G79zxdT6wUTCW1OwaLcUxsqysP51tW17n0rU1WDLO7zs182rKSeRibCgqZVgdYUFpJU9u7WV7v8PCBfW42QzDhiDgakr8gne+/XSe37af9GiCcLWBHvaRtLuI1DThVz0MDEU4f5nFhnd8nHXnvIuVi0/iutPOY/T8M+l94pe4/jFSsoRYQqDUGLVVDVREE7y4a5TFdQ1kB4eJxYYpb5xLaciPP6zZ3m2zcX4DuVAFDectpqx9lBee2kxQJQlEQlxz7bVse3Unl155JamxcZ7uH+Aza9+KGbWwlSSZcTDRaKkxhMAQnjlbcGpNBl7DLCx6j0/1IjRmNt/heNN+OuDYahwhDEKJEt535bu48O/exVOv/YxLL/4KT77yVYRvIatXnsO1b/0QH/zn04lWRjm57nxe2/U0/qRBeWMTw2YLpQYYJVG6dh4mW6fxDdWRSoe59eeP0jHQxpOPPsTnPvpZLJ3FzppknTEMGUaaWQz8U0F7mqMRpUFqkqkc0gzis1wcW5BzFKYpsfL0yUxjngl0J8/TTO//ZGD+U/gwEw0zEyUz3SHqut6BMLQ5sVEJ4TkDNTbS8HvPNx86KrTCcQW2rRB4PPf0Zzq5fi9ix5igrwobiZsPh9ST5sHrmso7IwE0pqHJ2RoXCxeRr8fbFC1DYAmP9vPalvkN8xj37WqwlUHK9jZXvyERWmFKPRFzrgXHP+tJY6gs+W92bP5XZNfBHTx33W94vvslihYkeHt4Lvf+7i4Wvfd01pfNY192lNf27+CPj79BafU81lbVIUgQ82tK8VMRCjL6fAvzyqvQKJq7uqmxShkwysmmNCkjR1WjprW5h67WFp595HXahzUNdSuxezRVgSb2zk/x4TPOINu+hesuWssV776Uwx27aW59ggMPfp3Pvvciat69kq33P87WnmYu3HAuLz69nXjWZNOqjfQM2XSOjNKoa/nwJz7KI3/cx/MHE9x85RdImyEefnoI0XiYrlgxdz//Hfbe2EhxQmMryVUf/BS9Q2mCTpKFS+P88UAvbW6cxdkkY8MC1ad4aOdRhlOLMMtL2BnTXPX560hJTbSoBHt4DFP6GR53uOeBFzlyuIPB+AA9LaMctdOEiqsxkjmSw/XE4jYPbbb5t6/fQpNZw1e//x0OZJvpePEVDJ/NonlNBEsdirI2UaOGuWRBVHL22g3Y8TESQhMqKWW4Z4iD4xnajsR5/xmLCVU0cP0nr6HvgedoPbKXJSv9yKCPYFjw8x/eRnpM8f5zLuakuvUkdu/CZ0ni4zkyOQVa4/cfH3J4IseY1gXQOn75TtcGp2uU0zXxsCrH5y/mfd94D5d/4uM0lUSpCqzi0edvo7VH0rxrF5FEisu+dDG1gTXUOgZky7jtqw/S1neUyob1jB1uIzYY4rmnXqG0vo6+lgSDsUNc+9lrqLThjus/TyTXimac5LhAGFl8RhRDSCwZPCHQQh58pEe/BII+/BYIrbBMTTQkCOTpk8na+4k04OlzMlPZnwL6E4H+icoK2n3hezLPLITw+Hvp4uY1UyUlwgwihXfKdcIay2/WUsp8dA9T6nQcZ1pbYgqAa30MwEGj8s5lR2kcNKac
PHaB7TrYwiLjGuRcb1PwGSY+KbCERs4wP67Go4i0iYNBMiNwbIEhfaSyIHWehxczO4en+zH+XPmrAPF5c5fyd4nruLDxTPY9NcLZiZW88f3tmLqSgzsfId59gMWBCq5/xyd47uA+jvSnefsZZ7A8XIpVU8NQNESkvoxvv/QaPckR+kMB4hKW6CKKBuOErDBtB3OYQZdE0mbxsgoSJQ4HX9nGeLafI+1J/IaPLz7+BEMZTXc6yc4Dr3HGGctpWjSX0V2DNJy3km/c9FlWXvYOYiPDXPTZD/PAt27hQIXBezd9kJd27WGFKOLl0W7uvO/H3PfcY9zz9Ct89Jtf5EDzfi5+70aGDkaoaPBx9wc38M37Rihd2sDcy24hsS/JS7e/zJ5tr7KrI0AoO0Y0UsmhmlLGBo5w1A1i1BTT132Up5rHcatLee13TyDHcowmR2hYUIuZs4ncewtmAAAgAElEQVT4wxj+EKFQCF+gjIzlx3CGMIPVdA51cfkV/8hI/yDlc6pZOn859ecsYv+ObQSqy4gNj9B+KM5AfJzIODg+m+pwljfGB9nf/AojYz0MJccYdXIMJMYRPoMPrbgA0zR4quUoR994hvvuvJPmPfvJHB6nL+Zy1lmVOMY4ZbV15Eb62PR359NljLE02kiqooiscrAsgd9vkclkppjhk0GpIJMP4cwE4ifSGmcCoUK9mRB84e6bqAzatL38BL95+ot0GgeIjkuCiXE+ddP32XLwGf75vf9IX+tTrJnzeWqWhxhO5qhdWcPru2+mcdE6RnLN1EZXcWB4BKt+KYHaSn5y55f4zC1vITjHzz98+Abuuf9+zKiPpzc/jhkwcJWNdo9/Yac7NiFPjWjQKjcBaBqJRh63CfypuZg8t5NBf/I1hciW6fHW04F+Jn78RL6NgnhtmmglcGzIZhTKEEhLYJkKqR0Kp08LY3ccldewJRpnQpufDH6Tx2y7GkcVNOJjMdvkj+ybQnqat/KsPy+EVee1aI2jfWRygkzOxZLgkxqfofHlwd6VhagTiaPAUZKcMsg6gowryLomAZ+gOCzBThPxC4T0LAutdT7O/Ph5ejML6UTyV0GnlFeVa/8cRbwnQW04SrldSY9vkNKmYpz0MLnhLAFl0V4uOXn5atJ7uuiMtRF0iqheUMWG1Wv49WOvkh5yKC1RqNgwVeWSNfObMCvC9LUP80qnwqiSpPv7CCRqKCsXhMMGw8ND1M+ZQ7vTidubwl9SxOp1p/PG3pfw90J5QxHJAMwZSOMULaV+6BAXfvDT7Ojq4fLLruJnv7qdG759M1d/7Eq27dzO0FCKz3/gCr533x2sbqpnJNtL33iAjVVB+tJh/BU2EWeck5ctoLqskq/ff4Du/YdYOCdE47JzEAebGRCjXFUV5mudgyxYHcZoDpEiSWh+JUODMWqDIeJZE5EaY1TZnhNGQDRYhJOwSWIi0goddikqNsjGbP7xsk/wvR9/mxtuuw1fcye/fPoBKooFZrkPUzqkMwkqassZbnUI1wRJdY2QKSpBpWxy8QCBmiz+eIbBnji+ihALqup4+MmXWHL2KQwNd7N2rIgll5/NLf92G6//egcf/9a5lJaWsGTJOnS6j7OXncqm8z/GkfgY69auYSzrJ2Q5eTNUg1C4AgwhMaXAMswpL/HEkWbhIKWJk8viD1iY+FDCi3JAasjYZEUKy+dD54oxLNvT0oTHKyNcAkYRyVSOnJkjGJDsev0ldmy9jx0tWyiONDKWTBCeW8JAbwuRcICTqt7ByWXnc/e+u7jxsn9iy5Yt3LX1OoLBBagRwZKVizBGu2jvCZEuClMVGGcwmeDyTV+kNBcltLQYq2oRO3fdx8ubWzh/xblcdMk7MYpNksLG7xzLfnEsdG2S5aA0SnugL+WxwyreaVI/hlYoYeMohRQBMspBuDl8poUWEkPI44C3UHdBWz0RiGutsSzrTemSP49ykVM2J8dxcOz84S6BF9o3KezPcb01McHVu8ccv55TcurmoV03H80kcJQC6c2jIaRnuempJ1q1FAjlUVUTkSgCdD5G3dE+L9rFMPFJiUtuCuWllIOjTCzh4AqPL3cdgcBEaAef6TlelZAo7fl4DOliFiwL5ZVN3nwMBEocc6DWVEb+LDrlrwLEDb+pF69qxMlmCDaajB3tZ/H6RvZsHiQi/Ihxk5KaLNZoBS+2HuT2O37A127+KievPx1lD9C3tRunVtOfGCBglGIkTYINQezBUcqqy2Gsm5qVSxnqGCeVtCkOjtI6bJBMpkilMmgkK2pqaRsbpq6ynJ7uQZrmFNNyZJT5jTX0DHVQUVxO3ZwSWtuy/ODLt3PRey5AW/Cd625gu46RenYzr4T7WRWo5tm2bUQNQbimmkwiQ31VmFCFgT8W46TipTzY3ksyOYibcjn35At428ZTaXm1n3lXr2P0qS388eGHWDQ3Tkfax2CyjrnzFG3tYwyNplhUVUws7ZAdGcMsL8GJ5aibU8bO/ftZ2DSXw7E+VtY00jfcz8joOFHLR9m8udSm/Xz/S9/j47/6NmNjvdjj/SwprmCk2MXULmXaR9XqMnrbeuk+mMbxKbLahVFNJqSoIszChSY7ehwGOhJEwjV88opN7E4M4rdtHtvcxsEv3ssTyQfYsWeMskiAbbueYNSCC9ddxNBAM48+3cXpp5Zw8/95FFfYKJ+BVpPoDelpiYYAUxpI45iTrRASJoQgEAiQSCQwTYlh+bDtLIbwHGJZaZDJZLBUGMMYwpRFCBTaNhD4EEKTcZII6bJ98y7Ofvsq9h7Jccstl+Mr2Yrft4LObkWJz0Q7mn0jr3Hn9c/x1HP3E6xZwn888SnKjJUsX3Ianfs2c9ppF9KTPkrL3kOUpBOMRqs42P4G3/qn53niPz5N44q1fO5D/8qO9mbufPgbfORzP+CksiWYGRcpNVoKpHKPA9iCFPhkxwZXexy4d8AHPEtEogEXF1cZ5LIgDYew30IrLxmbYKqDc3Js/GRn72ROfrrFMpN5/2ba/onGUyhX+WPyFKwBoae0L/LUhFIKpTVaH+OrC+thCoYI6dExSuMohcqDpWHkfSxi6hiU8IBUu14yPAOZT6ugvWgS7dFYUihMvM1g+riVFqBdHG2SVSZjWY1pGvilImCC4R1fmji0Zk6aVykltjvVMnQKfct/zy33/+1w4hIY6B5jPAOqPcuyJevY80YrtXOgcgHEwuOUlLnEug7x6JYdPPLU8zj+OO3NT5KI9zHgH6Q0HGZFw2KcTJzSuQ5Lm4IsXV1PfQgyxS4DPS6uMUwuCwdjWaTMYoog9TVzqa2qpCuVIpgTWI5NadCHkfBDwE9tTSNB4aO+OszeA4c5dVGQ2x68iiXrSli9sYFdbg8fWnMGL421IxNxSldWURIs5tyLL0Gkk6womsvgSIKq2FziyVKe7n2d05oWwFiYNWXLCUQUv3jgcZ48+jTXN13Ktx6+AyPVR3NPjv6RKJbdwd4jhymfH6UsapEyEpSE/TQsL0cmh0jLFMnUMGevWoblZKkUQSLjApUzWFk7j/JIBSKVY2eyk0s++24WLZv
D1Z/5FOW18xhUo7hpm41LzmbD8nfy4K/ewC+iLKiNMjSQJmAX448ECBWVYWZHef3AAIGgn42LKrj2LU0c7T/E0Tf2IRhiWWaclR9/K394YhvDLTt4bs8jHO6WFAcshg/tYkv7ES48q463X341512wgmxVFNCe42oCvAWSyQdxvJN9hiHyPLgmnU6TyWQIRcIIw0Qol6DPj5HnSn/+i+9y8y0fIRgcQ2fLOdTZQ04IYrkYjpnG8afxF0dIOTlWn7qcG2++laaAxaKa0ykqXcrRdkk5WWoa1pI0Epw9fwP/8cL9bG318cwjX+T3N2ZYUr6e2EAr85evxupO0L+tg9JABlEdoH5xgOJMHcSHcEPzOfOiK/nG3Y9z+10f4hPrP4m5e5yRvYNoyyArBaaTw3XdCeqioP0WRGuN46h8hIQg4yi0Mjim2SqEtjExcO081YSF6x47pFKoZ7KpPpmfLrQ9GVCmhwNOl+lgP1P55O/j7pHeiVtDesmlpm8cExq4lrjaRCFRSByl0ZMAtZASYLr1IhFTAHxy+ZSP8DIX2q7GdgVCS0xhEgIiEsKGIGRITAmG0BgU6D7To0YwcZVEOy4hSxGULgHDc4e6WnlpU7RAINFa4mqJrQUpx8VWXpIvWwlsJcA10Y4BrkSo40NNTyTGjTfe+Gdf/JeSr33tphtLgwbBaJKmqmJ2bW3j5E0XMTTicGS3w3gIitpLEaeW8MPbbic7HkM4Jv3Dkpydpn9M48YyaGmzZN08sqF6Nj+/jYQTpr8/TWDUYDTnMJSRZM04I4N+KotKGYp1YwrJaL/N6GCMnCFwAwYjySyB0ihGNs5Q3xhJ28+8U+cjRiTPbeumOJhlZDyDDGfp0jYPP/cQ8UN9nHXG33Fgx25yhgOd43z+M1/n0c2/oipYSV29QpomMlNDKh3DqjI4vK8d3TvG6adv4umfPcC/3fU1gkUpuslSH6xjQVGcxfXlOP41mLER2g53U12zgM6BXhaV1ZALWSSHMkRD1fR09aAjUd570rnct+0FGksqOOSmmBMM0ZsYJZwzCNXVsNyqpLy5i7bcAMlMipraRs7fdCGPbt3Fpredz3D/fvrHFcFAjiHXpdyyiEbTjGWiFNctoMiXJBcs50jMpq23m5IAKCdIcJUmXF/K/PEgr4QGmJsMMjQ4gp9qhnWG9rYxyoMZdrfvYv3Z5/Lqww+y6Yy3T4CFYXgUiiELB+xBGgIpjXwMrsA0LYygn3QqgWVYCCVBCGwkKa147rXXefa5n/KWM9/PssWncLBrHz/94Tc4c+NbqSgrx1UZpIjQ17mX5196gRu+83m+dPVX0HKYLXs6ObSvl42Lqml1XMp8Cdq6exhURxnp6cEfSqDlAp7ecR8HY9tZ0FTF1s1PYwRq6U310y/LKTVTzM+tZtlb3s95G9dhJyDq5Eh1PcIH/+Fu/v7md/KsE2P7/U/w+r7dtI90s7K6CeErHBU/pilP5bYFSpg4CBztHbcXhvYOugjIZiVZ1yDramwhMVEe4BiA0AhmzgQ5WUueiS6ZXj4dkCdHnLyZaK2xHYXrKjQC11UoTxXOP+tjWSwndQJXeaF2tvIiO0BhmBLDADGR0tbjubWXlQCvxKNoLMOAfKgq0+iiQtiqqyWOFt5hH2FgGB7/bfkBofKnMTUIT0PWIr+xCHDyB49MUxD0gWWAYSgMKbyEWVIghEQLOXGtzqfwEkIitUQKiSGll/xNC4SRz/0jFd+9+abeG2+88fY/Nb9/FXSK5TO0v1ixqGwRcdlFVkvKQwYqnUboYuI5zdBQlsYl1aR6e3CLItjZGE01TbQc6WZBbQlx4qhkiIsWL+eB4SM4nV0Ea8qIiCL8wmIs3suwsqmpqUcPjDI0EOPUtYvZ0X+QcMkcPvAPn+SX3/pnEskgJSGJo6MknH6i5TYrfJUcPNDHvAs3MdQ/REVIM9I3RgQfkYpihuJDuIZJT18KnQYnKKmMhvEHQ8S6j9JnpmhqrKIh52P7qE2DL0NUl9E1mkTXlHN022HcdI6XOvfxgSvPYUnCRpetYcuBLYS0n5q5YSIRk7IKP3taunnbW9YS7k4QKxe4YwmqS0/ltp/dTeP8alafeTEvP3k/Rdph0xnnsfXAQWrKDJr3d6FMxYa5pbyyZ5AVEcVnv/pTuof7ePLe32KnE1x0xQfYPdbJY4/ey5lnLCcxvJ/Q8GJ2HW5h+YoqSmUQNxxixPITDBfz8vPPsaR6Lo52WHnmPJ5/ppmVdQ0kdYqQU4TjugynFE3REEdSCSyh+LdrbuLpI0/zteu/y+iojRBe2KBSLj7LmKJNGZbEsiyy6Uz+CLbFhz94GT+76w5sEcEiCXaOpx5/ml/u/g4L3MV09x/CSGf59lde4hv3voNrr/wZXZ0xVq1fSgiTu3/xCO943ya++++38vDhe7nnzmf54+1PEg100XJwO6PEqBhKMThPERifQ3ffCD+75UVu+e41lJZHyYV68MerWTn/JA4rm9efexDHjhP0FVFTU8PRAwcQlaO4EipKziFQsp1ofBlvHHJoWlfBDR/5DiG/nwN7mzll4wZc4Xq2PSd2SuZyuQnao5CIynE841spxXjOxM4pDBN8fgjJY87L6VkNJ4P39IidQvl0OmSyTERiuO6Mdc9EnWityWQktoRw0EI4GRAGhvQO5Xi+jmN8uOfElYzlDNK2TdDvwyKHd6zdOz5vMDVsUUpzIlug40IKiSk0Po6lvS34VYQQXrI15cWTOxpMAabhuYpRkBUCv2GCcvL0jGfZuLpwYhZQXmIsaeTn0s0f3JESoQU5pdHaQLk6r5AoDK0QhURvSmArg4wCjYHrgs+wMUzPYbqwwvzboVNCIR/zNpUyYh6ltmkVFbVzOdA1xoCOcDiTplcrSot8dLQewQkaZHMGq2oW8MaRQyyuizA21kONW028KMXOsR78RoZ3XfE5ZCqEkBbtPV0ox48vF6TvSD9JN8u89YtoVwmWVDWgApq5VQ2Eq8rw+10SJHjLkjOwujQpu46nOkbpFyGee/pVRuMp+g/ncIsiDIeguXeQfSlNSoRwlEKHFPMqi0mpLO19/cS1oiw0j6hSjBuNBEokKVHEG47B2991MqfMWU24KkBopeD2r1/LqvEg0XmLEDWHmbd+ITnLJho0cYIJursGWHPSMtoPDbJvZIzmA0lCbilH23fwlRvfQ7UM8e3bbmH9xe9i1fq38OSO7Zx20YU8v3MPb/lf72E8GedQa5y5JVG+9bt7+M0zD/Hb3z3Av9/2Ay7ZGOQ/X/gdB15/gbmLNF0jGWI9jdQsb+fKy8/DkQ20RyPsPNBKsneAtt27KS9dxOhIgvGRcXJjJmHLYm/XKEVFxew43Enb+H58ZpphJ4XrC+MrCnL9jz/CQNcYKAnCRuNgWZJwxD8lMkUIgaG9NKeBQMDLmAfUrKkhISLAOIZrcuk/Xcby9Wezzr6MuEpy8Gg/hr+MO37zGQbsKDff/TX+sPMl/DrKpz5zLWe86wz+8457GEoN8oG3fI5XfvlDegb+yGOv3UvLYYGVLaVbVtLXbJHW/VSUFPOTX3
MGxm7+ysg0WX9ydP/XXm/dUXJuxdWegAKLK4D2/eO3lAcCBtpBEsgY8+/Zz33vmYxf/8J8dddAbfzf6cZ955g5Flg9AViXv+dDtuE7r0KMHcLPRohkI1xN8XzkWxRAzb4pWP3mNUWSVb2uqp37GLUF4O7bV7ePapZ3AEG1PXEQUZSZG7/8bepmiPpZn15Ww6LtnfpXLk9wtoaG7i3HPPZcbll/H6G2+wcPlP3HnL5Rx9+GTmfreYjdvq+GbB98z/5hs6ol24ZYl33nmnG8JlmiiKBI6ImTH45z//ybhDx6GqKuFwmFQqxZbtOzj9vKM47dSLWbTkCzwuC5fj5bkXZvHHB89nyvgrefWll1FlhXA4jI1DSUkRBX2LyA9m4bVBzIZx4w7lqac+oL2jCSEQZPphU9neuJt0V5w+hRXYloGlW/gCQVrb6rFsHU0ziHXFcblV0hmNYCCLzkiE7ECAuKXjmBa7du1izMBhBLKykbMCxE2NbEvhr2+8zhVXnkRescqyX9bz0dtfMLh8FB1t7eRkZ/Pgg4/x2ZezGTt5CNkFpVRtn4Vb8DG2/GRiRoKm9BrK/vrzb8bBKVs30pH0c+2FR/PpnHk8+9ifWPzlMqpa63ngsYeZOHkas9+8j7QIo8ZM4eSTTmdA/0pOnHYM77/9IW9+9CG33nwbnZEGUvEEpeUV7Fi3mcHjxvD2Ew8ghL1U71xGju4QUDPM3t5CKtuiXMhjZ/MmCvNHUFo2gFjdGjq1NIahI6oizZEEBbKbfkccj6dDZsH6l6koO4LBk6ay7atF1Hg2MEbIxxlQhNUisKXlZ4L+vjiWSV64gNqaRsr6FNPRspuEGaektJDSwjwa6+M0dUQZ1n8kKa0OPSOT5c3QrHXhE7y0pxTywy7ocigoLWZT+0aKrSMpKMzi6jvuISSrCGmdYChMWkvvN5YPLCrlUl0kkgl02yLk8WHpAhklipAJogkJZK9KqhWSLfVUa9t58rO3ubjyVPqfcBjr//4jF15zIRcePY5+V57D1ORgxp51CB8s+JqyvDFcMu0oOkwHbANRB2/ARzKWwPT6yQd0wWTttm24MxkGThhHe6SdbCkA2MQzSbK8QTQnjSy493sfQlmu/3ccm4IgVAD//A+U+J8AHMd5bO/+XOB+x3GW/1f3Hz1mrLNoydJ9jS7v5Zg7MG21Zy5QVZlExsB2TLI8HtJpA8syep5hP+vZNM3uGboXS3bvTj5YplXv478e+5Xx+sAU3F+TA3r52fZGwHt49nr/LoBtGUiqguRIdMa6cAUCfPzue1w04yJwLGRJBQsShkZVpJUrfncalmzi8blpqm5h+dr1/DR3IUeccCyqKBFCpTUgkKhpRDMyFKle5q9byVlnnoVhmyiijGbq3fh4UcCyLXAsOuNpAoEgjx1QhP7Pto1oG2ALZHSNRCqJGRP5fMknVK3bQml5BZ3NbRwzbRpZHh8PP/IgYw+dwMyZMwkEArjdbhAcbAdEQeT9D97nogsu5OGHH+bee+/loosu4tG/3Mu87xfx5dez2LmziuWLt7Nh7VKimTr+9sYS5BDU7KpCEMHUDWKxGLJtc/bZZ5KIRcgOZuH3uWhtjbF02WqGDB1Lc8Nuokaa46ccyU+rVxJpqicY8OH3Z9HRFcPr9uByuXC5XPh8PjIdCWKmRkYwyeg6ssdFV3UtCdtgWN9KEpkkTZE2yvLLyM3L58STf4c/mE9H21Ze/OuD3HjNY0w+9DD6l/fBpdgYlsRf/vIEl884l7+++Co7tr5AJJ2DXBTH1VBO+TiJsP9o5Puf+804SM28hz6jfbgbBf72/SzOu/gKFn/1Gng0Sjx9qRhzIhNPmMxHL33KzGsvJZ5pp7g0nx/+/gVfzZ/L51/OY8WmteSH8iivqGTl6lUsmz+PyROO5IVX7+D4oSeyYtMykkYLJS6HkmIXqpTLG0tXUtH3MAJ2K3Kwk7gepLWrjaAUJJwVoqs2iqtCRLIcQkOGsHrOB/z54R/YtWUFi76djePew8ijb+eG02cy46xx1HfaZIVswjkBmprqEAnSf4iM7ChEUnFM3UU4rxDdbCS/cBBV27cgO2FsS0fJ01ANC3dWBfHWbWQHywkWeBGkMM/c/w++m/M5Ha0WgyePYFh5X8hk8AWySGeM/dAhknwAzluV0RMpPIqK7nVRs7uZQ/LzeebvL5PlCbN+6Ub84QhxMUmeaxznnnEUt193BeKIHI7peyE3XHQZ89vWcczA0Xy2cDErHruOY576hDF9cvD4A7gybsyuGOk8N2FVYO4n8zjp9Omsr6vnu/f/zthjJ9DsgUOa3RROG0kg7sIdAANw25DAgvSvHgCArID7f1yJzwBiwGrgVsdxooIgvASscBzng73XvQV86zjOpwe559XA1QBlZX3Gbtq+c98f6HFdiM7+xet7lHhcN9iwaSN9ykrpam5jwIABeDyevWzR3VaxaZr7YIKObe5jyuidmdmjjA8sQwnsZ7n3nOt+jp6aDL82uL23lvGv97H3ImUEnF6JHb1T+0XbQZclRNtCtiEp2gi2gxcVRwYdkEwT03JYsmkTD902k85IFx5XAD2t89bbH/D+nC949q77QOr2FIkOkDHI+BQUQE+nESwbW5KRJQlVEcERME0bWZXQbQfVFsCB+5X9fYXXJiMolhdMAxuHjKFRkJtLJt2K1u5m1cYVPPPCY3w7byGZeAqv34Ou62zfvp0RI0aQSqXwerzdvrK9NN+m2R0odrlcACTT7VTvrOeNNx/lhaf+wdrt6xg+uJKJh51C375FdHTGSespkskkiiKjpTP4/B4mDBlGXVMjfYcPxG0pdHVpyC6RVEJADbtprq0nZqUZWzmEqh1VtLc1oes6mmUiSBncLi+apqG6ZNxqENGlEEvEyMvJpXbHDhrrG8jtW4wgCBQVFKNJIvlqiOxgDoKlcNufbsUnyyTTbWzYtoG2thZuuO56BGTi6Qyi46a1oQpbg0VrfqSsYjzba2uo3/Qls+bMY9u6PTwl/5a5pf76G9jUtp2zjj2dT9+Zw3U3z+Dlxx4m7GS45LYHeeGRC5i7WUeKRXjltVe49Y4/0tjYzocfvM3tN1zPNXfew6VnnsmTjz/Eh/+YTUdXhE/e+xt7Ni8nkxJQC6sxokPxGrvo7NIZObqSf65czYhRA1gRayKry0+4wEVHfQuuXBFL95FIinj9EK/LsLhuN/dddB3rty5h6ZxNtCSjvHzXWyzQf2RQe4A5DfOZdtgZNDSuJRCUqW3eRbSliIK+KZyoTMZK4w46ROriBAvyKcoN0xFrJxFrwE6GQbEJyDaRtEpFZZDKvidTEPRTtWk9eSPGc92YGVSXpnnhzSd56Kq7CbhdeNwukokEqkc6IBFH3m98JlVQ4zrfff4lO10Zyh2beDTDno4FOKlcauM78QgKzUmLCleQlBKgOrqOC4+6iZVr3yaTKSKspEnGo+zRMoTDYQRTYOadb9CybhWTph+HvCFCW5HER68+yVFHn0l9w2ZW1/3E9uW/8Pt7/4K9q4qtyVZWffk15z74BNK8rfS7/my+vOtlHv3
LoxjC/gHRQNZ/psT/u4HNV4D+wCigCfgtXurfiOM4rzuOM85xnHHhnBwsTUe0HdyygmQ56IaJoyo4joUkSGysraK+PkHS40FwuYhvXMvaTev5Zec63nrreVyCTNw2EWSVVCKNIDoIjogjZLobRpAwrW43jKSIuFQXkqqSFlLYmoJh2pimSSqtkTF/9f7YorAvmt3j0ulx6/QobkVR9uHJu/+bgG06+ynwHlx5T/DUEkCyLEQEbFHAg4RblLFFGxwHl2Uj2yKGCvGudqQMeGQfrUaMrOww9e0tPHXffTgy2LaB41jYgo3jlVFtG8fsRt6oXk838keRsPeW7xTl7olREcCRHGzptxmEuZ4gAZ9KIOjD53MT9PuIdaVIpVV8RR6mHHskL//1dU4/5RRcHpV4PE46naZfv35omoYsyximgWlb+1KeJUnaG0zuVuZuNURTSyNXzrwTR4VBQwawfPVuJh45imQ6hmAbiKaFSxDJcnsRbYd4pJOWdAflxQV0RGJolowugdsTJOh3YXRFCPtdjK4cSCwVp7C8iMHDh+CWRdweBVHqJtpFUUjb3RO1YQv4fD6WLvyOhmgDReUlFARy8CDR1dWB1NZFV6qT5s4GNEHHVg2yi1Sef+Fptq1bD2Ynr7/yJrYF0eYOHnniYb75bi6zv/2MKRMnE2nZjtZez9IVu1k8+weGjxx00DFx961/5OFbH+WHL9fwzPP38s5bnzPhkELClYW8/crtFBVWIOgmCxcuZNeOneA4vPrqSwwdPJ61WzdxzVWn8uCjj3HdQ0+y6MuvKAkFGXf0aAYdchjtsUa06FDa27cT8BSwQ4tRYLgJDx6O2pqmbyIbtTNJIt1JcUUubiGPwiyJgNvLsH5HogRinH7sCNavXUfA4+V3Fx/G5dddwZwFr7F9zkJcFTn0CShsXP8t8c46Nq5dTbpTI6+wi3hbAn/Qj2UKpOp1Qnl56GYzu6ubUPKzcBSJQEEMtytAl50iKwTplM6aua/w7N9uoTXksHPPIr5J1PDLl19y7rQZeD0uVEUgkoyBW+lW2o7cjRG3RAzD6i61YXcjurJ0jfVbqhkybjxda9awetscttYtoKa9Gkf2EfTmMaBsChNPPomI2Eo430uoq4AxU6dy6tjbaW9qp0DtQ+HI0/jdBVdw0xXPI7sGMTHUBzx+3nj6WVa1VfPy068iKDob6jfx5oef4haGMaxwGG89dCY7Oxtp27WN407/HVXzFrGeZl56+S2OHzsZI+qiLcuFIwtIHhd+2fUf69L/liX+r879d90pI0aPceb/tBIjnSbg85HWNLwuGVME3YAtzc18+sk9+CMOHek0gWCAo8+ZyeKnbkbO7U9HugFHF7EMk5OPvYbjzj4X1W1gmCqKksSxZURHRKQb0qMZGRQZanWTTMygfu23HHHUuQiqhaHryLiRJQFZFrElB9G09xGl9qwMehd+P5C4tbe75WAY0QOzsw6EHgHYpoUsKWi2zptffMLHzz6PJasknDTJqgi7mmq6XUKiSEbL4Fb37/TfuoMOTsrac/5BcX/L8F57/6Vd9yTkoCgSiUQCn89DMpnG7/fv424E9pUy6AkOy7K83316vTfo2GS6uggGs9EFkTPOuRyfz6atoQtb76IzkyHg9dHc3EwgEMDUTfqUFGJZBqVDh1EWKGBH3W58Pg+CoWNmdAS3SKwzQlNrEwgSHncWpigyoHQA6bRGY/tuPKqKJHYXXdPTJhnT5JeN6whl+/G6VERRJZPJEM4LY+gOXpeXpGbg9WdRWd6fY6dPZeXiebz4/Is4lsy7H7xNeXl/Jk4ej5Y08IeDbN6wkZ+WLGTUqAoWzF9HQUmAp555mkOPPonHH32eN4Pe34wD8ZnXmHbcsYwc0o/v5i3g5adfYPiUEaxatpiMriHFJG64/iZGDK9g0Y9fs3TlzwwZNp5Vq+dSkNOPN9/6mO/nfUKspZ7XZv2d775bzvXXXcvydV8w6bDJOPU1pEU3LWaCMp+HVH0Kl787kaFBiyCIfSj0irSkNSJ6K1ka+ALQHktQVBIgkshl09pVeAvzmZQ1gZaiBNUbGlm5bAuj8oKcNPN4flm7BtOUsDSdgEtB9QZoSyaRNA1Z9hMMudGdLvxykOwyg0hNAo9UgOlxI0teEno1iYRMbiCItzhFos2mqyrJkCOPZE9sNyOtMTzyl6eRszQsUycli1gOeOwDSRj2ukTpfh9/+bmWFVu/o615D232NjximGjTRlSllPZMPX4tF3egE0OZyNDyAfy87HnOu/YffPfP57ng4hsYOm4MH7zzDufNOB8hplEkh/jk3ddZ17mSkaNGsHb2Foyydlo7I3j0JvpOuJGKeJj5y96nK1vlpPMfZNXHDxKwLjAAACAASURBVDDxlCup/XoVSU8nSzd8yykn/J62uq2ccuNdHBYoQHQrzG3cxthgEYV52f9zlrggCL2pSU4HepArXwHnCYLgEgShLzAA+G0E58CHEAV0S0f1erEkCcsw6NItxE6blAKLPnqay4+5m9XtbQitW9i6dRsf33MtdSmT5lgMqVVAM1sIqlV8u+hFbvnDFTx+59Wk2tqwdQlDE9E1G9O0MUwTxePlpdmzuf/q6Tx0y3F8tWAut91+BSt/WEhQ9iM6Nl1dUQQHdM3E2cto3VMwq8c107sofG/pUdIHZmr1rpd+oAunt7LvsfoRQDYddrY2oHjc4JJAN8j3BQEbUQTTMJBF6V/ep/fnf5XS+6+O9V5tdEv388uyjCBI+P1+4Nd4g2F0Z8H21L7paZsDkyx6JhQZgWB2GASRJcuWkhOATJdGQX42uD14RANLj+FWHETBoivWTkXlALJC+bTuqmPhsh/RYq1s2vAzK9cuZfW21axfv57m5kYkBGRRobaxiq1b1zH3uy/o7GpizOAx5GTlEfblYCVMREVk4y/rCAR8OJKIbgtolo3H7UPUbNyyQtLKkJuTTZav+/9G2tp59OHHQBB54+1XOfvsc/H73bz88tO0ddQy74dvGTHyEK66+vekU/n88OM8GvYYbNvawMRRR/Lhm68ddBzcfMXFDBvWD0mOMfrw8RT2H8lPc38kIMkMCHtIdNaSytSxY9s2rr/xFvbsqube++7igvNu5YzTz+Wqq8/hmGPP5tY/3scXX85j5aJl3HDWxdx45PkUZ5Ik5DK0jhRiRiGzu5MhE8JMKSpEd6vk5+djUUeD3kqyo5mQJmCHbDbWdJBXMIwt62NozV0cNmUqBYWtnHP1dcR2ZvCmY7z60l1sqIqx9NMNyH6ZQFgEVxJfWCaWbMfWDZIZBxsJW3WB7UII6zQ3ZFAH9KFT7iIlQlzvIpxVRn6Wn6rq7bTtMTANSGUHqVo1j5PHnUzlqUNY2bQMUXGhuP3IgozH5d5XC6lnXLk9Iooi4GDg87uoblmIV7YZPW40I8pOpWF3FfmeqbhEP0X+gUS0FkoGn0Gu8zM/rLiLYUPPZ/Xaj7jr+vc4euR4vG0J+g8qp3rBIgr9IWJeqG+32bp5ER11NiOml9Ba30yW4CPaVcjOdXNp6C8xcODxnD76QtZ9cz
+ZWoummnp8ZRJ1jVupCFRwwtGTKZ8ylUNLiugKKcz/egHTywbjUX3/Rmv2Gqv/ATplFjAVyAVagPv27o+iG+OwG5jpOE7T3uvvAi6nGzF4s+M43/67h8gJBZ0TDjucvv0H4tgSoyeNYMCI8fgG9eUfn7xF1U+LiEY3o6pZ9C2dSEPbTmL1bRTkWXTFNUwnRloPUJCdzY5ohHyfi+FZIAWGIOcO4aJrbgNbw7B12jrbefujD9m5cT55Ti5dyR2UlAaRbD/XzXiAYVOnMm/zz9QvXcOl518MigpOtwXak+GpKMp+yuhAi7c3quX/hnppv+t6AqKGyVmP/4nWBT8R0dPkBvzkJb3MWvItiiRjWcbeSeXXTLHe1nNvK/xgDNs98oCw/3x+r21149/3PVs39LJ3neYexe04Dqqq7rfC6Pm9nhVKz/GedGTbtruBiB4vt911F031DSQsC8lQScXqsKw4huVCNy0S6RT1jS1c9fuZtO5ppLCgmF0b1mGpJmkcGmvqcYkqlmniKBKZVIyAN5ui4nJaki2072nA0TXU7ADh7BwKy0qId3bhVhVWrl1NlseH7hioHi993LmkbQtXwEcyFcMvqXhLcpBNAb8vhGFCdq6fwX1KGTr8EN7/4C3ef/cTZAXA5uzzjuOK627mmCOm8c6b7zFy3AjqaxsYNGgIC5cu4/LLZ7Dux23MO3LIb/o/74N3aKruonJEGXm6n7fnzOFvb97PucdeT0tyHgX9BvP1x4t45IGHuPbKq3niuadYsnYFLz7xEM11VSSSAYaPGM3jj9zI9bfcyTdzFuFyS2xuXU4iliaQaSNkm+xuyFAsa6jZQY7IL+G5nZsIeXPJyXdYtS7C4X1k2gPFoJusXbuDISOPoLQwjy8+e49oo0NLXSvnnTyWoqmVWFGFq298iAVf/wO91WH+4reRs8ooLiukvnknjmWQ7tTIDuXimDppRydbcFMQkDHCCfKKs9m8vYmAGMDlCrFr1y6ywwEk2U+O10VhgUCro1Ia9HPl7d/Q0b6W3EQBUp6PYX1LSBkakiBgGeZ+77QoyuiaSTKZor6+kVnff0C2oLOjOU6By6E2Uk80soui0iF4va2kElEEz2B8nQIz7nydvorIzLuvIt5RRdGIAdxy8pN8+/1rbEltJNedy4b1PzCi77HIgd3s6fRR19hEv7wcFGx2pBsYFTgVSV9OdcLFhefcwOo1c9nSvA2zfSNecRhiaRqP5RAlg5wwGTbkNFbNWcj7X33Ly2+9ydSzpnFiv0P+3wU2/6elLNfvzDj9CPY0Seh+mTKvTMKIY6CRVAU21+9iRLA/Hsek1WwknD+EpoZd+H0G8RaRrAHg6VTpiMQIFBSRHdRpbrHxqmEC2R4aIi1otkhxQSWRjk7CRoxtkSiTysvZ1bqLbE8OnU6KwuIh1Oysp/8wH1lKH64451oqhw1CMtxY7gyaoKOlbLJUdzdHHt1WqW3bKLIHsNEdC9mR0MRuhebRHbosjYDLtc8NI0nKv2Qa32c5IyBaGoguJs84A3FPIwlLxyO7GJ3Xn5c+fu+/nBAOJgdD5/TIwZX4wVOEDyYHvke9YZf7viuKCE736kGzNFrbO3n06eeQHInG3VVojky0tQq37CcYDtLW2U4iEiOT1Ln+hj+wefsOFFUgo6fJyysgGcuQiXTQFGnBckF7tB3RgWhrO16vF103GTJ4OIZl0li1g5b2JiyXm0BWDoGgj3XrV1ESKsMVUAhIMqYk4ZUksMDr8QMicsBLRtMI5eaguFzoaQtFErj7zj8y4pDB+/HCGoaB263SEWnjsccf55Zb7iAaSTBnwWdU76qjamctV115Aa3NcdqunfmbNvyz7VC1p5Wa6jruve0hHv3oEV546BkKCnOp3bKMPS2NPP3Mq3xw111Muvh8XEGRgtwhnDF1HDPOOoP6zmpG9R/HgnULmXL8xQwbUsyGBRuQkp3UBOIENBO/EieuV6JrCUqLNJbsbODo/oNZlazFV6fhKysjmYqSSrRjxzPM/ucS/vTwwwgJL4MmDeK9D94gJzubin6T6GxezOP3vMUf7ryDYUNH8cOKWST0DNlSLhktRTjsJqHFkD0u0h0Zgp4cMpZAQd98iuUyqhp2ktajSJaHiJmk1Cfgz/FRJ7cTNofidet0mL/gVgZgGFGyswspkcq559l3mX7CGcyf+zmCY2KaOp5ezDqCICDpDoJXoH5PO4889SiHlJdTNu4YWpbOY/KVJ/L8A88wZuLhhPoXUL1kNfXpdlqqf0ZWkjRLnYTkYv5w3nv8/cubOPTQY/n6y7kcetQkVi1ZRGu8mXC2n6BfQbG8xBAIyQbJhImBzow7PqTjixU8sfwlrplxK9r2dppamgmNKKd14RI2ddWQ30/E6JIQFAfb0DEUE3/FYYxRcskuHICvPMRZR574v0eJh0Kqc96hoxFzZfw+L7sa2igscyNpKWIRN7m2QDDLZp0eQeny4i0T2b2tg8p++dTU7KFP/1xa0yrVjQmy9RCqtw5fYCDR9gaEYJiIXk+J3Q8pXUdOUQG19S3IoTIamnYzrKyUgmApc9atx+UpoCDHS3l2HJcSprhoAKdceRmt1XWcOP102g2DdStWYEVTHHXcVFweFwjgCJCOd2LakO0PYtmgmxaansGvykguBRFhnxWv6xlkWf1NOxzITIJtgKBQdvR4+hgCGcHG7/IydeBY7n7+ib1ujX+vyA/mjz5Q7j+ASeQ+5+DlUg/2/X/la+9NPdVtwVukNR3J5SaRSPHIg4/RVN9EfUsD2X4vSF5sI0VrSxOdsSjxrgTTTjmN3PxCEnENt+xi+45NlFaUYZo2tmEielS8koqg2ZjJNF1WDD2ToTMWpS3SQSgrRF5RIe3NLWza+gvhrBB+rwdsnf79y6hrilDdVE+xPw8ry4tgdPv5o7E4xSVlJKNxZEVBdrnICobAlijIzSeTjvH2Gy91ryhkeV/QWpVdGLaF7ZjIokAilWZn1R62bKqmqFAlHCpk7NgxPHCQPri8o5Oq6kaCJSWQjjCkqJRzL7iB0aPC/PDDNxw6aQJePEw9+VxuvvQUXnlnFk888gCHX3AaVR++y7GX30l7w8+s25ngpuuuY+Ahgznl6OnEjT0EbImigkHEhFrcAZVUpIuSgmLSu2MMqEyxPJZNKt6AX8xldXU7fcr8uIUoQ8dfzMYvZ1MwaRQet8mlv7uDex85DU/FaLbOW01GDXHB9DPZUPUCfQqnEddqiXZ2dAeM0zbR9iSWKVBWUorlpKit2U1xiR+X5COWUvCGOxDNfrhCCrXbtuCWPFQcMpAtm2rpWyHxy/ZaDhs3CLc7F7+YYnPCRSjby6znF+CywXB0bMNEEOV97xx0o79UW6LLp/LRe+9hOVms+eFDHnj5ryz/YD51WToLF3yGHauntCBAslXEzkuTkUUuPv4Wtu9sZsPa13BnS7TWOJhGE3phAWHbTcDroq2rgaLCXLoiUTKCTVFRiLoN9fQdPoo1P8/n/AteYF39ehqW7Wbc1H60b6ulJWWjsYvyfsMxIxvAl08ot4zq7bsYOepkU
lozVQ1bqBx8HCdOPYLjxhz/v0eJ54Y8zrHTB1FgWERlm6C3FE3pwNA7qdkToU73MTbHC6ZDUHDY1giiINMVN1HzE6i+XCxEcn0ymh4jEnEYUZJHXDSor2lEy3YxNCRQtdPEL0Bp2SGsj+7EiscQgioRR+KkwYexeMkWJo8ZysqOtYQNN1mhPOz2NKWVhezu1MmWsghnQ0aziCY7yA7n0NYeobKykuceeZH6jihloRAxTUeWFWQARSAWTZCb7ce0bGzbRFVlTHN/JXkg7FFwRASxW4kXTR5JiSOhyZDl8XPWxOO5/p4/7lsF/Fe+7t7ukx7pbSH3/PaBlvj9HPy9ONDHfrBzPWLzqwtHFERsTachGeOOe+/FaY9jIqAYJt6CML+sX4utGQwYMJiOSDOK7CK/uBSPx4coyPg8fnRdJ+BW6OiMktIy2KZGMplEVl04gojX48ey0vhUN5Ii09LegoFNQ3MLTsYgocfx+8NEO1oZ2K+MfhVllBdnES4s5YfZi1i/czc+j4AvK0g8mSYrJwcxrZHRDdweH4Ik4/EriIKHcCif226/kfLifPx+/7421fQEsuJGFGQcG7bvqWXdhjV0djZy1u8uJpTtpqm5k7dLCn7TtqdvrcXn99LZ2MCn877i0ouuw2N08tMPK/jy2w8pruzLU4++yLqfFvP0O0+Q78g4YpTtu7YxbsJ5qHYnRxxyFu8ufoMH7nmbZx+8kk4BjHSM3Jz+BPNEWnZHMJVm1IibXbEGJo3sz7bdUQ4rMnl3TSu5hQrJCBhGFyW5AeysMMUBNw1NCVbsbOXwYUdww3nXs271J9z3wnMcecFZmNF6ivMHsX3zT/glP1nFNrKs0ljfgSQoeN0+XN40dVUNJBMJvEoJvoEBLCdBXbVJbiGMqSynOdNF+64OKgrLOfXMN1i3ZxmP3XwtJ59xJh2pNRSU9sdOJHn5yc9564uXOfKwY5k8YgyCI+5FHf0qLrebp154nWsvvpQmM82zz9zCiSdfwebv57GiuYrfH3suZr7Jpy+/RVppw624cecMoKZqBfgUrGiUon4jsNpM5JBDU+MeFMeFV5TZY9VzVGUpu9tdyB4dvaONpJKL7JXJyUkz/ug/ImsCVRtWctHhp/J18xZqvpvHFTc+xJO3XoNVJmNbrWAUgq+dXH8fIp0tDCyvYEdnhFAmTvGE6Tx+3X3/e5R4VkBxbrxgCPN31lKa3x8n1okmiTg2uAIh3J216KpJW1uA6tYE4w8bQNWmLjxenbr2FgJBNx7HQ5Eo4CsOEdf9RLpq0OqbUUoqyFb8VFRkkUrprK+OovoSlLiz8OZ7EWNROgyHPdEMRovEySefjKTptGZSJKMmbakqxhV3EbX6ULWtkx1124llFE4741Rsy0LTNLKzszAEh217dlPgckNAwWuJFOSEqWlt5JRjTmXr+u08//zzSLJDKpVAOuCl67Ho9lVOdEQcNETJQ/6hQ+kvudGlbiV+8VGncsntN6Cq6j4LsAcF0lsOpsR7TxY9+wdDp9zn2Ae1rHvu2/Pd3vu9EToAiAIC3YlFmUyGO555BrOlk1hzG1LQRzIVR4wk2JOM4PF4EPUEO3fU4wuqTJ4wBTU3j1RrBI/UDZFMmTrJZJJoPEFJnyI62puRUhki6UQ3O5EsUburmtKCEspLy0mlUiQSCQzLpqu9FUcwiWcSFOSFKSstpLSkmPz8UgTZpji/kNzcXKwui5dffY2EpuEJ+HFkm7a2DixBZkDlIHJ8ISxHQrfSFPcNcdvvb0GSJMLhcPek6nTHDxwsZn/+IR+8+ya63UBJ6HAsYQslfUYzfuxJrD7ttN/019WdEXKyQ2xdtY6Fq1eR78uh37h+eDSbJ+65krHjJ1KUM4gnP3+GY4dOo7Z+Ky6fzaDSQjbt2EaOEiY4fABd9RIFUgoxnMP7H77NoSdNZu3PKxnaZyB21EL0uGmwO5lUYNLYrmOKrQSDYzm8xMvz8zbicgtk0iaK7DB4ZAXLvt/GnY8/zcQxU7joqmu58bRj8A6eSHlWKQ89fhmGlM3mbesYMjxI9cYOBBKUlJRgouPzy7S31uHPH0JZnkM8mcCdDwMHDGHJgtV01Mv06x8k1mbjK3URbYtiaGky6e6g86xPdjDzotH4cv20ter4/X5uffItvvrbh0y76DxOHTiMZCaN1x3Y907ato2KSPOeOGK/XJZ/9wMlA/qhJevZuXoHy+e/xJZognETJ6F3hnBLXZRNOIKfP3sRpV8hbTUbOPSIm2hvXMuuup24kx04RQH8XSJduk7EL1KSsGm3M+RmBxENEVv2gdlKXt7pDB2Vz5Zv3yDoHk3xiZMxlXyWPvkGrlGtKO5SqK9m0im38947D1Dcv5B4RzPZwTBOVKZfsUZb9gSitbXMnjX/P1Li0v333//vrvkfl3vvf+D+xmaNpoiEXxrIloxOvF4j0piiKyXSYMQwUjm0JSOUh8vpjEYR3GmCbgeXkoPsZPBJIGZ7iMWTZLQMwZw8WpIqjhmjbGghe1qjrNlS1008kVDQdQvB0YlbxUian/ETjkK2ZRqaO9lZvRkllGbrtiX4fG6qdIuCpMXIiYezrTFBbtpDvL0D0QeOJeHYFtVVUXK8All2mITRSrsmEvR5iWsyieYqcp0I89bM4bPP/kZXcz1zF//E9roduLw+8orC1Na1k5ObTUaXiHZlMDFJqxaKLfPWi8/hlgVkl4xHVOlTOYjDJx2BZqRAdrG7I84zf32W7xcsZNiIUbh83r3YeAHNzCA7IprRXYtGQMJxusld4VeI4yIe2K9PjnDuwbQsBBs0XUeSZQTBQhQcND2NoSskVYvv5i9naP9ytLSApIjMWfw18756jkUrl/HNt++zcesOhg4YwYzf30yuoHSjPfQUhp6hPRolLdiEs8O0tDaR0jRy87L5P+y9eZhlV1n2/dvzmec6p+aqrq7qeUyn052EdAaSkDAFhKAEwaAfInwq+OHIKyKCiiCI+qKR4RVQhGAgA4EAIdKZOul0utPVc1fXPJ2qM4/77Hm/f1TQpBMGfb9X5bq4/9l7rb3WvtZZ66z7es5znvXcmzbsJBxNUqtV8C0DRxNo1eo4koCoSIj4BGJRHNNHEkUcT8So6wRDIcx6m6XlRSrVMrrRQUSk0aph2AaGZeO6Fu1Gm1yuC8Nq0q7XkONBfNfDb3ewXIfLrrmUN7/p1Tx68Aj5fJVYLI3vekiui6DK6I5NJhHHNWQmJ57mzvu/zqtufg2moaP6DngOjz52mDs+/i68UI6+vl7ylSWaDZvzJ0+zWjpL/LVzL9gHY7PvxGi3CXb1ErBL5JeOM37wOGomx9fuu4uKbvDQfffx+KPT3PCyW5g4P05X7zr6N1xGUBzh19/3Ae6/90EuHx2lqLU5+fD3eMWv/x6lqQq+so7dV/wCb3nNG/j203cSkeLIQYeJmktwqI9OucVkfppQRkMPRajpEpF0nEbJIdyVoLpU4vd/8/cILJd5auYZhKU2r7z+cvSFIudqz9BqLpCJJhgbjJHrC7OxO0mtXiZkjOGFphDNLEhVenK9+LUEZ06N
o0kaOzcPMl6Y5KY9N1OaXiQa66UrbjAyCL29WR777gPcdvsf89Dj9xKNRfjcB+9j8fB50vEAbbvDvg27CRDAk9bOZVimhaQqeL7J0/NV/up3foXojnUUm21CLZGvPfo9PvL5e7i8dysnJma49S1vZflEHi1gcWLiSZpWGTEUQBebTDw5TfeQh9eR0TsyOhZdPTlayzXkSABV03jp8BWUNYdmQSTa1cNy/ikKtSlK0wVWYg6rs2fpVObopJdoGAInz4wTGoowM/EMfjxIRg4QRsEMyFRbNhUBjGoR0ykydaac/8M//MNP/Sj+/G9hiY+ODvsf/qN309LrnDk1w9zSLK7gsVQr0CiWkZs251bq7N/Yy2ytTDwUpCE1kEoqvSNp8ksSWwZkxGSbYGaIJ48sk9ASqFKNdC6FXy2Q6+tnZm6RTK6bmYVF7I5DIDZMIukQESQmZ5fYvPdy5LrLTHmJmCtwZnWe3pEkouHSo8Y5vdIhlenGO32BpiQQ617HukgYLeVgpQyEapSGFmJ5RScaEQmIOjlFY0daZ1Zy0MJBWkWdih3EpoDlWoyk+6nOani5FOXSef7io19AEGOEQzGcgM1dn/8Un/3ip0kpIWwE4sEooVAIp89EdaIMpjx6klFadhgVEb2pY0kaWjBGsWCxf+8u3voLb8WRg/zDP36RM+Mn+fhHPoxn20iSgu2uuXdezCcuIGBbFqIKZ86f5YGHvsDsuMBf/c2f86nP/w0zD38GNbedameavK4QDEhc0tVLoTrLlq3bmDl1nNHLb+PI0ys0m03qlTyNdotms0ksFiEVT9Os1hFcF1HxWZjNs+fKvbhSkE6nQzqdJuaLFN0Wkm5T82y8TguzZeBrAWQkRMVBNywEy6FltrF1i0qlhG3bBLUQrryW2Q7fRVVlMqkoL732AI88/DgXJmbpH+5hJNuNFtUIdiVJhxPEkgniIZmQlmDdui38zzv+jqm5eXp6+lBEFd+0sWUI5Qb5pTf8HD39WcZG+pGA8aOH+PBn/ydL1SaBUgXdL7F9JE6j1qTTMJAyKbwK7PjmsRfsgz/wfM5MXODg177DNbdcR7mwyiXXXsPxhx7ig+/+TYJbM9z+nnczKoa452v3sm33y5ibeZgz43cjmKArDsFEhu6Ox6lyi1woSdnTyckiQthiph5naXaRpBpiz04QjADHFvNosQSL+RIoJoM9A4gdl+XVClJYxLA0tnelODUxx+///FvpSvUwEInx2x/8I+6drPCLv/0OnrzrH4lmfartJl3xHrxZlaf9Klelu5nxZ0mltpCKrtCopskNaMzOjrN/92WMLy/RXqoRdGTkmLR26EqxkeMputUUclzixPICw+uCVI83GNhzG7e97a1cPboRz7LQXYO4GgPXwcXB9H1aHRtVlEmEQhiKwx/+zjtYl+vixrf9Lv9yx524VyV4zcZb+eQf/xVHja+zR9xJaMflWOOP8ODp+zDjQQbSUeqlKmHXoyqq4Lv0dgcxO3UUScXUBRB9lIiMKuQwhSyb1g/z1JEHiYQUpIZMyfERtSKJSBfl6iyZ3ADF8ytoahxP9nA6oEQsemIpCjTRNYNQNYQnV4kqKQIxl3/69OmfHHdKKKL5/aPD9ASzBOISSjxIbyRJLBqlJ9tFsnuUrlyIsJQinVLRZHBDUVTTZGm5QF3I0yrDxPELIDUYn83j2os0yjU6Todjx1bRZBHf8TAMWLe+h/6Bdczml3jZget46HsPsn79GAvFEvmpKW6+8VWMH32aLdsHaYeCLC+eRfAyRFSV+eJZfvHV17NU7rB3KMX9Z08jNfrZ0dXDweJ5VucqRKMK9YrPS3dmCUVmkFs2NTPAasunZUNPb4LV2SJKJogQFNGiNktzDjuG0yhuhLpoIdZn8aIJwmqSw+OriI6N6wmEFY1YMsDWLRvRqybppMjUzCl6121G8MBxNcRAgNLCIvFYEE30aJkCdSnM7i2XYHba1Ks1PvShP1nza3s+tu3ysXD0eWvyPnctZMsRbD7+iT/h3PlxVMkmX7V56bX7mD6zgLs8TTveT0WfYc/Gl1A8Nc6yu4oa6EfoOOzecxmnl9tU2jp9jkRL6dBut7FNh5WlZQzBZ+PYGJ12nUK5xIG91yPJLqYlIGoKtuUhWxbLzRL9iQzFTpugbVCv1qnpLRKxOHIkQF1vIHswu7hEOhJhdmEWVQ2gd0y2bh1lcW4WUQBVVggGQ4wM5ch1JekfGiQQjPFr7/ot+vpyDPT2ke0JEIoOkwiFyPZF0DyBQCTB9u17+bs7PsfTc+fY0r0B3bfZu3s/W7Zs4wufvYOPfOJP6M1k+P23v5lIehopfjUx8z7Gm930xMIsLVs4SgzDarJvSxzxQ4+8YB/8f3qLb957D63SE8wst3j77X/A+IVx8nNlHn7kHuzaCvsuv5HhSw5w/Y038dcf+W0KDYfps98h7Fk4IY1CUyaW0CgsNKh3bPZvG+HU5AIRIUL/QBPHrhOIDrK01CQ+pFKvFMhGIrhigPxyE1UOYBpNNFGjO5XFsus8enSBvZv2UbswwZtfu5/CksFLXrKfe+/+CuL6PhZ0i3LzApptsiHcT9BzqElVglGfaUvFKLcpd1R0p4GqiiQiXXh+FcUSCfX0Y5VNtLREqVKiUTXookHhSwAAIABJREFUykWQmuBFdSaXkly5LsCCZrFe9Smlt/Gl37sTSW6iaRoSKq7vIMky9VqNe+6/h8uu3McHP/VJNnVHUanwmrd/kvu+/vdo8wF6XrmX8a/ezRZ1gLw0wyf+7kPc9pa3YyyUODM9R741T5+aY8opsqUnTkE3yGgSTUXF7dSxTYtELIMveISRCWfSiEurWCkdoRGmSoyOVKQ800LGpLs3iCRmKZfqxOIhaiWdaDaHY7RRUypyvYWmipSdNiE1jaIYWA0Qgg4P3jPxk0PiXYmIf821I6RDISZbbaSlVV5z86s5MzdFSzDoCC00QaJT91nX3ceJ2WlsSUewc7TaHVShhVG3UHM9iH6ApOITTw2QzoWIR5L05Ubp7cuRysSRtAC+otCdyKCqBovnyyzrbQrFZVamZ/j2o/exLb2J88WT1ObqSN0jOMYq3YlevjV+mIFoEtPQ2XPJZTx2+AxRVefqA1dw98NHycQ8xrZuZttIisuv2cyp8aOcP1UnIOqoei/d2QBPVM8gmiKhjs90sUo6k2VxvsG+/b3MNpoUGxbrohE6jQodUeSy7Vs49NQCim8iSSpxRcMVGyTCEt25dTiSTTCh4jYMfEnDEpL4Toes38aTDXyrjSHGKSgxwo6PEgrgCTL5lQobxrYiiyrv/MV38Nm+geetye8aHVBl/vBP/gAtYLC8eIZSoU1SiqLES0zOV+gZ2IBc1YkGVM4XFskMpikW6/SHd6CGhlAUm1ajjKg3KPgitmXgOj6xYARJEDBaOquFZdSARLNjcMnVLyejqWB41J0OjmeCY+GLApbeIRgKITgCkXCQqeV5RG/taHUgHmBlIQ+Khq836dgGS4UCL7vp5bRbFSrlEqv5JUKBIILsEtWiBDSFbLaLRDjEwOgYO7f
sYXUuz5nJp7jjM19CkUWuvm4fQ73rQBbRAhJaQKY/u46DDz5DJSgS0SQ8T+aTH/5Lmo7O6vwU9971/3LDy/+GR+7/JQ5NVunvlbDzDRqlAFo8wmLNY6DfYfM/Tb9gH2z/6hc4/dghjp94lA0DW4iM5MhPTRAyT/PIvEB/WKBv0xW0lw6BGOdP//4kX/nUB/n4//prrt22kYbjUjdTDGVaLC0WmMxb/Nq7f5W//ruPYHRcrtgzRiOvU4sus79/AxMzC6zYOhuHxyjNLWM0PAzZIpBIYNaaDHbFEGhxaTDCvWWFnRWF9etHmJbb7M92YfgZJu2TRNs5js4dItafICyItN0QA3aVes1C65KY7zSZmDVIpDTCYagWV7jpNa/ikS8/QHp4jCXdZtvGNKJoc3R6gT7RYkUJ8qbr3kO67PBbj32W3kaDSHoDhh/n4bvvBBeQBBxfQhUEPEAyTP78Yx9ivpHnhutu5YnD99E0TQKaTLXq897rX88dT3wDTThPGY1bXvbrlE6e444vfxojoqB4DfSmSndOQm/WUEwDqSeMZMq0vA6JoI9rmcTCMVA8RLMbJ2wht8sUpB6SQgPVaVCTw1R0g6F4gkrFoac3wTPHzpDsimC2bOp6h3Q6SFiIEIuL+J5Aqe0hqRqyAIIp4CoS3/3qsZ8cEo/ENf/6/evpqBqG0yZYlLh0706+deZxct0pPCmI1jDxJZGx1BBPVs8zoCnojkeg5aPLHV617RoOzp5AjMkkhAiqIGG6dfqG9lMunMW2bQiC4cGX7n6IV157BV65zXJbx7XB92RUWaS7J8m7fvZdVAMxvvX4Jzj0vWnGEl1cODuP0htGiptI4hbMI0e5/De6uTZ5CxtufCtD4UF0rUMmEsSeKfJk+xnMmkC5soI+VeR07SGqcxU67RYnl1awrTo92QFOTE6TTnQjqwK1SpUrD+xlabWMXEsT7XIZHPU5dmgVx1dBlpA6JsFEGFSV4R4Vy1WoGvMMSiF6uzP4rsdy2aSgt8jXfCKBHNlUmwPbutE8HashMrOkIEcjOIJC34BGPr9K4GP3P29NCr92FdW8hJyWWV3W6RuM4lY7mJqL5vvgBLAcm2CsAfUIuqxi+7Ay3+aqy1+BLPmcnLmA69kkAyFky6YjeJiOiyTLeJaJ7bkUFpcBkUv2XU61bBCLhOh02iiyhC+uRZ8IgkBTb68lNfN9okoQLRiipreoVUuEA0GKxSKhQJBQV5yFpUXGevoIBYKE42F0vcXc/PRa/LAgI4oSrm+jqjKRSIR4LMJAfx+9uW4CCETjMSLdWQ498Ah3PfB1wmGNA9ccwDAMcl19yKrJ5k3beeLQLF5mHb22T75+iJDeIZGtsVByWKmucvPP/Rb/62++yM50BcJFKk6Io+dN1g/HufzO8Rfsg/P/z0bWBccoFWtE1CBlSgwTZs6RODFdYf3wOnSxRI/jkU5naevLdNBIpnbx0MMH6RvrJRMY5Mz0d8BIs2v/pVRqTS679go++t73c/VtP8toPcZjq0cwKqdxg2kikkfSM5lzO0QskeRwD1HDReuJ0snnqZs6GaFJesN19BdERpQspw2XV73jF7nmxpeQVhO0uhO889a9jD8+zlhao9Ru0rIyzJshNvatsj3j8JVvzRHbuJmF1bW8RqpUR42asBInuCVEe7nClvWb0M0GK7Uq/ZuDVI4VyFy2k/nHalx/y008+L2HeM/7P8DP7H85juBQmJgh2dcLoo8uBekSJHwFJk8dY15vs29gP2fmjvPtb95D99Yc933l7wl2KeRyuzGnl5ByOQ4e/Drr1w+hmwbxQITF5WVUycEJm8hkGBwaINsdZG5igXRygKXJ05iWiKR2CMfD2LaNLCTBEwnlTKodHUl30VWRAbWXp4+P4wpRYrFu0gEDW7MJyiHyyxV64lH0tECP5bEiG9QnPDzfJNDdRWe5xPgz+Z8cEk/GNX/vgX4UIwRah44c5EDvGN9bPE3a8bBEiZgcxRcMNiXGOLo6Q0xwaGsSCTVM229x6dBOTp45RSQWxLMLtBSFQEdgdPfrmZz9BvWGRzqRJBlP8KXPfY1X/NxtqLZPtTWLHayh6D6X776Rh88uIbh1rMYK7/y1O/jMPR9GLYSpNhbYvWWM+fYEXX19rBxd4U///Mu8+u1vYnVmmrBX5tW3voKZhdOEdZe8p+LXPYRIN7VaATUqkQh0MTq8nki6j93rLkMbjDAQjRKOBVk/NoLTgXptlWplhcXWDLUVmxPHT3Hq/DQz589j6DqNUgnP97HUEpfvT+EJKnOzHbIjWURXZPtIlkplGsHpZaGkku4z2RbROFtusrgUZvK4Qc9IHEHSyXYl0VJ5AorK2Geeft6aTPzsOjK5CAIZwgIUdYFE22F4fYKILNAqt3nKrLM61WT7ngGeOFbAtRRuuOYWFucK+E6DUCaNaRiY7Rai62NioygK5VKVRqNBTA2T6M5CIICIgmPX0RSVaDhIs90GF2RNxTRNVosFfFGgsVrCVkQ0SUbs2NiqSDKeYmlpAU2R6PgGq6ureLU2IyMjhJNxooEAxXIZU3TQ+Deha0ECy/WQEIjHo6RTKaIRhWQ0Rl+qC623m2w0iaaG+fSn/47F5Qu87JU3027YrB8eoVTLM5YY4lh5lZDpg3uSUsOEmMbuy24jf+YO8vk8lXaQlgBhu8lgdpTzU8vse+CFlnjptt1MdlrsiMQ5763QrfSQqlWYtCWWDIGRwQS06mh+BFuxyCVGqNUMSm2PWrtKpdVgz871jE8exyvbKOkspUqH/kiIeL/ENdtv5zuPf4Atw9uZqzfQy8sMjYzRXqozoTcQpBBz5xeRtDCqYDHalSDUFWd5qsDLr34Xi+YQajDDvFNhQB7BOvan/N6DdyA2HIbCOf72N36Jx49+Bat3HW59Cj+4noC0RNyL0dUTY6FSYGU1yrSxxOjADtrNKSRtANFbJpgIoXQEfFWmYrpYq13oUpD68hRS1OLaXTdx4tRhchuCFJY6ZFSVT3/5Lg49fJi+LRuQ2hZ7913B9+7/F9aP9fJnn/5bzh37FplsGk2V8NQiXlvFsQUikRgXLozTkcZIhSt0iKOqGprYoFryGBqWINKNXm4gCtZa+txmADXgInohPKFOSI3iywJC3aBtGVjBDl4gyHAkhmH5qNEuGqvLCIpKsSxTrbUJBmw0y0GJBVBCMr2qRitgopst1vcPcu70ClY7iBSIEdRqPHbw/E8OiWcyIf/lN27BsiQCAQPaCv19gxyZnSCtqKSFNiVLJBC0WGflmFQt4gLUJPDbHYJagG39m3nwxCGkriDJtogpeujNIvtufBvnv/dPNHUfw7OQVRlBCZLesJ3q7HFW2w5aIwF9Id5y4BV84/h9WLpFy+kQN0Re88aP8Mlv/CqaGQSjydax/RjHH+b2jx/k93/zbZTrDfoUmdXaLC9/5atY6EwjNV3USBSh2mDr3pdRWHwM7B7IVAnF1rO8dJ5kQsV3WnTsCN2pfiaXDlOvu0iiRjyc5eipcURBRfNDSJ5MsCfJYLaf7uFR0qksm0e3IDgxtu/dRi
KkINrQMXQqjSUMW2a1WqG5ssjkyZPo+mnOrTzD6oqKIC7SbGnElByJGFh2B01z2XDn1PPW5N7LtxJRU3SHA5S8BqO5JNO1CmNdAep6lZgmsTuU5kRzmUenOtx4/euxWiFanQKi5FCvONitGrKiEE7EKOtNgo6Hba6FCbZ1iy2btpHrH8ARBETXRceiU2uCt6buJHQMBEnE9X0QBabn1pJXdWo1XNmlJXm4LRNFDrK4MMOVL7mMTDrHPV+9B79t0O50UEMq8XgcTVOQNRVZezZtgCfg+D4aAoZh/Gs6AU1JEI5IZLJR+vq66ckkiIST9PT0k4wnOPzUMe65+5+56sAV9PT2UwnY5EyFPde8gS996naSIQ1TKlLVTWYX12GXC2y7LEy7ZWF2Gvi+TyK0g4EvvFBn88xN2zhZmsRIZXh9LIPZLTKxVKdP0tD9TbRbi/RHGvghC9dKs1gtMtupIHVkfFcj1uXg1jXEuE3aT1PTPVarC9gpjUathTsvsvPy9RQby0RjARQlRX42j9sTZLuQ4vzcIq4aZqDPo0dJ0RKj2Eac3/zTv6b9nb/iLx7t4a3vu4b7PjfBXrXD1LZdHIhPkOgbZUsmzuj6rRzYMsytN2zh3kee5LIdOY5WqzhGgoGhBvUlgYJSISwlmFvViYaiaL6IoViILQEn4JBMSiRCMcpWnnQ4y2PfrRKMpQh3dF536xuYmpjm6quuZnlunINHnqF3eBQvFGDvJTeQMBrc+9g/8dLrXsl3jt1LtC7Q6QN/2aZdAQIhPK+MGzLwLA+93EXMr7KitPFtnagUQw0mkTzANggEJQTVolkXCQcEWpYLUZuuZBJJrNEseRTaBj1d/XQFFVrNEp4k4ggymqgSEhykcIyl8grlSgSpXWTdri1UV4tgexiWiRIT8W0LwRUx3AD96xSaswqRdQPc85lv/OSQeFc64r/6qg1IQQ3VNYjKIeRMiuOzM2SjYUyrhRDuRbSKDAj9LEd1xEodx5fp1aLUNImNg1s4fuh7xJIBam2fhO8jd0ukhq6kduoJBNfDFjqYos9CzeDyK67i6aPPoHg29biP6wW5bdulfPWxg8SiPkgZmq7FzlSMVs8lTB15gLDtYscGuGZ0hEcXS7Ra48TcCHZbp96y0S7ZTjKfpxSOonoNpFaNHS+5jZXT99GSQvieSFRL0a7WkSQHI9khoMfI9fZRWDqPJUAwHmFk6BImHnmYZb1JqSPRm8rxyFPPsKm7l0hIRnMcCobP1TcM42plVL2PmrOMawcwGxae28AxYyyVVugIEulAjGGlGzOXI5uL0jPUxVBqB91j/azL7kAJNLmjK/m8Ndn5tftwG3WemHkcu1Di9OSTBK04ZyfnqaOSy8YJhlReuruLSO5VzM0s0bHyxGLdlKpt1IiIaJiUCkUATN+lVCqB42IZJgeuuQFCYSTXp14uYVot1FAEwfJRZBlZU+m0aximiawqz56C9PFrOjWrSaNVRfBcWh0LvW1x5RWXIkouWiLOqfETBJA5MzmBZ4Mh+lhtnYCvEE+FAB/bcREEhXBIRZbltTh7QUAU1zQaPVFCsUWCsQhdXRGGB7vJZrsZ6llHb1+OmfkZPvu5O7n+6usJJ2zy+SZbx3Zzz4OfZvOQi+ZJVJQohdoiqWgco1LHpI9q1WP79i4yH/vWC/bBiasGkLIGotrH8blVMjGTZEmhFk0RT/UyIqxgB1RW8nWqQZveeBfLUxeIZMdYnGySSsHYkE+7JVOutGj7YVo0mFq0MDsG2zbESSeGWW43SKp1CmWdjm6TG+mHuVUC2Tgly0LXZUKizDt++V30aP3gNfjwHSepVrvI9qX5xXdvYXeozF/97Xk2bStz7YF9VJwcfTGDB792N/cc/zq1vEh3TGY4GSEXqSOpNoFYjOkzZZZ9izNTAsGoxcbBNHbco9GAnohKkwodTaEwHyTqZxlKxThVmkQ1JAY3DEAxSHnlMNEN29nVPYYX0BjMZnnqwknOT54nHYrT8isE5TQBtUOraRNAQFcaXPuNyf8MOvuR+OfLB9FbImPDUXS/jhoJoRZMqpJPJiMh0UWZRQ59efEnh8S70yH/9TfvQg0oRNo+htAhHM1R8Gt054vkQwqDwT4ExaPqq5iiTdZso4dlNE8A2SOS2MO5qdPEVR9bsgmINp7VIH7JW1h4+mt4ro9nWFhBGUV3Gbz61SyOfwPJ9lhttfnavWf/r3/O227dQy67gQuzh4mHcgR8E9kx6Nl5E8ePP4DgqQQDMXZd8jLOHrqLmukgRCXSeoqDnXmGfRG3A5oLCypsDQWpih5y20Ea7mVQNvFMD9N32XbJTSxMPIIXCuL5HQZHrqI8f4xUWOafDz3Nm97wO4jWEfBFqraK99ufed5YH3rjLkI10NQEkupi6CK5no30p6Pc/JpfwLI9vvIPv0MgvANLlGk1aridDrIHDj6iYOM4DpYPq/UKpdVlGrZFX3qQ1738tSzOXUAMBWi2W4iKTKvewBJs/IZFOBrBchwqZgup1sGJaBgrJby4QsNpEW97+L7OdKnGzi07GBzso7CygoeAL4tYzQaNWh3b9XB1i1KxyOiGEWbnl5BDAZrNJqVSie7uXtrtNg5r+dzbgo/q/ZvuqiBIBDWFQCRKuVwkEtbI9KbYv2sP/bksu3fuYnZ5mQe++XX27t5BpVpHwccLCwQliTNTx5DdFjNVlahpkx6Kk+yEWHFXuOTuCy/4fjy0L0XP2Hqq+WXqDZtEpgtB9jFLNsFUAn21QKHjkekZpNhcxG35DIzKiEaa8WfOkchpXLk7xtn5Bq2aRs10SLQMbr/tEhamJyh7KvmqQ8s3adR8PN/Htj0ibpxOyMd0dfrCUcSQxlD/dl553WtxXJN2vcJffuwDkNzOxivexIC4jphRYc5PMjz2KMloD7fefD1NAhhnj3PXP3+IxopLQ4OFWJ194iLVQpZzYofXZUQ+eiLBWH+bc8eajG5JoFtB4qEOgYCN1bY4fqoCkTCb+gbIxnvwghbzMzWWVxtcvb6LvnUDVGwdrZNgsnaGvnVDbFm/DyEY4uhDByEMQmuFFdPGTxp0tcMQ0dn5pfP/OtcfeDai9v3Pob8PCM8v/1fjAwI/Fom/8JjffwFERMJoJEIxqu0SrXAAo95G9GVK/cOcu3CMmmCRyIaIk+SCY5BKppjXy8Q9BcsRGc0GaAttJFFEs1cx6EUS4ni06VGiLGcEgg0T0bExJBvZV+mSI9TbZQYzXf8pn7NPSyDUdLKJELGYSL1goEUCrFbnGM2M4igtLL2J2daRFJFcIImu2kRTObZPFogmFcRYDL/UxpMELNVEaDq0YhKuZeE3K4RDcSJykKAXwGy06TRMLNFkcEzBaLRp22m2je3k5JGDzBurUDe4dP2lXJzJZX9yAHXYIuAOUmqfJhxdR7s8wVxN4ND32lRKEkbPFTimTzE/RTAcAEXDaOpr/mZfoK1brBSXWFqZJ6LGuWHnlXSPjlLQ21ihIAFXIB5N4fs+utsmmkqS6opSb9bwzBZpS6Qa0bANAzukYOQrGJKHqTeYn2ny2l+4i
kqpA9KaGG5Y1dB1B00L4lFBUwVaiIxs34CiSciRICtzyyiqxP5LL2Vm5tmc7N5abnNFVvB9Ed///ulTB88ykUSFRCyNIspUaxb3P/Av2K0mAz3dvOTGy3j9627FNR1mJr9LrjtBs+wwsvMyHj1yjLqukI7VCGXitBahk1SJCFnghSSuhBI8fvQ4W0c34eglWi0Hve0zGE8TSySQvAZ2PcC+HpGjNQlhCJyZVebKJXI9MYbGBpgpLGLXA3iOiqybNHtiPPDMHOmUhG73MpmfQrFijG2OMbdYxMKlGbKIugpmxWKyXqNTbPPF099h/MljHDk2zsZdWymb/exU6/hH/ozHAqv0un1Et29hXegKTFmjXGpiqBa7du5l/FdPMXigB22mSdK0mGKMRXGWndke/vFEk5FkH6mAwlUHuglHwWh4zOWfolhx2Ln5JchTj7EnMIgkZrA0kYwTJnDtJVw3W+GRyuPk8l2Iqsu8M0tvfASzInLKOkEu2cVMe4XYUodK1EHuNBkQh6iGV9ggbgDOP4+8PyD8G3F/4DnHJC5u8/37/674b0Hitgg1ySJQq2EpNmOZFGXBRFM0JM8isWMXcS+IhQ5WiG2iQsYzcMMygY5HK+UjcIH+3g6q2YXW7KKpqjQ7DTYGUxx324TaGoNKjKZk0BREYpKAKkqE43GstvHsWWn/366wdv99/LBfLM/t+/22z33Ps+jtkfC8NGrNJBwU6OkL41smke5BzPmTWIEgmhxH00RIa1iChKFAxvMwNg4SEiw0U6YTVgmKDqlYGDPoUNU9bAu6UAgpQRxZRpVgMJulYTq0HB3PtpE1m1ZLRygXWL9xhEEjSizZTTMpcLGSdX9IZrrZhICD2TIRWvPIEZXBTI6Z+RCOC6JRQ68XiXtgFw00VULWTXTfpNbWKVbzyLJKNj3ATTe+Zk1l3tHpNDsEFYmmbiApMnqrTSoZxxRhdm6SjqljdFoYtRqGrJAOrYkXK5pMoCBRV1Z59/vewanxJ+kPpFBdmUAoim1axBNRXAlqzQqeYZAJhMhG4xTKBebOTrN+60Y0TeGJJ59m7/5LmZqaQfRFPFFAtHx8ec0Sd5y15fMFh2qziGU5KIJGS/Tpz2ZJJLOUSlW+9c2H+cbXD7Jv72UcOHA188Ua/vIJLiycobhiMzAcp95QKMw0MSWXm3alWDnz4mr3eQuGMyMUVwyWi3WCYYhEE/hOi0AgwkoZ0pkyC5ZE/6W9nDlWo+iOER+uUSvZWPUaraZLxXCQPAcxLNNYLBAeXs/hY2cIKIskgxl6d0WJh31auodjRqnYM2zdsJW5okY+b3LizCnGT5+iXCvhSzJd8TRW6xRX7v8Fsk0Ts2Ow4DZQogMcOjzBS15/Nd09Gc5NXmApKrEhHuHkw6v0bA9iCzGCWPREdpNwniAbGuBcc55sei+C3ERvB5lbPkep0iE3FEaVZHKhCCthmeMnJxiyyqy/bA99F05xaHKWT9z+a3zkG1+nUFxipH8Eq77C0uoKkcFh5mamuPzSSzjx9NMMhLsom1GmqhV2JNYxby3Q+0M46GIiv/jZxfiPWO0/qs9zn/+gsbwY/qPybP+/wrUcWpU2ddEn0BGYm12i3NTpVJpUqjVWFlaYKheYblVZxadmmUwIOg1FYN5qYbkGrZZMrRgEUcNOa6TjCt1yD4bfIRKwSXotcMsoIYPetAphGT+u0hJNoqn42kAuTiLl+z+cvF8MF7d/9vg7AIaGX2uiRRzKhkDLA08J0tJFFMFHED1816HVatGxBIJqHE2OIoVixF2XUFAilonRm0iS7kqjCQKhUIBsJEyqP0X3UA4lqaHHYDVq4zkWvmmi+A6K56F4KuG0zaZsFx0/TVkTsAWBnPVCKShJFBkM9tObShIJaGTiHkHRp6mvIxVJkIjIBF0bFQ9JEggFVVxFxHRN6vU67VYLQZAorNa4dM/ltFpNwkoQq9EhIGmsNmoEQhr1ehUPdy0McHaKRq2MrrfxXZt2q7Mm0lysYFkW5UaFWfMcH/6DNxG0LtA35CCHJOSAgiSJOK4Joofp2ICIIkkISOi6juN47Ny+m77uHEvzC1x55X6W5pfwnTWJPNu2UZ/Nz+44Fo5j4XkOprkmMSfJMuFYnNG+fnq7cvT09eIHQji2gofIqfOnefjxxxlIphm5bA+zp6bZvWUnuttmenyJlVob2YMLkzWOu5EX/epYlQKm1aBYbiC6MnQsRFPHVSUEMUBxuUp/LIpj1ymcyyPITSKSQ6ViImtQrJRRgimCgowlC3i2B11JDk+sYEf6WG4kqFlhVuaK5GcszI5MpWkTDl3CQHAXWaGbO798F08++QSC6FGplGm0mqy/dAtmB1LpbtalAyR7RHb1j1GxQ7RrM+iOxczULCODAyjROKf0OKndG6m0ZDZGE3zrwiKh2knOLmXZsP/N6DPT3H3vnRz87uM88vj9zC0XUIIiy0srpKNdHNizjwPDw7xh8wBCX4pqu8Cxs6dI+0EeOnIEKRomPTjMImUWRZH08CaKxSJCSOOJ7x6kKXiUjBrd3R5x2aXpzGI6L9Q0/WG42M3y/euLWew/qP7iNhc/ezH8e8j7+/hvYYm7AmhpcFSZpWqJXk1jUAnycMtkUNaoqB6e5NBl+GRbdc5ERVJFm07Eo65FCQgKsU6D5WCYenOBkBDAVjXK3hLR/Cxt18CpWVRTMZrVFnFTZEOizrFagz4zxpReWRvIc63pi63rF7Gs/xUvRtwv0raWUglEYVDtwddMhEaSmrtCR2riJJJYhVWC3V3IqkB8OEOzYJHWPFoRl3RTQHclHKOMFwljOB0kfMKihBIOsqi4dEQPzYvQZ+hEWx51JUQiZSH5MuFOAKE7jGRHaPtVpKDBqJ6kJuUJRUdeMNZzzQh2rcJG38Ho6BBbD5Fr0Wpl6p0iCkoWAAAgAElEQVRVVmpLvP7Kn6V6ocIwUZ448R0+b00yHA7wdHkOea6M6lq84c23E3UlSrLHaqdBKh2jnl8lHNDwzLWEWZqm0azW0SyPZbuJZEm4hoGqBjGqZcqyB1Y3giXx3l+5kVMnniCW2kPYmeLsxFEmC1H2Xr2BSDBDRAW/sRYFko5mscwmR48exXIdctkehLJBNtfF448/SSQSRVQ1XNdE9j0cOgQlDQsPUQ4gOh7hUJxAOIQoSziWieT61IpV6pJEJJykVF0mk8mityyefuYMrrGW8+OKA5ejaVHO/eMxLr1mE13iAOfml+lK9bGrfwg4/YI5/5mfeS8njn+dHSNZtL5N+DhItRWmp2eJ+TI3vfxVzJfKmJUWu8dsHp6sUi7mwfJQVBs3qFArXqDhhFHlOL7q4bkeyZaKIQTYlJRRuk0K5TCmvYpnq1hhm/7VCTZt3cv/uOs+li7M0agUmZ2bRAkHGD99iqWZJR781oP83C038Zpd29ndtY1bPvs+nvyld1EQRcRCB3VdgHMT0/T1poj36ET8XqDA/WcTbM3G6FgKflBl/qG/xBO7yfZYWL6B77pEkuDQYSyxh2NLF7g+3se22QvcIjr8w+ar+PPiSX73Tz7MprkJ
3vPkUyTcFuWVFRLREZYNHb2cZ+toN9MTc2TiUcqJECM4PFrPs69/DHdliby8lrztueT8g6ziH9bmYhfLxe6Yi6/f7//ctj8I/xErHH4MEhcEYQD4ApBjTcnnU77v/6UgCCngTmCYNXWfN/hrivcC8JfAywEduN33/RcmingOQrJMqexD9QxqLM7jxRbr7SbDI6Os2osszlcYinQRSyU4sjxHwB/lvKailOroqw6MhFFFi8mVEt2qhRBPMnvuETJdQ/QLHS6URNoI7CCM4Agshxyi8jJz8ydJrruEfi37wkE9l4R/HDL/QfXP6fuBD3z1h03Dfynez0efV/7k337uR/a563mlNwNwcVqnT/6fDOpF8Ps/oP6FUdcvRPmicuv/cCzfx+pz7s9f/PBdv/G84ve1Ct/Pn73gPX/+x7/HTQe+iNPVhz0xxTMXHmVmxWT3zmFqUoaOpdOpVZiYr/C623o4sjLG6249QF84QTwcIJ5J8vSjh6gpHoobpLlaxPDBS3n4bQPLC7AwPokvmlTUDFJthWY6Q2gOXv6xDzF95DSTzQV8WeTRJ57knnvvYXp6hgfuu5t911/HXd86SCgS5KlTh2lYBpOnTsLWPuREgkgiSSKWplVcxCePrqjUO73s7zLp2iRxzyMBNqR8cps3Eywv0LJtPAG6EjlcXySR2sz4+NO8r+/VjEb6yFdnOONWOSxXePuNr6U3J5CfDJOQwswUK0SlLhyjw4a4gJXo5+jpc8ybTaL1Mr16jjm1n1ulGIdmRBq4lOd/9Gr/OO6R55L2i5HyDyL5F+t/cd+Lif/HJfMfxxJ3gPf4vn9MEIQocFQQhAeB24GHfN//sCAIvwv8LvA7wM2saWuOAfuAv332+gPRcTxCMZ+Wk8D2BfavOsQ2ZPlWaY7oYoFLBgZoyjJLnTKCrePaNoXCPPsyveTjNqFoiCWzRdAyCa9LI+CxaXAHkmiQkFM4cY8dVoysLnG+2KSeDRFzc4SVJPPT8xxvlP6NgC8m4ovrfxxr/Ae946f4KX4E7vnOKX75misxtwd5yeCNvHPbPgbH9uHLYJo2/akE9eoCoc4i3oECRvUMhcI0q+YaKaajMYbSWfIrJoFUmqrhI7bbzBd1tm+KMnbLNTgTDb4w8y9MXPoKOpftQf/TK1k4cY786iorh07zqW/fC4rARz/xF5w+Mc75ep4r6w0+9TcfJReP07ttO1/88F/w0e/eyf9449t5xXU3oteqyEEF03fxgxtYntLoaKvUaibPfKebq8Z6+MLhg7ylax8ddQKeFTYPByJUdZ3G3AJWT5LPPHOUK5SneFWpxW+kRMzKHKEpn5X5E3zwzge4dO8ow9EUi+IKc3mLazffwOrhwwxFo7xseCOvuPkm3vueDxIeDXHEb/CyXZdxlI284fC/35yovvf93P+FjyBKLm+as36ghX4x2f6gdj/qV8B/9M/TH0niz2pn5p+9bwqCcBboA25hTWsT4PPAQdZI/BbgC/5a7OKTgiAkBEHo+b4G54sOQhboKau0+hRaK20mUibZ1SIBQWLzZRtp1Gw6hkdKBy2dRYnFiCeGKXstOoaPeKGMOhSjL5YmJCpUbQEt6LMwVWHTzho3q1kmAjqPLl0gkYiQQaBKg6Rj8/nvzPzHZu6n+Cn+LyAQAJ58nN/6+FuIxhSOLD/Co9/9e64+cBMPP/0YxcICuhGntrrEqtehP5njisvfSCgVY2zzZlKRKNF4hI2WS6e6QKvTxvGqTM1N0S4bJIJxsj9/Kd9920G+WjnBG797hE+W5jn60CM0OwaTOAx5Gm3TRAkoOJ7NNSP7MdQgP/+2d3L46SPs3bGb+WCE8weP8Jo3/jwXZqbZOtBPIp3iY//jt3ASe3jVvo1MN8ZYNutUp+5GDUUZ7dvF4UOPM7J1LyeeeJSRdaMYLYtGqcpAKMD+7Bhnlo7ytXVbORjv4dKpk4z8/KWMjm7ij++/hz+67rV8ST/FlJNHKfukQqPc/d15No0eoHby26h+m6e+/TDHrApX9u7izIROdXGS+uSd/Mg4vRfBDX0DyDtuIvrNu39ou//qyJV/l09cEIRhYDdwGMg9h5hXWHO3wBrBLzyn2+Kzdc8jcUEQfhn4ZYBwUKEdMjm/WOZqrRsnk0DqiaLb0Dx/Fic+yHBPjKq+CJaCmTRgvoMci7EvJjGl5unM6zzlFtg6lyUU86gXbdqCQMyP8c35YySTCbb0JPBQWT+4BUupcOOv3M7n/50T9lP8FP8Z6DMNXD9O+fQcg0EPX4uj0mB4zxa6o9sp2acIxW3WDezloa/fz3WXXIHrF5jLL2KWghSmK4w/eJRA7/9m7j2jJDuru9/fyady7urcPdMz0zOaqNFoNMpCQoGgAPY12YIXGS4YGxt8AeNrI0wwmGwwxmAsQASBJYKQhLJGcRRGk1NP93Turq7uylWn6uTzfhDci9TY4t5P/NeqVav22rXOrn2eZ9dzdnSpGCqX9W9kPrKB4w/ewWcrN/KZv/xT8kaB5x4tki8c4dY3vInX3/YjLsr1cej0QZozRbJ9A3RF4vj5KI888hD9sTBXX3sNU8fG2XzjH/GB176Tj3/soywszHL7889z4OjjtOYW2Tw8zMFHH2evPkRQPMzn372bf//WQ0R0mdjmKEp0mD+9Vuex52axqiWiUpJJu87K3PMku9cSzM2wvGYA5eoPs7FboHckRGahwZ1KCwIVvVXAEOKEOhXy+SwrS88QSg0yLYcpHRgn293LgbHTKK6Ce6bO8NZrgBcXV326t5ucF+BrIu16h62xJK+cn34Rz8e+833K1UXe/v/j/v1zMoEcyHyg/lIn3svj490CH4oPA7/fAfP3NuKCIESBO3hhgn3jt6e+BEEQCILw/+n/KAiCbwLfBMilIkHFM1mfH2T//DR+JE/v0QpL3VnWaAmOKvP022fRmeogeC5kPcSawWmjgRzWiYj9CMICO/vW02gVuFDKc0twksvjQxitCl1DO6jPF5m1XIq1MoWyxpXn7+Irn/sh3PDJF8l1y9+8gcNHT7Gg2Az29ZEVkxjDo4ztvZ3M6DAXCnmej0ZoLrWwhNPILYuK6xEPLKT8HtzJJ6ikddYJOpLnktvxGuaO3oUuxlmsVanKHsNuCFN3iIcU/vyC1/DQpEVt8mGOJkzkaoudO9/I/me+Rl3cQF6VEKNRao0Cqq7Q40XIZHtYLrVpe1VCCrguWLEoCcuizxA5JVcZ3Xg9zxy9nS16F2ftHma2FWVy6iSSqLK4PIceDnH2tj4WD04hdl8A//Li+3PtW95BRNRYmDuK68hsXrOHm895FZXKPPtTc2QFl6hk858/vpMHVyrIpkOmrx/Fh+LsLFeu3cZgXaOzJseE2KZjOdz0VzfxmY9/nKnZGQbzg6S6epmePUk4FCUQXUoljwvXq6zZeT6x0n7O1AMyGy6kPH8IGYN4zwCV55+mK5EndvZFhGeOUFZdAjuM63RIpDZi+UWeParQcmuIuNSbBqoocezYEaLxEIoSQghAlCVs3yMeDmGbFr4skw4nqNTKxONJ2u02iqIQIL4wE1U
RcQMXPRpBDEQ6dQPR9+jYPvVaiT0Xn0u73UL1ZTQtienWuWDTFjJagNQPMbWL0vJBVG2UiDvJ8qf/5/0x6CapLSzwJ6NbmVw+QXl+lvNGz+Xo+BHknipCy8aZa1FeLDMU72fmsWNkh3ewpEjYnWV6etcyvN0h3O3QTYd2SGFDuMryqc1w1Tmce9ePiC/7vGviCTLzXdy9ch9LSxXya/v51r/8jPd+8L10J7O47TI3Xf8G2gNpnnvgQap33Mct3/w8n37T3/Kjyy8g8vQiR1Ima/sifOEzzzDXnObef7+czrTNG1MnyMdzOI3NvOGDJdLCIBY2x58a40s/eIQBL0HVqeHbdTqBjxQJ064s0t0bo9tXcQ4/RjV3Ed/91/00u/r4xDveR00sYbc7TBye5rY7f4ZkVPE6LeY0g+wJD7vbR16wMR0HOZfl/He9l+VCnJcacaVWJojnWLIskrkEU9XaqnswvzJPNBpfRZ+87mIuedt1fPVz32NmYg5P8PlgufEinggqJcVc9d3PCTqxZJJYSKEtaywXZvk7x34Rz42hXXzdOPU/L5Dfwu9lxAVBUHjBgP8gCIKf/ppc/I2bRBCEHmD51/QF4Lf7mvb/mvbfwhNhfUNhqrZEuK+LfNun6tQIdw2i1JbZo2S5a/owG2SNc0b6OWYmUEcMdroqftNGzpg0DJVqZ5khLc6vKkXOyg2xILusFyLUjBqWZ9AfSrIjN8gaIYSJyWgm/f8Emn6Dv/7VHYz05uiJ5qg1G/Sl09iOgxtxkU7N8B/141x50Zvwwy7WySrnn7ubsbEJlpt1tu5ew6HaAdY7IUpRj5SkE/Zl/EAgbrmUIgqbtCRD8S6mjBWWinN84a6fsHPbnzCvyoxUwriKTtvy6e7eSF6NYwUCsiOya8suTi7OsC6/hjMzs9TCKlv0HvZNHSHp6ZQDm9F0N1JKZ00mjyqpdKezbA/p/Oru+xE3vopsJsr0XJFLdl+EHpjcd/AQQ5lutkeivLQg+ZkHHiCSjLF10w7K5Sp7Nmc4Gj1OIFrUpqd5dmKcs1SNS7tz3HVkis2jI3htiUx3F6btUixXiERT+ONn2Jkb4K60zS9vu52MmuBMy2Xem8NTIJnKYLUD6uUmu3cNszFTRNAF5ostRocyeIJFtbFIb9caZDFHIp9D7O0h1rKpRAO8qkAzJBBVRDYMdPHY0ecQtLNIZ4corSwTjkiookgoEkYLa6ha5IUhuqqKFwQIwQvDjf2wTr1YQc+GsPw2WkjE81wINDwvoN1pYLYNzHIZzzIJSQq6qBKOpYgP9+PYLpF4hERYp7RYZceuLchiFDFUYbFZpst28Jt17G4bV3ppVv5qLJdNOqk4OUlkoKuLpriC3/C4YFAgmggxHe/GWZlFyir0BF0kEzlqywFKwyMb78IomeR61xJJOcT9OisNGdFsUs/CPd/8NsOyzNN2lfdffxMPL06w/8QhBrp7uXzDLh49dS9vfd0VNDstgrLDV+64hc/97T9xxXmvYMye4TvXv50nqs9y8/hDqOoiidkCKzWXjgbr0xt57Og8o9FeNM1iYU5CDD3F5pFuasICei3B1ldrfGXPlVxx3ic4Pl8gKrXoiHNMTjyOvVLnzjufY35sgedLCpvTXdx4UR//enqBoDtHfzPCYiLErnQ/t956K0tBCcdzUWyXYlRiTXUT2159Ia85d5h/+sDHsY4+jbD92lX6ffPgKD+fmSAuh6gHZaxoZBVPIEdJt1ang9659zjPHZ2hvNwgpsdwg8oqHiWscv6yu4quxyJ4VFlohunKQCQWBl5sxL8nTbNB7mKM5suuE/j9slME4NvAySAIvvjbvwW4EfjMr99/8Vv09wmCcBsvBDTr/5M/HEAURMZ7ZLrqCXaHhrjTWSIR7qa7FaMSizKxssClUheNrMjBmRky52ymM3UMU4pw2GmxbimKJrRRhBhms8PaVIhGTWHRsamtazLUtDiiytRqNq2SwzP+PO9Zt5WCvFpJa/UMI2KYNU2fw1qV3kqcxvphKq5KSvLZevFm4pLB8YZJz0A/D546QMU0MGIBmlUm2vEZl5oEsxWcANbtjqB1bJqaiuJ6aLkIx0ozSD5E0il25tdQjQhoapgxt0qjU2GH6zC/pBGJ16jjkZKH8OouK7Ui8+UCITfJmNdE0SKsVWI4ySjh4V6K9QrLiklXxcNKDqNJAvcYFkJuhLDqIRgCuibxs72P0t3fheR2CMtRFhR9lR6W/QIjlkZhcR4zkBg78hwDu3cxMTVGvmTixlKsiLApFmJ7WmfB8bDMMs25FlZY5LnWIotBk51yDrtSIRISaWZsSkaN8y44j4OnDjEzPUl/fz9Gs8PZW4fYmXNxPJkoIWL9aZrRAFu0SAxkKDcspLbL7lSeE3Ml5G3rseccsqE4WttDico805ijq2sDLX+IUmuKkFTHE6OIfkAinkJUQUBAliUsx0FWFTqOjxYNI7YtIoNddCwHz27jej6peArbMUmmcghBDsts03IcCDzwXOyOTdGqkjBETnZarB/ux21JLK9U6BscYXlOIhrqQq/9lMGhS1g4VeRIa4kB2VxVIftSrJhzTC61GOo6G3PqEOnIRlKpJMUziyz1GuiSQgsXdWGBUiLGmdkj5HN7sCMKhcokjpcineqlWFnmuF1mx8hO3vHeW/nsd77H7V/+Et3n7yTshfnxI/cxWVlB9WzO3fUa7p86QXCoRETTyWVSnLftPL53+w9xdJ+//96/8OW/+DtuvP0bCIKA3lYJ7X8eU47ieQ7RWIbFhRKbznoHuxJ7ODVxAKUZolp7lPsmCrzp2g/zaPEz5Bhk9Ff7WNwJjXITVWtRD6qkugYw/Q47Lz+Li64LSA8rDK4RKYz/glcPdbNv7s+IEqE7/yZ0eZRb7v4hhbl78WWDoKtEV1vibTf+kF4H5E0Rbv6vP6dhdAhX9rHyEv3KbZMbugaxOjb7jSWqsSgvzjOC6plDrEQVXvNSe5XJ4YViEHZw12Swj5uA8SKeXkVkvvd3lOHYDUwlRFLXsAMFxNUr4QYjyz2VVXlO/y1+n2KfC3khf+xyQRAO/fr1al4w3lcKgjAOvPLXnwHu4YWMrwngW8B7X+4CQRCgtgNajsHe2QnOjsSZqi2jZSVWllusUbKccZs0qjYOMoFfJVfWqDRN+oQwlmKxIvtIioqg2RxxXaajBoJqkPR0Tkg+NctCEmC+U+JNqc3UfA+z4a+SJbmpj4rt8Eytw/R0g2R/F1EvypZUmKpqs1RaoGS66JEwJ8wFTMviyqFthAyLbEsmUONoIZUeNYcUiqHWHBb1gJLURNBzLJTLWE2PQLGwOj53Tx6k6jaoN5dQrDa+65AXQ/QnHHTDY62YoG4WufWZvShWgi4rTs1s0i9IuLSYjJg0K03mTy3Sadbpbdgo0w4xKctirUa92aSmNslrQ5ysLzHbqDOckHCrNZRkltn6JE58tSfsq1/4Ie/8X/8nvREZyXOYDme598A4Jw6c4sHlWeZqKyxOHWH/8Qn+9JqLcItzqE4Dv1nCnythmxLT9Sr3VOb5ducYe5+4jw
25rfT1xHj6uSdotlQky8KoWlx61hrOPXuQWakKtkhD6FCTFHyjRkzqQvMDtm/sZruQoG5U6Y1moGoQCqssWyZFe5ap5XmGiTMxvkxdWkELZHRtgFgoTDgaJxQKoQKK9MJMUVkRMawGyVAEyXYIhxQUxyYWlskk4uRSSSRNQNdErE4bEPEDCYQQQqAjEUZSoiQSMXpSaSRJYrZQprIyixYOYVkaNbeGZQUYDYtTlZPEh3t55XnX4NVfvs1DEIuR6HhEwjYNJc6CXGCmY3JCLlOYPMQKDpIUINBCMB3askdxskCrFWWp4jK/MM10qUq4HWGl3OGZQgslDLd85kuIjkjYFchHsqTXr6EnlkEPJei0Laxii7/7u+/y9ne8h1dddSnxZJgdg8Pc9KrXc/LJJ2gUx/ngO/8ZrRJAMo/jJ/DdACvwcNp1NF0ikAKqjcPMVA7x7YNfYqpYZfeFVzNWO8Dwuj9mtjVGR+zCf+ZuXnXHLdiZbrqUy7AW+xgcfB+tFRnBmKMyOc2Jx+7DLO0hkW6wfsnjstsM+vp3kAtcgq99EkEQaBWr0AhRLM3yoY9czOtel8fy78dzxvEiCxz79ure7c2RK3liZJhTQ+v4yNc/zbff965VPLNBQKtpr6ILcsDOvMx1N17PJXYaN7z6ySqoGbSWVx8SLTlNPN9LR/Ow3RqCt9qIP1hbhGz6ZdfIb/CyRjwIgieCIBCCINgWBMGOX7/uCYKgHATBFUEQrA+C4JVB8MIzRfAC/jwIgpEgCLYGQbD/5a7hA+MLiyihMPNp2Lc4QdgHUQnj1ZtUFcioYRpYrNRKhAWZx5QaU5VlTsxPcabWokvQkYKAgq7Qp0eRZZFkOoEhOxitKo7moIZENq9bw75gHkVxESVrlSxpw6LUraL3qOTXJDi6NI7SbNI0ZBJSirpdJY5Hp+2ScXX0VIo7505wXhAiYpUpxgwU20XNawgRGV0QiXg266I9VIsHCJk6x0IGtqxiGE3iAawLx5AiCmgCr926m4rQoaBZGFmZdtgiJLj4moAX9/HCJklNRhJkvHASsSVRsFu0owFL9RLzlQYF0SWCS7bUZuvICI25Rapxh2BmmSCq0A6ppLQ4upDBCkWxWX0Sv+iCPVx2w/U8vH8vw8NZls6c4NZ77kQe6qcvkiPqhwh39VOqWTz03H6GukJUqm1mrA5F2kSVDpKcpm12WC6tkM/2Ulo5yTl9AtdfN8jFa3UMz2TPpTlyPWWCtoNdbbFgm6T1MIFlccou0fQNFkpLPHn8GBO6Q2jnWp5vLtKUfUpLBYKeKD3JDJloipLrkE72I9oqjXoVIWggCKBoKooewvagYzmYHZtO2yKbzGFYNi4CgSghaBq+B54XYNsugi+QTGQAEcdxiMUipASXuOCQ0kViUkBKUKmLLuFAQQ1EiA+Q6VvPyZPH8YxlWrYLiTx1x8ESWpy872fo0ZdPAA7iMVqSwPRyg+7ePLV6CQQXoSdBYjRHJ6rQQMD0RJKJDF2RPEgicSlEvKePREznrLBKKS+yLpdlIKrzzjdcS2HiBIVCEdMwcTo2PakuynaV8GCKu+76CUZzkacfu5eTZ07y3NP7eezJR0hKMt+47/t84uP/xMZLruXzt36MQI/TXimQiAm4WkAyksKxJdqCxYb8Gg4dPEo4luFtb3k3y/MNfMFkwRUptTpk8xqnthscKC2zr1qjfmicB05+n5nCAmdWTnHexW/gwsu+Sjj8Xvbs+TrDW15PZOFvyArdLGxL0a2sxzj6EMrOK3Fb6+kfeTNJ6/VcfNGPGdHeR6d1Ba/bOE5n8iIGj1nEXzuwSr/eCFzUk2bnVRez/44T/PzY8iqekAPvf+tfr6K/ZfgiTh48w6UbL+CuI0+QXru6WG5OV3Ejq/fVT3/1Df7xvMsICxnaTZko7VU8bbeDKiovu0Z+gz+IsnsJgWw2S6NtMGSKCHENsgr2io8TlZF9n7Lo4Dsuvb29uGaHfDxFT08Po/2D9PcNUZFc1sTSqIKC0rbptyU6pkHdbCLqOmdFepmxmnRsh/lOi1a9SSKSWCXLWKnIaE1mbSROhg7rhQiSouLhUfFMLl37JszARpUDBL/DUDTC5lSWRj5KSXbp1+Ks1XOk9BhdoQSLis8aP8a+SZvuxAbESIT1ShI8jU0b1hCWVdqNOqIsYDomD04eQrYd4p5OTo7QMUxysQzn9G1kMJYjG0sg62F8yyNX9Rlyw+xI9bGpGaLPiZAMdZPUMxS1gPmkxuLkHKMDm9E7HgOb16LW2+RDOQg8BM/lbENjQ2u1UfnEJz7MN75wMx/44HuJJzI0KjbnXXwhz4+f4nBzjsMrs0wvLjIjeXjxYYIgwe6RTXTbKjFDo9OJI9l1Ltk9zM3vfwvvunYbYX0GPT2EuKiy6dw4X/jQuxiN64TkKL4mU5UD1g2dxVx9mVZg8cfbr2P65ATxwSxBKCCstFieKBKNtPGdFq+9+M2o5SZnSovUBBev49Bxq1QXp5AVAU1RCYcjKLJGSA8jSRLRaBTXswlpGuVyHcuy8AIf03YxbZdWvYXZsRFEGcf2aDY7RKNxdF3HMJpEsjnkZJKG4EMiSqBIJNQIgqagaSGi+OhApdEkCMcpNsoU6zoxSWVsZpz+/jghYXWw7KXwTIVybRFV95mdmKdtudiyRp8Z5djYMVxHYN6oM2msMFOpUFisUVda1JvLRD2BVN3jQLNKsppC90RKk+O4uszI7rU0zDZPPL2Pb9/6XR7Z9wRxK0Fzsc2mkR2IUojv3vZ1Zo4cZbywyOiOrXgZgR09W3nPH72aJw7eTbD9StZqEn3ZKK7tIesqqViafFcv4XCURnGeheoiy6USkVgUJRmmXqqRq5rYKzWcJYuSqLBhbAHh4x9h3ja5bPtbqKcqZNXNCHKdLcNXkU85VGYMjh14lHvHPo+5olF8RQ9la470rErsuotYt30Ho/lejhQP8Ivbfw49BSyjxf3Pfou2tUQqmWXX+avN3E2veSu5cIaTew9RjGUY2rR5Fc/CcsAHb/7YKnr3O27gcHGZHz14L4fbBk/+23OreKpmmcGuNavoX/zQl7nph99gceU03b0agvo7fPGSw1mJ/pddI7/BH0TZvQTkfIFILIbfMejzVLKZUZRIlVQ6iye06dTqBLpOtdnirFiIpVMGkqohKALNWpGIG1AyW6TkgLJh4VdbmOAyO5EAACAASURBVFGROBphBDw14FXBAANdfYz1d1BlDdF0VskyOthH0oJis0XO6qch6ETUNrt27aLRFJl68vvkr3gvmUqZsBpFEuBMp46ieGw1oxwslwjaJnpTwUnIrF0ock/UYFdHoeC2sFQFp+Ng1UxqUouG71MNhxBMj4Qew/FsHC+g3qpTEVQ2KDFqoTCt+hnadhyrWmZYT7Nl57kcmHqGfjlMYaVBJJwmIyoEnTYrgYtYrbMn0c0pt8HK1AJObpiYK9KdStFwPKKZCO1yh05vhEl1dRQ9Fe9CsZtUai6nxmfZsGGY+x69n34p4D2veRXNRofbDx9mobLIxJiFHQmzZ/cWrrvkIqKJAJUI9
ekC5WaButdkrHySi9PdtAOHakQhIaY4WVnGbZZI6EniyDhLK5wMFtkysoOyoHHk2DThZIKp43uJ5/oRJYeSU8NwFWRR4ZGjj1HzHSqKR7RtcHY+wnjLwzENlFCURLwLX5bxHZ+wolBzLDqCTzis4jgWIS2MZ7XpNBo4nks0EkeWBDp2B8u1SMbjmGYbTQshSRLJZBJPErCFgEQug9O2SUs6JdUj78hIikwHB1/xSGhRZMPDd1r09mylbRxlvZDn6MEJunfv4uXO4kFHRhMEmqUamUyatmVQEFqsTYaIlxRipkAbFU8WEcMa3eSZ9spk4zm29Of5SXmWTKXD0hoLs2zTToTplvJEohPIrosCDA8O0BF9MjGNVH83pw8fwZdFOrLIs0/tIziZ4dFH9qK0fEYv6uG2e4/wiz9+Nz97zblMXXonyd40cmAQ1QeQPIeJ5QI3/18f5am7TrC46BOaavKjg79k7frNtFs2W/K93Dt+iKgmE90xjOwoCIsq6XUhnj56mLPTZxEeiLFwwKFanSIZ6sNsFTh36+VktxZJfXeKyYU6uZEMe1szDM2UcdoORwuTnLfzdRx89i4azTSxngTEUrRNm/qSR7tvtbY//b0v8qHPfISTN3+PsCoxfvQIwy/hSdRNji3Nw7oX07c5ce6942Gu2X05X3n73/O1Rz7Px17sTuffP3QL9z33APBiV87Gy6/l4h2X04wIHHr4CQp6mRdnZMOeTRdgKKuzZf47/EEYcV8UqPoW5XoHKZFkvr5EWDC4tHc3QquJH4nhhAI0RcZNydgtF1eWMCwTVU4SbQXMWlXW5D3cuoRugZ2Jkw+F6QQiTdmmY1r4SodHjk+R6Q/TJw1guat9wWZboCcpEhddJgWZZLFFygh4Yv8z9GkaRixGXzjN6fllJlotEqTosiJMz0/RGdIwjhqkRjciW2VisoyrieRTQ0wYRTTFR6x2uGJtF88UlvEFGzHik0SnKmi0dYGrkts4I7kcCAx2eglWzAZyrIteNYHlC0ipHjQ9xPGZp9ic3ohdbWPrIqgL6D0pxCXYNpwnFvGZiITpR8TfqCEVXLpGNmCtFLAaK9jxECktSrRTZmGmRfIleihXZ7E8neM//inXv+cfkZrPsmXbejK5PA9ExhF1mU0Xp3ndjk8guiW6tRXuOzBDvHyE5TMu60cv44x0kFRXFyXDZb2QICJtZNZrEHaLLDQ90uk4Ch6H55fZFslw7prtjE/OIfdupi6pBJ1xQtFdbO4/H0cwqBpLLM7WyKTjICtMPX+U/PaL2R5tEw6GONxY5vB4gYimsq7nLHwhgS5LKCm47fanUGWRdWvX0zHahBQdwQto6SqoAqbRxsch5qroio7j2VSrVcRAwXE8JEXEF5NEdImeaB/1eoNUPIGFTbhlEIpG0TSdiCLimBZCKKDaLjMQS9PWPPoaaeqhZZI0sMTU73BgvQQZkR3eDiY6NTqhYbKNKqcby6z11rMSOky0XCAeGyEiLZN0BYxQhuhMkaWwwcR+k6u27+bQ4hg5ZKxkmFZLxu/VSXkhVgKfnkDC9QMc02XWNaiZHQJJRRB1QqrE2Mw8GdvmTLtFKN1D+ESJi7ds5h9Pn+LyWJretaNUmlVyuR5My2JmeYpHnv4lFW8CIZjhXCmM45URggYJq85SrEnhhEx6Wwv7jM703uc576pPoQcdzvG2MpgrUzdNakcmGY5vYX5uhv74KE4+yY+/+g32XGkyeXUKagZm0qT/tZcg+xHSqX6Ge8+l45e5bPf1WIGA2HEYSffzg50/YOH5MAmrsUq9l1/9v5j4xQRSo4bSM8iAPbSqk6ccDVFrr848mTr4PMNX7mal4BH2TV45fAXw4klN+zJJhDOrnwDOnHoKb3gbO/q2MX2hR7joAM+/iOeNr3kzX3zmAX5Xf53fhT8Id4rgw0ZVJmrp+JUi50kZRr0IMVfGQcRqL7JjIEfIs8iXLSKKhqgqdHSBqG2x0iWRz6TxNImUFuJMHPozSUTZwQsckrLO+cNdmE0HJ2piL7UxtUW+dtnOVbK8LjnE5Fib7aFtxJdOsGVPH4IQJx0XCG0ZRdRdwnPP0rLbrA2JlJUyhtTib9//dmS9xtmjA8QXa3REBa9u0UrqZJcMOmKEjh6nW/T5+UKZtBDCaAr4QhotkqDUcGlVDX48e5CW4fGqcA+iAq01CdrzRdb0rGOyvghhEXe+gd+zifHCYXpGClzT9zzDkV4iNYdW3KRcq7OsdKMrLl2KwImGgbFW5sG5g4SVDpf39tA9UUJXHEZ2/gX18urgTaLqMXnqNG1FYk1WJJHZxrXXXINmt5ANmzBhDF/i5KG7WKqN86sDDxMkFEop2DIyjJLTuabvMs7fdA7bURH9JuPhNoPyADsGN/Chq3Zy4YCCKgywbWuIkJZjSXaJ79xI1XGQRJ2Yk0ATO7TcMpFMGjW6AT0bQcuFiTaauGsz+A50UJhYHOO7//pz6tUaRqvDwUNHUfUQmuTyjX/9F9LpNKqiU65UUEM6ju/Rsk3C7YCICf2hJF1iCF1TsF0LQjKyG+BLAR3HpFwrU6ss4xgGeB3CGoiyheyLdKVyBL6LbXcIKSpiALKkgqBwqmZSWZlkpjOG6oTozvUTm51bpe+Xwq8ZnPZ9Bqo6+aEeXKHJRbEcTQnCTpiq06BHizBOnWZthSW/Sctu4o6vMNCV45mJWWZac8wXqjTbAVarRbQhMPKqVxAXJQLPQrRsfDwkQcQ0TbzAxXWaNKoV+vt6cC2brnSGbLtNxbV585/8KR/+0Ef4+je+hB+Y6CEZSfZImBG++4vvURw/QGGqxNykzZnjJRqzVVbmwhwvTmEeUVgWmvQ2VbpyAtde+CbcVoFkNoyhdQiGZwiPHkcfeRJp4DGM0G1Yye9TDT7P1e9YYN5dQHJbuJE2zdNlhs46h0Tap2b/nH2nPsLhwic5ufARCssfpBD8HY80byQVWHTWVOn8jg6GV59TJLd9kZbQwaGGoa8OQs4Vy4zu3L2KvvvGN/Dorx7iZ//yde5/ei/y+aOrePr/8i84tHa1n/2mN/81RqnAodM/Z2VphkZldTHQQ3PTvGn72S+7Rn6DPwgj7koBP2yAEZFQs1kOYjHmW1jhOLOFMs1omv1jUyQUnZJuIYoiqihjWTauadJVaGO3bVRP4vTyApGmw/ziIoZl4jdazByZYeZgmzoadjBMy9LpWYrzp3c/uEqWvXS4+I/O4q7xZzGHtnFs7xhSQmK80qZwqkBMSnLU7hCP5ZiVHMxqi4Qb4gs/vg2VKOOeSySSIeWHyY6sJbJi4iXDaPYySVuj4LtsTeYYU4pkAtBsE99rslarkJYF1ke6SAZlvKhE+9Qsoaem0HsyzJYLdGsKiU6LQtxjo+nT172ZMTtMJbyVfe0ax5t1NEdB0xzEYB6z1uRIpURxZRLdjBKTBcbHD2P3R6iFAxS3zb2PfpkrLl4d+LnHKPHKTcN86hMf4E+uvYFO2OL++x5BFRwS0RyqFKY7uQdRKlNaeZaVapuQGkKo2ozXqjRaBnfMPMIDh/ZTq0tEc71U56fo
1FeYcUt8875nOFXrpyNN4Tv9FPwO5qLDmZMzKN1JGvUFJhoVVNUnHO3FXljGt+sM+3FkT8JQHNamBxlIhrj/gSKPHVORM0nazRbz8/McPvA8+WyK79zyDXq7s3TnulgzNEIqm8EOPERNIZZOYkdVvKiGmk5iawpt1yScjCJaLpYk4voOWkgln8/jOA5m06W0VMP3wXE8dEWl3TJQFAU/cLE7JpFQmEqlQiKRoLD4INftHMF0HXqyfdQMi57k0MvuibwscVn3Gma0RSLTDYKIRm++B1+Lcn5qHVHJoV436Zn3KXcqSIFKz/Agzy6expBcJDosn2wTy4WoKjpn1BpaJM3pR46g62FMRE5Nz/HmG/+MmJ7A6oCAhu+JxDUNo14jFo1iNlos02BmcprE5j5CQcBysYLrgi6HqZdazFmHiWoxqnKMqKKwXJqj3hSpmf0szYdYfC7LgdPHiM+o1AdyNDoeKxWDjrvC4uwcy6Uy1YLBwulpimemKE5N01iuc2auSGu2TbEpkVTjGIqCZLjEUhqFlTJGs0VhcRpJ9gm3fcpum6oWYKPjGiFisRibzk6T37g6SDhldPPY959meUXkyMP7GJte3aOvWp6mPbu6anLx8Gka5SJ9m4dYidq4B0+v4hnMeizvX12ws/fYLxGSCr+8/XHqhSU8abURjzoWFWd1e+j/Dn8Q7hQJkctzAccWVhCtgL5ED/ML0yiSTTglEao0SIWiNFZqGFh0ixoHbA/T8qjYNr4A84UauQ0badY79GazzI1Ps27zZrqzORIDOeScQe+CyeZL11MpJljyTMLp1YFNZo5xuNpNf0TjyOwSZ49spt0yEA2LUjqgR9YQTahIBpGOjNU/wJmpAqMVEc+AtWKMhcYCQjRDbWKKrrO3MDNTQXW70W2DlXKDE0rAHnI0Qh6xUBS5oaJm15O1m9Src5RTfcydKdE3spGQaWJVLB6ZfpodZ21g+nSRGV8msBeJdMsUjAiTVoneCLyibz1P1VrcZVe4JBNhvLRCOCSxPX8OpdYyxrJJpnuUsadOs7JYwsgpzBvLFIPVRQlDZwrU+9I0agF96weYnVmkcPIw2655M888/x9sGN3MgCfR0BPkjSrDIZWNvsLjK0ssNItcsXYXX9t4GZ89cxq1v4d9Bx9noA7uwA6ksTbzskl3O8zSiXkuvP4iujoZjnonGHA1ChNjtJpLXGB18/CTzxOPNHluLMBtTnDunlFGlDRnig1+ue8JQu1u4ok67XqLfEynKgovVI+1G3zhM/+ArAjEk0kymQxGs4MnQkzX0CUFz3VJhuJYjolj2uRjSZqqgqTqdPV3UbbaJFUdSQ6QZZlEPEMsHKPRrOB5DmIgstQqIgcCmqcRiUQIRAEtpBMEHR575F52r99F3o7yybe+i6d/9gzXXpThwanVKW8vxcmFJTYMn8VmKYLWFeKC+Qx7H3gAac12zhndzR6lyf6pJs/JKsp4AS+9nsWpM6zZkOfYoSn29Mfp0cP8dO9t/N3bPkx1ymDe6LDUsEDzMTt1Pvqxj7Jz507+7Z//ia6eXiyrg6oI+B2TiKLgttvEtBAsNBG7NW77yF9x9MQCyWQcUQRZllHCEXSnl9YXP4q9bpj5OZO04dDtV5COjzFh1qlfojJaHmbq9K2MyWswX7GNHvkQpm+TFAVE3adKHXwLx7dpmiuEpCSK0aFh+siqDr5P12CCpaLHqWOfZPnMAsmeYcpLLfL5HpyaQ8s12Z6PYVoOrXKE0sAK2bk4U67NxY++WL8f/4t3ce4r3oC5ciuxgXMJZ1ZnmNz2s+/QPzCCuOfF9LY7S0UrMZzMcTA4xpZztvLSitBT7RJrencCLx5AXptsogkSl5+/i9se/AlOLMylL7mup3ZIdf/+RvwP4iQuoXD6lEE2PYDmh2i0BLyOQsjxMCWNRgCT7Tbhnjya6VDTbYpNg3UhnQ1xjUpYZfNgjt5QhvOyw/TXG2waGaLStGhWazzfKPPq4e3MWBbJII8Z60OLw0p5dXtKsSvGhTft4VezY7xq9GKeLsxiuhpTY9PEOkVOHS4iOhba2hAnV2xmT8xypNLh4VYRLzzAfYeeo+AOsFSymK+bmIbNfLNEJbLM6UkLpS/FxloGxQkxW6gQdBSqZomxiQXGyxZLpkYu20ssBL5sU/Or5KQ4qcgoST9LvFdiw7pRyHUjJjcw4Fv8w5vfyPnnv5IZXUGJ+bxFGqXPNenuGeLdV7we0XeIeBrXbB6gY7Qxszm6h9YysCvHVb1pQuLqBWOmdVZMl5s/+mm2nrODx396NytWlIXpJ3n7uTewWxtCGx1E9gxKfp7ZiMWxdoH16RE+d9ONJHyPv5/Yj7l4iPXJgK9c+xo2b+ghCLdJrl/DJcMb8Fjg6i3reeLwo7jOPDvP6+XCwW52yjLXXraLHe+4mh17LubUYhazk6PihHho33G++dOn2PvcSVKhXuIZmaopIMUVqrYHrocUQCKeRhTBdD1CiRRl18YIi+ixFI6s4UoybUGg7QcESgg5ksASJaKZLqKhGG0xIBGPISVjeMhIooLvu5SqJVwHjI5Dvd0goWrEo2EUGRTfpbAwwx0//y8eeOwhBEGm4JfY55h8+vmHCF1+Jd9ZhrXnvOVl98TOs9+JG4e1b/wzlEUHKzvKJz/4t9ywbSuHzxzEUZMkkhbnx1XSiQiJ5iEu33ke8uQ8561VaEoWDbeNoTb5yePf4ysP3cVKQ2GmeAxXCWiLYX7+ra/yune9gUysB7PjYLYtbF8AJYKhhiFQaCsukXiC3de8lt4LXomaHcQG2q0ODaNFR4Bld5mp9Vv5yc/28kyhyOFakaPTEzwuGrTcAEfezn+hsL+eIp9yychRXvGWG9h7dIJwT5STZ8Y5cnyMqXKZYmOZSqvB5KkicmYbJ44+SytIsvGVN1C8+z4WZztMz++n2JrmgbseZ2qpwI9+cQtvufkNLM8usK9wgF3nXMx47xhieYqZyBS2vjpwr5x9AT/6wbcQIylOnt7HL+66dRXPE4fvZu++H6+i//Lg7WzoiZPckOf686/iy1/93CqepfMu4MJLz1lF788lCKc1onqUXHIAVwmv4rnru7dQtksvu0Z+gz+Ik7gvBHRpYaLLJrHuLDMzU8S0EF7HI2FZKGqMDj71ioEUSaKIHWorZS4+eyN6J+AqW+Go3SEWVVmX6ie8Ls3+UzpXrzEIxDDvQMOYeY6rrxtm/Jm7qBgRGjvO5saNW/nAS2TJh1KEnmuxPdZHzW4QEwQ8pc07L38lTtPkg29cx53TaSqFRc4NuRzrTHNZ9wbc9jB9TpNsPIde3cdEwSEVA9c36K8JvCKyli/oj7JjJstEq4Cc1NlRM4kXT6AO5ZktLxF4Kpl4hPr8JMcONcjFLWS9zvpUP6pb5an5cbrcMHIwx6Jbw61Osn3HRj724N0kkUh4ScqGw8/ik7wieRHL5f3ccfdjjKkCl2by3D85haik6ek7i5qwiG17mFIPu0ZGeKn3zioLFNQi+Y3DTC7OsSacZak+RaWznVJ6CFUT2HH2Lp4+vJ+z+gX8Ro6/vvE
vKWoBqVSGoTNF/ihzE150iMRwipWjY1x4JcS7RxHmFzHDkMv0UpkucKlgkE330SrUWQgZpMJR7E6F9dlernlLjlgyhSz5CFoVt6MCFpLj4ss2jqyjKypmu0NYDdGyTUKhCJZjI6gqXssgL4TxVImKYBESJFRdo7y4SLI7g91ykcQA17XJJJLImoqKiOnYeF6AYRlonkdjpUYzsJBdAVkUCWthREWlajRYXl5meXkZ0Q+IhBPEYjEQJRBFVmaO8/ATFaJCkx888iTRHVdx8P7/RHvz/7wnjAc+xdnnbGHh1p+TTfaTNob51J+/g9FLz8E2yxx4wuTk0jHWJfs50HZRGi5Ws0Jo+HyOtxJQWuG619xM/5MP0pVez9bBDL+68xa8SgTBa6B6LaZ1mUFJwbAaSGGVSCxMIEoojohlt/EFD8dVqIg2yjP7ePjuMf7v936RT33p44iyBvhs3bYJPe3y8JOPcsMbbmJ/+SlKzwb8Um1jR/p53Ssv4fY7bmXHlkH6P/RW7v/mk1R/9QtOPXELX/vms9zxz2/DKS8TDSfwlhtcdekruOP2u4hvDMi2l/noP3+fH33krSxkCmy96WNsvPsOnsvA5jVJtqwr0J+L4tnn89Adn+V9n/4Ez/70s/znv72fD37uF8yf/DbPFhaIVOur9BvqHiKS6CXalgiJGufs6OOl3fCv2XY5mhoCnn4RfSS0jdLJBfbO/pJ8LIcUCLwwauH/RXL3hYSOr3bFNB0H2wdVUpk1Knzla1+hwk0v4mmFArrM3980/0FMu+/JJoOr9/SRaltc9eoLmT+xyESpyeC55/HAqWcRBR8heKEXieYLrB89h+P7n6YaD3NFdh3jCyWErMXG7echFhc4d0c3j04uojdVGv1RbKNBNnwWA4bJ6aSO2KjRk0oh1ld4/Xs+/iJZ/vMLH6Z/cB1TgkWXI2MLHeanZ7CevYcN69ezvMlmYVzlVZf/FW1ZJqIolGtFkrEsraaN4DWYXpzE7HgobQEzYiOZ0BIkpCDNrHGGgeggZWGFpAWmZ0CrRT2AeJAlKijMWCuIgoq5MEOzWyFm5zGtNlnbwo1UGFuIkM8HiB2fxVaLEA5CREcIybC4ghXXaUwsonUlqAU+cREko4GlqeiyiNN0sPwOYV1mcUUnFq5y/fOLL9LDna+9jE/91cf42/e+jSuv+z944LHHaNfLyOEKCFFiYphLB0bYl6hxgdNETQ8zfrKBopYpuQV+dWcVdVjgw6/ewuP1eVQnygfTI3xn/Blm8wKJRAY9SFMqTeFrMTbH07SLAceVMuf2DrK70+T9P5vAl3XEQEYPdLzAxZY9HM9G9DwUQcR3BfRQiACPjtFCVjWcAFRZQZc0BtYO00mGSKlhUmbAVGGeifExFBnankM6lkEUBBzHwrY6tF0bXVJwRRFF0RA1iXggYzWbNCX/hSIrx8GxbERZIhaL4fs+sigRBMELJ/+Qhu/7iKLKxa/dzZ5ch4LdIjPyRs489wM+8A+38/ORwVX74OO/ZQg+/YmriFqLbB78Yx698w5ymTQbtmVYKats0sOMyRVkK4q2O8z9n70LetbynDnHO6+8khOPP8dsKM4rdvUxuv5d9Dae5yf3/AfuyB+DHONfv/wlmsU6sq6SyOUQHQNBFpB8ESUUJSKqdPw2kugzt1TGazX5oze/G71d4BeP3oMbaKjhGHFFZ3pmmpv/7P3cO/8oHdmgt+GzEtLA9xDMNrlMHEf28StNkqkRrnv33/DvX/oUN1z8v6l7z3DN0qrO+3eHHZ/nOc/JdSrHTtWZnJGMKElU5hXxmhERVEQQUYZhZnz1VUcRFBMC4oyggAqogDDdpAYb2obOdKoOlU/VqTr5CTvdaT7sAmlK7PbDe13M+nKea59977zXXvd//dd/PZ7yy5/gSW/8A/7wfT/LZGcLSin8eICxkn4nZ1OtsrueRFz2VDbvu5O9Uwl+4UpO3fo58omLGKcxE8RshIa82ElnbomV5ZpoS0LnuGRli6bHPKEoyP7otx5yrb/2++9ib+Oor/kKH/7SZ3jjr78e/Z//x0PWWfv5XyLLMrLf+fWHLFe/9dt87G/+mswJTkQ1Umt+8sbbHrLOylVP5uiOaR7zqU8+dJtvegt23DA8vcyXbvsqm8urvLF4KJ3wA1dcznOe8gze++4/+L+n273OOlx28PtYdRWfWdzEK03odTl07/0s6C3U4xHZREaxvsp6VZDcejOb/SHJ4pDPr51FeYkqu5xc/DS7ypR3ffk2rvJDbmsUuztQrls0X2RG5rzoWU/kwe4ePvbn/x9DA3yHE/+Taz5GulHRn4w5c2aVfjbDD/zUj/O1OOOWwRmKz/aYDit8+IZraI58lSOjI/z8ru/D7lPMvPC3+d1XP4MTa5Ife+oMKuugh2OumL2ST9xzC6c2Rzzxwj1c941PQrpBt8rQJiPft5dKLbN+VNJM5/QWH2T/whO4Z26Jnpa4xTtYmxAc29Lj8rtW+e2XX8obP3EDv/u0pzFcgz+57wxDvUq2keP39Xj+7bdy79MPIkpPpjQqAuoOw+lplpYPs6u7i1Gxzmq9zAWXd6luPx9V++mX/SgHLrmaiW09tJAk3ZRYb+NJT7+EPG+4eHaOL9y3xr7DQ870RjTRkPHFBWYkuXTiQv7iU3/JD7/oxRxu7qOXdnn5U17GX939ZX7yzb/AdXfezJmjyxS+5uXPfgF7Zc7BSy7nl//0ffzsMx7N8KbD/M8HNsiSOUb1iChVjIfLhEiiXNqKVxmLrQ06jinqCq0laS9DkBFFgqYsGAxX2LPtSRgfOHr0MHcefYDBuKCTdkiShCADBktdVCQ6IU37yFATS9XypZuaju5gKkPam6TBIFAkiUH1AmVT803hBu89aNU2XTaeONG4EDjYqbjt2BI/c+Vzec/f/wUvf8nVvPZVj+P51/3b78Rdd44Yb+9x3Y1/xZ4r97McTXFJNsvi2i3U89PIpTU6T5vkxOeP85ynPpX1Q+tcmc4Rjj3ITOcKpL6H8fGGj3727Vw6kXFs5Dj85T9nY7iLp1x5JT/0knU+85Exq3qer17/FSYme4w2Blixyq59exGNoigMr3jJD/JbP/Wr/OgvvZKdz5xg250XcXZzCVU4NuOSiZ2znFy7l61hwB6RsDYZs9NvR3YVud8gFgnLC/PsjhrKtGL105/kcVc8jSPlgIULXsRX//JaFg7+FNnQIIUgm4XGBnxZEInAeNITn8xxc3tZXJtDuxHV5KMw3QQ9qtmMI1IXMczPUtBF9jR+3LC2EGNdoK/X+aczd/CdHJPHveEX2h+/DG8G/kU15F9MppqNYkD2nf5qXHD0yP089xnfz/zeWW77s0+eN3bihY/j9G++/7zlnSgh6Wje94kPcdVTnkzIUuChTlzOTZKnD0tC/ZZ9T0TiCzNZ+MlnX8iok3HPYJOLoz5Tswl3nDrNVh946ZMfzeeuv514fp47NFMdJQAAIABJREFUF4+T5oEHN2umuhmv3H8lH/zba5ELXU7FXWbzMfNZj7lyndf8Pz/MOz71DxxZc1ywMIkdd/lPP/
8xxFUPg3P/XB/2IsH3j/B9B0BaHaSHslScIwLOSOQUlFvghZUrCjWoShT2+wzXw9xawVJFXjZDHF1DYPhqFqoMq48yWqqnL1+IDnPvt5hFqmEmviIsMUxNuTOSBu1GpFVSFW5ZcafBRRQpLlTV4ckbosb3PDK2AjlKgFkaLIqARQRIXIizBMhVKUeP93fAeqqFLJJeHNMzRDxey3mRxO2Gv3Oagi7v7gT77sm+av4r9P/PAfWxv9ne/6LtIkotfrgKAQ+gEP3HMv1668hKDKWKqK4zicTKekaYrlWOTFhkOvZAWSWDHo9DlbeAT4mM0GTcPi5HBMp9NjcnaCoekIQk23N+Tw5JBOr8t6tQBR59Kwy+x4htKxqEtYhT5tq0GUxFjdNnGaEKUZpqaTTSdIhkHTsBCQKAThtoVepqLcSKazArVhElcFomogRgleuKa/NURVdVahjyZI5NGmec0wjNuNcCJxnGJYKmvfo2Fa1FWB4bTxvTWSqJAHEaKpIN2u7pIkAaGAaeCjGCZVEGKqCllZ0BlsMZsvqSuVqpap9Jza9ViYMXfbDb54MqZhtEmCFXtbI5ZBsCGFKjV5ViBJGnGQU2egKClpLRDnGc22g1Vsat71iQ/n+niHR1hbfTRJ5NbzVxhevIQhKSBUTOdzLKtJxeZZtgyd0HfRlE0axbQtPvIff+WzdV2/5svdNy+rTryu67Ku60eAXeBRQRAeBH4IuBd4LdABfuC/5oYVBOF/FgThKUEQngqjiFKQyaqapeexziMEWUS9be9J5YpaEGiJFu3aYLfTZXx2hFxVjA8PKG0Nq9NCySvo2Ogdm/V6iSJKxEWNqVscr5aMs4hEFtFVC6dhkAt3HjBV4oblnNYpgi7SG/XIqpzZ6RR/scKPfDRBYBZscuyTk1Puv3QXkVAznS1oNmykKEZCYB2HJNMVLbuBkJRsd/tkRUUCZKSoukRW5BQIt1vAN8qtuhYQpI2NRlTkDZ1NgOx2A4+IQFKX1KpMLYnkQk0cxyiK8iVzt27Z4BiIosiv/fJ/JC4ywtWKRq+J3DQ4Xoxx+g5+5rNrvnwV1F/F/z+hizINQ8dfrciCBFvSuX50hGtIKJVEmeUbx6Zh0GwNKUqBMI5QTYOUkqiMmXsLtIaMhoijG7z04uaw/nR8wmi0xblzewgSRGGIbZoEns/u1ohGWRGKBUFHISk3SIc/0uoB1O6abLUgXs1IghWybqBYBoezMW7kERUJpVDRaDTIsoJedwCqSZwXiIJAGgaUaYJSG0xOz1h5Z4hxjrdaUysSQsMgL6vbiNkEzRCpZY3e1pBKAM8NULIKwpR1EpIZKkKpYtgGS39OlOVkoYuhSeiKgFSWm45mWeDm9BDDEmh01ohSiiK08CudnngXV2cJDdFErFIERSf0YpxGizD0cYOUmecxXk8w2wJ+fkwQhVSyyIWdPcSZT5DE5F5I1LcgSmkOdqjRqUWLwflLiIpIXqQUOSi6tSkJFqrNM5snmLaDGwbMVmsOTk5f9r3yX9XsU9f1Gvg94Kvquj67nTJJgZ+BL5nvxdZnAAAAIABJREFUT4C9P3bZ7u3X/uTf+mBd16+p6/o1lmUhGQp1JbC7d4GmZmEbFqUIqmJSFzmyUCI3VFKx5GbgYks6tmFi9XoISIxXLkKnzXo+o6U6FLmIrtkocUolCrRknaFuIcQJsioQZiVN8c50ilxVCIqKUCo0VAdv7W1aklcxclrRabcZWh0cZbPargyBP3zhGaoko62KdGQVW3MoxApNFCgVkUUYoks1ki4zPLdLw9BRpAbN0qanNNFFFUPSEARp0zavK4iijFBtAFC1IJIUG8ZHXZakZYGcpZiFhJik1ElGJQr4cYIi6pR5Tk1OEcTgRuz3thD9CKNS8RYumqzRbneJ1+vNDiYMeOYbvo5/vfXlCt/+Kv57RPz93/dfrMIBJF0mSFI028FqW5SmgFFXWHmBaSjkBRuiZZFSVwENBbYaFgQuVCWiarJIE4o4ZelHnJ0eM+jsspxOuPvBB7FNC6UWCKKMuExp9weIskJc5sRixmLlsaM3KZOKsPCRxQ3XBgnUZgO93aPnbPHoI49yV2+f0+Wae85dplIlctfDUVWyJGAdrjhbzXFaFqqqoisWkiCTKTrO+W06gxFVYdBud9F1ExuVRqURZTEoEq1ej8V6RZEmrJcryqqiViQOpmdEVU7PbqLlNcv1MVmW0WsP0WWFvDbQ7CaNXofmoEupy7ScJiNngCQarKYgihlZfMhOX2O4pSCWAorTxWwM6Ko6UZIgCQKCom6geU6bbm9I7mbopYEoqGwZGuvlmFCTyMWSXr9DS9dZrhdEYYJIQJUkFHKOoejklUhSZThNE8PQ0GsZigopyxkHHpkg46BhNV7+wurlVKf0BUFo3f7dAL4SePGP8tzCZs/zNcBzty/5NeADt6tUXg+4dV2f/XmfURYFsihSUXN0dERZbrCslu6gqxaipHHx0j2cnI3xXY+qyNEtkzDNmM4WlHlMlsYUcYoiKsyCFZqjITUUck2iKAVESSPPcwzbwI086jTdpFb+RIxGI1517z3ogsDecEhRVIwnE5SmTakqFF7G9cUcURAwVI0iSpCzEsOUWec1nqHitzY6reVqhajLiKaA3m7S0Ay+6rG3ct9DD1LlxYbVoMlIioytqlR5hihLZH9Ml1bXmxRQESVf6kgTqxrz4nmCiwNagwFvfuRVPPjwg7z29a/FsDZb5DLPkCsY3X+Z3Xsv8/RnPsd9r36EsKlyMpuQxxFKUeOO11hOD3N7B8t6+dCdv4r/dpEnd96HTc3EkHVsVefWtatYlkW/26O/M2Lmr6kMiWUeURoqE98jQmQRJoh2Y8P/yUsUzeB0MkWTRKRapixznFaTgy9cYemFvHB0k6atb3aWswkIAlJWwu1UzcHkFKfXotdss14uaXU6FGXJjZsHJFFAniUcXLvBfLXkrmaPw3CKVpbohoGiqpycnKAisN12cFfrjZasqtjZGqJLItFiSUPWkMqaxWqOIylM0og0TTm3fREKHUVq0WztIisiF89fwFJtNFFnu9PCVnWEuqbX6zHcGiHLMn4YUSEjaypZEHFy7SYr18eUTMZHZ2RZRpSEdPsDZEWj1Woh1BWn0xO6jklTUYiDAEFx2Lt4L6dnCwQUHEGgaRhYWUUpVrR3Byhdm9PAw60KjF4L3/Xod3ukcULLaWJZAlWqUeUBSq4TBB6iUuN6K/wwJI8jRAla3R6S3sDWLMo0RnZkJOm/bZ34CPi523lxEfj3dV3/uiAIvysIQh8QgKeBv3P7/f8ZeA9wDYiAb/9yHyCJElmS0rRs8jRDqkziMKLRNfG8OWqt8ewXnmHv0jm8JEJJa4qqJIwiHKdNGqxoyjpyuWFuUypcvvsSJ2enSJmM3rdYLZZIQk1apBjNBnKW0rvr/B1jCZOY5194jsqQOBqforORloqmgeS7hG2LYWkg326eqX2fh+9/gGuTGzQMnXS6QqVCM0xKISVc+SiaTFlnhGsfXYx5YP8892xvM449nnrhC6xvzTmb+iiWgVS
U7GwNODs7I01TyrLc4GPriiLanFhHacbH/sn/zT/7dx/ixL7G9eMjbl6/hqqqCABlhW6aeG7CN77n67nluXA+5n/93n/Avmhyz8P3MtzZxxxdYDUd8/M/+0Euv/4hzp/fZ/7d78LSLc5OTqlEAafTQhJgPpvRanfR8ppaEUBXuH7tmF63hSxKuK6LquokSQBCtWkuQWBxNuEVDzzAZ7/wOfYuXeDo8JCOYaPGBUXTYr1eU5YlV69fY+v8iIujfeq4wvciMCqSIKEWoL3VR0fhhee/wNHkjLkX889/5MeQnQEf/vAv8Nhb3oG7nKLWMl/719/JQxf2CMIVT3z8D/nE47/H5z//JH7ocW5vm3zgcM+9D/IT//j/oJJE/uDTT/LSF1/gl37+Q8imwSpwEfKcK9eusn1uh7e8+TGmtw6Yrhfsbu/S6rR59tln2eoPaNst3KVLJYg0HIckLNg/f45mq8GvfvjDzHOPd775rdiqjqIbVDKcnk4ZjUZMJhOaLYM8uxOItfY37e2KprI9GhGHEdfnY0pLpe+0mC0XqLJM4cZcbvZYrwN6poOBQmmaG9SCplJoPqP98xwdjkELETEwWwZ5krI9GCHINVef+yKtXp+qhkajQ6hJNHSbyImYjidc3N+jYlNu6TQsBsN9FusFpmEgmTpZEhCXFfPZEs8w2em0qTSZXJAZWE2C2ZpOv8fx8TG1ILBcb6BRiqYSRREVNWLHZrEOsE2VhqLjr8fUVYm/LqmrGM/PKYtTFFGh3W5TJRlVKSNLBu46wItDGk2Bvb0RvhuxFHPuHuxxeusWa0mg3WoiyMKGCV6JhKGPF/hIokK/02VQsin5lST0wmEtS1w/OKTTGWCqClVV8Nxzz+I4DgBFGiOLFbaoEKc5xdJlMBgwm80oioJ2u42/jklKF7nQkLSQulbJ8xzdNKlrgSreoKajJN6kWMSavmUzFzL2nS9nYf3/4stO4nVdPwO88k95/e1/xvtr4Ltf9ghggxlVBCRRZjWfs39XiyzISanIagnJklElg9VyTp6k5IWE1WlQljVZkmI32pDnVLcZ21605qXjq9SSQKJkFN4KVZFI4xhZNfCDBFMTmZ6N7xxLVdEZ9THjnPV6TUGNM9qiCmIEVSWvCgLfI6sFnHYLY+hw6I2x7RZJ4CKLFXIlsQwDNMukqiqSMGWZ+wiGSpaXZK7Hsir53NNPE/kBA6ODcn6Am0QkcciV6y/RspsoirIBSgFBnaLrFlINcVXwrg/8db7lb38HibnF47/3MTRFoag22NlKkQjylHbT4NrJTdKk4JHmkK//R/+Uz1z5PH/jXe/g+ePr3LhyhZ3zW7z3fe/j13/7N3nb69+AaVncOjgiK3JGgy7Zco3T6eI02wRBgNXtkecpqiRj2TJ1lbC3dxF3uWJnZ8TRwXWoN9vDQjbZu3COw/Ehg94Iwpp+a4AXhZSmjqnq2GYDSZI4Pv0kvV6L9XiG0+kyJeTe1jaTeMpob4fj8QS5gofe8GqMK1eYffwpspXPq++6yPjyiB//R9/HY694mGWz4vGP/SIdrcHK9ynLEMtsMLrrEnuGhoLCfXdf5vGnP43Rb1JLMt/zPX+fb3v/t1JoArIIjmNyeP3aRqWmWHhext7oHLPVEnKB+WSFplpoqsVUzjnx59x9/jJLd827vuY7SaU1P/5DP8iqrnjfe9/LbDJlUcTo8RpBN3GabTw3YLC9xdgd09TurAmuZZW1u0A2BcIwwJFVelsDwiQlzBJarRaxHxC5Ic5WB9PoMFkt6NkmYRgzX8ywmg4JKSenY3RTQ1ckQt/FVDUWfohNk2C+Zn9/D0lQGJ9NWWgKESXjdYBuG6imwdr3SNKSVqtFv9nkwFuSyDWmLHHr+i1so4UsVOw0u/jEmKLI8Y1bNJtt+vt7vPTiC1SBj2FbLNcrDMO4bbkqqYoS1TToKi1eKlcIbsLYCNFoYVgaWVWxc36Xa1deoqxBkcDz1yiKhKgJBOkaRQZNM8jznPlqylZ/i8n1CWdFjiKJtGWJyXJKXlcEUcz+aA9/eUbLadBs9RiPZ6h1he8v6Q23ODw55qH7LqA2RLx8iV9LKFHJaDigqCv6nTbL5ZK4yFEVk1zIkBSJ1Wq1kSibJp7nQR0hCCaVHCAINiIiURwgihs1o2qZeHlMUhU0bJs8zwnWKzTbZjF3X/b8+RcCgFVXFbUXI1Nj2QbTm2eEYUJVlmiCQJ7E3HvhAmot0G44tJoNJpFHR9W5dG4X318R5SmO0yLKcrrdPpKoM5uuUfKCKvSR2Wy70tBj6FhIOfRl+46xJGHC+GSGf+OQjmkRyyWraMXc8xicv4Tt1+xevohpOwTumqKuOF35ZFVJLoqUikJsKPhpSJTH1FXGPXfvY8oqWq0y82N+73NPcvDCixiViFiWRFmCl3iIQomiyRv4VhSQpjG+75JlCQ3ZxDAMVEujYZislmvWic/s+QN0ywCguq100mUFsaqRS5Ff+tWP8Pu/+esklccTTz/O5UafX/3Up9iJG1wYXebsYMrRlWu8981/jSj0mR4cI8siw1aXs/EcSdNJy4qbtw6RK5GDW0dEixBv6mLYFnkB47M5iipxcnAL07TJK4l1lJBmHicnRyznLmkUE8w8qiTaCIzJmUYLqjjFknTKOmO6mCOINbP5hOn160wPTnn0TW9EMUzEoqLZanF8bcx2f58HH7iH3/5/PsK//tmf4md/+d8R5EuqFtiSyute+UqsjsXO+RGPvPXNvPINjzIybDhb8Ju//BH+xY/87yhBRk+VUTsOb3vLm/nsZ57k+vXrZGmMqRpEcc5qtmCnNeDFZz/PMlpy7133I5smYRpiiqDWAuHhgge2L9I3Grz+K9/N8vAWn/zQv0ewFRzTIDweY1kWaZqjiSZNS2fQtAjKEDSRTi4TZ3diRyViLFNFQcVRNqWqp8sJiZQTJDEZ0NnfJVQrlrMlVl1jKRoDSUVxCx659CAPjc7RqnTu298h92bsDobUeU2cFxRixXJ6xr1330VeSyziAKllEEcB27JKp9FgOXcpBBnZNCmDmCjLmR+eIHgxDUmnqOpNmaUQYOlg6Rp2rROVJecunIc05tZLLyLJAquZiyHImKpCx7CRauj1BthOAznfHNh3zSaVojCwOqT5GlsowV1za3bEuZaNlweEYUjdtKnrmiiPcew2RSXSs2zajRZpVHJ884R2ewiFzKquUGuVosoRVJl202E6HZOlIl5cQFZw6fwune0uKiKr6ZLmdht3FbBZE6qUWYlSaxiGgSTITBYJg93LiJ0tpuMxZlnTqBQECdwwQhBVTL2BanSosgRKmXAxQ5Gh47RRZRHdrInlFDmHNM1RsoJ922Sr7ZAVLkPxL5mera5rDEtnvphiNUxkXWPQ7iLmIJsmmqxxejZBtxv4ccKSjEYlscwiJrMpFjqDdh9v7ZLFEWfuknWwxqQm1QQM00ZVVZbzGbZtgyRTWCbLwLtjLKqhotsGu697mKIoSIKMZijTaBhcv34F0Va5de0GagWWoqEJAoOGsTHbBx69bpvI97DUjZ8wDRI6zQ
EHU5ff/cMnOJtNKcoSL1gRZQFpFpIkAV7gM1svERWZnXP7XNi7QKfVxmk76A2DpKpQDZO1F/A13/R1fPP/9K0888lP8unPP0m326XZ7aFZNiAiCTJ1CYkmsqU3UCyDj/zGr/H043/IQTGhd7LgeVwoFO57zUNcHx8iyzJlViKYGt12D6vp0O33KKqSJIy496678aqE+/fPo7caFIpIFBbUyJtyx6LcmOWrEkGREAwNMa4Z9Xdo9rsYoyZoAl7gIxQiTc3hnu1zJGVKImbYmopU1kRZSkrFww8/TC4LfPbJJ5keniBVIBsana028/mYKy89T2/QZbKeoHeafNW3fBOxY3H/Xa/Ana/Z29lluZrzWz/9i/zKL/1bnj+9RuP+u/juf/B9vPpNb+DWakqgiLz/XV/FAw/ey+OPf5zX3vMQlZ8ym81ob3VpNB0ausijr3oY3/eJ05QsjNFVDUVRWPseCAVJGtG6fJ6P/sJ/YNuSuXL4Eqfukrc99mZCRWZ6PGaoNdEdm5OjU24cHyOLClmYMvc8hD8FO3o2n2KaOr67ptR0nE6b0F3T0jWkrKQKE6aHR7QMA7vpcDZxWXohcR4i2DpHx6ccHR2jCAJX1zNyx+bq2Rlmp02VF5i6gSRJvHTtKm4aUyQ5OhKlIIIksfDWPPLQg4hFhXc0JlFrzLJmUqZYvRGOYSFnKYKsoxs2XhARJAHtjs3i+AxvvqSuKkRRJMtzRG0jQpBNnaPpmPF4ynS24HB8SqXJ3Lp1hTwOsG2bdRDi6DpHmYfZsvFOzlgVGb1GE1WUqN0ApI1VaLnySLKcdRyw9FxM20JQZHBXKC2dlmqwqDaEQqEsvnQuZloqZtvk9OSIk6MjpgczRoNdbNvm/v2LxEpFJYsotUBH0OgM2uiGzHDYptM0ODu6hp1I3Hf5QTrnLnCY+giCgCnL5EXIweIms9USoRZRVJ1KV5DTkijwieqCsK4wMoG1KWApBrMs5aVpSKZqKHobT2m/7PnzL8Qkrigyge9zbu88gR8hyQIL1yOrBJIkwWo6rKOAZeBRCiBnJc2dLWQETk9PKRQQNYlGo8HAaW+kA1WNaZo4qKiOw2S1oNFt48Yhh5MJoh+RN9U7xtJUDWxV58aLNzgtAtppzYIMf+UhlgWrPOb1D7yKuiwQRZHYD5DLkuV8Sp7nLGdzNFGmozZpKCaSYfFjP/8hroxvkhCzXo4RIp+luyaMIlzXZW9vj8uXLzLodrAsA8vUMVoWw+0t9kbb7Pa2sEyV9WzB3/zab+anfuKnWR2Pef7kFrai4S5dkmhzaCQIAnEcMxwOGeyMmMY+tSRi6waxIfCff/FXmKYBkyc+j6wI/Mg/+1H+2tvfTaXJ6KpGZWiErsfZdIZa1URpRFylREUCbsBnzq6zXC6pvZjzuzsoMqRVgtWwUSyDMAw3DU5+RG2ozFdL/NmSZiYgNAWsjk3iexy/dIXFeE13sI2XFXzgm7+d49MTVrM5SgV1sWluEgSBIArZ2d7m2hdfpMpSrr14hbe86Sv4+Cc+yajT5/79iyjrmFZc88Gf+j/5xKd+nxdeeJ5XPfwQX/+Bb+XNX/lV3Hv/Q5imzjNffJ6Vu+Zrv+6beNPb3sXHrj+DJNeoEoyjFZUsEZc5z1x5joXvE2cxcZbTH4woqMizDEQJNI3usIdtWrzija/nbr3DT37o3/DPf/JfMlnMec8b34Gqa7RknUt3XSSTKtQixyhrzEELA4GWIGNvtdHUO2mavWYXcoGdrSGRVHM6mzDqDXBUA8mxcYuETKwpJVilCUIS4AzaLK6fUcgitaJwPD6j0gSIM6owo85r5FrAsQ1kCbq9FmESsNXbot3tkqY5jUaDqKiZuSteuH4FL4kI64Ke0SAhp6laiPMl62AJDZ3Sc6HRIKJGatgcz+a0treYhx5Gt0WtSHS7Xe66fJHRaIfxeEp/a8BDD76CLE5QBAV3vEDTNFRF+VKKM0oz0rlPy2oy3N3bQNJWMdM0oKImCCI03UQURVqtFpqiogkSRZRgqhpRXRCtAzJBwJAk7Fqlrdo0FZOmYpJlCUWeMhxuU+QgOAqLaM4qWXJrdsLieIlcqyRpxSrOCMKQsi5ArJFk6Pe7BPGcKAiYHs0wsUnjAt1ykCQFU1QRy4yqThDFnLZtcmVywHw5YyiabCc6URqxY5lolsRWu43SkhGqnHZVsVT8lz1//oWYxMuypNfuMT+bIYsaSRgR3ZYnkxWsp0uUWqIIUjpag5ZucePgFnVd0+510auaLI05Xc6YpQmioBLECVXLhBRWsyXdZotw5VJmOY7dJK0ytqM7v74Xhdw8vsVJGlL5EY1Rk3OiglirWLaDreo899xz1BQYhkESZ2RJjiLJ7OzsoOkqlqFxfTLhd5/4NFeu3yCNE9RSRMhq1ksX10vQDYtLl+7CabZ56MGHGQ2H9Hs9xKom9HykumS4M+LiQ/dz71vewOUH7sJoNXjVo6/jbe98N5+/9RLZMiKVRERVQ6hBRNgcbkoilSgwPTjmA+/8GwwvbfK5YlnjVBa/+onfRS5ifu5DH+S7/vbfxVLbPHvlBbZ6WyTFRnqRVyXz2EfTdcSsZHHjCGnL4VKjQyJUeIaAWGQUeURR5cwWCxzdxNJ0jGaDoqHRMy0MVUEyNCZpxOX9+8jCGqFWGe7u4csZklQgZRHj1Zg3vucreeqZ57hx4wYnswliWjAYDrn3oQfp9npst3uMD0/RJY3P/OFnSPOSD3/o3/LsjZc4E1PiPYe3vfPdvPurv5Zed4c4qJmdnVIGAsEq4uz4BpJcc3JwiyIuePub3skPfePf4x9+/z9BtXsEy4woSTi4ehVbUNhqd1F0i9nC5WwyRZIkRFGk0WyTI3B8dsz7vve7+fhv/y7v+cav5d1veTNulrK1s0vhJxvAmQpXT25RWSK5UPLKV7+S6ckxBRmnszOqJCVIlnfch1FekQLXz07x5/MNnjXPuTk5QytADDPsSmLLaiKsQmI7p5nnCGYDzdZZ+R6DC3sYgw7ECf1mE1WSWXhLxkcrDL3DeOJTFCpFlOD6wYZ1XwkQF+wPRtRFvtGMqQpZFGJqOkotkNkqrl9yfHPJMs9ZLBZomsFq5mOoDnGacN+DD1DkOXIFydLl8MZ1zg5P0WUdf7XkaHJGo9vdHFr3uhSlgIyMpqhQ5CiodM0mtw5OmB3PWCYxgqawZ7Uxspp+b4sgiEjThOV6QSlAUuSkeU5aFrQ6A3TNZrZc0W1u0qxLz0eWVOIoxXIa1FFOJcnEeYlaKfSUJltii465hd22cLo2VsNge29AeVvgcnxyRi2JFGJFu9Gh5TiMBg1ajZqm08HPC3SjiRyLtLsjEt3ALWqmkxVOp83W/ohMqIjLEt3pEq4T/KhgEcRoZYZSbhYuevrlmfN/FH8hKIZ1DXFeYHdalHWGGmugqhhRjl/WVIpAJQoM2n1EUWQ+W3PfuYssphMyL0R2TMaTCQ3FJE4DVKeJ7leIQUkpCZgCIEusiwyj2URSZDJV5VS5s4xHtg1aJXQ0iaqChZsRqQZWU8RLfbJAxlI9UHqE6
4i2voHexGlEkYDT6PDE00+gIKKJNf5yTlOUmS0nRNM5owu7CEOVi90hb3jDY8SLNS888yKiprB/7gIXdnY4ma7Y6W9xenaLpuyQrzJ2S4v73v4O/uWP/whf8apHectXfwOrt8b8q5/5adS2iiAq+Ms5LbGBEMV0BZHmzoinb76IfzbnX/zDH+Yf/8SP0Whb7KHy6899mldfvJ/PfuqjTKqEt7/mMYYDk+roiKDIaNsattYj9Fy0fg/J0Cg1DXfuYyNSpRWT4zG1YWFLGrElkBQ57X6PMPBYHZ1QjhIuGj2iLOLerT0e/+3f5qGHHuHs7Izl5JSqFBGbTTRdIVz4JFXE//It386HP/VRzp79AovmFmfzMd56iWXYFFnJwWzMpfsv8Y63vg4DhShJEJIa0TZY35hgSQbJcoUuipxdv8rWxX3c1ZrAr+iPtji5eZPFbM6Fi/fyvd/57fR3duh0HNJ0hRt6aJLIwl3T6/V46MFXYFg6y/UKW9URVZWT4oQ3ujq3bhzyox/+Ba499xw//He/l8sPPsDF0ZDg7JiH3vpWsrIg8HwaloaJhBJULII5um6iIFOVIkmWodQioXsnErmqCwTXY3BuiHvjECUuaLdanJ6Mmekx9qDHukiY37hBuz9ESkVOzhYoTZtWWNB1LPqdLa5evYozHOC6Li3HQa5UdMvk5sFL7O+fZzWbM3XHiEiblfJySVOXWa4iTMshjlPETCA3DCbTCZpi0gwqrI5JRc0jW+f45c89wY61aYopxYpLw32e+NST7I92iPOC/mjAajYlSxJ2d4aUco0mqpxMxug19NodiihhnccMBz1u5h7DjA3MzZIYCAqDzgC/zIjDCFeumJwc4LRbJOuQV9x3Ly+++CJG2+TCpQvcunGAIqksFhOaqoY7n9NsNchLAc/zUIElArKmMl+M6akaQlrhVhHtTovZ+AxFNlnlOaYgEa4CRKGiaXSI1ikNWQPJIO1WRAuXdRTgOA6KXGHUJkJSInZaFEWJnQpIioxl2KiNBmUmbEqqOwa2rDMNYiwRKkBA2zBUVAPjTzmv+7PiL8QkLlATeS5ZogMViZrREVVOxZhW1yY6mqBt9TY8kjjHS33q0xJdUqg1mTPPpWU3UGWFUrIoBKgUETkvaJo2wdqnyBK2W12WnssqmvKGR17Nb33i43eMJfBd+v0B07MpWRLT6LbxF1P6/T6WZqGIc+RyiFSVWE2TvK6oVZH4bIUfRzz70jUWoYcja6RpTJxFnJxNePjRV2Nt7yL1TB64cImP/sHjLD7+W7S3u/R6AwatDq6/hromzzIWi8WmnEkU6e+MSOSIOkv4+re9m1kS8eSLz3D9+g0MRcSoYZ1EaLpOVde88tFXs7uzwzNffAFLkti+tM8P/uj/xvve/7f4zU/8Pq1CI792zFOf/QLvftdXcI+5RVbCZz/3DIatoegGeZ5RijFu4KNXBTUlaiGzTAIUSYYczu/tcytYUMQxtQzBegW3a/jb7TbDSuez3hG7vsBTt15geG7ItaNrWJaF2bKxmi3KuqIU4Nyle/A/+gf8zvyz/K03v5uiafDCtS+yXC5RR32MVpPeYMBd8wjDtiAVKcMEu+twcvQ8RWwxam1hNgym8xmqJCPZBlWWEiQRqqxx7bkr7J/f4eLd9xCFCyRLw2mZxHmI6/ubbfp8TaPRYLS/y3y5YLtwGLbanBwegaah5jXG29/A93/gfyTPKp766OP8xid/j363xeH167zj67+G7U6f+XRCSEbky5tOPWHjXpy4LllVYtc5liLRazlkvTvZNW1F5r7LF/mdp57gwuVLpGnO1ekJveGGO6cYAAAgAElEQVSAMvGo0gixKtm6uEe2CImFEkmTKYKA9gN7PPfU50myFL3lsJrMkCSJo5sHDDpdgvWK7d6Q0+MT7r73HlbXbiBpKt54AkWG2u+jaQqKLJKEMZqtoKgSVmGgSzqiYSLmGYNmlyvXDnBSsLdsMgoGkkngL2hud8hDH6Nls/B9xLxG0xQWqyUzd8Wl3R2qOscLXJ69GmI3W0iVwDJKGOUqyzpmZ3tENV6ytjUutprMb90iMkScUKbR6SLXAkKryfM3r+FIOnIlcf3qjQ3ES1fpdFpUVUEURSAZ9IYDMm9NUcMwEInUEl8tieuMLCzJsoo4jrFth7bVYBmsoShJkwS5aVAWGXuDHnka4hYxZrNPeZvVvnQ9Yt9juL9LcDShkDMaVp/aANnUOVv7dGqo64osihCqlEQIKOuSNA3J6hRdUoikin6jhRz8JdOziaKEommI8qbEy8o1slrGDERs0aZhN0mjGK0S2O4NUEUBwzCoBRFZUbhv6xwN0yJSKs5v7yLnFY2mgztf8qbHHiMxBEIx52Q5IZFKEg1Obxzwyt1Ld4xFDgqCIEBURKyGQ1mDqhus4oDF6gSptJDKgl6ryeHhIZ4XUNUKTz7/HAUCseujFjWzYIMfnfsu3/it70NKCh595BGSKOXxpz7D5dE5Hnv41Zxr9dAzgSovsAyTOAhxGhpCWRGFIZokc3jtBqFfobX61LJItZrS1hzOTk4ppAq3zNFKcDSbPC95xWtfx+Dy3Tz8xsdonN8myVLe+MBr+I2P/AryNOTEWzMa7tBuOfzGRz/Kjmiyjl2cdp8w3ti5fd9n7rtUsoyqm/jumvuG++zedxe0LWTHIk0iTFmmbVuoZQWGSm97SBJGWJLOtEzZkxy2H7x3w7IIXU5Wc6SGid5tMrCb5OsAS9xMsKPXPsTDW+dYpx7ZYoVViTx47i4u93dpFDLldE1uiCyCgLguWBU+186OePR1j9Eybeo04WhyhmyqjOczwjhgudwccmqKzPbuCD/O+Ka/+QF+5Zd+ke293c3hdZTiuy5xHLKsY3RV5a2vfJRGt01majiX9pFHA77h276N3e5F9lo7HDz5Rd739v+BT730RbwsZHLzBg+/9+1kUsoXn32KcD5hHfrEOrSsJlJYYveazBdT4jje6Kolmel8TpTfuRKPi4SPnr2IrVlEZ3NEP2G/2SOdr/H9kDTLsBSNW4cn2K0m/U6XZsPAMBRW41O6vSaDQQe5yDCRGDY7m8aarKTR7pCXG0b94cFNrP4ApeWQivVG4pDGFFm2cXxqGkJe8tKzL0JeY+oaT918gTgOOXzpRZSuxUNveC1VXtB2mhxlHvqoT8urCHWZ1I2Qw4TDYL0BdgGXds6zXAUUKXS72yhaA6UWyKsaIa9RB13iucd6vABDxSklPu+e4jVk6jAhVgU0TQM2h5tFUWB3bARZoKozmm2bIhcZT9fIqo0oGhuAWxjTVFTEtCCol4xXY+IoR6ssMDX0to3a0IkoN99D03ClnEypUewGXpwTxiVunJOIKlGc0nIauMsVtmFhWxutYqvTpNfuoFCQBkuKJKDME2TRgVrEaZrEiUteFqiqitNobRSMhoSepLiLKYHxl6w6pSwrdMsmjCNEoSa3ZYSFy/Zoi4PTQ3xVxE9SqGriNGDQaJIkCUGWoIoSh/Ea4gRH1rg2OyVNU6b+Gr3t8J8+/jsMTJN2BcNa4ZLeZFswmEVr
3ODOWkyj2yMXRdbrNaKq4AYJlaRxrtunZdtEeUkoFxzdPOD/pe7NYm3b0vuu3+z7ufpmt6c/99y+qc5Vde1yV3bFNjGyRRAgOUSRHMDCxgk4vEQ8kAfygGLlIYLIRAkgZIwjDDIusCrYSarKNi7fqlu3Pfc0u9+rX3PNuWbf8rBOEOGWk4oEUjGe9phraGmuteca4xvf+P7/nyrKmIbLN7/1AT3L5cN33yPOM4I0RhUkfurH/wxZEPCpF18ibDK+8e7bPDe+wQu3XuTw5h3m6xWSrpHBjh7e7EDHvY4LpkYuNNy4dwfdtujJDeFyznLjk5cCf3J+Aq5FvzdEiiuSRtjl2QyH3/3yV/gvfvVv8/X/8Xd56/e+zsNHT3jr8Ucc7h2S+jF//S/9FaZNRqMIaHaHr3z1q2TVlrrZOclJishgMCCpa7ZJzmYTMGj30FWDdLVFiivKICGVa+bLnZq1qHL22x3C9Qbdtdk0OQO7Q5wXXJ5e8/yNu9iaxd2bd8iilCYvWcyWbP2QrtOh2+qTTDykvs18u2RbxxiuySYIKIqCtuUgZDkjw8WsQchTDBmOOj0uJoud744SYdsmqqrS63dQFIVMU5ivV4RlQqBUfPjkhP2Dmzx8710kROIwRRYVqBs0SWZktGm32/zOV36Xj/7gLe5073Fs3+bnfuIvcPYHp0QfTfgbv/zL/NqXf43enT7vT85Ybj1ef/PTpNMFkw8vqRsFwW7jakOq9ZJNnHK5jUg2Pi3DQpYkVn7APAhJJJXVdwAAlHXDKIT2YEDR1GyTmE0cIqoKbdMlq2r85Yau1WYZbNgsF1CWeJs13jwiQeXD02sEyaKSBNZRQP9on1KEtGmQTZ3p9BqhrLlYrAizkmAbkUc7D/CqKhBF0FWNRlLojMbIts3Fesr98T46Nb2OTRlveP+ddxmYLnuaw22nz/yjK2Jdo1uqVFWF2rK52RuhizJhGD4rQ82om5KqTHEdA1XY/Q+qNOVqfo1rt0gF8JKQJo0hzii2MS1ZR6t20JNtsTPLshSNTd1gtPu0W0NSPyeOAwShIssjTFffuS2u12RhjKkYRI2E2+7Tb3coSdiuPKo4JY9TBLGhCmKEssTO4aXxDYrQx9Al8iLGcSxIix3vNo3otl2KLMdRdE4fPSFDxDE65JWI0+nidrp0uj3aLZ2yLJnNIlR5hCBrbKMYf+0z6o1Jpj69gyMKSaH4l4jEvyfSKQCyZDBwxoTpFjXPqC2NdZpgmiZiUdB322yTFK0sUEUI/YBWt0Pge1iaxarKaeIIGQHDUOiZJnmaQlMw32YYlk4UVaRFSm80Iri6RnY+LrK4Kjy0RuVo/4D1ckXftYjyLefXEUmZMt7fI89KkrqgKGTePXnEfDZBlWSQJU4vzvilX/5F/u7f+lusZlO+7/ve5Hd++8vcHo7ZGx4QRRGy1FDGMaqkIssyQ0ei0x9w9uiE/cGIR1dPMJCQFJGPLp7SqAJZpeNYLkVW0t0fEfzB13nu5jGPz0/BkrAqKCn45Ksvc6e9h/ZDX2B+fcXtu3eI45hlFvHWH73N4d1b/Ge/+p/z1//T/4S/9ku/hLM/ZLWa74hIpk5f67O4vCRSZA46baJ6Q6WI2FabD68eE0VbdN2g0TRmyxWW02LhewzGI1abNSoyWi3g1BJeuMEwNKqmZr2eM2q3WQQhA9nmzJ/Tcxxe/NSreIFPmG7oHh2wWi2oBZVGkknXCW7bZTqfcXjjEM9bMfc8hjeGpFHMarolKZY8fvKET736OqsooTErlpcTVFFi32ghOhaTyYRux+X86QWuY1MvpiiiznQ5R5RKmqRGd200y8RAYTlfMB4PEXWd3/zd3yAMYjqdHpXU0MgNxvGIqMyZz84Z9Xv0xw8gLzF0jcFRh7TOif0ZiqJjuV3WyyWOruKnGdbAovJzjKZgfHhEZKrk683HfxBuizSNYL1BlCyWQsTIsamKkmKzoRQqem4Xy3KYLq5xBn1UVWVPVok8n6PDPc7OLlgtnlCLDsfdMdswIBYy3HYb79rnjZdeYx1uKMuYo+EdsiwhWy6JbZ39do8mTNk+KzKQNYEsDtBFlSatUA0TzdCpRNivRRZyQu3FyKqE3igIBYRNimTJbOI1+SbDafVot7tcX5xid4fMFlPEsiFIMqwwRbCUHVzZMAmKDT1Bw80kpBrm8YbBfp+kUBCFivPJFUeHN5jOZ4zH+8SRz2y6RZZVJBWkVERptfHiFDdr6LUNNpstqqmxyUJQRRSpIfTXKKqE7TpEZYbaNChegTrqsU5KMqDZethhja9XWIMeRZCiKzKaLJOIMlEUYzsOnudx5+Am54sLgq6AkoXIkkG02dIYkCUhgpBx7+Vjwjjn9OEJd+7d5Wx9SZVtcPZsguWcdO3ROfrnunf/M+17IhKHmunsEev1JXUZkojs6o2bCk0SyZuCrEixbAOxrsiqCs0ySdKSRlShFugPBjQyDEc98iJmsZygaRJ1naMrOnGccrB3iC5pRN6WWAI/jD52J+1MYNBuU9c1qm5wPV1S5mCYLqIos/VD0jCizkXma28nd69LpCxneT3lF37+5/m9f/i/c+/+A87OzkiikJ/60o/x4iuvEpc5JQKmYu34moaKM+wxubxi8dE5pm3z/sUJei3jtHrUgsLl2SWupBEEAeHWp6ozfu/3fx9UmdV6vVOwJinboiD0QsbjQ5K9Fh9cXNAdjwnrinWRobZbfOaLn0bTKo5ujfgn/+2v8/kv/ii2oBCoFY+//Q5pFPB0csnw+JhWp8dyMaPtupAmrFYLkqogL1KqIkMSGkZuh67lIABZGDNd+mR1Q1bE6JoC5s7sSIh3W/M0TLFVlU0cIKkSFA1lVqJIKgfDfdxGRggyXj6+R74IKFWBIss4How5f/iYg+MjRFHET1Men5+jyRp5kvKDn/sBZlnK0OiyjX06fRe7Z+PlAfPAo1YlomxHr1fLimnmc+0vkLIcJWtY+2tu7u9T5QXT5QzNUCmKivlsTbyaUVYRm+0cAo/E85g8eoRT1Xzi5i0ORwOORmNc3URuJMqqJklyZMlA003WGw+n1WKzDdg7GBMtffbcAV3FIV4EeNeLXc72/9GyJKdGIEKg6Kp0FY3T60uccZ/Rc3cZuh0auebD9Sk37t7Ei3yiNCKPI2pd4d0nH1HmBa/eegHTsLl8co4Q5xwNxyyeTslo+PbklNPLK+RM5tLbECxWaIM2Vtag1gKLzZpMFZBMhUG7S99wkGiwNJUw8PE8nyzMQZGokwRVl4jjkE0Tk0gFRVNSRTld2QZZQbd1GglKoUIoKl64cwdl66OVMYKp4a02bMsMkRqn30E2NPrjPURNQ7NVmqahrFKSuOR4vE8ehdza36eMtkxnu+/RlRQ6qIzsLlkaUJJQ5Dnn2y2d8T7xNqZvOOiijKnqaIqEa9kIVcmLd+8i19Dv9+nqBiYCmiCw9ZZc+T5KCSQForNTh3reiiSJ6LoOobfGdkw8MeOG2aFehuh2C7fdQpbA1TTERmQ42Gez2hJvIwYHexSahJAKbA2bulTZ3z9mvL9HU31cAPa
nte+JSVySJVRVxbKNnedGVSHKEu22S1EUjDo9dFmhaRrSpqIWoNfroaoqmqah97tUSYWLweVsga4OMPURTWVTlxZhHBNFEfPlgjAMaZk2frj9joZPsqFwtZwRRRFRkmGaJqZp4gU+nW73mS2sxOVkytXVFYPBgCxL+Nb5Y/78X/g5/pu//Xf4kVc+haIoKIrCT/3kT3JycoI3nSA0NVmdsohWaGlFs4nI5h5uI1OWNZIk0em2aEQBSdpFJYPxiMV0hijUdDouotjQUGJZ1u7QT91F82Wc8sKDF3BUA/9sxksHt7j0Nnxwdk6Q5FS1SDBf4AcrahUeXnzED332TZIoQmwgixMkSUDXdVarFVmcYLdbTJ/xSEuhIQ9jhr0+WZUT5ilhsGW1WlEUu63f9z/3OloFoq6iDjvIooQiSaAozCOfJMko6grVUBmPxxzs7TOb7b7rq/MLZuGa0e1D1tkWe9RDLGt6bpttsGG4v8c8WGG2XQQv4tbBEZmt8+DOPSaXV1iaztbbIEkKaZKzWnoISFRUGIaBbdtYpoMz6vLOH/wRiiSwlnOWQcCrr77K7cNjvvj9P8jd+3fYhFuW/oqszlFlGVkSqMuC6fUFy2jD6PYheq+F0jYpqxRdV9lsPOqmJC4ySgScTpdNECLq6g7PpWrML695sHeTydbnLA24Dja0BYNW+zvg2vyY+6ND9BKsZUhRF9zr7ZGeTplNr8mDgESsGNougR9TFA1lAUlc0Br0GEoaaiXy9YfvIlcx9sBhWRZMn8yxKhkhylDignvDAzZewHQ6RTcMLq+nyLZBlKUYmo4hyKiKzny+RpEtFMWgkUWGB3vEecY2iUmimLIs2W63iIpMX9SxCglDUWlMlUerCXUNQRA8W7BEgvWK029/gH54gBVrlNTcuXULQRKZXF0jeTHr2YLrYM1Z7DE2XNKJh1sJaGnD0luTlQVBFCJrKq8+eEDHNgijDWVTcpJ4WKrJSLFoHw7JwhTHcJARqLIU1+myWW0QGpmqalB1k/UqoOX2SKOUi/mUjBrB1ikUkZdvHLPJt+RNjhhFKPbu922aJnEcIwkielIxjGpiqcEsJahqgmCX+66qhrouWczn3Lx5C0WSKdceyXzBS8c38E4vUBSV68kEXdcRmu9+av6emMSrskHTuoDJdL5BlRqqPOL8/JQor5it1lSNQBylaKK620Jez8iziDjxyYuQqsmpRBj09/B9jzAMCIINoghIFZqjESYBNQUbf8mt8QHNd5A7G0hYiEiSgqSINEKNJIFlKwRJgGzq/Mk77+DFIYasM7uecTKZ8Fd+8Rf5jd/4Dd78wpt889vf5LVXX+Znf/pfZT6Zc3E5gVLhxVff4OjOHeq6JilzhLLm1RdeIhcaQqmkLhLi5ZLu/oCTkycomsw28QlJOTy4xcOHj/nWt95mMl0gVQ1lUZDkGWlZcLs35tWXX2RZhjjDDm9XS7otl96whztoMwtW3Orc495zn4BUxNM0mumKRIa2bBKJsF6v0QSZpsiR5ApZkMnzghfuPI/jtJBNneVyyf7eHookcnh4iKypxGVOXGQsFjOSJKIsctIgJo8joixFUGRu7d/C1A28aIsiCcwWU84uTnntE6/hbzf0eh0ItgjbEO/sEheJcb+HHweIugqKQJambOZTFEPBcRwcTeNqOkNuGXiXZwgtGaqaxN/StV1c3aRru8gCbP2AXqfDyHFZlRFvfOoNvvTKp/jBL30JXRQpKDk5OcFWNF55/gHDfgdFbVimCXkjYrotnvvk63zy+DZ3OwO6soGJQSOZfPD4lPHxDUoaFEHgaLxPsg3ZHw9xZIEmz3AsG003udjMsfod7r/yCo2u4Oz32AYfVw7Lpsy3P3iX/dGYKSlimGF1LWIxpSeqZGKNECQYpYCQ78hWDRWGoSEGCY5l44z6yG6LKMnwPB9RkVFbNmXfpihyuppCEHm7BakGrd/lueN79EWZy8cfoVoKx8/fosozrE5rtxBHIYEfM52vafe7tLstUj+lESRapo0qqWw1lVmWUQki+WrNvXaXbruFpshUZYmmWhjtNos0RjI0IrmhBoLFhlavy8FgQNmUGJpOmWa0VZuiVukNDnk6PUfvdmlUnVavT1pWLL01Uz/Cz0ukVov3r6/oODrBNqMoNSLPZ89uE65WPP/KA05Xl1xcnlELICkiK88DS2EdbzF1C1kAt1EhzCnXEX2jA2mK02sh2gZxWlKmMOgNWfsBcVEi6wZrvcFAwI9CerKF0jQYhsbRrdv4SY6/mtPrdTg7PWHQaWE7Dq6mkagNx26XTqtLWVcUeU3x3XOSvzcmcUGAKFiTlBFVEaEoLVx7gOW2sHSNRoE0DhmM+qQULFcelmFgqMbOrraxETRtZ0bvJ/Q6LSRFZNRro0oVUtVw3B7QdtqUDRi6QxAnGK2PR+JZU1GWNUVTIksClmaSxw09y+RgeMSj9x5hOSZBsCATUmbrKX/tP/6r/NZv/g/09waczK8Z37nB+dLj7YcPaesGg9Eer332Tfb3jiAXGB/cYJGFCJbCN/74D+nfOCJOApRGxbB7RF6EKjeMDZduY+MqbbIoRFRkZpsNZV6gjFuoXZe6aWjyms/+4A8jKRplVnJ1ccZ9q8u2ztFkheXZFSM0zsMJo3EHQSy50+nwO1/7Cv/GF3+chVrQAOeTMxqxwJU1BopOEAQ8d+c5Pjg9YRME1KsE3baIooCyjPEv59w5vkvXcJGpaeqQw4MBnV4XJU/RJJhtZjR5wmx+wbKp6Bk9hod3uOEcEjQC55dTer0BcZ3R3z/mlTvPo3VtCk0hqnLmoY/aspmdTXnu6B62rlOLAqdnFxRJjmQYNGVFpz/GMnu4usnewSFJUyNLAmVa49c1cstiMZuy3vh83+FztA0XwTbRi5jR0RFi3TDoWhz2O8hRxlB2uTu8wc07t3n+xk36mkGel4i6i2y4FKJIoyuYgr07J5EL7t57jrJSeHy9c6Cc+T7r7ZZSBKfrUhQZOhp1FPN0ckIsFKTrBNv9uHe0rVsUhcR069GrNEY3bzE5m1Jj4WU5486ATBBxh0ekFAhBxkF7SFLmPJ1dsExjri9OaBYb2ntd7P0OqlATpB7tukRyTSZ+ynkQI6UlTtshX6/YbufkaHT6hxh6i3IR0QgQh1vEuiIpSm4NOgSzK9bzJZPpnFRriNMMtddDVUXScomqF9iKhtvf5yorqZOM9dJHFi2EUiDLEz716c9BBqIiEqcJYZORByGCopLnNYqm43RdQiIWwYKJP2d4cJNKFRnJKvH1DDUpGRld5DRiaBrImsDB0Zj5dIXbNcmFBH8bIEsNQVnx9h+/R68zpNNrkysNlSpws99DqWQcxeR0MSEzdPwypNOzKYWMPMt4nMXIcUWx2Oxy7lnJdbBm0OpRl1BXAoao8VEW8eq9+0xDnyrJCKKQp+++x9B2qMuc/rCHWYosow37d4Y7ZN56i9XtsfKWyAropsqm/hcj/P5p+56YxMuqYjQY0HNadPoDTFli4c3IqKEs6HS66LqO7/tUVYXTdVHrhlqG0cGY9XJG5s
eUecOi3FAkIaoq8/jqnNuvvUKjyswDn6TIQRLZpiFt26FOP77cZWWFJCncOrpJlRcoioSsQFFL/P5Xv8bZ+SVhGKOICo8+fMQv/MK/z//2lX8IhoqKyBc++X1YkopW1uiGyre2Mz7zmc8jSRKPHj3ipQfPoSKiKSpZVZM24AcBjmYgSEBdous6o4MDVsGGvC4oyFEUjfV6g+8FDIdjXrx5n2ITYkgKw24PQRZIsp1V7WuvvEoWxRxIFnmccP+F57maTMiCjO0y4tbhXVI/46233uZzn/4cUtLsgBFeyD/+6tfZhBHztbc7PwhDhKpEqWu6roXiWtRJjpYJ3D26SbJcE3oesijz0qc+SYWEt1izSVL0Ep5/7gGLImZbpLQ6GpojcnH2kDhZ4+oqZRKTbSMc3WWbBrzz9ENs10HTJVq1iq1o5FGC2jJ5/+opGQJBEDLsdjgc9injkKYs6PU7TCYXDDs9kigmXK/pDPpcL2b0JBUlLVEcE0GSuJ7PuD3Yw0xqNnnMvfaATbDB7LZYBSGJKpLaEplUYEgijQq1LmCZGp2ugyKDlFeM7S6VWpCnBeIWzp885qDXwmxKOq4DdY0kyGiqwXy+pGkEZqsrDEnDKDXujI/JI488+fiOUJAkWt0WlQCiJbFcTlE1gb5t0FJU/GBDz3VYPH3KNgrRK4Gr8wvqloFSNvQcB8dQuX3rGKOUSc4WPPfKS6CorNYzkizgB15+wCvDPrIpMPdmaJZOJUpExYZxx2TqTbmYXdFxdizZuqwY9vs8PrtiPDrk7s0bZKGHrUiM2x3Onz5hsfAYKA6Ft+Xq5AxZlkESERWZlmNRFhF+4qFJCmenT0nDLZqsYBcCmiARxBEpFZnSEAg52yji+cN7iHJDJufk0YYyD0mEGsUyqIqSyN8ithzadps8zLALCbXjsIl28O6OauKnAZoKqi2T5lu0BhxEyihnVZW4mkTqr1AoCDcLxKDEzzNM1SBzDPqmQR6FZElMWKUkakWv1thkMd1ei7SIWU0m9PtdnkyuEW0Dr63Q023GgyESAu7ebR5/+yMCteRIEIm3GXGY0O/3SZMIw1QZ9vYp45yWrnzX8+f3xCSOAJ63Yj2f0Wq1yP0VvXYL2dAYHI5I05SiKCjLGmqBhe/hJSGlJvHo7ITakSmamr12D7IUggTiBEWW+ea336FoQFIVimd+J67rsgm3zL3Vd7gXiTRO2PoBpmlS1AW9YY+33vuAuMgQRQg2W06envHLv/SX+Ue/9/ssl0s6ss6PfP4HdsgmQ6E2FEzL4fN3XuMqCZjMJxwe7fH+e+8QrD1uDkds4pCtUJJsQxxR5VtPP8CwDbws5GoxY52ECLpEe9zj6fkZ73zzbSgrfvhHf4S33nqLmoYgifizP/vTJGnE0dEBg16f6fWMOEyQuhZNljG7vOAzn/8Mdx/cRNIa8jqlkhpE0+Gtj55ShRUpCpIgg9jwzsMPaLX7SK5OJFSskpBaFZhJKUacY2gmhttipZXUukhYJohFxVW05fHjp9xwBmSmTLvdZbFYYdsunxjdYVCoTKZzbt+8wzQMuP/8HURNQFBFlhuPPI1YhB5VVRKHIUmVcXh0tLMHtl1ef+EFNNNClmW89RLqktGgh21ZzGYTRKXh3FsxywPcTps/uT6h61oYXYdNEKDFNVoFczVnnvrIfZPbgz2eBAtM12E+mSIJMrquY2gq/baLhEqDApJOXYjoAjRVQaVKfHh1QWOAoslkVY3RchDEGldWWW6WyHWNY5jUecGwN0RWdeiZ6LqJVgtsvQ2pWZNsy489hlESEychjmsQKiJJlpLqECyXzGJvhzCLU7rdNrZiovRsak1GbWR63QFn1+cETc5V6HGWhyxtgQ/ffgerFOgMjukIDmqvTaFK6KaGIAjEaYZuOwiSRpY2yLJFLelMFnM6owFpXTJfr+hqLY4G+3iex+jeDaJNSLTeWVu03A5L38cZDNh75R4XkwsGaOSUCHEKdcUqD2mqCknYCdtURaHQJdAVsizD0Qx6kkri+2wCn48eP0ISdHTNpUp2HvOX3oJlvGV445AoS+iIGo9nM9Iwxy9Tkizm1p27UIqkUUySCzS5COrwDEkAACAASURBVKKMKht4RUK73Wav3aMIM0pBxXR76HoLWXd4+fs/hz9d0e52KZ5eUakGlaDjtoaM2wcUm5S8b3Db6VJuQ1Aa7H6HOEupReioNjdkh5PlhGUUUKgiVRrQeuGQbqTxcB0xWW6QHJvzxTWKrVIoIoKmUTQNUf7dy+6/JyZxXdNpxAar43Dx9JzC1VhGIf7cY/n4ks1qg6bo2KbF3t4etigxHPQoPB+nEZDTmvGwzSZecrh3ROzqpJqM3AjI/ha7kpHS3YosFgXebMbrL7/C7Rs3P3YvgtAgNTWSAmVTkhYlJ1dXzJYrNE1BFBrm11f88q/8h/zXf+/vIjYFVDmvv/4688hHd2386YKf+tKf5fjgNu1ul2S7peU4fPObf8JsMUXWDU5XVyhlhRlXlELJWhM4NFxSf0MrrOhJCvuGQ73acvLW+/zRH/8hktDw5/+tf5O6LkmSjLKsuXN8k818iWtYPHzvfWgqfN/HcmySJOHTr72Bqxo8ffqUycUlqqRSlw2rlcdf/cv/HoaUkWoBPSUFWaASS8IwIE5CojzCjwPcXge3tXvYs3XIpklZyBnXixlZtOXBg+c42Nsn/eiSdq/LSRNwR2/tcoOZiBGVXOdbcrHipeEBjy6eopkal09PkaqGJi9xTYOh26GsG+IwpdNqoWkCZZVhO7vDo5OnFwhFwbg3wHBczmYzzqbXbOOcqmqwTQt9W9JNAAnsbUkl1kTXC+7evslaKqnGLZy4ZpNu0W2DNK+o4goNFbWWKKWaQatNWzHIsgK7ZVLlCYf9LoYm4W180jRFEaDr2Awli7rIqNWCRlSZRCG66dA/OECpINysybOE+tkOq53reN4Cta/jxWuCqxVIHy8xLIuMtuPib9bUJxtUo8VYahM3oJYiGiqCovJhvKSqKq42c9I6p44LerdvUCEw1Ls4mYYdFNxodJxBi4vtgovljKrMmawWWG6Xq8sFrtHCVE3SKEVWDNKqpspS8iJFF2W81Zqmquk4LlJL5J1H71JUJbOrJXanTyOLrNdLaqFGtVwaUWJydsbQcij8cOehYmmga9xwRqR5Tq/XwW07lFWFbVrEYUTHcZmenLNYLjka7WGqCugCYlnjVGBbBqIq8vrRXZptwuz0gnG3z3a1wu44SM1ujKMaPP3wIxynhaiopFlB17bxfW/HHchhsfJZ+h7DboeszinLnLZl0m+3uH58QmlIFGHIRiowZJFBt4Mhq4g0WLaGPFvze0/eojXs00oE3vzUp9iuPKIyZxVsOFnP6LY72JJKX7exLIvkg1OCocKNwYjj0RBVAFs3aLIMSzVZezMMU6Xf+/9ZiWFeFmSFwDQtyGWJ1M+4f/8+ra6N1HcxFIsqzdlGAVcnZ/zYD32RrJFRFRNaBoKh8nRyTZjkWIpCuQmxJR2n1yNTJLxsQ+/oEMNu0WsNEEWBf/zVr3M9X
3ynmwFd4uzRyY55KQl88MF7yDLki4DpxYT/4D/6Ff7+f/l36PfbeGufL33xJ4jDiKasKOOcL3zhh5hczXDcLokscmu4z3sf/DHXyytSoWbuzznojej3+1QjFw2dKEqQ2ja1apBaGoEocp1sCaqM0+2KpCj5uX/nL5FbCl/+xtcIA58sSfmRH/8STiaDoDDeP8ZPIj7/5mfoWjr+ZIqfR0i6TpDX1EVJnaf0ez3efPMH+K3f+i2mJ1O6wxssK4GmkcjjmpiGydUUs9FxRBW7YzMPl/RMh6xvUcQ5N/r71HVBUhVcX0/48PISnwxBFtCigrKGtuMSUtIdDRjYLebrCXMhpqpLhoaDrplUlUpnNEBQZa6LkEqo2T844HLhkck6p6enlEWK2TFwuy55LRDmJZIsU9U5N+89YN6kON0eQ7vPpbglkiskQUZuuwhICJbOYr1CSjKq2QLRNNkfjPjwo4/Yhil2q810s0J1LIwIFllOWORYlkWa5mjaTvGHoSP3HFKhBk0kp+R6tSXPKxRJ4fLkKUQNl96KxN+idBxcp4cjaxR+sCuJ1RU0NFbrLb29IwzdoWd/XHYfRhHrpU8Q14h7Okm+4TrfctzpkJYNUVzhBRm3O0eossitW7eQahmlyvjgnXfpdboUSkWulwi2ylZXiC9D3MEAZzjAcE1mkzlJVdAZH1N4PnmxpWkEqqahbKnIUoNUNSSihGUbqIbEJohZxTmSbdDkJbJuUWsidVnRd9t4mxDJT4g9H1HXEWqBZtjh+voaMxYQcoFZGfNjX/oiTx6d4mcZoZyzXK/Iig21qmLdGuB2R6zXm90OIUnIpJqwLpinEZu1z/V0jaY6yG6LRRoTI1F4EYKtUpYSSRQhqxJJnbBpUnp9i7PNjPZwTIaILKmUDVSSBLLCOgxYhh6XixkX1zNiU8BwbUokHoxu83hyxqb20V2RXM4RRZnQsvj885/k+uIat9vjf/ntL2PqMlZa0tQZlibjpz52LbGeeySlyINPf5ahovFk+ZTZOsCrSgq1JBJ1NDFCShSEBjTh/wPZvSAIkiAI3xQE4bef9W8JgvBHgiA8FgThvxcEQX12XXvWf/zs9Zv/wjcvKwxRxKzBVXRuDveYPD4lXvvMLiaYpk6j7uhwg/0x/+s/+hpBvkWUKtJtSpBlNLXAoNNntlrz8osvkNcFL3/iVaokYdTr8+TRQ9aBR1xnYGkIYgXCx7exZVnSczrIrkOtKJxMZ/hpTh5nnKxn/Jmf+Vf49f/q17j/0gsAvPHGG8xmMxRNZb3acO/5B8iqSuZviDdLpDDm9OFDwqRkMNynqEoaSSSJCygEqvUWw9AYtbsIQYKlKLRFGSkpqEWB8ycnLB6f8RPf/2Msnk5YPrqgnng0WcGf+5mfRa3BuLuH4WhYjk4Rp8yuZzSyiu5YvP3uO7gHA7b5lrsvvcbzd58nSRIezS8oZZG0yOnZNlpRkKYxCCV1U3J+eUYhVDt13DJgP9No6pJ8vqJvWtRhhC0Z7PWGjDtddATEaueCZ6sO8/mSeTRDcRUWvsf5fMne7XtohouCTt8ZUlYZZZXiLWeEmzVdp01dNaw3K8omw1EtLNlk6PZJ/Jg8TEGKqcnI8prx+Car2ZQ37zyPvAm5SNb0coluJWNIO/dETdMgL3ciklGPvcObzKdTVGTu7B2BLXB69pi+6yLkJTgaitQglTVNmGLIKr3hgDyMMQyDqqlxOi3sdotMKHEUgX21hRQ3vLF/E1MAxzSQkmIXyfd6pKqMV2f4wYoojqnkBhURIS85vnHI1P94Wm/YcWirIl1NJF/F3Lv3PKEfEocZltGwyqfsH7nU3gWCLHF9fU1RFATbCFvesV87ZguyEtcxEcIQ2VE5VA20RqQlqgx7fUwk6mTF4cEBxbbA1RSy8xmb6YJRZ4DVcVDFhL3eIY4xQlehXCwxkdiKFXZZUcQRo9EIq9XGtHQqs8ExVApy6rreib8sg+t4zjqYY+cZf++/+3VUQSGKEnTNxZYVDtwjxCijXm8o43hHL4pS9twhkefTZAW9VgfDMHDaJo1YkuYJo70hbUGm02khWxoVOYahYZsGTVHhaBY9zcHMG+7197EzkSyKcS0TXVW4npxz3xriNuqzaFukU4u8/twDaqFmnUcc6j32ZZeebJHPfPRSIVuuqJsS3TFoWiY3b9/BlHQkUcPsdBiJFiPVpurbGKqKH57x9qNvcblY4DojiBLcRkKNKkaySpML6KqEruv/V9nud9P+ZSLxXwI++L/1/wbwN5umuQt4wF98dv0vAt6z63/z2bh/bhMEga1aISkQCwXTaEtESXs0wu53kcsSr0wxdYM4SpEqIE1pthF6sivrMSSFbRSRSTKT2RR/G/DVr36VYbfHcrFCNXTyMqNsapKioN0ZkXyHAyWPnOV6TV1WnJ9e8OTDx9RJTVRV3B8fcfr4Cd1bh2y3W+7dvbsTuhQFgqTw2Te/wMVkznsfPmSdbXn/6QdcPv6ImAKpFsm3KVIp0HZcFt6aLTX7vX0m4Yp14OPr8PTijKmQ4W0D/udf/wdMi5De3WOajk3VNvgnb3+TVIDhg7v0D/cpw4Szk1Na+33SPOFGe8ByvuLh/Jon3g6k8ft/+DUGkkaqylw8vSQvCjxiWrLJarEkoSBsMuoyp2kaGsBb+ztW58GAbZIyVyBzNXp7I5ZFjKApFEXF9WTG1FvR7rnUYcTY7bNaebjDHmm8JQ58dFFGKCEPI/SsYK/j8uH0MVmWUJY5m82a44NDovmG4+EBeRijCSLLYIOoKURlidVqU8sgoFJmGQIxReGjSyJn/ozWqEvHNMHWkQ56SK6JXYm7en1BxFt61ILIoyeP6XRapNsIUzHQy4aObRLlCYUioRcVTikiqQpizyEXK2bn55SagBSnpEHE8nrG5Oyc1A+oWzqXkUdqqqzqhKt6iymIuIMewcZnGwUESYRt2xy3ujRlQSbUdHUDNS04mV6h9Qcfew43cUJa1jTISLbE5PwpdZEgHtqsfFADjXIjUqoDgiAgCiLavT6NLKOaBlGWc3J5zuGdO8xPZwwPj1kGAcuJRzpdcJb6zDcbJpdXlFXCZOvRHx/gb9cs7BJBbEBXyP2ArnXE+elHTGaP2BseY3fbLFcrjEogkApagsp7jx7y4eUpUlOzjDY7R8OyYL6eEm48JKPNa7de4Lh/iD3Y44uf+wzugYtUZ0SzGakooHZ7NIZKJcoIusTl9QVtw0WvZDRJRkYgjWPyPOd6PuH+/btIskBRZIQbj81qSRhHaJqG53kkcUwSRUg1fDSfsC4TvvXkfcSeg4hAGkZML3daj4frCXXb5Gq1ZNjvMp/OePThQxRVQpRgViecT6f8weP3SARYm9BxbC5XU5S85vriEqmGUtd3CteqIm/p5KoI1PhZTNvaw3Z6OI6Dqkq09gdskoAoq9kWAcEqpagTwjCmSP5f9k4RBOEQ+Eng1571BeCHgd98NuTvsyPeA/z0sz7PXv+RZ+P/9PeXRI4rFUtRcGoBqWro6S7h5Ywju09Q5+zJBnFd4Edb7t85xNYsREHD
dEwMSyUTSkpK1DgDQUJTTJpKxHQ7hGlBUdaISGRRhinopN+BdA/Q1nRCtUCUwPc9YEcequOQlz//aa4nM9SwJPG37A9GCIJA1dS89MqrxGnBcDTm8NYN7u7d5PV7L3K2mbGdTymiDZnvMWy1CNcbKrlCSAs+XE0Zqx3abmsnkhF1Xn3jM3zmC2/y7/7cv82g1+eVT75BX5d5+2tfQ8pyCDK+8MIn8C+nFJqEpiucvfUBpBWn3pxKaqiCEFswuHfjHp/r3+NovM/mg0eIuowlKtyrDNI05fbdO8iyilALzyAMEiAjazr5PMSfrrjZGZKFPuNYxltt6Eom2yDCUlUUWcQyDOqspPfcEdf+gqPRHlbZIKkGdSOhaQaqZaBsc2bBhvcn59zojxFFkbIsuXHrDpfTKbVj8tHsGnO0K59TZHbRb56xWi3I0oiiKFBlGUMxabcGaLqJVknEUUoYRDRRiuu6JP4WqxKR2S1KCiJOvXPm6w46ZELBw5NHjNpDcm9LR9QwSwHPUVBbLpKoIBYSURRx++gG2zKjLEtEzcSwW1h2i363T7u26eoaUhpSZTVD1aXV6/PQu6KrKhhJglsJ5GlBZEps5ZoD1WITh2yEAr1qqM4/HolXRQ2igBcGmJ0BUZBwf++Yq8mEw30HeSiCU5FHK0ajEUdHRwT+lkaSUQSRIsswVINvv/0uqVxQxiFjXSM1GoSWjqYo2JnI/VdfwlJ6JEVJuPVwLBe7gqFkMp/PqXWRi9kT3MEeptmnzLZkVY1muTSSiqubeErBoNOnLxk0YkOWNmQV7LX7tPeG1FKDVEQ8np9QiAVKVnH26Jx1FNF22ghpQzYLWZ5dkvhLokWBJWsc9IZUKpzmG6xem1rcpXpEVaHVanHx9Jz59ZzrywmK02K5XGPJBnEQc+vG3V09umVRCgVOLfHqrfsYNYhZiWiINMDrr3+C64sFtimDH7KvuTRJjt80qJZLFqZIcUErrbn34B7dUqDddrmp2ARZxt3hEZN4i4VGJdZUCpimzn63zdPLS3TXJYoiKlXAsC2ShUcVx7S6LfKNT1ezcGWLyFvSHR9jtU1kWUYTPw6s+dPadxuJ/yrwK+xsbwF6wKZpmn+aj7gEDp79fQBcPJv8SsB/Nv6faYIg/LwgCN8QBOEbaZqxKVO8NGK+DWjUiuvVNUbX4WR+iVo0FEJDE8UYNZx/+D69lktCxujWTcogYmhZHDktCr2klBssTUYTBB5dXTJ02ohpRsvWOTwY0KgltmHj7jkf+6BFVOCmKpsgJQh29aWr1TU/+oUv8j/9g99gPO7gFz4/9NnP7nKjksgbz71MGKV0Dse0+yP2O4e8+843+JP/44/YN3qURc1gOERoQKpFWk4boVLwMh85T9hUKR0KjtwjRrduMmq5/OCX/jXc515kb3wDfV3wzW+/w9rzCYKQP/ev/wxJHiEqIpPVgn6nT5am6IrKwvfZO77D5174DH1RZbqeEkgZyzRkW+csy4BUh1m8pagExjdu4Z/PCNNs52xXiigVpIXPY++amIYnm2u63S7X/2d7bx5rW5bX933Wnoezz3zOnYc31auxq7qo7qbpbowh4GawE6BRQMhBBAtHQRGWURKjSFHnH0t2wOBEDoljDFZihwDBhGDHDM1gxQ3dXV3dNb6qV2+483Dms/fZ87Dyx7ltF12ddAUI7z3rfqSjs9dvb139vvuu/dt7r7PW7xfPcQyN3KowPZ1zGbNIMzzNZRCH3D8+QeYVb5zucxLOaHQ3EZVKHi8IFmOG+QLNrnGt3iUvc0xZp3AUyskCSzVZjM55cnudfHiGVxZUWUCRLdPh3ti9TlN3cHSdqozJwpTD269ycHbErbduk6uw2mnz5GPX0YOUEklYJviTCXbDxquZSCWl0uBo/xhtltHsr+InCe0rm+S2wiwOaOZQBAGaLqjUEr1Z58T38ZwGpa7T0xQ8JaPXrBMGAa2VOpmtYdkOqq5QiYKyyrjZ2sBudUgMlZWr24gsw1SgGamESUW93qTfbOK0XLo7765srgmFUIdn+5ukkU/TdDiaDGnYDqfDIeY45c7JEYVncjpbcO/ogFIsUJWCTAhc1wSZU3Ms6rZHGERMKVBQycIcUzUpXI2jt++w6uoISxIWIXlZoGYKNJzl4pwK1ptNhmd7VFXI3cN9Wpt9rq9vkI4mpH4KscDSBLkiyaOEm/1NANrNJvlsQavZQ2DS8nokRY5f+az0+xiVSTwLoANrG+uEWYKi2HR6dUahzzzJCMucRIQoRUZW5ORlgWvoWFIhLVKa9Rq6ojIMTrh28ymiYMrRndscHZ5yPh2zurOOWkqcXovBbIbrepyPjxHUqdKKz9+/BQ0H29Ao05BIzwnyhDVd53h8ssxg2LaJtZKjwRkzSkwURvEcr24zGE9Y667x5Dc+RzKKaJsec3/K0WxELVXQUgmVQC0F5SKmbpqgSE5PT3FaDpZl0Op6pJVGGpzgWC5xCpr3pzjFUAjxHcBASvn59/xX3wNSyr8vpXxBSvmCoRsYVo3Qj6k3OrTUGtudNdSs5ObODompEKsStGUJNtOrczieYPc7vPjiiziqxVkQ4EcRMq04ms1IbYNJGuBoCqWhI2yXstQ4ORrjKA2C+Tnp4ehdfjWbddS6watvv0FW5Bi6Rb+3Tq5Jdja3SOOEztoK8zjA1U0219ZxOk1W211aQsWh4jd/65+RVAWKbTLPQoSpEyNxXZcqi0nykCrP2OivstLvUuUZqeZy+85bPPPEU/SaO/zkf//fcnZ0yDSacuv4Dm/fv0cQhXz3v/8JTmdDjmXE+fEJq+0u8yRCmiZZkqMlBS9+8bPcO7zDxke/hjtvvs2towMWBwNmeUgcpei2g3QM/GDEzRtXWCwWZEm6nAOuq+QqVIbB4fEpjmKSpznzKMWsmQxnI9xJgjnP0acxTz/2OIsipd1oIlDpeU1anSZpWTA8H2C4NkEcYTsOpSoIhmP67Q5pVTApYuxMMhcVwjERjSav3Nujtb5FUmrkaGgJ9BWP2ck59ZUuIlVwnTaZEGSaRdOrc2Vne1nGLQx5+fyAcV3FrjsMqhiv22EwmTDy50yikH6tznq/x9CtSJMFZ+NzFv6CmtWg3eoxTRPMuouqayRRzGazwc2tDfLFHNdQiKIK0/AIphH91ir+6RClWJZtK6oSx3KYDMcoYvma7w/mlElFlhbUzBpmVxCJBabtYlp1PvA138BweP6uflgYKutuk/vVgk3TIy5z5CLDyVSanS6LquSp3ccw5hmqrpDkCTWvSZjkfPCF59ja2iHNKurNLvNwQaSDmkmUoqIoCoSpYnUalIrCy/4QERUY0sAtVHqeS5JGUEimaUJZLldVe/U2/e1t5nsn3Ll/h9ZmHy3L6eo17EphZ32TNM+YR2Pq/Q6v3LrPSq1PFE2X47xZhqNZmJXCnaN98jylvdIjC1NKE7x+C8U1mKcLXNNg6s8hK+iqy0LTtmnhWDZplFLpJgUC1bTIJezsXgEkNcdj9/oNOqlGcj4jGi2wc40iyQhlwUkZsvvcU2TliMhJearWpxsbLCq
FzKszmcd0RI0IFceoY+g1ZklBzbLJ4wTP8zgdnJOWBX23g6KAgeD2Z1+i3qpxcnJAs1kniiKsjsc8DlENnUarzma7R5gm5KpACrDMGkWUcXx8TGtzFaFoHO4dosiCLErecyx9L0/iHwH+khBiD/gFlsMofxdoCiG+lAVxEzi+2D4GtgAu9jeArzAh+9+gaQqZTKnZBkJJuXN+yuF0zJycYR5RBhEOKnmWYTU9JlGEWqhYYcX1q7tQCaRlIlUNmVVccdowC2jWPLI8YRaOUYyKk8EBlqkSzidIKoqa/S5fwpnP/vkJWlmiliWDozO+/7u+l3/+O7+FEII0itna3MGu15ifz6g7TaKi4N7RAaenx/z+v/wUmqtQs1yElBiaQpkWLAY+QZ4wykNszeDG1hb+ZEqQpmy2epwdDmipgo1al5Ef82N/9a9yNjzBXRRklUSXgu/8+LdjVYLN/ip2XPLMM89Q83Nm947oVCZKWrHutfjYzk3yMiP87Fu0NJtrbgezUWMbB+V4TnNW0vSXyfIbjQaqgPc9/ST9RhtNFctCwJWCjkbuB2y0e2hSRYtz1np90n6dyNWwTJ1XXnuZQTBhVbVQFymRUmBUElFWWKVgvghQPJsqy9E8h53uKi++8gp+HGMkOdNwgSU19LLECEJ2+31m8ymGZxDrKnrdA01nvFhw++SI5maHIA4xtZLNTp+uW0cvJHpVYSB4rNbj9M27uIXAtk2iICILEp64/ji2YZOHMa+f77Ol1fBMG83UcFULXTXYHw6Ii4wwTWk2mxRpwetfvIXMFWy9huc1qSy4e75HZcF4MaYUCrrtEFcVtW6XspDomsnJyRGVLFjpr3E0GWOtdCmlIF6UNNwWyWKOLks+869+m07nXS+qrJoNDicjHKlz63CPlmbQaHeIJMh5wubVq8RhQqGqmEnOkzs3ODs5R5gOL7/0BU5OzqhQOD06ZnV1nTBJMYS6LMFngmNZBGcj6t02fbOOF0GS5pxWKUrbISpzDKlgVxq9/ir9Tp/xcMIw8BH9BkbNYTgcQtOlv7nOJAzZHw6ob6zRMR0sBF6tzSSKmZIyyiNKHeIy5+BsiNRVXnj/cwRpzBNXH2drc5UkmCHyEllW2G5tuQQ9zkjGAfM0ZeBPmYYBuawIk5TqInq1O01OBlPu7d9ByxRCWTGvF9Q3miyqBXtyRh5kiLLCzEvMUUzu1Vlx+9yZnBC5CdYooKZqGDWdo3TI0ck+HcdiMThHCxbEfoRtu1AJTMshqyT3Dg7JFMks9rENk3E4QTNUbNulqGBtpUeVFMzOh1RJwq3795G6iswrzFyQlhJF1fGaDYJkgdPt0O12sch5bG3nPYTmJV81iEspf1xKuSml3AW+F/gdKeX3A78LfOLisB8A/veL7V+7aHOx/3eklO+ug/YO8rIgrqDl9ciSEqvpUm83KEpJGC7r4QVBQKPZ5Hh4DrogCAJWVlY4m54z8MfoUmJZFlarxng+pGU79AybOEzY6W5iqjZXrlyj1WmCqKj31rDTd89OibOc0f0zFEUBRbBzbYejwSlNxyGRJZs7uyiZJAlSrj/+NIbbotnq8sEPf4gvvP4qru0QDecEQUCSJwwmy/tXv9lFNXSMeg1Vd7k3GmB7TeSs4CANUHs2kZbxc//o57m50+FHv/N7eOUP/4CT+0eYlofTrPO7//L3yeMStTJo1DxeOd+n6NZ47iMfItEqhiKm9/7HeH3/LtRtjmsFymod0XaYRT50XWpPX2NPxkSOQVVq7N0/QAG2N9bZ2thAypIyT9EUnUQpOEvnHAVj0FUWlklqGlhBxuz+CVqzhlercaO5wu/de43Hb95k6I9RqgJHN0lMhZrQ0Wcxa5sblOcz7ixGXNnZYT4PiBsaG80uoumRlstX/TTN8OcBruGgyIJUqyibJs2tNWqOSxLE6LpBoRbcPT3jLJyS6RLFtZiEPq8PT+h2OhwFE8o8o6TEtA1eeeMVpAbdK9s8bfY4LxKivMCsSuytLtPMZ7PuUded5bzoyTLpVufKOufhlMJRWVQ5sR9xZXOXXqePrASTYI6Uy5usXkgOzw6RGriNJgiVuVnSdBweb/QYDc7QLJUg8RnOpxyNzklkhRDvfnXeH52hGQbD4ZD1zjrH8Zx6q04WJ/TXVjk+PaUKE2Klwnc07g9OsF0HU1cphUat1QVF0Gp7TM+H9A2XWZpw7/gQFMli5tN2Pe7vH9IUGnnL5MpjVzFrFqcznzTNSZIEpcw4OtxnFs7p97usCpN45JPqOkqvQxJmDAMfaWpkKhwPz9EbHoPZhCid0V1tQVpi12vkZcl05rNx5Rq+7/P65z9PEEecHZ7yuT/8DG2vhW27KIpGri1r2sYyQ+m7dFt9CTTVJQAAIABJREFULEWj7dRoGC6NmkuVJKx2W4g8J5tEvO+pm+zPxrQqjVazQ8uu42Lh6C5FmlMFMcQZ08kcfR4yH0/wrDp6IhB5vlxdGhXs7DzG86tX8CqVqMrZuLqLYVsoikJvpY/v+6gI+hsrxIuAKzu7DCY+lutQrzeZz0J6vTVOT48xdYNOt8sknFAZKo5mIZKcPM/ZO77HxtUNhBDUNYfKT1AVBaWuMBZ/NkUh/nPgrwsh7rAc8/7ZC/vPAp0L+18H/sZXdUJobLV6FOmyRp+WlgTjZcEGmYUMkoD/+Mf+Gm+NTyhVQY06tbrK7YO72IqBaugQxARpzCxYUF/pMi1iJrMp640Wo8k54+mIvbfvMp1McPstprMxufrui2ccTCnTBXoJx+M53/EN38pv/PZv0GnUKMOQ1abLVk3D0+s4TY96TedKo8kv/aN/SKIXlEJCzUI3TXJFw+mukJUKd86O0MKM2kSgJgXtWhO9kNQ9mw8/+Qxy6NNtbyGrlCeffpLHH3+M6fkQsdIgnU5p97qEZUauVhxOT6mEwpbTYXh6xkuf/gwvfOg5ttttGoVCJgRpWTF4+xilFBydnWPoDofTIWuOx/D4mDSL+cT3fhc/8fd+GlXXEZrJttvGsx1KQyUzwEor3r95jbbjsWpATaYki5BYSKyazWgwJa4K7k/OeKKscTAZYAmDMF9O6VtMJ/ik5JbBm2/dJSmgb3tkisZmd40qjFikMdloQLfewBUuda3G9tYmx7MjvLGKnmTkqc/geB99XjEfjqkpDiudK/TX1xCLAiMD1joouaChmuT+Alc1uLZ+DUVTaegWq5sb1ELJm7duIRo6fUoaZU4oK0w/QUkyPKHTN0yGszl5WqAkCSQhcuET+wsWs5zVG1vMo4yz0xF5VFDDxcgFulISFAHPPPMM4/GcRqNDkcREixnRYsGd8YBcVmyvP0EuYXdrlcL3yYcxRR6/qx9u1us0LYtIrZhUOZZqsnd2QKnGnExH6J7DwD8nn/mYi4iGYVPOQgx/ii1KFsf7dOs1JkmIYZlEScI3fdM3YdoWbdMjmfrLnPyuQ1wIlFJlPJ1Qzny0osLOSgJ/hq24GJ7J1ZUNzs+HzIMFUi2pZRViFtGxDcKzA+rtHvNowY5iMlWW1bDWbIc333iLjc5VevU6WTin03LIkx
m1eptSVSnKjE67zuq1LcKzAWfnB0ivQpUVlBn1msdqbxXdrkjziCxLOFNiRucHNKqK/ZMDZvGM2orL2XDKxkqfUZYwXwRIDZJFgJOWFE5GLGJU1wBXp7QU9JrD1voGmqVxJkqGx6eYuuBo/23uBufcn5zSrTe5c+cOimGSBCGz0RDDMNClShDMcVodjs4OMCI4Hw25d7bHLJsgkjnd9gphHDEI57i1JqahICyJ6ik0ui6e1eDo/JQqTVHmGfN8TlmmhIuCcPLuod7/x/j5no8EpJS/J6X8jovte1LKD0opr0spv0dKmV7Yk4v29Yv9977a3xVI8jAkU5cnPbMFG+0OwXhK12pxU23zk3/zv2ZLreNhklEidJ2srCgVcC/m7YaLBe1anbPBGR3NpvAcfFOhUgVOzUWoCpPJhDAMadUshPrumhjHkyHnSUxSFXhujVRUZLIkz0uaqkPNbTLXNSI75pV/9fv89m/8H/xPv/lLlJaGm4G30qWFQZjGNFSd6niZRra31UXpO4yMmMc+9j5ee/s1nv3YCyQ6vPTKq2w1m9zfv8e3/MWP015t8+n9N3nh6z/C7dtvEhUZcSHpNrpEQx+lVNFrDtPplHEaUlQlv/+5zzGMQ/6vL75Eq9UhHcz5wFNPU/pzRBgjw4hVvc4sCSnThPODU37gL/+HnBwdsbm6QhYvGCcLrj9+kzzNmJ4P+ZH/5D9iGM1wSsH+bEiZaOipRpkp6FYDr+YgTqbYisagZ5BMpqSLgJtPPkFQ5LQcB0dXUaqMbsPEcRx0XWc4XlZvt4WGIRVkzeK1115npsSkasno5Jwts0Pvepv1lRXcXGOrv4a76lK4GoE/YXR8TLjwafa75LoCByNKU8PzPFY3N6nygtPxObZmMUgi0jBCadkgdPx5ijQbrDz1Phpeg9PxiIUGZ6T4Ssnm5iatTptElRh6lygRiLLCNUuy4wEegjJb8Oz7HyerIqbJFKEbqKnkjdN9Gp0mpwcHFKpgu7NG2/GQSUbTqeFWEZZUGEtB1evQf2ybefTuIB6UBTgmtTDHms/xtQrFrlModeo1D0cz2FzfoLe5SZCEJGXK9s3HkJbF+GRI1a4ziULqqQK2QlZlvPyFlzBVBV0olLLiykXBkEJIFFUliiLcQuAqFuvrmxzPRtyZHTM7GuHnGc2tPrJmINKKxFLxNB3Lc8HSGJ0do0c5zXaf/TfvUmUVu9dvsLG7zen5MZEfUUqbdmuDLEjQRYVmGGw7HWZFgeErVP0+vd4O4ryizJc//ubpsgLU3lv77G5ep5Q6WZBjul0Mp0NcGXitTXTD4d7ePlGYsLK2xjOrO2R+jHAtckOha6+iVQ5FpTNYhHQcDzOXvP3WbYZZCIZGZ6WPH4TkaYZtLxOeRUlCt9snTJJl5lHdwKu72LaJKQT5aE7kR2SmhpgW/DtPfpRdd4PFJGM0HtPw6qx1+4g8p5hlxH5BURj4cUnLraEWEr1mc5TOWe+tUeVgaCYq6nuOyw/Fik2BZDodMw18Oq023RTuJVO2233u+UOOrYLd3V0qx0DVNYo8Ra3AlBrxLFwWuF0sqNkOWZLSrDXoPH2d5GTEiloDoaJoBo1Gi3anx3g8ZjIIUNV3j/KIFPpGk8liwTd88MN85uUXWV1fwdMdalsrDBYLbOlgZJJK02i4LTbNDhvNLjElp2/dYxQHmIagMgvsDQ/NyPEHAZbqMDodIoKMr2mt8alf/2f4MmX12g7WVo9mzeVv/sxPsdXu8dT7n2Xvztt83TPPcm17l+H5MY26SZ7N0JQC4gxLKrTrDbb6a2iFRBYl/fU13E4Ty6sTBlNWW112N7fottrsrG1x7/AuNddlpb3KL/7iL2IZJs89/QxN2yUXktX1NUxF49u/+S/w9tF9xrMJc62ihkHhqVQyQU1C3CJDa7u4O6v4RYY+T6ivdTE9ly+8+HlEkmPoNRTNRnNcMgRJEjEcnrOzs8VoMqS51Wfgz5n7Cz74zPOYQhIH/rLGZxQhc5+D8SkTtWAYLijCCA+Va7tbJErOXBQwXvDNH/g6zoMZtmWRlxmn4zE1YTJJfDSpY3oujmlxNDpnpWZhNm2CZMH+rTfI85yOW6eeC9alRb3VJBrPiOYBWRRDEZCrBZ7bxB8tGKUZmWpg2B5H945wZImjGghdpzAMhJ9QJDGqZ5JoMBiP8EOfhJwoifnsmy8xlimT28cok5iRjLm+duVd/dAzbeZ3DilX6kSOSz3TqWsqRTJhOhsjgxhRVLx+9zbrXodsEXF8fMxwOsNut9AyyXw8RdQdnFyy21+hFJLAACOt6Kyv8ulXv4CjGcyLiHEwx61U5nnGSq3NwZ09am6dJ596jJZdx18EJL5Py7bRLBstzpkoKfcODhGOThDM0QvJNErYbfVJ5sGy7qwqyIM5oT8iy2JmsxGtRg2yjF6twZySs7cPmGc+ZbIgGo1w6x42KvPRDF0oWIrBRz70tYTzGYpS0dB1dAH75ZxrtSZ6EDEajbj52BN0u11Ojo74wy9+nlSRSFVja2WDmzcfQ9UFqlZh6oJYVDgND8cw0SpQDZUkS+l0ejhOHU3RsRyHKEnxfR/LspCAoiyTk02nU9aaPaoopaCif/0qO1fXefWLnyXNQtZ212g0GkxnY4LZlJpl0nZsPF3BVCRKUTCJ5iRZytnJKQ3HJpgFrG9tUpYSpfx/nZX9R3gogriqqoynM/qtFWZhQtRwaSQK1G2e3b1GOVowLzIyP6aqIKkyskpSKTrd1TVIcq5u76AoClGeIgqNlz71B3S21zmOR2iFhKygzAvSNEXRdDQb+q3Gu3wpTJ1QkZRInti+yitvvA5ViV8lrLfaKEXFLE2YjH1ModLsNImVgluH9yktjcxU2NjapIwFwTBBxgayqiFsDUWBr/9zH2WQ+vzFT3w33/zch3jabHHw4kt8+vA2imFSL3U+9LGPUU4jqiSh0fDYu3+fPIiQouJDH/s6siyj2fDIKMiSmDSL0dsuI39C4QfkoylVmnDv7JTT8ZhFkmLXW5wkC8bHpxweHfG3/85P8z/+3D+g0WqSJBmaalCqgtFgiFZUXFvZQB0GdJtt8DO0Xoc1LKJ0gbXWZPWp64z29qmpBopUmJIRjRcYQmd7e5uoSkmTBZqm4Ps+pm6RJMs53PcP9pGK4HRwTsOro5WSL9y5vaxmIwV+EJBSMpqENEqVDcVmRbfx5yGdzXXu7N1nrdZmpbIoOy7/56d/n0a7RaRU5HFCXTEQDYfaoiDMIjqayyyO6dstChTuHJ+gXFy8HbuxzB3v1ZiIclkT0raYlgk1x2V3rUetUSMpUjrbq3i2iaJW6KZBEIV0drYoS1BTQZTlbFAnKiqmqkRLBVGa4bWb6KYGlcCptbA9l5oi6dkulaJycLT/rn64+9wzy/zfKDx18zoogrIssSyLo7t38ZWSIAzZtTssNJ32ap9qEbLrtQhmc+6fH9HoeHRsi3sLn1FWMJvMWXFqbD/7BNF4xqrl0VrpcT4dcuX6FXzfR2/WGKUzav0OpuPi6Q5VltLrdYiCBamqYaLRWu8RHpyh6
hqDsxFms0FlKozyBaFdce3pGyhBwmLiU9tYZTBdcGN3g9l8RKRoKPUaZ7HP4P4+je0VbG1ZKnFhSRK15P74BNGySR2FSK+4s3dImpQ4bgNHqmSyJBiPuTvc487+G6yu9snSkNl0CGXK+vVrhGHMYjLj9r27vHp2h87mClohaecqW+sb7B8cUCmClV4f0uWCPFlWFHmFYRgomo5hmvR6K4xPT3CNZcbQ0WiGUA1e3LuHaNWpuw6LvUNyIRCuhZ/OSZM5YTqn3vQYjCc0uytERsVCrUgNhdxQlmkwbB274WFbGnmVMpmNScuYSnnEKvukZUFvewOmMRu1FotpwKrjcPLWbW7fuUuz1yCbz2h36gTTEXXHxTAMbFNSLQJwdY5Oj8jzlKxIKcucmu2g5hWyhEbPI8sjFFNQyIJus46Cyu29rzDSE0yJpc81o0mhFrQ1hTqCegFX13eIiowsS8iEoCwvso35Aa3NPivzDON0xOdufY5zJ8F73ybb79vlr/yV7+N7rj2PPJxz/7U3+cLvfYb/9Of/O2YI/vDV1/jox7+Z3cIkjHxis+SNO29wGp/yvsefQFcN5r5Ps9nm/U89yxuHe7TqLWajAaJeAzSkJunGDjvOOnqjvnw9FCpNx6aoCQIlZ2tnm9F0hGV6ZFHK63tv4VgGf+7DHyLPUzTXw9U0jDTnmeee4SCe4IcReZlTxCH6POSl0QEr21fJC5U3Xn2LKJcM84iGZbGOh6hZmEJwPDjmxuoKuVQwpIVjufiTMUFLJz8fUe83UUrJmuqSFylFWXLtiRs4umSn1UZHI80zvHqfUgoqSqaLKR3XITw4xTEtEn9GHs2IogWGIvDvHlEN5lSayTwOGR8esrq1iuJqTPwhDdcmtCqkpvPY+jaObaKttZjGc6o4Rg9ieuurCCEwpeTJ1gpFmnM6C9m2mziaYD4ZU9dtavly8Vh9a503D/bobK9SqRKnqNBtlXrdoZGXtA2DjuUwHE+RaJR6hVtvYEwrYgWmVUp5uqCqvfsyvPXZT7PSaRFGEXfvHRBLSZCX2LU6tWYHRSrEUclcKRguBoTzOa1ui8p0sYySummQlgoTqeMkFXqast1ocvzqHnfunLEQCWmSMx0d8y03nufWy6/zzPPPYxYKoyxHR2en1SeJFwQOy0yiSYo/GhHJjIO7d7h6ZRsrzvFW1nF0FzMTbEkbOc8Y7J/hdRv0V5rYecrNnQ3uv3nIan2V4ShAiRJMr0HPajEcjknTGJkn1IVKFCXEfgxJRcdooAYlLbtGo1XHDyYM4hmWpdGpt2k3N/jgRz7O6f4hilSw3RqZojE7PqCx2kImGR/88x9hejBkb++IDFiY8PqLX6Cz2mdRlYwmUzTLZrvdJysLClFieSaKVqfR7nBw9z6droflGrzy+itcf2wHoZXYRkWz12FjZQtsyfhsRKXbZIbDcRiSzTIs02Fnc4v7+/tkuSBbRLiFpJErNG2bbLyg3ewwCUPCNCPNTbRIJ4gX7zl+qp/85Cf/2MH3T4uf+Ft/+5ONdpPW5hpus4aRSoYkeIaFZjskQYBpGPhRSFTm2LrKbO5jmBYSQRDG9Np9JuMpUqiEChiWga1bzM8GKLqJrBQUYaBpOlmWY9sdyjLntU98zx/xxfnJn0BoBs889gRlw+Ltu7fRTRNfq8hkhePVcC2LD6xf4d74lFhWbLb6bD1+hRdeeJ6vff5reEppMb91wE6jSzqP+Mf/9FeZ5wG5LpGawG3UuLl+hcl8yu4TN3jtpZfIc1A0je2NLTzbpWba1OtNXnn7NkJR6a6t0Gy1KKucMs8JpyNkzeSxZx6DIuUkmaK5KmoQoRYlhVJRCEG9svnQBz5KbBj8wad+k6PjY37+Z3+eH/zLP8i1rU3qros/9UFKKl1jvb9KOQ252V5FXbGJggjheiipRmejTzCeYkpBMB7x2JXrDAbn+FkMdRuzLKkMFV0oJIrAVVUOh8e0V3rMhkOeMnqkpoKYh2iWQVBlSEXgahbBYEyqCSJZkukKju1yMD9HBRRVIxOCw9EIr9NgeDag/8QNoqzEQCWRFaqi0ep0eOzpG0wmY6okZ2N7m2Du0253KNKMNEoYn5+j6RqKY/P2wT7dZou0yNEUFZlmjMIFUtfwiwzVtrAsm8UiRLNtUFWmRknmL2jqLrPxiK3dLfbv79Fx6vgnQ2TTQzeXyaEQkhKFIq8wDQtVESxkCkHB45vbkOdUhSRH8tp3ffcf6YfP/sqvcFLEtOotpr6Pogi0NCceTrAbdZI0IUszdq5eIR352BlUNRMEZNLBUHWi2QxXSlzbYTCf4ZkOtFwm54d4nTUCOUErHU7GAxqdFvfv3mPoT1G1DFWBqpTs751clCDMSJKMmutxtphy9fo1JrM5hVSYxD6mYdBfXyGhICkzKgVM22Y6nXF0fIxegL3SYXFwRGYqICVmCVXLRqsqNNeiFBZ5VjIJhqyv9ZBFShpHJFXB6OSYKE0xvBrS0EmDktX1VQ6P9qjKlKwqkEjmwQJVEegNG6kKSCvOb+3R31ghzXKCmY8soeY5tG2PPEyYTCdERYZpmmRCkmUZ8zAliU7oeSusbKzgxxE1r0FelEzmAVIIal4dUzE42L9La7PHZLJgfX2dmqLQNk021jeZ+j5lVVFVglKG1JsNxpMFuuHhdT3KsiLxYwxFxdZMVLVCGuB1urz+6sunn/zkJ//+V4uf4qvM/vszodfvy3/v27+Nch7S2F5jeHZOEIWktoaTa9R7Nc5OTlnrrpIlOVkZo6sG4SLGdh1s22Y+nS0L4VomRVGxvrrC22+/zer6CmkW4Xl15tMZ8/mcbrvFPIjor/b4mb/3Vc/RJZf8mfPDP/hDODkkaslgEWLaBqurfY4Pj7hx5SoLP2Q8n1FVYLbrWIucWCmopj4LB7Zbq0zzFCXJ8QybQRhg5RDVFLZ2Hmfx1mvMwozOTocgyjGlgLxcphnYvcZZMEa1NAhCZkHK2toa4/GYGzdu8Pq9t2h6TeJFiGs4xOWCKIqotdrcPz3iam+FYDZnMpry+OOPMxwO2e6ssBdPac0jxNYaw9MhV5+8ycEXX6O9s8XR/gEra7tsrXZ56ZU/pNnqMRrPaXgNkiDGapt0621Oj44Rmo7n2dTaTdJFzgvPfg2/8Ou/QFd3qGwdigolyal1W2SloOl4nA1OWGQJrm5j2g5+OkeU0Km3iYqEpmIyyEPMpARdRW3YrHlbhLMTgrLClstUu5a1TIk9Go2o76yxmMzQFFA8HTsxORme0+zU6XUavLm/h2s4qMLAtRySPMYPJgghMEybpmmBahBlOaqhUugK/r17dLc2kbOSf/LPf+nzUsoXvlpfeSiGU4os43wxgXaNo5MBmQpbvVWe6O5g1VzSi5qJWbZceqvoNopu4LouZZkTRwtM2yArUqBCUyXHp0eUZPjBDH8RMByMcV2Pa7vXKIpl/mp/PH3Q0i+55CuiGDppXUeGIc26hyzy5ZBBWjE4OydPYqQqUIXESRMUBSokz3zkBcxYME5T9NECxdVIshzXsDBNk8IPOH/lJQaaht0yGJ8G
pOcT/MGYsUxpb62xPzphfHRMfDakNC2EIpn7U6aTAa+/9kUszeTt27exDZNuw2EuM0pFZXx0zo3GBuOJT5JV9NZWicuUaeYzmkxZsTwqVefsfApVwejlNwm7NaL9IQ2nzsnpfc5O96GERZLiILBMg3G5YDSfs3/vPkpRYCPp9VZ47bXXmM2m/OZv/Qs+sv0k4yKmp9iINKXeW2WQpBRBxN7BHn64QLd0FB0W8YzCD0mzjHmeIIuS++GYK1vbzJWCwWKODA3u7X2BOJeMhkN8kdHd3eR4PibWSmprbaKFTyoKWo0m2gKOh0cUOkzDkKOzMU+2tpGTCNc08GOfeTDDVDQaTo0iSZkFPgiBpmlkcUZ2MmX3qfcxPxkx0977is2H4km81WnLf/db/wJhlpBnJVmR02o0Ccdj+utr5EpFnuZoio7v+zz+xBPs3d4DT6AkObE0ScMFrZqLUhQ0HQdNU1lUCqJYZmRTkAwjH9up4WgWSZaS5AlhEmI6Nr/8T375q/p5ySX/f/Ot3/Od1KSGVa9jKQq6pRD4IStr6wzPTpmEAZaiYSgCS2/i+3OaHZd5mFD3zGVRB8chm84x6038eYhlqESmZMVtkUdzZCKRhsF0McNUFXZWlwm+ptMphazQqgSv00LGGkLJWcwSSlehyBJadp0UFSXPMF2LmT9lPh6xvb3LPIwoK9CylI2rVzm/s8cgm/LM2g1O0hDDNCnjlMnCZ31lnazMyPMFvdVtTt68T6kIKsCr1/CLFLcsKQyVIkn5wOPv5/de/gx1Q6O51uVsOqetNzBMweRsQGWZOAhmRcKV9XX2989wnSaLxQjHtPBsh2m0wNJ0TFNnXqbLyQ4CHNMhLSs81SAuEtxaDdeySZKM0+mAXqtJPolJdQGiwKoKhOvhhylOpRDmKd1mCyrJ/ekpHcelZ3skVUEuYT6fk6iSplZHMyENfQodHK1NuJgiSVA1G1lpFLrCbrvB6TTn1371f350nsRlJUnTnKoEy6qxtbJBVpR4K32yJMfSTLK4pKwUmq0Ot27dwjAMZoGPbdvIIqHfWy5d1u0aJ8mCvfmIURGTOtqyxFiyoFQFORWLKMS2dUxNp1tv4yrmAz4Dl1yyxJAKmmVQVcuZNpoUxHHM8PxsWY7N9fCDGYqu4S8CRM0iTGIcx2IyGdPstFFNg7XVVeJFiK6qUKSoouT07IwgytAci1xIPvh1H6Hh1Xnr3h2m82UBBk1V6bT7+POQaRAwWURobY8yz5eVhtKIIAhQVZVgMafV6rC5c5Oz0RSJQpylrK9vcnh4ytPve55mt4+fZViqyWp/jUWcsLq5RTQPUBUFmRXcu/02tmWgqdBuelRVgRACygopJa1Wixe/+CKWYxOHGWeHQ0SYEg+nVEGMWVZoqVwm9tItppMAy1rOhmo2m6iGzmg2BVXBsHSOz88oFjkrzRUMRQdRIcnRDYFj2eRhyvHpEYuFT7vdJktSCk/FKkDJJO3dHdKTORqSosyQGsxmE6aLGU/tXsc1HPw0Zxr4lLLAMS26tQaqLsiSmLrt0jA8HCGW+7orCHQsTcOzTU4Oj8gT/z33mYfiSVwIEQBvPWg//pToAu99udXDzaWWh5NLLQ8nf9padqSU7042/2W8e8nig+Gt9/La8CgghHjxUsvDx6WWh5NLLX9yHorhlEsuueSSS/54XAbxSy655JJHmIcliP/bNFn7UsvDyaWWh5NLLX9CHoofNi+55JJLLvnj8bA8iV9yySWXXPLH4IEHcSHEx4UQbwkh7gghvmoBiQeNEOIfCiEGQojX3mFrCyF+Swjx9sV368IuhBD/zYW2V4QQzz84z9+NEGJLCPG7Qog3hBCvCyF+9ML+yOkRQlhCiM8KIV6+0PJfXdivCCE+c+Hz/yqEMC7s5kX7zsX+3Qfp/5cjhFCFEF8QQvz6RftR1bEnhHhVCPFFIcSLF7ZHrn8BCCGaQohfFkK8KYS4JYT48MOg5YEGcSGECvw94FuBJ4HvE0I8+SB9eg/8PPDxL7P9DeBTUsobwKf4N9WMvhW4cfH5YeBn/ox8fK8UwI9JKZ8Evhb4kYvz/yjqSYFvlFI+CzwHfFwI8bXA3wJ+Skp5HZgCP3Rx/A8B0wv7T10c9zDxo8Ctd7QfVR0Af15K+dw7pt89iv0LlrWF/4WU8nHgWZb/nwevRUr5wD7Ah4HfeEf7x4Eff5A+vUe/d4HX3tF+C1i72F5jOe8d4H8Avu8rHfcwfljWSf3mR10P4AAvAR9iufhC+/L+BvwG8OGLbe3iOPGgfb/wZ5NlQPhG4NcB8SjquPBpD+h+me2R618sC77f//Jz+zBoedDDKRvA4TvaRxe2R40VKeXpxfYZsHKx/cjou3gNfz/wGR5RPRdDEF8EBsBvAXeBmZTySxWx3+nvv9ZysX/Oslbsw8BPA/8Z8KXKAB0eTR0AEvhNIcTnhRA/fGF7FPvXFWAI/NzFMNc/EEK4PARaHnQQ/7cOubztPlJTfoQQNeB/A/6alPKPJG14lPRIKUsp5XMsn2Q/CDz+gF36/4wQ4juAgZTy8w/alz8lPiqlfJ7l8MKPCCG+/p07H6H+pQHPAz8jpXw/EPJlReAflJYHHcSPga13tDcvbI8a50KINYCL78GF/aHXJ4TQWQbwfyw0zj5+AAAB0klEQVSl/JUL8yOrB0BKOQN+l+WwQ1MI8aX0Eu/0919rudjfAMZ/xq5+JT4C/CUhxB7wCyyHVP4uj54OAKSUxxffA+Cfsry5Por96wg4klJ+5qL9yyyD+gPX8qCD+OeAGxe/vBvA9wK/9oB9+uPwa8APXGz/AMux5S/Z/4OLX6q/Fpi/49XrgSOEEMDPAreklH/nHbseOT1CiJ4QonmxbbMc27/FMph/4uKwL9fyJY2fAH7n4knqgSKl/HEp5aaUcpfl9fA7Usrv5xHTASCEcIUQ3pe2gW8BXuMR7F9SyjPgUAhx88L0TcAbPAxaHoIfDL4NuM1y/PK/eND+vAd//xfgFMhZ3p1/iOUY5KeAt4HfBtoXxwqWs2/uAq8CLzxo/79My0dZvv69Anzx4vNtj6Ie4H3AFy60vAb8lxf2q8BngTvALwHmhd26aN+52H/1QWv4Cpq+Afj1R1XHhc8vX3xe/9L1/Sj2rwv/ngNevOhjvwq0HgYtlys2L7nkkkseYR70cMoll1xyySV/Ai6D+CWXXHLJI8xlEL/kkksueYS5DOKXXHLJJY8wl0H8kksuueQR5jKIX3LJJZc8wlwG8UsuueSSR5jLIH7JJZdc8gjzfwMePDXK3TAM9gAAAABJRU5ErkJggg==\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "if PROTOCOL == 'grpc':\n", + " np_image = np.repeat(np.expand_dims(np_image, 0), batch_size, axis=0)\n", + " channel = grpc.insecure_channel(SERVER_URL)\n", + " stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)\n", + " request = predict_pb2.PredictRequest()\n", + " request.model_spec.name = 'ssdmobilenet'\n", + " request.model_spec.signature_name = 'serving_default'\n", + " request.inputs['inputs'].CopyFrom(tf.contrib.util.make_tensor_proto(np_image))\n", + " result = stub.Predict(request)\n", + " visualize(result.outputs, np_image[0])\n", + "elif PROTOCOL == 'rest':\n", + " predict_request = '{\"instances\" : %s}' % np.expand_dims(np_image, 0).tolist()\n", + " result = requests.post(SERVER_URL, data=predict_request)\n", + " visualize(result.json()['predictions'][0], np_image)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Measure Performance" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "def make_request(batch_size):\n", + " if PROTOCOL == 'rest':\n", + " np_images = np.repeat(np.expand_dims(get_random_image(IMAGES_PATH), 0).tolist(), batch_size, axis=0).tolist()\n", + " return '{\"instances\" : %s}' % np_images\n", + " elif PROTOCOL == 'grpc':\n", + " np_images = np.repeat(np.expand_dims(get_random_image(IMAGES_PATH), 0), batch_size, axis=0)\n", + " channel = grpc.insecure_channel(SERVER_URL)\n", + " stub = prediction_service_pb2_grpc.PredictionServiceStub(channel)\n", + " request = predict_pb2.PredictRequest()\n", + " request.model_spec.name = MODEL\n", + " request.model_spec.signature_name = 'serving_default'\n", + " request.inputs['inputs'].CopyFrom(tf.contrib.util.make_tensor_proto(np_images))\n", + " return (stub, request)\n", + "\n", + "def send_request(predict_request):\n", + " if PROTOCOL == 'rest':\n", + " requests.post(SERVER_URL, data=predict_request)\n", + " elif PROTOCOL == 'grpc':\n", + " predict_request[0].Predict(predict_request[1])\n", + "\n", + "def benchmark(batch_size=1, num_iteration=10, warm_up_iteration=2):\n", + " i = 0\n", + " total_time = 0\n", + " for _ in range(num_iteration):\n", + " i += 1\n", + " np_images = np.repeat(np.expand_dims(get_random_image(IMAGES_PATH), 0), batch_size, axis=0)\n", + " predict_request = make_request(batch_size)\n", + " start_time = time.time()\n", + " send_request(predict_request)\n", + " time_consume = time.time() - start_time\n", + " print('Iteration %d: %.3f sec' % (i, time_consume))\n", + " if i > warm_up_iteration:\n", + " total_time += time_consume\n", + "\n", + " time_average = total_time / (num_iteration - warm_up_iteration)\n", + " print('Average time: %.3f sec' % (time_average))\n", + " print('Batch size = %d' % batch_size)\n", + " if batch_size == 1:\n", + " print('Latency: %.3f ms' % (time_average * 1000))\n", + " print('Throughput: %.3f images/sec' % (batch_size / time_average))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Real-time Inference (latency, batch_size=1)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Iteration 1: 0.059 sec\n", + "Iteration 2: 0.098 sec\n", + "Iteration 3: 0.055 sec\n", + "Iteration 4: 0.052 sec\n", + "Iteration 5: 0.056 sec\n", + "Iteration 6: 0.051 sec\n", + "Iteration 7: 0.056 sec\n", + "Iteration 8: 0.052 sec\n", + 
"Iteration 9: 0.050 sec\n", + "Iteration 10: 0.048 sec\n", + "Average time: 0.052 sec\n", + "Batch size = 1\n", + "Latency: 52.392 ms\n", + "Throughput: 19.087 images/sec\n" + ] + } + ], + "source": [ + "benchmark()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Throughput (batch_size=128)" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Iteration 1: 4.414 sec\n", + "Iteration 2: 2.699 sec\n", + "Iteration 3: 2.654 sec\n", + "Iteration 4: 2.409 sec\n", + "Iteration 5: 2.485 sec\n", + "Iteration 6: 2.476 sec\n", + "Iteration 7: 2.457 sec\n", + "Iteration 8: 2.497 sec\n", + "Iteration 9: 2.575 sec\n", + "Iteration 10: 2.539 sec\n", + "Average time: 2.511 sec\n", + "Batch size = 128\n", + "Throughput: 50.967 images/sec\n" + ] + } + ], + "source": [ + "benchmark(batch_size=128)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 2", + "language": "python", + "name": "python2" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 2 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython2", + "version": "2.7.12" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/docs/object_detection/tensorflow_serving/RFCN.ipynb b/docs/object_detection/tensorflow_serving/RFCN.ipynb deleted file mode 100644 index 2f96cf5e7..000000000 --- a/docs/object_detection/tensorflow_serving/RFCN.ipynb +++ /dev/null @@ -1,207 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Object Detection: R-FCN" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from __future__ import print_function\n", - "\n", - "import os\n", - "import time\n", - "import random\n", - "import requests\n", - "import numpy as np\n", - "import tensorflow as tf\n", - "from PIL import Image\n", - "\n", - "from object_detection.utils.visualization_utils import visualize_boxes_and_labels_on_image_array\n", - "\n", - "%matplotlib inline\n", - "import matplotlib\n", - "from matplotlib import pyplot as plt" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "SERVER_URL = 'http://localhost:8501/v1/models/rfcn:predict'\n", - "IMAGES_PATH = '/home//coco/val/val2017' # Edit this to your COCO validation directory" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def get_random_image(image_dir):\n", - " image_path = os.path.join(image_dir, random.choice(os.listdir(image_dir)))\n", - " image = Image.open(image_path)\n", - " (im_width, im_height) = image.size\n", - " \n", - " return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)\n", - "\n", - "def visualize(output_dict, image_np):\n", - " output_dict['num_detections'] = int(output_dict['num_detections'])\n", - " output_dict['detection_classes'] = np.array(output_dict['detection_classes']).astype(np.uint8)\n", - " output_dict['detection_boxes'] = np.array(output_dict['detection_boxes'])\n", - " output_dict['detection_scores'] = np.array(output_dict['detection_scores'])\n", - "\n", - " # Visualize the results of a detection\n", - " 
visualize_boxes_and_labels_on_image_array(\n", - " image_np,\n", - " output_dict['detection_boxes'],\n", - " output_dict['detection_classes'],\n", - " output_dict['detection_scores'],\n", - " {1: {'id': 1, 'name': 'object'}}, # Empty category index\n", - " instance_masks=output_dict.get('detection_masks'),\n", - " use_normalized_coordinates=True,\n", - " line_thickness=8)\n", - " plt.figure()\n", - " plt.imshow(image_np)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Test Object Detection" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "np_image = get_random_image(IMAGES_PATH)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "predict_request = '{\"instances\" : %s}' % np.expand_dims(np_image, 0).tolist()\n", - "result = requests.post(SERVER_URL, data=predict_request)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "visualize(result.json()['predictions'][0], np_image)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Measure Performance" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def benchmark(batch_size=1, num_iteration=40, warm_up_iteration=10):\n", - " i = 0\n", - " total_time = 0\n", - " for _ in range(num_iteration):\n", - " i += 1\n", - " np_images = np.repeat(np.expand_dims(get_random_image(IMAGES_PATH), 0).tolist(), batch_size, axis=0).tolist()\n", - " predict_request = '{\"instances\" : %s}' % np_images\n", - " start_time = time.time()\n", - " requests.post(SERVER_URL, data=predict_request)\n", - " time_consume = time.time() - start_time\n", - " print('Iteration %d: %.3f sec' % (i, time_consume))\n", - " if i > warm_up_iteration:\n", - " total_time += time_consume\n", - "\n", - " time_average = total_time / (num_iteration - warm_up_iteration)\n", - " print('Average time: %.3f sec' % (time_average))\n", - " print('Batch size = %d' % batch_size)\n", - " if batch_size == 1:\n", - " print('Latency: %.3f ms' % (time_average * 1000))\n", - " print('Throughput: %.3f images/sec' % (batch_size / time_average))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Real-time Inference (latency, batch_size=1)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "benchmark()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Throughput (batch_size=128)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "benchmark(batch_size=128)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 2", - "language": "python", - "name": "python2" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 2 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython2", - "version": "2.7.10" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} diff --git a/docs/object_detection/tensorflow_serving/Tutorial.md b/docs/object_detection/tensorflow_serving/Tutorial.md index dfb3d724f..224943f5c 100644 --- a/docs/object_detection/tensorflow_serving/Tutorial.md +++ 
b/docs/object_detection/tensorflow_serving/Tutorial.md @@ -1,15 +1,17 @@ -# Object Detection with TensorFlow Serving on CPU using R-FCN model +# Object Detection with TensorFlow Serving on CPU +Models: R-FCN and SSD-MobileNet ## Goal This tutorial will introduce you to the CPU performance considerations for object detection in deep learning models and how to use [IntelĀ® Optimizations for TensorFlow Serving](https://www.tensorflow.org/serving/) to improve inference time on CPUs. -This tutorial uses a pre-trained Region-based Fully Convolutional Network (R-FCN) model for object detection and provides sample code that you can use to get your optimized TensorFlow model server and REST client up and running quickly. In this tutorial using R-FCN, you will measure inference performance in two situations: -* **Online inference**, where batch_size=1. In this case, lower time to result means better runtime performance. +This tutorial uses two pre-trained models - a [Region-based Fully Convolutional Network (R-FCN)](https://arxiv.org/pdf/1605.06409.pdf) and a [Single-Shot MultiBox Detector MobileNet (SSD-MobileNet)](https://arxiv.org/pdf/1704.04861.pdf) - for object detection and provides sample code that you can use to get your optimized TensorFlow model server and client up and running quickly. +In this tutorial you will choose between R-FCN and SSD-MobileNet, and between the REST client and GRPC client, and then measure inference performance in two situations: +* **Online inference**, where batch_size=1. In this case, a lower number means better runtime performance. * **Batch inference**, where batch_size>1. In this case, a higher number means better runtime performance. **NOTE about REST vs. GRPC**: This tutorial is focused on optimizing the model server, not the client that sends requests. For optimal client-side serialization and de-serialization, you may want to use TensorFlow Serving's GRPC option instead of the REST API, especially if you are optimizing for batch inference (here is one [article](https://medium.com/@avidaneran/tensorflow-serving-rest-vs-grpc-e8cef9d4ff62) with a relevant analysis). -We use REST in this tutorial for illustration, not as a best practice, and offer another [tutorial](/docs/image_recognition/tensorflow_serving/Tutorial.md) that illustrates the use of GRPC with TensorFlow Serving. +We show both GRPC and REST in this tutorial for illustration, not as a best practice. Feel free to compare and choose the protocol that works best for you. ## Prerequisites @@ -19,140 +21,178 @@ This tutorial assumes you have already: especially these sections: * [Performance Metrics](/docs/general/tensorflow_serving/GeneralBestPractices.md#performance-metrics) * [TensorFlow Serving Configuration Settings](/docs/general/tensorflow_serving/GeneralBestPractices.md#tensorflow-serving-configuration-settings) -* Ran an example end-to-end using a REST client, such as the one in the [Installation Guide](/docs/general/tensorflow_serving/InstallationGuide.md) +* Ran an example end-to-end using a REST or GRPC client, such as the one in the [Installation Guide](/docs/general/tensorflow_serving/InstallationGuide.md) ## Background -[IntelĀ® Math Kernel Library for Deep Neural Networks (IntelĀ® MKL-DNN)](https://github.com/intel/mkl-dnn) offers significant performance improvements for convolution, pooling, normalization, activation, and other operations for object detection, using efficient vectorization and multi-threading. 
Tuning TensorFlow Serving to take full advantage of your hardware for object detection deep learning inference involves: +[IntelĀ® Math Kernel Library for Deep Neural Networks (IntelĀ® MKL-DNN)](https://github.com/intel/mkl-dnn) offers significant performance improvements for convolution, pooling, normalization, activation, and other operations for object detection, using efficient vectorization and multi-threading. +Tuning TensorFlow Serving to take full advantage of your hardware for object detection deep learning inference involves: 1. Running a TensorFlow Serving docker container configured for performance given your hardware resources -2. Running a REST client notebook to verify object detection and measure online and batch inference performance +2. Running a REST or GRPC client to verify object detection and measure online and batch inference 3. Experimenting with the TensorFlow Serving settings on your own to further optimize for your model and use case -## Hands-on Tutorial with pre-trained R-FCN model +## Hands-on Tutorial -1. **Set up your environment**: We need to setup two things for this tutorial - #### 1.1 Install the [requests](http://docs.python-requests.org) package for making REST HTTP requests. - We will use a virtual environment to install the required packages. If you do not have pip or virtualenv, you will need to get them first: - ``` - $ sudo apt-get install -y python python-pip - $ pip install virtualenv - ``` - - Create and activate the python virtual envirnoment in your home directory and install the [`requests`](http://docs.python-requests.org) package. +1. **Download the data and clone the Model Zoo**: + + 1.1 Download the 2017 validation COCO dataset (~780MB) (**note**: do not convert the COCO dataset to TF records format): + ``` - $ cd ~ - $ virtualenv rfcn_venv - $ source rfcn_venv/bin/activate - (rfcn_venv)$ pip install requests + cd ~ + mkdir -p coco/val + wget http://images.cocodataset.org/zips/val2017.zip + unzip val2017.zip -d coco/val + export COCO_VAL_DATA=$(pwd)/coco/val/val2017 + echo "export COCO_VAL_DATA=$(pwd)/coco/val/val2017" >> ~/.bashrc ``` - #### 1.2 Install [Tensorflow Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection) - For detailed instructions, [click here](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md). Following are the instructions for Ubuntu 16.04. - - - 1.2.1 Install Tensorflow Object Detection API dependencies - ``` - (rfcn_venv)$ sudo apt-get install -y protobuf-compiler python-pil python-lxml python-tk - (rfcn_venv)$ pip install tensorflow Cython contextlib2 jupyter matplotlib pillow lxml - ``` - - 1.2.2 Clone the tensorflow models repo into your home directory. - ``` - (rfcn_venv)$ cd ~ - (rfcn_venv)$ git clone https://github.com/tensorflow/models - (rfcn_venv)$ export TF_MODELS_ROOT=$(pwd)/models - (rfcn_venv)$ echo "export TF_MODELS_ROOT=$(pwd)/models" >> ~/.bashrc - ``` + 1.2 Clone the Intel Model Zoo into your home directory: + + ``` + cd ~ + git clone https://github.com/IntelAI/models.git + ``` + +2. **Choose your model and download the pre-trained SavedModel**: Select either R-FCN or SSD-MobileNet. + Then download and extract the pre-trained model and copy the `saved_model.pb` to `~/obj_detection/1` (the `1` subdirectory is important - don't skip it!). + This is the file we will serve from TensorFlow Serving. Finally, define a variable for your chosen model to use in later steps. 
+ Refer to the [TensorFlow documentation](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/python/saved_model) for more information about SavedModels, and refer to the FP32 model READMEs for [R-FCN](/benchmarks/object_detection/tensorflow/rfcn/README.md#download_fp32_pretrained_model) and [SSD-MobileNet](/benchmarks/object_detection/tensorflow/ssd-mobilenet/README.md#fp32-inference-instructions) to get the latest location of the pre-trained models. + + Highlight and copy one of the following download links: + * R-FCN: `https://storage.googleapis.com/intel-optimized-tensorflow/models/rfcn_resnet101_fp32_coco_pretrained_model.tar.gz` + * SSD-MobileNet: `http://download.tensorflow.org/models/object_detection/ssd_mobilenet_v1_coco_2018_01_28.tar.gz` + + Then execute the following bash commands after customizing them for the model you have chosen: + + ``` + cd ~ + wget + tar -xzvf + mkdir -p obj_detection/1 + cp /saved_model/saved_model.pb obj_detection/1 + model_name= + ``` - 1.2.3 Install COCO API - ``` - (rfcn_venv)$ cd ~ - (rfcn_venv)$ git clone https://github.com/cocodataset/cocoapi.git - (rfcn_venv)$ cd cocoapi/PythonAPI - (rfcn_venv)$ make - (rfcn_venv)$ cp -r pycocotools $TF_MODELS_ROOT/research/ - ``` +3. **Set up your virtual environment**: We will use a virtual environment to install the required packages. - 1.2.4 Manually install the protobuf-compiler v3.0.0, run the compilation process, add Libraries to PYTHONPATH and to your `.bashrc` and test the installation of Tensorflow Object Detection API - ``` - (rfcn_venv)$ cd $TF_MODELS_ROOT/research/ - (rfcn_venv)$ wget -O protobuf.zip https://github.com/google/protobuf/releases/download/v3.0.0/protoc-3.0.0-linux-x86_64.zip - (rfcn_venv)$ unzip protobuf.zip - (rfcn_venv)$ ./bin/protoc object_detection/protos/*.proto --python_out=. - (rfcn_venv)$ export PYTHONPATH=$PYTHONPATH:$(pwd):$(pwd)/slim - (rfcn_venv)$ echo "export PYTHONPATH=$PYTHONPATH:$(pwd):$(pwd)/slim" >> ~/.bashrc - (rfcn_venv)$ python object_detection/builders/model_builder_test.py - ``` - -2. **Download the Data**: Download the 2017 validation COCO dataset (~780MB) (**note**: do not convert the COCO dataset to TF records format): - + 3.1 If you do not have pip or virtualenv, you will need to get them first: + ``` + sudo apt-get install -y python python-pip virtualenv ``` - (rfcn_venv)$ cd ~ - (rfcn_venv)$ mkdir -p coco/val - (rfcn_venv)$ wget http://images.cocodataset.org/zips/val2017.zip - (rfcn_venv)$ unzip val2017.zip -d coco/val - (rfcn_venv)$ export COCO_VAL_DATA=$(pwd)/coco/val/val2017 - (rfcn_venv)$ echo "export COCO_VAL_DATA=$(pwd)/coco/val/val2017" >> ~/.bashrc + + 3.2 Create and activate the python virtual environment in your home directory: + ``` + cd ~ + virtualenv od_venv + source od_venv/bin/activate ``` -3. **Download and Prepare the pre-trained SavedModel**: Download and extract the pre-trained model and copy the `rfcn_resnet101_fp32_coco/saved_model/saved_model.pb` to `rfcn/1` (the `1` subdirectory is important - don't skip it!). This is the file we will serve from TensorFlow Serving. - Refer to the [TensorFlow documentation](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/python/saved_model) for more information about SavedModels, and refer to this [README file](/benchmarks/object_detection/tensorflow/rfcn/README.md#download_fp32_pretrained_model) to get the latest location of the pre-trained model. 
+ 3.3 Install the required packages using `requirements.txt`: ``` - (rfcn_venv)$ cd ~/ - (rfcn_venv)$ wget https://storage.googleapis.com/intel-optimized-tensorflow/models/rfcn_resnet101_fp32_coco_pretrained_model.tar.gz - (rfcn_venv)$ tar -xzvf rfcn_resnet101_fp32_coco_pretrained_model.tar.gz - (rfcn_venv)$ mkdir -p rfcn/1 - (rfcn_venv)$ cp rfcn_resnet101_fp32_coco/saved_model/saved_model.pb rfcn/1 + pip install -r models/docs/object_detection/tensorflow_serving/requirements.txt ``` -4. **Discover the number of physical cores**: Compute *num_physical_cores* by executing the `lscpu` command and multiplying `Core(s) per socket` by `Socket(s)`. For example, for a machine with `Core(s) per socket: 28` and `Socket(s): 2`, `num_physical_cores = 28 * 2 = 56`. To compute *num_physical_cores* and *tf_session_parallelism* with bash commands: + 3.3 Choose between the REST example or the GRPC example (the environment dependencies are different depending on the protocol you use, + and GRPC is usually faster, especially when using larger batch sizes). Define a variable for your desired protocol. + + **REST**: + ``` + protocol_name=rest + ``` + + **GRPC**: + ``` + protocol_name=grpc + ``` + +4. **Install [TensorFlow Object Detection API](https://github.com/tensorflow/models/tree/master/research/object_detection)**: + For detailed instructions, [click here](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/installation.md). + We have already installed the required python packages for the API. Following are the rest of the instructions for Ubuntu 16.04. + + 4.1 Clone the tensorflow models repo into a new folder in your home directory. ``` - (rfcn_venv)$ cores_per_socket=`lscpu | grep "Core(s) per socket" | cut -d':' -f2 | xargs` - (rfcn_venv)$ num_sockets=`lscpu | grep "Socket(s)" | cut -d':' -f2 | xargs` - (rfcn_venv)$ num_physical_cores=$((cores_per_socket * num_sockets)) - (rfcn_venv)$ echo $num_physical_cores + cd ~ + git clone https://github.com/tensorflow/models tensorflow-models + export TF_MODELS_ROOT=$(pwd)/tensorflow-models + echo "export TF_MODELS_ROOT=$(pwd)/tensorflow-models" >> ~/.bashrc ``` -5. **Start the server**: Now let's start up the TensorFlow model server. With `&` at the end of the cmd, runs the container as a background process. Press enter after executing the following cmd. -To optimize overall performance, use the following recommended settings from the [General Best Practices](/docs/general/tensorflow_serving/GeneralBestPractices.md): - * OMP_NUM_THREADS=*num_physical_cores* - * TENSORFLOW_INTER_OP_PARALLELISM=2 - * TENSORFLOW_INTRA_OP_PARALLELISM=*num_physical_cores* + 4.2 Manually install the protobuf-compiler v3.0.0, run the compilation process, add libraries to PYTHONPATH and to your `.bashrc` and test the installation of Tensorflow Object Detection API. + ``` + cd $TF_MODELS_ROOT/research/ + wget -O protobuf.zip https://github.com/protocolbuffers/protobuf/releases/download/v3.0.0/protoc-3.0.0-linux-x86_64.zip + unzip protobuf.zip + ./bin/protoc object_detection/protos/*.proto --python_out=. + export PYTHONPATH=$PYTHONPATH:$(pwd):$(pwd)/slim + echo "export PYTHONPATH=$PYTHONPATH:$(pwd):$(pwd)/slim" >> ~/.bashrc + python object_detection/builders/model_builder_test.py + ``` +5. **Discover the number of physical cores**: Compute *num_physical_cores* by executing the `lscpu` command and multiplying `Core(s) per socket` by `Socket(s)`. + For example, for a machine with `Core(s) per socket: 28` and `Socket(s): 2`, `num_physical_cores = 28 * 2 = 56`. 
+ To compute *num_physical_cores* with bash commands: ``` - (rfcn_venv)$ cd ~ - (rfcn_venv)$ docker run \ - --name=tfserving_rfcn \ - -p 8501:8501 \ - -v "$(pwd)/rfcn:/models/rfcn" \ - -e MODEL_NAME=rfcn \ - -e OMP_NUM_THREADS=$num_physical_cores \ - -e TENSORFLOW_INTER_OP_PARALLELISM=2 \ - -e TENSORFLOW_INTRA_OP_PARALLELISM=$num_physical_cores \ - tensorflow/serving:mkl & - ``` - **Note**: For some models, playing around with these settings values can improve performance even further. - We recommend that you experiment with your own hardware and model if you have strict performance requirements. - -6. *Measure Online and Batch inference performance**: Clone the Intel Model Zoo into a directory called `intel-models` and run `rfcn-benchmark.py` [python script](/docs/object_detection/tensorflow_serving/rfcn-benchmark.py), which will test both Online and Batch performance. - ``` - (rfcn_venv)$ git clone https://github.com/IntelAI/models.git intel-models - (rfcn_venv)$ python intel-models/docs/object_detection/tensorflow_serving/rfcn-benchmark.py \ - -i $COCO_VAL_DATA + cores_per_socket=`lscpu | grep "Core(s) per socket" | cut -d':' -f2 | xargs` + num_sockets=`lscpu | grep "Socket(s)" | cut -d':' -f2 | xargs` + num_physical_cores=$((cores_per_socket * num_sockets)) + echo $num_physical_cores ``` +6. **Start the server**: Now start up the TensorFlow model server. Using `-d` (for "detached") runs the container as a background process. + We will publish the ports for both REST (`-p 8501:8501`) and GRPC (`-p 8500:8500`). + To optimize overall performance, use the following recommended settings from the [General Best Practices](/docs/general/tensorflow_serving/GeneralBestPractices.md): + * OMP_NUM_THREADS=*num_physical_cores* + * TENSORFLOW_INTER_OP_PARALLELISM=2 + * TENSORFLOW_INTRA_OP_PARALLELISM=*num_physical_cores* + + ``` + cd ~ + docker run \ + --name=tfserving \ + -d \ + -p 8500:8500 \ + -p 8501:8501 \ + -v "$(pwd)/obj_detection:/models/$model_name" \ + -e MODEL_NAME=$model_name \ + -e OMP_NUM_THREADS=$num_physical_cores \ + -e TENSORFLOW_INTER_OP_PARALLELISM=2 \ + -e TENSORFLOW_INTRA_OP_PARALLELISM=$num_physical_cores \ + tensorflow/serving:mkl + ``` + + **Note**: For some models, playing around with the parallelism settings can improve performance even further. + We recommend that you experiment with your own hardware and model if you have strict performance requirements. -7. **Visualize Object Detection Output**: To visually see the output of object detection results, we will use Jupyter notebook via web browser. If you are using a system that does not have a browser, such as a VM on GCP or AWS, a workaround is to use local port forwarding of port 8888 to relay the jupyter service to your localhost. You will need to quit your SSH session and log back in with port forwarding configured. -For example, with a GCP VM, add `--ssh-flag="-L 8888:localhost:8888"` to your ssh command. Once you are connected again with port forwarding, reactivate the virtual environment, navigate to the tutorial directory, and start jupyter notebook. Continue with the next instruction. - ``` - $ cd ~ - $ source rfcn_venv/bin/activate - (rfcn_venv)$ cd intel-models/docs/object_detection/tensorflow_serving - (rfcn_venv)$ jupyter notebook +7. **Measure online and batch inference performance**: Run the `object_detection_benchmark.py` [python script](/docs/object_detection/tensorflow_serving/object_detection_benchmark.py), which will test both online and batch inference performance. 
+ + ``` + cd ~ + python models/docs/object_detection/tensorflow_serving/object_detection_benchmark.py \ + -i $COCO_VAL_DATA \ + -m $model_name \ + -p $protocol_name ``` - After running `jupyter notebook` , paste the generated link into your browser and open the `RFCN.ipynb` file. You will need to edit the code in one place - in the second cell, insert the path to your downloaded COCO validation data set. Then, execute the cells in order. The output of the "Test Object Detection" section should be an image with objects correctly detected by the R-FCN model. -8. (Optional) **Using a single core**: In some cases, it is desirable to constrain the inference server to a single core or socket. Docker has many runtime flags that allow you to control the container's access to the host system's CPUs, memory, and other resources. See the [Docker document on this topic](https://docs.docker.com/config/containers/resource_constraints/#cpu) for all the options and their definitions. For example, to run the container so that a single CPU is used, you can use these settings: +8. **Visualize object detection output**: To visually see the results of object detection, we will use a Jupyter notebook via web browser. + If you are using a system that does not have a browser, such as a VM on GCP or AWS, a workaround is to use local port forwarding of port 8888 to relay the jupyter service to your localhost. + You will need to quit your SSH session and log back in with port forwarding configured. For example, with a GCP VM, add `--ssh-flag="-L 8888:localhost:8888"` to your ssh command. + Once you are connected again with port forwarding, reactivate the virtual environment, navigate to the tutorial directory, and start the jupyter notebook service. + + ``` + cd ~ + source od_venv/bin/activate + cd models/docs/object_detection/tensorflow_serving + jupyter notebook + ``` + + After running `jupyter notebook`, paste the generated link into your browser and open the `ObjectDetection.ipynb` file. + You will need to edit the code in one cell - in the second cell, insert the path to your downloaded COCO validation data set and name of your chosen model and protocol. + Then, execute the cells in order. The output of the "Test Object Detection" section should be an image with objects detected by the served model. + +9. (Optional) **Using a single core**: In some cases, it is desirable to constrain the inference server to a single core or socket. + Docker has many runtime flags that allow you to control the container's access to the host system's CPUs, memory, and other resources. + See the [Docker document on this topic](https://docs.docker.com/config/containers/resource_constraints/#cpu) for all the options and their definitions. 
+ For example, to run the container so that a single CPU is used, you can use these settings: * `--cpuset-cpus="0"` * `--cpus="1"` * `OMP_NUM_THREADS=1` @@ -160,33 +200,39 @@ For example, with a GCP VM, add `--ssh-flag="-L 8888:localhost:8888"` to your ss * `TENSORFLOW_INTRA_OP_PARALLELISM=1` ``` - (rfcn_venv)$ docker run \ - --name=tfserving_rfcn_1 \ - -p 8500:8500 \ - --cpuset-cpus="0" \ - --cpus="1" \ - -v "$(pwd)/rfcn:/models/rfcn" \ - -e MODEL_NAME=rfcn \ - -e OMP_NUM_THREADS=1 \ - -e TENSORFLOW_INTER_OP_PARALLELISM=1 \ - -e TENSORFLOW_INTRA_OP_PARALLELISM=1 \ - tensorflow/serving:mkl & + cd ~ + docker run \ + --name=tfserving_1core \ + -d \ + -p 8500:8500 \ + -p 8501:8501 \ + --cpuset-cpus="0" \ + --cpus="1" \ + -v "$(pwd)/obj_detection:/models/$model_name" \ + -e MODEL_NAME=$model_name \ + -e OMP_NUM_THREADS=1 \ + -e TENSORFLOW_INTER_OP_PARALLELISM=1 \ + -e TENSORFLOW_INTRA_OP_PARALLELISM=1 \ + tensorflow/serving:mkl ``` - + 10. **Clean up**: * After saving any changes you made to the Jupyter notebook, close the file and stop the Jupyter server by clicking `Quit` from the main file browser. - * After you are fininshed with querying, you can stop the container which is running in the background. To restart the container with the same name, you need to stop and remove the container from the registry. To view your running containers run `docker ps`. - ``` - (rfcn_venv)$ docker rm -f tfserving_rfcn - ``` + * After you are finished with querying, you can stop the container which is running in the background. + To restart the container with the same name, you need to stop and remove the container from the registry. + To view your running containers run `docker ps`. + + ``` + docker rm -f tfserving + ``` + * Deactivate your virtual environment with `deactivate`. - ## Conclusion You have now seen an end-to-end example of serving an object detection model for inference using TensorFlow Serving, and learned: 1. How to choose good values for the performance-related runtime parameters exposed by the `docker run` command -2. How to verify that the served model can correctly detect objects in an image using a sample Jupyter notebook -3. How to measure online and batch inference metrics using a REST client +2. How to test online and batch inference metrics using a REST or GRPC client +3. How to verify that the served model can correctly detect objects in an image using a sample Jupyter notebook With this knowledge and the example code provided, you should be able to get started serving your own custom object detection model with good performance. If desired, you should also be able to investigate a variety of different settings combinations to see if further performance improvement are possible. 
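+
+## Appendix: Minimal REST client example
+
+If you would like to script your own client rather than use the provided benchmark script, the sketch below shows the REST request format that `object_detection_benchmark.py` uses.
+This is a minimal sketch, not part of the Model Zoo: the image path is hypothetical and should point at one of your downloaded COCO validation images, and `MODEL_NAME` is assumed to match the `$model_name` you chose in step 2.
+
+```
+import numpy as np
+import requests
+from PIL import Image
+
+MODEL_NAME = 'rfcn'  # assumption: change to 'ssdmobilenet' if that is the model you are serving
+SERVER_URL = 'http://localhost:8501/v1/models/{}:predict'.format(MODEL_NAME)
+
+# Read one validation image (hypothetical path) and send it as a batch of 1
+image = np.array(Image.open('/home/<user>/coco/val/val2017/<image>.jpg'))
+predict_request = '{"instances" : %s}' % np.expand_dims(image, 0).tolist()
+response = requests.post(SERVER_URL, data=predict_request)
+
+# The response holds one prediction per input instance
+print(response.json()['predictions'][0]['num_detections'])
+```
+
+For the GRPC protocol, see the `make_request` function in `object_detection_benchmark.py`, which builds a `PredictRequest` using the `tensorflow-serving-api` package installed from `requirements.txt`.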
diff --git a/docs/object_detection/tensorflow_serving/rfcn-benchmark.py b/docs/object_detection/tensorflow_serving/object_detection_benchmark.py similarity index 54% rename from docs/object_detection/tensorflow_serving/rfcn-benchmark.py rename to docs/object_detection/tensorflow_serving/object_detection_benchmark.py index 6948df969..c30c1aeae 100644 --- a/docs/object_detection/tensorflow_serving/rfcn-benchmark.py +++ b/docs/object_detection/tensorflow_serving/object_detection_benchmark.py @@ -14,7 +14,7 @@ # ####### USAGE ######### -# python rfcn-benchmark.py -i +# python object_detection_benchmark.py -i -m -p from __future__ import print_function @@ -25,8 +25,6 @@ import requests import numpy as np from PIL import Image -import tensorflow as tf -from object_detection.utils.visualization_utils import visualize_boxes_and_labels_on_image_array def check_for_link(value): @@ -40,7 +38,7 @@ def check_for_link(value): raise argparse.ArgumentTypeError("{} cannot be a link.".format(value)) def check_valid_folder(value): - """verifies filename exists and isn't a link""" + """Verifies filename exists and isn't a link""" if value is not None: if not os.path.isdir(value): raise argparse.ArgumentTypeError("{} does not exist or is not a directory.". @@ -48,6 +46,20 @@ def check_valid_folder(value): check_for_link(value) return value +def check_valid_model(value): + """Verifies model name is supported""" + if value not in ('rfcn', 'ssdmobilenet'): + raise argparse.ArgumentError("Model name {} does not match 'rfcn' or 'ssdmobilenet'.". + format(value)) + return value + +def check_valid_protocol(value): + """Verifies protocol is supported""" + if value not in ('rest', 'grpc'): + raise argparse.ArgumentError("Protocol name {} does not match 'rest' or 'grpc'.". + format(value)) + return value + def get_random_image(image_dir): image_path = os.path.join(image_dir, random.choice(os.listdir(image_dir))) image = Image.open(image_path) @@ -55,15 +67,38 @@ def get_random_image(image_dir): return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8) +def make_request(batch_size): + if PROTOCOL == 'rest': + np_images = np.repeat(np.expand_dims(get_random_image(IMAGES_PATH), 0).tolist(), batch_size, axis=0).tolist() + return '{"instances" : %s}' % np_images + elif PROTOCOL == 'grpc': + import grpc + import tensorflow as tf + from tensorflow_serving.apis import predict_pb2 + from tensorflow_serving.apis import prediction_service_pb2_grpc + np_images = np.repeat(np.expand_dims(get_random_image(IMAGES_PATH), 0), batch_size, axis=0) + channel = grpc.insecure_channel(SERVER_URL) + stub = prediction_service_pb2_grpc.PredictionServiceStub(channel) + request = predict_pb2.PredictRequest() + request.model_spec.name = MODEL + request.model_spec.signature_name = 'serving_default' + request.inputs['inputs'].CopyFrom(tf.contrib.util.make_tensor_proto(np_images)) + return (stub, request) + +def send_request(predict_request): + if PROTOCOL == 'rest': + requests.post(SERVER_URL, data=predict_request) + elif PROTOCOL == 'grpc': + predict_request[0].Predict(predict_request[1]) + def benchmark(batch_size=1, num_iteration=20, warm_up_iteration=10): i = 0 total_time = 0 for _ in range(num_iteration): i += 1 - np_images = np.repeat(np.expand_dims(get_random_image(IMAGES_PATH), 0).tolist(), batch_size, axis=0).tolist() - predict_request = '{"instances" : %s}' % np_images + predict_request = make_request(batch_size) start_time = time.time() - requests.post(SERVER_URL, data=predict_request) + 
send_request(predict_request) time_consume = time.time() - start_time print('Iteration %d: %.3f sec' % (i, time_consume)) if i > warm_up_iteration: @@ -81,15 +116,26 @@ def benchmark(batch_size=1, num_iteration=20, warm_up_iteration=10): ap = argparse.ArgumentParser() ap.add_argument("-i", "--images_path", type=check_valid_folder, required=True, help="Path to COCO validation directory") + ap.add_argument("-m", "--model", type=check_valid_model, required=True, + help="Name of model (rfcn or ssdmobilenet)") + ap.add_argument("-p", "--protocol", type=check_valid_protocol, required=True, + help="Name of protocol (rest or grpc)") args = vars(ap.parse_args()) - - SERVER_URL = 'http://localhost:8501/v1/models/rfcn:predict' + IMAGES_PATH = args['images_path'] + MODEL = args['model'] + PROTOCOL = args['protocol'] + if PROTOCOL == 'rest': + SERVER_URL = 'http://localhost:8501/v1/models/{}:predict'.format(MODEL) + elif PROTOCOL == 'grpc': + SERVER_URL = 'localhost:8500' print('\n SERVER_URL: {} \n IMAGES_PATH: {}'.format(SERVER_URL, IMAGES_PATH)) - print('\nStarting R-FCN model benchmarking for Latency with batch_size=1, num_iteration=20, warm_up_iteration=10') + print('\nStarting {} model benchmarking for latency on {}:'.format(MODEL.upper(), PROTOCOL.upper())) + print('batch_size=1, num_iteration=20, warm_up_iteration=10\n') benchmark(batch_size=1, num_iteration=20, warm_up_iteration=10) - print('\nStarting R-FCN model benchmarking for Throughput with batch_size=128, num_iteration=10, warm_up_iteration=2') + print('\nStarting {} model benchmarking for throughput on {}:'.format(MODEL.upper(), PROTOCOL.upper())) + print('batch_size=128, num_iteration=10, warm_up_iteration=2\n') benchmark(batch_size=128, num_iteration=10, warm_up_iteration=2) diff --git a/docs/object_detection/tensorflow_serving/requirements.txt b/docs/object_detection/tensorflow_serving/requirements.txt new file mode 100644 index 000000000..1e77692c2 --- /dev/null +++ b/docs/object_detection/tensorflow_serving/requirements.txt @@ -0,0 +1,15 @@ +# rest +requests + +# grpc +intel-tensorflow +tensorflow-serving-api + +# object detection api +Cython +contextlib2 +jupyter +matplotlib +pillow +lxml +absl-py \ No newline at end of file diff --git a/docs/recommendation/tensorflow/Tutorial.md b/docs/recommendation/tensorflow/Tutorial.md index aa33a6643..96b389771 100644 --- a/docs/recommendation/tensorflow/Tutorial.md +++ b/docs/recommendation/tensorflow/Tutorial.md @@ -215,8 +215,6 @@ Set this parameter to a socket id to run the workload on a single socket. Average Latency (ms/batch) : ... Throughput is (records/sec) : ... 
-------------------------------------------------- - lscpu_path_cmd = command -v lscpu - lscpu located here: /usr/bin/lscpu num_inter_threads: 28 num_intra_threads: 1 Received these standard args: Namespace(accuracy_only=False, batch_size=512, benchmark_dir='/workspace/benchmarks', benchmark_only=True, checkpoint=None, data_location='/dataset', data_num_inter_threads=None, data_num_intra_threads=None, framework='tensorflow', input_graph='/in_graph/wide_deep_fp32_pretrained_model.pb', intelai_models='/workspace/intelai_models', mode='inference', model_args=[], model_name='wide_deep_large_ds', model_source_dir='/workspace/models', num_cores=-1, num_inter_threads=28, num_intra_threads=1, num_parallel_batches=28, output_dir='/workspace/benchmarks/common/tensorflow/logs', output_results=False, precision='fp32', socket_id=-1, use_case='recommendation', verbose=True) @@ -276,9 +274,7 @@ perform necessary installs, run the ```launch_benchmark.py``` script, and does n --debug   Example Output: - - lscpu_path_cmd = command -v lscpu - lscpu located here: b'/usr/bin/lscpu' + root@a78677f56d69:/workspace/benchmarks/common/tensorflow# To rerun the model script, execute the ```start.sh``` bash script from your existing directory with additional or modified flags. For example, to rerun with the best batch inference (batch size=512) settings, run with ```BATCH_SIZE``` diff --git a/models/image_recognition/tensorflow/densenet169/inference/fp32/accuracy.py b/models/image_recognition/tensorflow/densenet169/inference/fp32/accuracy.py new file mode 100644 index 000000000..0335ce423 --- /dev/null +++ b/models/image_recognition/tensorflow/densenet169/inference/fp32/accuracy.py @@ -0,0 +1,137 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import sys +import os +import time +import numpy as np +from tensorflow.core.protobuf import rewriter_config_pb2 +from google.protobuf import text_format +import tensorflow as tf +import image_preprocessing +import dataset + +NUM_TEST_IMAGES = 50000 + +def load_graph(model_file): + graph = tf.Graph() + graph_def = tf.GraphDef() + + import os + file_ext = os.path.splitext(model_file)[1] + + with open(model_file, "rb") as f: + if file_ext == '.pbtxt': + text_format.Merge(f.read(), graph_def) + else: + graph_def.ParseFromString(f.read()) + with graph.as_default(): + tf.import_graph_def(graph_def, name='') + return graph + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--input_graph", default=None, + help="graph/model to be executed") + parser.add_argument("--data_location", default=None, + help="full path to the validation data") + parser.add_argument("--input_height", default=224, + type=int, help="input height") + parser.add_argument("--input_width", default=224, + type=int, help="input width") + parser.add_argument("--batch_size", default=32, + type=int, help="batch size") + parser.add_argument("--input_layer", default="input", + help="name of input layer") + parser.add_argument("--output_layer", default="densenet169/predictions/Reshape_1", + help="name of output layer") + parser.add_argument( + '--num_inter_threads', + help='number threads across operators', + type=int, default=1) + parser.add_argument( + '--num_intra_threads', + help='number threads for an operator', + type=int, default=1) + args = parser.parse_args() + + if args.input_graph: + model_file = args.input_graph + else: + sys.exit("Please provide a graph file.") + input_height = args.input_height + input_width = args.input_width + batch_size = args.batch_size + input_layer = args.input_layer + output_layer = args.output_layer + num_inter_threads = args.num_inter_threads + num_intra_threads = args.num_intra_threads + data_location = args.data_location + dataset = dataset.ImagenetData(data_location) + preprocessor = image_preprocessing.ImagePreprocessor( + input_height, input_width, batch_size, + 1, # device count + tf.float32, # data_type for input fed to the graph + train=False, # doing inference + resize_method='crop') + images, labels = preprocessor.minibatch(dataset, subset='validation') + graph = load_graph(model_file) + input_tensor = graph.get_tensor_by_name(input_layer + ":0") + output_tensor = graph.get_tensor_by_name(output_layer + ":0") + + rewrite_options = rewriter_config_pb2.RewriterConfig( + layout_optimizer=rewriter_config_pb2.RewriterConfig.ON) + config = tf.ConfigProto() + config.inter_op_parallelism_threads = num_inter_threads + config.intra_op_parallelism_threads = num_intra_threads + config.graph_options.rewrite_options.remapping = ( + rewriter_config_pb2.RewriterConfig.OFF) + + total_accuracy1, total_accuracy5 = (0.0, 0.0) + num_processed_images = 0 + num_remaining_images = dataset.num_examples_per_epoch(subset='validation') \ + - num_processed_images + top1 = 0 + with tf.Session(config=config) as sess: + sess_graph = tf.Session(graph=graph, config=config) + + while num_remaining_images >= batch_size: + # Reads and preprocess data + #import pdb + #pdb.set_trace() + np_images, np_labels = sess.run([images[0], labels[0]]) + np_labels -= 1 + 
#print(np_labels.shape) + num_processed_images += batch_size + num_remaining_images -= batch_size + start_time = time.time() + # Compute inference on the preprocessed data + predictions1 = sess_graph.run(output_tensor, + {input_tensor: np_images}) + elapsed_time = time.time() - start_time + if(batch_size !=1): + predictions1 = sess.run(tf.squeeze(predictions1)) + else : + predictions1 = sess.run(tf.reshape(predictions1,[1,1000])) + predictions2 = tf.argmax(predictions1, axis=1) + predictions = sess.run(predictions2) + top1 += batch_size - (np.count_nonzero(predictions - np_labels)) + print("Iteration time: %0.4f ms" % elapsed_time) + print(top1/num_processed_images) diff --git a/models/image_recognition/tensorflow/densenet169/inference/fp32/benchmark.py b/models/image_recognition/tensorflow/densenet169/inference/fp32/benchmark.py new file mode 100644 index 000000000..4091b4137 --- /dev/null +++ b/models/image_recognition/tensorflow/densenet169/inference/fp32/benchmark.py @@ -0,0 +1,161 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. # You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import sys +import os +import time +import numpy as np +from tensorflow.core.protobuf import rewriter_config_pb2 +from google.protobuf import text_format +import tensorflow as tf + +def load_graph(model_file): + graph = tf.Graph() + graph_def = tf.GraphDef() + + import os + file_ext = os.path.splitext(model_file)[1] + + with open(model_file, "rb") as f: + if file_ext == '.pbtxt': + text_format.Merge(f.read(), graph_def) + else: + graph_def.ParseFromString(f.read()) + with graph.as_default(): + tf.import_graph_def(graph_def, name='') + + return graph + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--input_graph", default=None, + help="graph/model to be executed") + parser.add_argument("--input_height", default=224, + type=int, help="input height") + parser.add_argument("--input_width", default=224, + type=int, help="input width") + parser.add_argument("--batch_size", default=32, + type=int, help="batch size") + parser.add_argument("--input_layer", default="input", + help="name of input layer") + parser.add_argument("--output_layer", default="densenet169/predictions/Reshape_1", + help="name of output layer") + parser.add_argument( + '--num_inter_threads', + help='number threads across operators', + type=int, default=1) + parser.add_argument( + '--num_intra_threads', + help='number threads for an operator', + type=int, default=1) + parser.add_argument("-gpu", "--gpu", + default = -1, + type=int, help="Run on gpu, other wise cpu", + required=False) + + parser.add_argument("--warmup_steps", type=int, default=40, + help="number of warmup steps") + parser.add_argument("--steps", type=int, default=100, help="number of steps") + args = parser.parse_args() + + if args.input_graph: + model_file = args.input_graph + else: + sys.exit("Please provide a graph file.") + input_height = args.input_height + input_width = args.input_width + batch_size = args.batch_size + input_layer = args.input_layer + output_layer = args.output_layer + warmup_steps = args.warmup_steps + steps = args.steps + print(steps) + assert steps > 10, "Benchmark steps should be at least 10." 
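+  # The benchmark below runs entirely on synthetic images (generated with
+  # tf.truncated_normal), so no dataset is needed; inter-/intra-op threading
+  # is configured from the command-line arguments for CPU runs.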
+ num_inter_threads = args.num_inter_threads + num_intra_threads = args.num_intra_threads + + input_shape = [batch_size, input_height, input_width, 3] + images = tf.truncated_normal( + input_shape, + dtype=tf.float32, + stddev=10, + name='synthetic_images') + + image_data = None + graph = load_graph(model_file) + + input_tensor = graph.get_tensor_by_name(input_layer + ":0"); + output_tensor = graph.get_tensor_by_name(output_layer + ":0"); + + rewrite_options = rewriter_config_pb2.RewriterConfig( + layout_optimizer=rewriter_config_pb2.RewriterConfig.ON) + config = tf.ConfigProto() + if (args.gpu < 0): + config.inter_op_parallelism_threads = num_inter_threads + config.intra_op_parallelism_threads = num_intra_threads + config.graph_options.rewrite_options.remapping = ( + rewriter_config_pb2.RewriterConfig.OFF) + #os.environ["OMP_NUM_THREADS"] = "14" + with tf.Session(config=config) as sess: + image_data = sess.run(images) + + with tf.Session(graph=graph, config=config) as sess: + sys.stdout.flush() + print("[Running warmup steps...]") + for t in range(warmup_steps): + start_time = time.time() + sess.run(output_tensor, {input_tensor: image_data}) + elapsed_time = time.time() - start_time + if((t+1) % 10 == 0): + print("steps = {0}, {1} images/sec" + "".format(t+1, batch_size/elapsed_time)) + avg = 0 + print("[Running benchmark steps...]") + total_time = 0; + total_images = 0; + for t in range(steps): + start_time = time.time() + results = sess.run(output_tensor, {input_tensor: image_data}) + elapsed_time = time.time() - start_time + avg += elapsed_time + if((t+1) % 10 == 0): + print("steps = {0}, {1} images/sec" + "".format(t+1, batch_size*(t+1)/avg)); + print(" Latency: {0} ms" + "".format(avg*1000. /(t+1))) diff --git a/models/image_recognition/tensorflow/densenet169/inference/fp32/cnn_util.py b/models/image_recognition/tensorflow/densenet169/inference/fp32/cnn_util.py new file mode 100644 index 000000000..32902d149 --- /dev/null +++ b/models/image_recognition/tensorflow/densenet169/inference/fp32/cnn_util.py @@ -0,0 +1,50 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Utilities for CNN benchmarks.""" + +import tensorflow as tf + + +def tensorflow_version_tuple(): + v = tf.__version__ + major, minor, patch = v.split('.') + return (int(major), int(minor), patch) + + +def tensorflow_version(): + vt = tensorflow_version_tuple() + return vt[0] * 1000 + vt[1] + diff --git a/models/image_recognition/tensorflow/densenet169/inference/fp32/dataset.py b/models/image_recognition/tensorflow/densenet169/inference/fp32/dataset.py new file mode 100644 index 000000000..88fdebce6 --- /dev/null +++ b/models/image_recognition/tensorflow/densenet169/inference/fp32/dataset.py @@ -0,0 +1,103 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Benchmark dataset utilities. 
+""" + +from abc import abstractmethod +import os + +import tensorflow as tf + + +class Dataset(object): + """Abstract class for cnn benchmarks dataset.""" + + def __init__(self, name, data_dir=None): + self.name = name + if data_dir is None: + raise ValueError('Data directory not specified') + self.data_dir = data_dir + + def tf_record_pattern(self, subset): + return os.path.join(self.data_dir, '%s-*-of-*' % subset) + + def reader(self): + return tf.TFRecordReader() + + @abstractmethod + def num_classes(self): + pass + + @abstractmethod + def num_examples_per_epoch(self, subset): + pass + + def __str__(self): + return self.name + + +class FlowersData(Dataset): + + def __init__(self, data_dir=None): + super(FlowersData, self).__init__('Flowers', data_dir) + + def num_classes(self): + return 5 + + def num_examples_per_epoch(self, subset): + if subset == 'train': + return 3170 + elif subset == 'validation': + return 500 + else: + raise ValueError('Invalid data subset "%s"' % subset) + + +class ImagenetData(Dataset): + + def __init__(self, data_dir=None): + super(ImagenetData, self).__init__('ImageNet', data_dir) + + def num_classes(self): + return 1000 + + def num_examples_per_epoch(self, subset='train'): + if subset == 'train': + return 1281167 + elif subset == 'validation': + return 50000 + else: + raise ValueError('Invalid data subset "%s"' % subset) diff --git a/models/image_recognition/tensorflow/densenet169/inference/fp32/densenet_preprocessing.py b/models/image_recognition/tensorflow/densenet169/inference/fp32/densenet_preprocessing.py new file mode 100644 index 000000000..298694af0 --- /dev/null +++ b/models/image_recognition/tensorflow/densenet169/inference/fp32/densenet_preprocessing.py @@ -0,0 +1,391 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +# Copyright 2016 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== +"""Provides utilities to preprocess images. 
+ +The preprocessing steps for VGG were introduced in the following technical +report: + + Very Deep Convolutional Networks For Large-Scale Image Recognition + Karen Simonyan and Andrew Zisserman + arXiv technical report, 2015 + PDF: http://arxiv.org/pdf/1409.1556.pdf + ILSVRC 2014 Slides: http://www.robots.ox.ac.uk/~karen/pdf/ILSVRC_2014.pdf + CC-BY-4.0 + +More information can be obtained from the VGG website: +www.robots.ox.ac.uk/~vgg/research/very_deep/ +""" + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf + +slim = tf.contrib.slim + +_R_MEAN = 123.68 +_G_MEAN = 116.78 +_B_MEAN = 103.94 + +_SCALE_FACTOR = 0.017 + +_RESIZE_SIDE_MIN = 256 +_RESIZE_SIDE_MAX = 512 + + +def _crop(image, offset_height, offset_width, crop_height, crop_width): + """Crops the given image using the provided offsets and sizes. + + Note that the method doesn't assume we know the input image size but it does + assume we know the input image rank. + + Args: + image: an image of shape [height, width, channels]. + offset_height: a scalar tensor indicating the height offset. + offset_width: a scalar tensor indicating the width offset. + crop_height: the height of the cropped image. + crop_width: the width of the cropped image. + + Returns: + the cropped (and resized) image. + + Raises: + InvalidArgumentError: if the rank is not 3 or if the image dimensions are + less than the crop size. + """ + original_shape = tf.shape(image) + + rank_assertion = tf.Assert( + tf.equal(tf.rank(image), 3), + ['Rank of image must be equal to 3.']) + with tf.control_dependencies([rank_assertion]): + cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]]) + + size_assertion = tf.Assert( + tf.logical_and( + tf.greater_equal(original_shape[0], crop_height), + tf.greater_equal(original_shape[1], crop_width)), + ['Crop size greater than the image size.']) + + offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0])) + + # Use tf.slice instead of crop_to_bounding box as it accepts tensors to + # define the crop size. + with tf.control_dependencies([size_assertion]): + image = tf.slice(image, offsets, cropped_shape) + return tf.reshape(image, cropped_shape) + + +def _random_crop(image_list, crop_height, crop_width): + """Crops the given list of images. + + The function applies the same crop to each image in the list. This can be + effectively applied when there are multiple image inputs of the same + dimension such as: + + image, depths, normals = _random_crop([image, depths, normals], 120, 150) + + Args: + image_list: a list of image tensors of the same dimension but possibly + varying channel. + crop_height: the new height. + crop_width: the new width. + + Returns: + the image_list with cropped images. + + Raises: + ValueError: if there are multiple image inputs provided with different size + or the images are smaller than the crop dimensions. + """ + if not image_list: + raise ValueError('Empty image_list.') + + # Compute the rank assertions. 
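+  # Each image must be a rank-3 tensor (height, width, channels); the assertions
+  # built below are enforced via control dependencies before any shape is read.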
+ rank_assertions = [] + for i in range(len(image_list)): + image_rank = tf.rank(image_list[i]) + rank_assert = tf.Assert( + tf.equal(image_rank, 3), + ['Wrong rank for tensor %s [expected] [actual]', + image_list[i].name, 3, image_rank]) + rank_assertions.append(rank_assert) + + with tf.control_dependencies([rank_assertions[0]]): + image_shape = tf.shape(image_list[0]) + image_height = image_shape[0] + image_width = image_shape[1] + crop_size_assert = tf.Assert( + tf.logical_and( + tf.greater_equal(image_height, crop_height), + tf.greater_equal(image_width, crop_width)), + ['Crop size greater than the image size.']) + + asserts = [rank_assertions[0], crop_size_assert] + + for i in range(1, len(image_list)): + image = image_list[i] + asserts.append(rank_assertions[i]) + with tf.control_dependencies([rank_assertions[i]]): + shape = tf.shape(image) + height = shape[0] + width = shape[1] + + height_assert = tf.Assert( + tf.equal(height, image_height), + ['Wrong height for tensor %s [expected][actual]', + image.name, height, image_height]) + width_assert = tf.Assert( + tf.equal(width, image_width), + ['Wrong width for tensor %s [expected][actual]', + image.name, width, image_width]) + asserts.extend([height_assert, width_assert]) + + # Create a random bounding box. + # + # Use tf.random_uniform and not numpy.random.rand as doing the former would + # generate random numbers at graph eval time, unlike the latter which + # generates random numbers at graph definition time. + with tf.control_dependencies(asserts): + max_offset_height = tf.reshape(image_height - crop_height + 1, []) + with tf.control_dependencies(asserts): + max_offset_width = tf.reshape(image_width - crop_width + 1, []) + offset_height = tf.random_uniform( + [], maxval=max_offset_height, dtype=tf.int32) + offset_width = tf.random_uniform( + [], maxval=max_offset_width, dtype=tf.int32) + + return [_crop(image, offset_height, offset_width, + crop_height, crop_width) for image in image_list] + + +def _central_crop(image_list, crop_height, crop_width): + """Performs central crops of the given image list. + + Args: + image_list: a list of image tensors of the same dimension but possibly + varying channel. + crop_height: the height of the image following the crop. + crop_width: the width of the image following the crop. + + Returns: + the list of cropped images. + """ + outputs = [] + for image in image_list: + image_height = tf.shape(image)[0] + image_width = tf.shape(image)[1] + + offset_height = (image_height - crop_height) / 2 + offset_width = (image_width - crop_width) / 2 + + outputs.append(_crop(image, offset_height, offset_width, + crop_height, crop_width)) + return outputs + + +def _mean_image_subtraction(image, means): + """Subtracts the given means from each image channel. + + For example: + means = [123.68, 116.779, 103.939] + image = _mean_image_subtraction(image, means) + + Note that the rank of `image` must be known. + + Args: + image: a tensor of size [height, width, C]. + means: a C-vector of values to subtract from each channel. + + Returns: + the centered image. + + Raises: + ValueError: If the rank of `image` is unknown, if `image` has a rank other + than three or if the number of channels in `image` doesn't match the + number of values in `means`. 
+ """ + if image.get_shape().ndims != 3: + raise ValueError('Input must be of size [height, width, C>0]') + num_channels = image.get_shape().as_list()[-1] + if len(means) != num_channels: + raise ValueError('len(means) must match the number of channels') + + channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image) + for i in range(num_channels): + channels[i] -= means[i] + return tf.concat(axis=2, values=channels) + + +def _smallest_size_at_least(height, width, smallest_side): + """Computes new shape with the smallest side equal to `smallest_side`. + + Computes new shape with the smallest side equal to `smallest_side` while + preserving the original aspect ratio. + + Args: + height: an int32 scalar tensor indicating the current height. + width: an int32 scalar tensor indicating the current width. + smallest_side: A python integer or scalar `Tensor` indicating the size of + the smallest side after resize. + + Returns: + new_height: an int32 scalar tensor indicating the new height. + new_width: and int32 scalar tensor indicating the new width. + """ + smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32) + + height = tf.to_float(height) + width = tf.to_float(width) + smallest_side = tf.to_float(smallest_side) + + scale = tf.cond(tf.greater(height, width), + lambda: smallest_side / width, + lambda: smallest_side / height) + new_height = tf.to_int32(height * scale) + new_width = tf.to_int32(width * scale) + return new_height, new_width + + +def _aspect_preserving_resize(image, smallest_side): + """Resize images preserving the original aspect ratio. + + Args: + image: A 3-D image `Tensor`. + smallest_side: A python integer or scalar `Tensor` indicating the size of + the smallest side after resize. + + Returns: + resized_image: A 3-D tensor containing the resized image. + """ + smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32) + + shape = tf.shape(image) + height = shape[0] + width = shape[1] + new_height, new_width = _smallest_size_at_least(height, width, smallest_side) + image = tf.expand_dims(image, 0) + resized_image = tf.image.resize_bilinear(image, [new_height, new_width], + align_corners=False) + resized_image = tf.squeeze(resized_image) + resized_image.set_shape([None, None, 3]) + return resized_image + + +def preprocess_for_train(image, + output_height, + output_width, + resize_side_min=_RESIZE_SIDE_MIN, + resize_side_max=_RESIZE_SIDE_MAX): + """Preprocesses the given image for training. + + Note that the actual resizing scale is sampled from + [`resize_size_min`, `resize_size_max`]. + + Args: + image: A `Tensor` representing an image of arbitrary size. + output_height: The height of the image after preprocessing. + output_width: The width of the image after preprocessing. + resize_side_min: The lower bound for the smallest side of the image for + aspect-preserving resizing. + resize_side_max: The upper bound for the smallest side of the image for + aspect-preserving resizing. + + Returns: + A preprocessed image. 
+ """ + resize_side = tf.random_uniform( + [], minval=resize_side_min, maxval=resize_side_max+1, dtype=tf.int32) + + image = _aspect_preserving_resize(image, resize_side) + image = _random_crop([image], output_height, output_width)[0] + image.set_shape([output_height, output_width, 3]) + image = tf.to_float(image) + image = tf.image.random_flip_left_right(image) + + image = _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN]) + return image * _SCALE_FACTOR + + +def preprocess_for_eval(image, output_height, output_width, resize_side): + """Preprocesses the given image for evaluation. + + Args: + image: A `Tensor` representing an image of arbitrary size. + output_height: The height of the image after preprocessing. + output_width: The width of the image after preprocessing. + resize_side: The smallest side of the image for aspect-preserving resizing. + + Returns: + A preprocessed image. + """ + image = _aspect_preserving_resize(image, resize_side) + image = _central_crop([image], output_height, output_width)[0] + image.set_shape([output_height, output_width, 3]) + image = tf.to_float(image) + + image = _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN]) + return image * _SCALE_FACTOR + + +def preprocess_image(image, output_height, output_width, is_training=False, + resize_side_min=_RESIZE_SIDE_MIN, + resize_side_max=_RESIZE_SIDE_MAX): + """Preprocesses the given image. + + Args: + image: A `Tensor` representing an image of arbitrary size. + output_height: The height of the image after preprocessing. + output_width: The width of the image after preprocessing. + is_training: `True` if we're preprocessing the image for training and + `False` otherwise. + resize_side_min: The lower bound for the smallest side of the image for + aspect-preserving resizing. If `is_training` is `False`, then this value + is used for rescaling. + resize_side_max: The upper bound for the smallest side of the image for + aspect-preserving resizing. If `is_training` is `False`, this value is + ignored. Otherwise, the resize side is sampled from + [resize_size_min, resize_size_max]. + + Returns: + A preprocessed image. + """ + if is_training: + return preprocess_for_train(image, output_height, output_width, + resize_side_min, resize_side_max) + else: + return preprocess_for_eval(image, output_height, output_width, + resize_side_min) diff --git a/models/image_recognition/tensorflow/densenet169/inference/fp32/image_preprocessing.py b/models/image_recognition/tensorflow/densenet169/inference/fp32/image_preprocessing.py new file mode 100644 index 000000000..fe5d0eee0 --- /dev/null +++ b/models/image_recognition/tensorflow/densenet169/inference/fp32/image_preprocessing.py @@ -0,0 +1,420 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Image pre-processing utilities. +""" +from six.moves import xrange # pylint: disable=redefined-builtin +import tensorflow as tf +from random import randint +import densenet_preprocessing +from tensorflow.python.ops import data_flow_ops +import cnn_util + +def parse_example_proto(example_serialized): + """Parses an Example proto containing a training example of an image. + + The output of the build_image_data.py image preprocessing script is a dataset + containing serialized Example protocol buffers. Each Example proto contains + the following fields: + + image/height: 462 + image/width: 581 + image/colorspace: 'RGB' + image/channels: 3 + image/class/label: 615 + image/class/synset: 'n03623198' + image/class/text: 'knee pad' + image/object/bbox/xmin: 0.1 + image/object/bbox/xmax: 0.9 + image/object/bbox/ymin: 0.2 + image/object/bbox/ymax: 0.6 + image/object/bbox/label: 615 + image/format: 'JPEG' + image/filename: 'ILSVRC2012_val_00041207.JPEG' + image/encoded: + + Args: + example_serialized: scalar Tensor tf.string containing a serialized + Example protocol buffer. + + Returns: + image_buffer: Tensor tf.string containing the contents of a JPEG file. + label: Tensor tf.int32 containing the label. + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged as + [ymin, xmin, ymax, xmax]. + text: Tensor tf.string containing the human-readable label. + """ + # Dense features in Example proto. + feature_map = { + 'image/encoded': tf.FixedLenFeature([], dtype=tf.string, + default_value=''), + 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, + default_value=-1), + 'image/class/text': tf.FixedLenFeature([], dtype=tf.string, + default_value=''), + } + sparse_float32 = tf.VarLenFeature(dtype=tf.float32) + # Sparse features in Example proto. + feature_map.update( + {k: sparse_float32 for k in ['image/object/bbox/xmin', + 'image/object/bbox/ymin', + 'image/object/bbox/xmax', + 'image/object/bbox/ymax']}) + + features = tf.parse_single_example(example_serialized, feature_map) + label = tf.cast(features['image/class/label'], dtype=tf.int32) + + xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0) + ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0) + xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0) + ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0) + + # Note that we impose an ordering of (y, x) just to make life difficult. + bbox = tf.concat([ymin, xmin, ymax, xmax], 0) + + # Force the variable number of bounding boxes into the shape + # [1, num_boxes, coords]. 
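+  # expand_dims adds a leading batch dimension, giving [1, coords, num_boxes];
+  # the transpose that follows reorders it to [1, num_boxes, coords].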
+ bbox = tf.expand_dims(bbox, 0) + bbox = tf.transpose(bbox, [0, 2, 1]) + + return features['image/encoded'], label, bbox, features['image/class/text'] + + +def decode_jpeg(image_buffer, scope=None): # , dtype=tf.float32): + """Decode a JPEG string into one 3-D float image Tensor. + + Args: + image_buffer: scalar string Tensor. + scope: Optional scope for op_scope. + Returns: + 3-D float Tensor with values ranging from [0, 1). + """ + # with tf.op_scope([image_buffer], scope, 'decode_jpeg'): + # with tf.name_scope(scope, 'decode_jpeg', [image_buffer]): + with tf.name_scope(scope or 'decode_jpeg'): + # Decode the string as an RGB JPEG. + # Note that the resulting image contains an unknown height and width + # that is set dynamically by decode_jpeg. In other words, the height + # and width of image is unknown at compile-time. + image = tf.image.decode_jpeg(image_buffer, channels=3, + fancy_upscaling=False, + dct_method='INTEGER_FAST') + + # image = tf.Print(image, [tf.shape(image)], 'Image shape: ') + + return image + + +def eval_image(image, height, width, bbox, thread_id, resize): + """Get the image for model evaluation.""" + with tf.name_scope('eval_image'): + if not thread_id: + tf.summary.image( + 'original_image', tf.expand_dims(image, 0)) + + if resize == 'crop': + # Note: This is much slower than crop_to_bounding_box + # It seems that the redundant pad step has huge overhead + # distorted_image = tf.image.resize_image_with_crop_or_pad(image, + # height, width) + shape = tf.shape(image) + image = tf.cond(tf.less(shape[0], shape[1]), + lambda: tf.image.resize_images(image, tf.convert_to_tensor([256, 256*shape[1]/shape[0]], dtype=tf.int32)), + lambda: tf.image.resize_images(image, tf.convert_to_tensor([256*shape[0]/shape[1], 256], dtype=tf.int32))) + shape = tf.shape(image) + + y0 = (shape[0] - height) // 2 + x0 = (shape[1] - width) // 2 + #y0=tf.random_uniform([],minval=0,maxval=(shape[0] - height + 1), dtype=tf.int32) + #x0=tf.random_uniform([],minval=0,maxval=(shape[1] - width + 1), dtype=tf.int32) + ## distorted_image = tf.slice(image, [y0,x0,0], [height,width,3]) + distorted_image = tf.image.crop_to_bounding_box(image, y0, x0, height, + width) + else: + sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( + tf.shape(image), + bounding_boxes=bbox, + min_object_covered=0.5, + aspect_ratio_range=[0.90, 1.10], + area_range=[0.10, 1.0], + max_attempts=100, + use_image_if_no_bounding_boxes=True) + bbox_begin, bbox_size, _ = sample_distorted_bounding_box + # Crop the image to the specified bounding box. + distorted_image = tf.slice(image, bbox_begin, bbox_size) + resize_method = { + 'nearest': tf.image.ResizeMethod.NEAREST_NEIGHBOR, + 'bilinear': tf.image.ResizeMethod.BILINEAR, + 'bicubic': tf.image.ResizeMethod.BICUBIC, + 'area': tf.image.ResizeMethod.AREA + }[resize] + # This resizing operation may distort the images because the aspect + # ratio is not respected. + if cnn_util.tensorflow_version() >= 11: + distorted_image = tf.image.resize_images( + distorted_image, [height, width], + resize_method, + align_corners=False) + else: + distorted_image = tf.image.resize_images( + distorted_image, height, width, resize_method, align_corners=False) + distorted_image.set_shape([height, width, 3]) + if not thread_id: + tf.summary.image( + 'cropped_resized_image', tf.expand_dims(distorted_image, 0)) + image = distorted_image + return image + + +def distort_image(image, height, width, bbox, thread_id=0, scope=None): + """Distort one image for training a network. 
+ + Distorting images provides a useful technique for augmenting the data + set during training in order to make the network invariant to aspects + of the image that do not effect the label. + + Args: + image: 3-D float Tensor of image + height: integer + width: integer + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged + as [ymin, xmin, ymax, xmax]. + thread_id: integer indicating the preprocessing thread. + scope: Optional scope for op_scope. + Returns: + 3-D float Tensor of distorted image used for training. + """ + # with tf.op_scope([image, height, width, bbox], scope, 'distort_image'): + # with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]): + with tf.name_scope(scope or 'distort_image'): + # Each bounding box has shape [1, num_boxes, box coords] and + # the coordinates are ordered [ymin, xmin, ymax, xmax]. + + # After this point, all image pixels reside in [0,1) + # until the very end, when they're rescaled to (-1, 1). The various + # adjust_* ops all require this range for dtype float. + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + + # Display the bounding box in the first thread only. + if not thread_id: + image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), + bbox) + tf.summary.image( + 'image_with_bounding_boxes', image_with_box) + + # A large fraction of image datasets contain a human-annotated bounding + # box delineating the region of the image containing the object of interest. + # We choose to create a new bounding box for the object which is a randomly + # distorted version of the human-annotated bounding box that obeys an allowed + # range of aspect ratios, sizes and overlap with the human-annotated + # bounding box. If no box is supplied, then we assume the bounding box is + # the entire image. + sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( + tf.shape(image), + bounding_boxes=bbox, + min_object_covered=0.1, + aspect_ratio_range=[0.99, 1.01], + area_range=[0.05, 1.0], + max_attempts=100, + use_image_if_no_bounding_boxes=True) + bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box + if not thread_id: + image_with_distorted_box = tf.image.draw_bounding_boxes( + tf.expand_dims(image, 0), distort_bbox) + tf.summary.image( + 'images_with_distorted_bounding_box', + image_with_distorted_box) + + # Crop the image to the specified bounding box. + distorted_image = tf.slice(image, bbox_begin, bbox_size) + + # This resizing operation may distort the images because the aspect + # ratio is not respected. We select a resize method in a round robin + # fashion based on the thread number. + # Note that ResizeMethod contains 4 enumerated resizing methods. + resize_method = thread_id % 4 + if cnn_util.tensorflow_version() >= 11: + distorted_image = tf.image.resize_images( + distorted_image, [height, width], resize_method, align_corners=False) + else: + distorted_image = tf.image.resize_images( + distorted_image, height, width, resize_method, align_corners=False) + # Restore the shape since the dynamic slice based upon the bbox_size loses + # the third dimension. + distorted_image.set_shape([height, width, 3]) + if not thread_id: + tf.summary.image( + 'cropped_resized_image', + tf.expand_dims(distorted_image, 0)) + + # Randomly flip the image horizontally. + distorted_image = tf.image.random_flip_left_right(distorted_image) + + # Randomly distort the colors. 
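+    # distort_color (defined below) adjusts brightness, saturation, hue and
+    # contrast; the order of those non-commutative ops depends on thread_id.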
+ distorted_image = distort_color(distorted_image, thread_id) + + # Note: This ensures the scaling matches the output of eval_image + distorted_image *= 256 + + if not thread_id: + tf.summary.image( + 'final_distorted_image', + tf.expand_dims(distorted_image, 0)) + return distorted_image + + +def distort_color(image, thread_id=0, scope=None): + """Distort the color of the image. + + Each color distortion is non-commutative and thus ordering of the color ops + matters. Ideally we would randomly permute the ordering of the color ops. + Rather then adding that level of complication, we select a distinct ordering + of color ops for each preprocessing thread. + + Args: + image: Tensor containing single image. + thread_id: preprocessing thread ID. + scope: Optional scope for op_scope. + Returns: + color-distorted image + """ + # with tf.op_scope([image], scope, 'distort_color'): + # with tf.name_scope(scope, 'distort_color', [image]): + with tf.name_scope(scope or 'distort_color'): + color_ordering = thread_id % 2 + + if color_ordering == 0: + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + elif color_ordering == 1: + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + + # The random_* ops do not necessarily clamp. + image = tf.clip_by_value(image, 0.0, 1.0) + return image + + +class ImagePreprocessor(object): + """Preprocessor for input images.""" + + def __init__(self, + height, + width, + batch_size, + device_count, + dtype=tf.float32, + train=True, + distortions=None, + resize_method=None): + self.height = height + self.width = width + self.batch_size = batch_size + self.device_count = device_count + self.dtype = dtype + self.train = train + self.resize_method = resize_method + if distortions is None: + distortions = False + self.distortions = distortions + if self.batch_size % self.device_count != 0: + raise ValueError( + ('batch_size must be a multiple of device_count: ' + 'batch_size %d, device_count: %d') % + (self.batch_size, self.device_count)) + self.batch_size_per_device = self.batch_size // self.device_count + + def preprocess(self, image_buffer, bbox, thread_id): + """Preprocessing image_buffer using thread_id.""" + # Note: Width and height of image is known only at runtime. 
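+    # The JPEG buffer is decoded here; the non-distorted path below hands the
+    # image to densenet_preprocessing.preprocess_image, which performs an
+    # aspect-preserving resize, a 224x224 central crop, mean subtraction and
+    # scaling.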
+ image = tf.image.decode_jpeg(image_buffer, channels=3, + dct_method='INTEGER_FAST') + if self.train and self.distortions: + image = distort_image(image, self.height, self.width, bbox, thread_id) + else: + #image = eval_image(image, self.height, self.width, bbox, thread_id, + # self.resize_method) + image = densenet_preprocessing.preprocess_image(image,224,224,False) + # Note: image is now float32 [height,width,3] with range [0, 255] + + # image = tf.cast(image, tf.uint8) # HACK TESTING + + return image + + def minibatch(self, dataset, subset): + with tf.name_scope('batch_processing'): + images = [[] for i in range(self.device_count)] + labels = [[] for i in range(self.device_count)] + record_input = data_flow_ops.RecordInput( + file_pattern=dataset.tf_record_pattern(subset), + seed=randint(0, 9000), + parallelism=64, + buffer_size=10000, + batch_size=self.batch_size, + name='record_input') + records = record_input.get_yield_op() + records = tf.split(records, self.batch_size, 0) + records = [tf.reshape(record, []) for record in records] + for i in xrange(self.batch_size): + value = records[i] + image_buffer, label_index, bbox, _ = parse_example_proto(value) + image = self.preprocess(image_buffer, bbox, i % 4) + + device_index = i % self.device_count + images[device_index].append(image) + labels[device_index].append(label_index) + label_index_batch = [None] * self.device_count + for device_index in xrange(self.device_count): + images[device_index] = tf.parallel_stack(images[device_index]) + label_index_batch[device_index] = tf.concat(labels[device_index], 0) + + # dynamic_pad=True) # HACK TESTING dynamic_pad=True + images[device_index] = tf.cast(images[device_index], self.dtype) + depth = 3 + images[device_index] = tf.reshape( + images[device_index], + shape=[self.batch_size_per_device, self.height, self.width, depth]) + label_index_batch[device_index] = tf.reshape( + label_index_batch[device_index], [self.batch_size_per_device]) + # Display the training images in the visualizer. + # tf.summary.image('images', images) + + return images, label_index_batch diff --git a/models/image_recognition/tensorflow/inception_resnet_v2/eval_image_classifier.py b/models/image_recognition/tensorflow/inception_resnet_v2/eval_image_classifier.py deleted file mode 100644 index 361836891..000000000 --- a/models/image_recognition/tensorflow/inception_resnet_v2/eval_image_classifier.py +++ /dev/null @@ -1,277 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -# Copyright (c) 2019 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: EPL-2.0 -# - - -# Copyright 2016 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. 
-# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Generic evaluation script that evaluates a model using a given dataset.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import math -import tensorflow as tf -import os -import time -from datetime import datetime - -import dataset_factory -import nets_factory -import preprocessing_factory - -slim = tf.contrib.slim - -tf.app.flags.DEFINE_integer( - 'batch_size', 100, 'The number of samples in each batch.') - -tf.app.flags.DEFINE_integer( - 'max_num_batches', 1, - 'Max number of batches to evaluate by default use all.') - -tf.app.flags.DEFINE_string( - 'master', '', 'The address of the TensorFlow master to use.') - -tf.app.flags.DEFINE_string( - 'checkpoint_path', '/tmp/tfmodel/', - 'The directory where the model was written to or an absolute path to a ' - 'checkpoint file.') - -tf.app.flags.DEFINE_string( - 'eval_dir', '/tmp/tfmodel/', 'Directory where the results are saved to.') - -tf.app.flags.DEFINE_integer( - 'num_preprocessing_threads', 4, - 'The number of threads used to create the batches.') - -tf.app.flags.DEFINE_string( - 'dataset_name', 'imagenet', 'The name of the dataset to load.') - -tf.app.flags.DEFINE_string( - 'dataset_split_name', 'test', 'The name of the train/test split.') - -tf.app.flags.DEFINE_string( - 'dataset_dir', None, 'The directory where the dataset files are stored.') - -tf.app.flags.DEFINE_integer( - 'labels_offset', 0, - 'An offset for the labels in the dataset. This flag is primarily used to ' - 'evaluate the VGG and ResNet architectures which do not use a background ' - 'class for the ImageNet dataset.') - -tf.app.flags.DEFINE_string( - 'model_name', 'inception_resnet_v2', - 'The name of the architecture to evaluate.') - -tf.app.flags.DEFINE_string( - 'preprocessing_name', None, - 'The name of the preprocessing to use. If left ' - 'as `None`, then the model_name flag is used.') - -tf.app.flags.DEFINE_float( - 'moving_average_decay', None, - 'The decay to use for the moving average.' 
- 'If left as None, then moving averages are not used.') - -tf.app.flags.DEFINE_integer( - 'eval_image_size', None, 'Eval image size') - -tf.app.flags.DEFINE_integer( - 'eval_log_frequency', 10, - 'Number of eval steps to run between displaying ' - 'eval metrics.') - -tf.app.flags.DEFINE_integer( - 'inter_op_parallelism_threads', 1, 'The number of inter-thread.') - -tf.app.flags.DEFINE_integer( - 'intra_op_parallelism_threads', 28, 'The number of intra-thread.') - - -FLAGS = tf.app.flags.FLAGS - -class _LoggerHook(tf.train.SessionRunHook): - """ Logs loss and runtime.""" - - def begin(self): - self._step = -1 - self._displayed_steps = 0 - self._total_images_per_sec = 0 - - def before_run(self, run_context): - self._step += 1 - self._start_time = time.time() - - def after_run(self, run_context, run_values): - duration = time.time() - self._start_time - if (self._step + 1) % FLAGS.eval_log_frequency == 0: - images_per_sec = FLAGS.batch_size / duration - self._displayed_steps += 1 - self._total_images_per_sec += images_per_sec - - format_str = ('%s: step %d, %.1f images/sec') - print ( - format_str % (datetime.now(), (self._step+1), images_per_sec)) - - def end(self, run_context): - print( - 'self._total_images_per_sec = %.1f' % self._total_images_per_sec) - print('self._displayed_steps = %d' % self._displayed_steps) - images_per_sec = self._total_images_per_sec / self._displayed_steps - print('Total images/sec = %.1f' %(images_per_sec)) - if FLAGS.batch_size == 1: - latency = 1000 / images_per_sec - print('Latency ms/step = %.1f' % (latency)) - -def main(_): - if not FLAGS.dataset_dir: - raise ValueError( - 'You must supply the dataset directory with --dataset_dir') - - tf.logging.set_verbosity(tf.logging.INFO) - #os.environ["OMP_NUM_THREADS"] = "54" - with tf.Graph().as_default(): - tf_global_step = slim.get_or_create_global_step() - - ###################### - # Select the dataset # - ###################### - dataset = dataset_factory.get_dataset( - FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir) - - #################### - # Select the model # - #################### - network_fn = nets_factory.get_network_fn( - FLAGS.model_name, - num_classes=(dataset.num_classes - FLAGS.labels_offset), - is_training=False) - - ############################################################## - # Create a dataset provider that loads data from the dataset # - ############################################################## - provider = slim.dataset_data_provider.DatasetDataProvider( - dataset, - shuffle=False, - common_queue_capacity=2 * FLAGS.batch_size, - common_queue_min=FLAGS.batch_size) - [image, label] = provider.get(['image', 'label']) - label -= FLAGS.labels_offset - - ##################################### - # Select the preprocessing function # - ##################################### - preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name - image_preprocessing_fn = preprocessing_factory.get_preprocessing( - preprocessing_name, - is_training=False) - - eval_image_size = \ - FLAGS.eval_image_size or network_fn.default_image_size - - image = image_preprocessing_fn( - image, eval_image_size, eval_image_size) - - images, labels = tf.train.batch( - [image, label], - batch_size=FLAGS.batch_size, - num_threads=FLAGS.num_preprocessing_threads, - capacity=5 * FLAGS.batch_size) - - #################### - # Define the model # - #################### - logits, _ = network_fn(images) - - if FLAGS.moving_average_decay: - variable_averages = tf.train.ExponentialMovingAverage( - 
FLAGS.moving_average_decay, tf_global_step) - variables_to_restore = variable_averages.variables_to_restore( - slim.get_model_variables()) - variables_to_restore[tf_global_step.op.name] = tf_global_step - else: - variables_to_restore = slim.get_variables_to_restore() - - predictions = tf.argmax(logits, 1) - #labels = tf.squeeze(labels) - - # Define the metrics: - names_to_values, names_to_updates = \ - slim.metrics.aggregate_metric_map({ - 'Accuracy': slim.metrics.streaming_accuracy( - predictions, labels), - 'Recall_5': slim.metrics.streaming_recall_at_k( - logits, labels, 5), - }) - - # Print the summaries to screen. - for name, value in names_to_values.items(): - summary_name = 'eval/%s' % name - op = tf.summary.scalar(summary_name, value, collections=[]) - op = tf.Print(op, [value], summary_name) - tf.add_to_collection(tf.GraphKeys.SUMMARIES, op) - - # TODO(sguada) use num_epochs=1 - if FLAGS.max_num_batches: - num_batches = FLAGS.max_num_batches - else: - # This ensures that we make a single pass over all of the data. - num_batches = math.ceil( - dataset.num_samples / float(FLAGS.batch_size)) - - num_batches = 100 - - config = tf.ConfigProto( - inter_op_parallelism_threads=FLAGS.inter_op_parallelism_threads, - intra_op_parallelism_threads=FLAGS.intra_op_parallelism_threads) - - if tf.gfile.IsDirectory(FLAGS.checkpoint_path): - checkpoint_path = tf.train.latest_checkpoint( - FLAGS.checkpoint_path) - else: - checkpoint_path = FLAGS.checkpoint_path - - tf.logging.info('Evaluating %s' % checkpoint_path) - - slim.evaluation.evaluate_once( - master=FLAGS.master, - checkpoint_path=checkpoint_path, - logdir=FLAGS.eval_dir, - num_evals=num_batches, - eval_op=list(names_to_updates.values()), - variables_to_restore=variables_to_restore, - hooks=[_LoggerHook()], - session_config=config) - - -if __name__ == '__main__': - tf.app.run() diff --git a/models/image_recognition/tensorflow/inception_resnet_v2/eval_image_classifier_accuracy.py b/models/image_recognition/tensorflow/inception_resnet_v2/eval_image_classifier_accuracy.py index 5671f2287..595b252a4 100644 --- a/models/image_recognition/tensorflow/inception_resnet_v2/eval_image_classifier_accuracy.py +++ b/models/image_recognition/tensorflow/inception_resnet_v2/eval_image_classifier_accuracy.py @@ -147,9 +147,11 @@ def load_graph(model_file): np_images, np_labels = sess.run([images[0], labels[0]]) num_processed_images += batch_size num_remaining_images -= batch_size + start_time = time.time() # Compute inference on the preprocessed data predictions = sess_graph.run(output_tensor, {input_tensor: np_images}) + elapsed_time = time.time() - start_time accuracy1 = tf.reduce_sum( tf.cast(tf.nn.in_top_k(tf.constant(predictions), tf.constant(np_labels), 1), tf.float32)) @@ -160,6 +162,7 @@ def load_graph(model_file): np_accuracy1, np_accuracy5 = sess.run([accuracy1, accuracy5]) total_accuracy1 += np_accuracy1 total_accuracy5 += np_accuracy5 + print("Iteration time: %0.4f ms" % elapsed_time) print("Processed %d images. 
(Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \ % (num_processed_images, total_accuracy1 / num_processed_images, total_accuracy5 / num_processed_images)) diff --git a/models/image_recognition/tensorflow/inceptionv3/fp32/eval_image_classifier_inference.py b/models/image_recognition/tensorflow/inceptionv3/fp32/eval_image_classifier_inference.py index 98b826ac9..b84d28ae3 100644 --- a/models/image_recognition/tensorflow/inceptionv3/fp32/eval_image_classifier_inference.py +++ b/models/image_recognition/tensorflow/inceptionv3/fp32/eval_image_classifier_inference.py @@ -189,9 +189,11 @@ def run(self): num_processed_images += self.args.batch_size num_remaining_images -= self.args.batch_size + start_time = time.time() # Compute inference on the preprocessed data predictions = infer_sess.run(output_tensor, {input_tensor: np_images}) + elapsed_time = time.time() - start_time with tf.Graph().as_default() as accu_graph: accuracy1 = tf.reduce_sum( @@ -207,6 +209,7 @@ def run(self): total_accuracy1 += np_accuracy1 total_accuracy5 += np_accuracy5 + print("Iteration time: %0.4f ms" % elapsed_time) print("Processed %d images. (Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \ % (num_processed_images, total_accuracy1 / num_processed_images, total_accuracy5 / num_processed_images)) diff --git a/models/image_recognition/tensorflow/inceptionv3/int8/accuracy.py b/models/image_recognition/tensorflow/inceptionv3/int8/accuracy.py index 7d79593aa..8062bd6be 100644 --- a/models/image_recognition/tensorflow/inceptionv3/int8/accuracy.py +++ b/models/image_recognition/tensorflow/inceptionv3/int8/accuracy.py @@ -120,9 +120,11 @@ def load_graph(model_file): np_images, np_labels = sess.run([images[0], labels[0]]) num_processed_images += batch_size num_remaining_images -= batch_size + start_time = time.time() # Compute inference on the preprocessed data predictions = sess_graph.run(output_tensor, {input_tensor: np_images}) + elapsed_time = time.time() - start_time accuracy1 = tf.reduce_sum( tf.cast(tf.nn.in_top_k(tf.constant(predictions), tf.constant(np_labels), 1), tf.float32)) @@ -133,6 +135,7 @@ def load_graph(model_file): np_accuracy1, np_accuracy5 = sess.run([accuracy1, accuracy5]) total_accuracy1 += np_accuracy1 total_accuracy5 += np_accuracy5 + print("Iteration time: %0.4f ms" % elapsed_time) print("Processed %d images. 
(Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \ % (num_processed_images, total_accuracy1/num_processed_images, total_accuracy5/num_processed_images)) diff --git a/models/image_recognition/tensorflow/inceptionv4/inference/accuracy.py b/models/image_recognition/tensorflow/inceptionv4/inference/accuracy.py index 3dc0b90f9..a3bdf7c58 100644 --- a/models/image_recognition/tensorflow/inceptionv4/inference/accuracy.py +++ b/models/image_recognition/tensorflow/inceptionv4/inference/accuracy.py @@ -144,9 +144,11 @@ def load_graph(model_file): np_images, np_labels = sess.run([images[0], labels[0]]) num_processed_images += batch_size num_remaining_images -= batch_size + start_time = time.time() # Compute inference on the preprocessed data predictions = sess_graph.run(output_tensor, {input_tensor: np_images}) + elapsed_time = time.time() - start_time accuracy1 = tf.reduce_sum( tf.cast(tf.nn.in_top_k(tf.constant(predictions), tf.constant(np_labels), 1), tf.float32)) @@ -157,6 +159,7 @@ def load_graph(model_file): np_accuracy1, np_accuracy5 = sess.run([accuracy1, accuracy5]) total_accuracy1 += np_accuracy1 total_accuracy5 += np_accuracy5 + print("Iteration time: %0.4f ms" % elapsed_time) print( "Processed %d images. (Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \ % ( diff --git a/models/image_recognition/tensorflow/mobilenet_v1/inference/fp32/accuracy.py b/models/image_recognition/tensorflow/mobilenet_v1/inference/fp32/accuracy.py index 7d6a37abc..f5d45fb9f 100644 --- a/models/image_recognition/tensorflow/mobilenet_v1/inference/fp32/accuracy.py +++ b/models/image_recognition/tensorflow/mobilenet_v1/inference/fp32/accuracy.py @@ -143,9 +143,11 @@ def load_graph(model_file): np_images, np_labels = sess.run([images[0], labels[0]]) num_processed_images += batch_size num_remaining_images -= batch_size + start_time = time.time() # Compute inference on the preprocessed data predictions = sess_graph.run(output_tensor, {input_tensor: np_images}) + elapsed_time = time.time() - start_time accuracy1 = tf.reduce_sum( tf.cast(tf.nn.in_top_k(tf.constant(predictions), tf.constant(np_labels), 1), tf.float32)) @@ -156,6 +158,7 @@ def load_graph(model_file): np_accuracy1, np_accuracy5 = sess.run([accuracy1, accuracy5]) total_accuracy1 += np_accuracy1 total_accuracy5 += np_accuracy5 + print("Iteration time: %0.4f ms" % elapsed_time) print( "Processed %d images. 
(Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \ % ( diff --git a/models/image_recognition/tensorflow/mobilenet_v1/inference/fp32/eval_image_classifier.py b/models/image_recognition/tensorflow/mobilenet_v1/inference/fp32/eval_image_classifier.py index fd3165387..974913258 100644 --- a/models/image_recognition/tensorflow/mobilenet_v1/inference/fp32/eval_image_classifier.py +++ b/models/image_recognition/tensorflow/mobilenet_v1/inference/fp32/eval_image_classifier.py @@ -146,56 +146,66 @@ def end(self, run_context): print('Latency ms/step = %.1f' % (latency)) def main(_): - if not FLAGS.dataset_dir: - raise ValueError('You must supply the dataset directory with --dataset_dir') - tf.logging.set_verbosity(tf.logging.INFO) - #os.environ["OMP_NUM_THREADS"] = "54" + with tf.Graph().as_default(): tf_global_step = slim.get_or_create_global_step() ###################### # Select the dataset # ###################### - dataset = dataset_factory.get_dataset( - FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir) + if FLAGS.dataset_dir: + print("Inference using real data") + dataset = dataset_factory.get_dataset( + FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir) + num_classes = dataset.num_classes - FLAGS.labels_offset + else: + print("Inference using synthetic data") + num_classes = 1000 #################### # Select the model # #################### network_fn = nets_factory.get_network_fn( FLAGS.model_name, - num_classes=(dataset.num_classes - FLAGS.labels_offset), - is_training=False) - - ############################################################## - # Create a dataset provider that loads data from the dataset # - ############################################################## - provider = slim.dataset_data_provider.DatasetDataProvider( - dataset, - shuffle=False, - common_queue_capacity=2 * FLAGS.batch_size, - common_queue_min=FLAGS.batch_size) - [image, label] = provider.get(['image', 'label']) - label -= FLAGS.labels_offset - - ##################################### - # Select the preprocessing function # - ##################################### - preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name - image_preprocessing_fn = preprocessing_factory.get_preprocessing( - preprocessing_name, + num_classes=num_classes, is_training=False) eval_image_size = FLAGS.eval_image_size or network_fn.default_image_size - image = image_preprocessing_fn(image, eval_image_size, eval_image_size) - - images, labels = tf.train.batch( - [image, label], - batch_size=FLAGS.batch_size, - num_threads=FLAGS.num_preprocessing_threads, - capacity=5 * FLAGS.batch_size) + if FLAGS.dataset_dir: + ############################################################## + # Create a dataset provider that loads data from the dataset # + ############################################################## + provider = slim.dataset_data_provider.DatasetDataProvider( + dataset, + shuffle=False, + common_queue_capacity=2 * FLAGS.batch_size, + common_queue_min=FLAGS.batch_size) + [image, label] = provider.get(['image', 'label']) + label -= FLAGS.labels_offset + + ##################################### + # Select the preprocessing function # + ##################################### + preprocessing_name = FLAGS.preprocessing_name or FLAGS.model_name + image_preprocessing_fn = preprocessing_factory.get_preprocessing( + preprocessing_name, + is_training=False) + + image = image_preprocessing_fn(image, eval_image_size, eval_image_size) + + images, labels = tf.train.batch( + [image, label], + 
batch_size=FLAGS.batch_size, + num_threads=FLAGS.num_preprocessing_threads, + capacity=5 * FLAGS.batch_size) + else: + # Generate random images and labels with constant 0 when no dataset is used + input_shape = [FLAGS.batch_size, eval_image_size, eval_image_size, 3] + label_shape = [FLAGS.batch_size] + images = tf.random.uniform(input_shape, 0.0, 255.0, dtype=tf.float32, name='synthetic_images') + labels = tf.constant(0, shape=label_shape, dtype=tf.int64) #################### # Define the model # @@ -258,4 +268,4 @@ def main(_): if __name__ == '__main__': - tf.app.run() \ No newline at end of file + tf.app.run() diff --git a/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/__init__.py b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/__init__.py new file mode 100644 index 000000000..159180624 --- /dev/null +++ b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/__init__.py @@ -0,0 +1,20 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + diff --git a/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/accuracy.py b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/accuracy.py new file mode 100644 index 000000000..6d7acaf50 --- /dev/null +++ b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/accuracy.py @@ -0,0 +1,135 @@ +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import sys +import os +import time +import numpy as np + +from google.protobuf import text_format +import tensorflow as tf +import preprocessing +import datasets + +NUM_TEST_IMAGES = 50000 + +def load_graph(model_file): + graph = tf.Graph() + graph_def = tf.GraphDef() + + import os + file_ext = os.path.splitext(model_file)[1] + + with open(model_file, "rb") as f: + if file_ext == '.pbtxt': + text_format.Merge(f.read(), graph_def) + else: + graph_def.ParseFromString(f.read()) + with graph.as_default(): + tf.import_graph_def(graph_def, name='') + + return graph + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--input_graph", default=None, + help="graph/model to be executed") + parser.add_argument("--data_location", default=None, + help="full path to the validation data") + parser.add_argument("--input_height", default=224, + type=int, help="input height") + parser.add_argument("--input_width", default=224, + type=int, help="input width") + parser.add_argument("--batch_size", default=32, + type=int, help="batch size") + parser.add_argument("--input_layer", default="input", + help="name of input layer") + parser.add_argument("--output_layer", default="MobilenetV1/Predictions/Reshape_1", + help="name of output layer") + parser.add_argument( + '--num_inter_threads', + help='number threads across operators', + type=int, default=1) + parser.add_argument( + '--num_intra_threads', + help='number threads for an operator', + type=int, default=1) + args = parser.parse_args() + + if args.input_graph: + model_file = args.input_graph + else: + sys.exit("Please provide a graph file.") + input_height = args.input_height + input_width = args.input_width + batch_size = args.batch_size + input_layer = args.input_layer + output_layer = args.output_layer + num_inter_threads = args.num_inter_threads + num_intra_threads = args.num_intra_threads + data_location = args.data_location + dataset = datasets.ImagenetData(data_location) + preprocessor = dataset.get_image_preprocessor()( + input_height, input_width, batch_size, + 1, # device count + tf.float32, # data_type for input fed to the graph + train=False, # doing inference + resize_method='bilinear') + + images, labels = preprocessor.minibatch(dataset, subset='validation', + use_datasets=True, cache_data=False) + graph = load_graph(model_file) + input_tensor = graph.get_tensor_by_name(input_layer + ":0") + output_tensor = graph.get_tensor_by_name(output_layer + ":0") + + config = tf.ConfigProto() + config.inter_op_parallelism_threads = num_inter_threads + config.intra_op_parallelism_threads = num_intra_threads + + total_accuracy1, total_accuracy5 = (0.0, 0.0) + num_processed_images = 0 + num_remaining_images = dataset.num_examples_per_epoch(subset='validation') \ + - num_processed_images + with tf.Session() as sess: + sess_graph = tf.Session(graph=graph, config=config) + while num_remaining_images >= batch_size: + # Reads and preprocess data + np_images, np_labels = sess.run([images[0], labels[0]]) + num_processed_images += batch_size + num_remaining_images -= batch_size + start_time = time.time() + # Compute inference on the preprocessed data + predictions = sess_graph.run(output_tensor, + {input_tensor: np_images}) + elapsed_time = time.time() - start_time + accuracy1 = tf.reduce_sum( + 
tf.cast(tf.nn.in_top_k(tf.constant(predictions), + tf.constant(np_labels), 1), tf.float32)) + + accuracy5 = tf.reduce_sum( + tf.cast(tf.nn.in_top_k(tf.constant(predictions), + tf.constant(np_labels), 5), tf.float32)) + np_accuracy1, np_accuracy5 = sess.run([accuracy1, accuracy5]) + total_accuracy1 += np_accuracy1 + total_accuracy5 += np_accuracy5 + print("Iteration time: %0.4f ms" % elapsed_time) + print("Processed %d images. (Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \ + % (num_processed_images, total_accuracy1/num_processed_images, + total_accuracy5/num_processed_images)) diff --git a/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/benchmark.py b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/benchmark.py new file mode 100644 index 000000000..0e7a41f31 --- /dev/null +++ b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/benchmark.py @@ -0,0 +1,149 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import sys +import os +import time +import numpy as np + +from google.protobuf import text_format +import tensorflow as tf + +def load_graph(model_file): + graph = tf.Graph() + graph_def = tf.GraphDef() + + import os + file_ext = os.path.splitext(model_file)[1] + + with open(model_file, "rb") as f: + if file_ext == '.pbtxt': + text_format.Merge(f.read(), graph_def) + else: + graph_def.ParseFromString(f.read()) + with graph.as_default(): + tf.import_graph_def(graph_def, name='') + + return graph + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--input_graph", default=None, + help="graph/model to be executed") + parser.add_argument("--input_height", default=224, + type=int, help="input height") + parser.add_argument("--input_width", default=224, + type=int, help="input width") + parser.add_argument("--batch_size", default=32, + type=int, help="batch size") + parser.add_argument("--input_layer", default="input", + help="name of input layer") + parser.add_argument("--output_layer", default="MobilenetV1/Predictions/Reshape_1", + help="name of output layer") + parser.add_argument( + '--num_inter_threads', + help='number threads across operators', + type=int, default=1) + parser.add_argument( + '--num_intra_threads', + help='number threads for an operator', + type=int, default=1) + parser.add_argument("--warmup_steps", type=int, default=10, + help="number of warmup steps") + parser.add_argument("--steps", type=int, default=50, help="number of steps") + args = parser.parse_args() + + if args.input_graph: + model_file = args.input_graph + else: + sys.exit("Please provide a graph file.") + input_height = args.input_height + input_width = args.input_width + batch_size = args.batch_size + input_layer = args.input_layer + output_layer = args.output_layer + warmup_steps = args.warmup_steps + steps = args.steps + assert steps > 10, "Benchmark steps should be at least 10." 
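+  # Example invocation (the graph path below is hypothetical):
+  #   python benchmark.py --input_graph /tmp/mobilenetv1_int8_pretrained_model.pb \
+  #     --batch_size 32 --num_inter_threads 1 --num_intra_threads 28 \
+  #     --warmup_steps 10 --steps 50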
+ num_inter_threads = args.num_inter_threads + num_intra_threads = args.num_intra_threads + + input_shape = [batch_size, input_height, input_width, 3] + images = tf.truncated_normal( + input_shape, + dtype=tf.float32, + stddev=10, + name='synthetic_images') + + image_data = None + + config = tf.ConfigProto() + config.inter_op_parallelism_threads = num_inter_threads + config.intra_op_parallelism_threads = num_intra_threads + config.use_per_session_threads = True + + with tf.Session() as sess: + image_data = sess.run(images) + graph = load_graph(model_file) + + input_tensor = graph.get_tensor_by_name(input_layer + ":0"); + output_tensor = graph.get_tensor_by_name(output_layer + ":0"); + + with tf.Session(graph=graph, config=config) as sess: + sys.stdout.flush() + print("[Running warmup steps...]") + for t in range(warmup_steps): + start_time = time.time() + sess.run(output_tensor, {input_tensor: image_data}) + elapsed_time = time.time() - start_time + if((t+1) % 10 == 0): + print("steps = {0}, {1} images/sec" + "".format(t+1, batch_size/elapsed_time), flush=True) + + print("[Running benchmark steps...]") + total_time = 0; + total_images = 0; + for t in range(steps): + start_time = time.time() + results = sess.run(output_tensor, {input_tensor: image_data}) + elapsed_time = time.time() - start_time + if((t+1) % 10 == 0): + print("steps = {0}, {1} images/sec" + "".format(t+1, batch_size/elapsed_time), flush=True); diff --git a/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/cnn_util.py b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/cnn_util.py new file mode 100644 index 000000000..32902d149 --- /dev/null +++ b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/cnn_util.py @@ -0,0 +1,50 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ============================================================================== + +"""Utilities for CNN benchmarks.""" + +import tensorflow as tf + + +def tensorflow_version_tuple(): + v = tf.__version__ + major, minor, patch = v.split('.') + return (int(major), int(minor), patch) + + +def tensorflow_version(): + vt = tensorflow_version_tuple() + return vt[0] * 1000 + vt[1] + diff --git a/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/datasets.py b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/datasets.py new file mode 100644 index 000000000..8734044b5 --- /dev/null +++ b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/datasets.py @@ -0,0 +1,195 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Benchmark dataset utilities. +""" + +from abc import abstractmethod +import os + +import numpy as np +from six.moves import cPickle +from six.moves import xrange # pylint: disable=redefined-builtin +import tensorflow as tf + +from tensorflow.python.platform import gfile +import preprocessing + + +IMAGENET_NUM_TRAIN_IMAGES = 1281167 +IMAGENET_NUM_VAL_IMAGES = 50000 + + +def create_dataset(data_dir, data_name): + """Create a Dataset instance based on data_dir and data_name.""" + supported_datasets = { + 'imagenet': ImagenetData, + 'cifar10': Cifar10Data, + } + if not data_dir and not data_name: + # When using synthetic data, use synthetic imagenet images by default. + data_name = 'imagenet' + + if data_name is None: + for supported_name in supported_datasets: + if supported_name in data_dir: + data_name = supported_name + break + + if data_name is None: + raise ValueError('Could not identify name of dataset. ' + 'Please specify with --data_name option.') + + if data_name not in supported_datasets: + raise ValueError('Unknown dataset. 
Must be one of %s', ', '.join( + [key for key in sorted(supported_datasets.keys())])) + + return supported_datasets[data_name](data_dir) + + +class Dataset(object): + """Abstract class for cnn benchmarks dataset.""" + + def __init__(self, name, height=None, width=None, depth=None, data_dir=None, + queue_runner_required=False, num_classes=1000): + self.name = name + self.height = height + self.width = width + self.depth = depth or 3 + + self.data_dir = data_dir + self._queue_runner_required = queue_runner_required + self._num_classes = num_classes + + def tf_record_pattern(self, subset): + return os.path.join(self.data_dir, '%s-*-of-*' % subset) + + def reader(self): + return tf.TFRecordReader() + + @property + def num_classes(self): + return self._num_classes + + @num_classes.setter + def num_classes(self, val): + self._num_classes = val + + @abstractmethod + def num_examples_per_epoch(self, subset): + pass + + def __str__(self): + return self.name + + def get_image_preprocessor(self): + return None + + def queue_runner_required(self): + return self._queue_runner_required + + def use_synthetic_gpu_images(self): + return not self.data_dir + + +class ImagenetData(Dataset): + """Configuration for Imagenet dataset.""" + + def __init__(self, data_dir=None): + super(ImagenetData, self).__init__('imagenet', 300, 300, data_dir=data_dir) + + def num_examples_per_epoch(self, subset='train'): + if subset == 'train': + return IMAGENET_NUM_TRAIN_IMAGES + elif subset == 'validation': + return IMAGENET_NUM_VAL_IMAGES + else: + raise ValueError('Invalid data subset "%s"' % subset) + + def get_image_preprocessor(self): + if self.use_synthetic_gpu_images(): + return preprocessing.SyntheticImagePreprocessor + else: + return preprocessing.RecordInputImagePreprocessor + + +class Cifar10Data(Dataset): + """Configuration for cifar 10 dataset. + + It will mount all the input images to memory. + """ + + def __init__(self, data_dir=None): + super(Cifar10Data, self).__init__('cifar10', 32, 32, data_dir=data_dir, + queue_runner_required=True, + num_classes=10) + + def read_data_files(self, subset='train'): + """Reads from data file and returns images and labels in a numpy array.""" + assert self.data_dir, ('Cannot call `read_data_files` when using synthetic ' + 'data') + if subset == 'train': + filenames = [os.path.join(self.data_dir, 'data_batch_%d' % i) + for i in xrange(1, 6)] + elif subset == 'validation': + filenames = [os.path.join(self.data_dir, 'test_batch')] + else: + raise ValueError('Invalid data subset "%s"' % subset) + + inputs = [] + for filename in filenames: + with gfile.Open(filename, 'r') as f: + inputs.append(cPickle.load(f)) + # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the + # input format. 
+ all_images = np.concatenate( + [each_input['data'] for each_input in inputs]).astype(np.float32) + all_labels = np.concatenate( + [each_input['labels'] for each_input in inputs]) + return all_images, all_labels + + def num_examples_per_epoch(self, subset='train'): + if subset == 'train': + return 50000 + elif subset == 'validation': + return 10000 + else: + raise ValueError('Invalid data subset "%s"' % subset) + + def get_image_preprocessor(self): + if self.use_synthetic_gpu_images(): + return preprocessing.SyntheticImagePreprocessor + else: + return preprocessing.Cifar10ImagePreprocessor diff --git a/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/preprocessing.py b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/preprocessing.py new file mode 100644 index 000000000..ef94d3e3d --- /dev/null +++ b/models/image_recognition/tensorflow/mobilenet_v1/inference/int8/preprocessing.py @@ -0,0 +1,637 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Image pre-processing utilities. +""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +import math +from six.moves import xrange # pylint: disable=redefined-builtin +import tensorflow as tf + +from tensorflow.contrib.data.python.ops import batching +from tensorflow.contrib.data.python.ops import interleave_ops +from tensorflow.contrib.image.python.ops import distort_image_ops +from tensorflow.python.layers import utils +from tensorflow.python.ops import data_flow_ops +from tensorflow.python.platform import gfile +import cnn_util + +from tensorflow.python.ops import control_flow_ops + +def parse_example_proto(example_serialized): + """Parses an Example proto containing a training example of an image. + + The output of the build_image_data.py image preprocessing script is a dataset + containing serialized Example protocol buffers. 
Each Example proto contains + the following fields: + + image/height: 462 + image/width: 581 + image/colorspace: 'RGB' + image/channels: 3 + image/class/label: 615 + image/class/synset: 'n03623198' + image/class/text: 'knee pad' + image/object/bbox/xmin: 0.1 + image/object/bbox/xmax: 0.9 + image/object/bbox/ymin: 0.2 + image/object/bbox/ymax: 0.6 + image/object/bbox/label: 615 + image/format: 'JPEG' + image/filename: 'ILSVRC2012_val_00041207.JPEG' + image/encoded: + + Args: + example_serialized: scalar Tensor tf.string containing a serialized + Example protocol buffer. + + Returns: + image_buffer: Tensor tf.string containing the contents of a JPEG file. + label: Tensor tf.int32 containing the label. + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged as + [ymin, xmin, ymax, xmax]. + text: Tensor tf.string containing the human-readable label. + """ + # Dense features in Example proto. + feature_map = { + 'image/encoded': tf.FixedLenFeature([], dtype=tf.string, + default_value=''), + 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, + default_value=-1), + 'image/class/text': tf.FixedLenFeature([], dtype=tf.string, + default_value=''), + } + sparse_float32 = tf.VarLenFeature(dtype=tf.float32) + # Sparse features in Example proto. + feature_map.update( + {k: sparse_float32 for k in ['image/object/bbox/xmin', + 'image/object/bbox/ymin', + 'image/object/bbox/xmax', + 'image/object/bbox/ymax']}) + + features = tf.parse_single_example(example_serialized, feature_map) + label = tf.cast(features['image/class/label'], dtype=tf.int32) + + xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0) + ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0) + xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0) + ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0) + + # Note that we impose an ordering of (y, x) just to make life difficult. + bbox = tf.concat([ymin, xmin, ymax, xmax], 0) + + # Force the variable number of bounding boxes into the shape + # [1, num_boxes, coords]. + bbox = tf.expand_dims(bbox, 0) + bbox = tf.transpose(bbox, [0, 2, 1]) + + return features['image/encoded'], label, bbox, features['image/class/text'] + + +def get_image_resize_method(resize_method, batch_position=0): + """Get tensorflow resize method. + + If resize_method is 'round_robin', return different methods based on batch + position in a round-robin fashion. NOTE: If the batch size is not a multiple + of the number of methods, then the distribution of methods will not be + uniform. + + Args: + resize_method: (string) nearest, bilinear, bicubic, area, or round_robin. + batch_position: position of the image in a batch. NOTE: this argument can + be an integer or a tensor + Returns: + one of resize type defined in tf.image.ResizeMethod. + """ + resize_methods_map = { + 'nearest': tf.image.ResizeMethod.NEAREST_NEIGHBOR, + 'bilinear': tf.image.ResizeMethod.BILINEAR, + 'bicubic': tf.image.ResizeMethod.BICUBIC, + 'area': tf.image.ResizeMethod.AREA + } + + if resize_method != 'round_robin': + return resize_methods_map[resize_method] + + # return a resize method based on batch position in a round-robin fashion. 
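+  # The chain of conds below effectively computes
+  # resize_methods[batch_position % len(resize_methods)] while keeping the
+  # graph finite when batch_position is a tensor (see the NOTE further down).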
+ resize_methods = resize_methods_map.values() + def lookup(index): + return resize_methods[index] + + def resize_method_0(): + return utils.smart_cond(batch_position % len(resize_methods) == 0, + lambda: lookup(0), resize_method_1) + + def resize_method_1(): + return utils.smart_cond(batch_position % len(resize_methods) == 1, + lambda: lookup(1), resize_method_2) + + def resize_method_2(): + return utils.smart_cond(batch_position % len(resize_methods) == 2, + lambda: lookup(2), lambda: lookup(3)) + + # NOTE(jsimsa): Unfortunately, we cannot use a single recursive function here + # because TF would not be able to construct a finite graph. + + return resize_method_0() + + +def decode_jpeg(image_buffer, scope=None): # , dtype=tf.float32): + """Decode a JPEG string into one 3-D float image Tensor. + + Args: + image_buffer: scalar string Tensor. + scope: Optional scope for op_scope. + Returns: + 3-D float Tensor with values ranging from [0, 1). + """ + # with tf.op_scope([image_buffer], scope, 'decode_jpeg'): + # with tf.name_scope(scope, 'decode_jpeg', [image_buffer]): + with tf.name_scope(scope or 'decode_jpeg'): + # Decode the string as an RGB JPEG. + # Note that the resulting image contains an unknown height and width + # that is set dynamically by decode_jpeg. In other words, the height + # and width of image is unknown at compile-time. + image = tf.image.decode_jpeg(image_buffer, channels=3) #, + # fancy_upscaling=False, + # dct_method='INTEGER_FAST') + + # image = tf.Print(image, [tf.shape(image)], 'Image shape: ') + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + + return image + + + +def preprocess_for_eval(image, height, width, + central_fraction=0.875, scope=None): + """Prepare one image for evaluation. + + If height and width are specified it would output an image with that size by + applying resize_bilinear. + + If central_fraction is specified it would crop the central fraction of the + input image. + + Args: + image: 3-D Tensor of image. If dtype is tf.float32 then the range should be + [0, 1], otherwise it would converted to tf.float32 assuming that the range + is [0, MAX], where MAX is largest positive representable number for + int(8/16/32) data type (see `tf.image.convert_image_dtype` for details). + height: integer + width: integer + central_fraction: Optional Float, fraction of the image to crop. + scope: Optional scope for name_scope. + Returns: + 3-D float Tensor of prepared image. + """ + with tf.name_scope(scope, 'eval_image', [image, height, width]): + if image.dtype != tf.float32: + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + # Crop the central region of the image with an area containing 87.5% of + # the original image. + if central_fraction: + image = tf.image.central_crop(image, central_fraction=central_fraction) + + if height and width: + # Resize the image to the specified height and width. + image = tf.expand_dims(image, 0) + image = tf.image.resize_bilinear(image, [height, width], + align_corners=False) + image = tf.squeeze(image, [0]) + image = tf.subtract(image, 0.5) + image = tf.multiply(image, 2.0) + return image + + + +def apply_with_random_selector(x, func, num_cases): + """Computes func(x, sel), with sel sampled from [0...num_cases-1]. + + Args: + x: input Tensor. + func: Python function to apply. + num_cases: Python int32, number of cases to sample sel from. + + Returns: + The result of func(x, sel), where func receives the value of the + selector as a python integer, but sel is sampled dynamically. 
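+
+  For illustration, the training path later in this file selects a resize
+  method this way (num_cases is 4 when fast_mode is off):
+    apply_with_random_selector(
+        image,
+        lambda x, method: tf.image.resize_images(x, [height, width], method),
+        num_cases=4)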
+ """ + sel = tf.random_uniform([], maxval=num_cases, dtype=tf.int32) + # Pass the real x only to one of the func calls. + return control_flow_ops.merge([ + func(control_flow_ops.switch(x, tf.equal(sel, case))[1], case) + for case in range(num_cases)])[0] + + +def distort_color(image, color_ordering=0, fast_mode=True, scope=None): + """Distort the color of a Tensor image. + + Each color distortion is non-commutative and thus ordering of the color ops + matters. Ideally we would randomly permute the ordering of the color ops. + Rather then adding that level of complication, we select a distinct ordering + of color ops for each preprocessing thread. + + Args: + image: 3-D Tensor containing single image in [0, 1]. + color_ordering: Python int, a type of distortion (valid values: 0-3). + fast_mode: Avoids slower ops (random_hue and random_contrast) + scope: Optional scope for name_scope. + Returns: + 3-D Tensor color-distorted image on range [0, 1] + Raises: + ValueError: if color_ordering not in [0, 3] + """ + with tf.name_scope(scope, 'distort_color', [image]): + if fast_mode: + if color_ordering == 0: + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + else: + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_brightness(image, max_delta=32. / 255.) + else: + if color_ordering == 0: + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + elif color_ordering == 1: + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + elif color_ordering == 2: + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + elif color_ordering == 3: + image = tf.image.random_hue(image, max_delta=0.2) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + image = tf.image.random_brightness(image, max_delta=32. / 255.) + else: + raise ValueError('color_ordering must be in [0, 3]') + + # The random_* ops do not necessarily clamp. + return tf.clip_by_value(image, 0.0, 1.0) + + +def distorted_bounding_box_crop(image, + bbox, + min_object_covered=0.1, + aspect_ratio_range=(0.75, 1.33), + area_range=(0.05, 1.0), + max_attempts=100, + scope=None): + """Generates cropped_image using a one of the bboxes randomly distorted. + + See `tf.image.sample_distorted_bounding_box` for more documentation. + + Args: + image: 3-D Tensor of image (it will be converted to floats in [0, 1]). + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged + as [ymin, xmin, ymax, xmax]. If num_boxes is 0 then it would use the whole + image. + min_object_covered: An optional `float`. Defaults to `0.1`. The cropped + area of the image must contain at least this fraction of any bounding box + supplied. + aspect_ratio_range: An optional list of `floats`. 
The cropped area of the + image must have an aspect ratio = width / height within this range. + area_range: An optional list of `floats`. The cropped area of the image + must contain a fraction of the supplied image within in this range. + max_attempts: An optional `int`. Number of attempts at generating a cropped + region of the image of the specified constraints. After `max_attempts` + failures, return the entire image. + scope: Optional scope for name_scope. + Returns: + A tuple, a 3-D Tensor cropped_image and the distorted bbox + """ + with tf.name_scope(scope, 'distorted_bounding_box_crop', [image, bbox]): + # Each bounding box has shape [1, num_boxes, box coords] and + # the coordinates are ordered [ymin, xmin, ymax, xmax]. + + # A large fraction of image datasets contain a human-annotated bounding + # box delineating the region of the image containing the object of interest. + # We choose to create a new bounding box for the object which is a randomly + # distorted version of the human-annotated bounding box that obeys an + # allowed range of aspect ratios, sizes and overlap with the human-annotated + # bounding box. If no box is supplied, then we assume the bounding box is + # the entire image. + sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( + tf.shape(image), + bounding_boxes=bbox, + min_object_covered=min_object_covered, + aspect_ratio_range=aspect_ratio_range, + area_range=area_range, + max_attempts=max_attempts, + use_image_if_no_bounding_boxes=True) + bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box + + # Crop the image to the specified bounding box. + cropped_image = tf.slice(image, bbox_begin, bbox_size) + return cropped_image, distort_bbox + + + +def preprocess_for_train(image, height,width, bbox, + batch_position, + fast_mode=True, + scope=None, + add_image_summaries=True): + """Distort one image for training a network. + + Distorting images provides a useful technique for augmenting the data + set during training in order to make the network invariant to aspects + of the image that do not effect the label. + + Args: + image: 3-D Tensor of image. If dtype is tf.float32 then the range should be + [0, 1], otherwise it would converted to tf.float32 assuming that the range + is [0, MAX], where MAX is largest positive representable number for + int(8/16/32) data type (see `tf.image.convert_image_dtype` for details). + height: integer + width: integer + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged + as [ymin, xmin, ymax, xmax]. + batch_position: position of the image in a batch, which affects how images + are distorted and resized. NOTE: this argument can be an integer or a + tensor + scope: Optional scope for op_scope. + add_image_summaries: Enable image summaries. + Returns: + 3-D float Tensor of distorted image used for training with range [-1, 1]. + """ + + with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]): + if bbox is None: + bbox = tf.constant([0.0, 0.0, 1.0, 1.0], + dtype=tf.float32, + shape=[1, 1, 4]) + if image.dtype != tf.float32: + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + # Each bounding box has shape [1, num_boxes, box coords] and + # the coordinates are ordered [ymin, xmin, ymax, xmax]. 
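+    # Draw the supplied (or whole-image) bbox onto a copy of the image so it
+    # can be written out as a TensorBoard summary when add_image_summaries
+    # is enabled.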
+ image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), + bbox) + if add_image_summaries: + tf.summary.image('image_with_bounding_boxes', image_with_box) + + distorted_image, distorted_bbox = distorted_bounding_box_crop(image, bbox) + # Restore the shape since the dynamic slice based upon the bbox_size loses + # the third dimension. + distorted_image.set_shape([None, None, 3]) + image_with_distorted_box = tf.image.draw_bounding_boxes( + tf.expand_dims(image, 0), distorted_bbox) + if add_image_summaries: + tf.summary.image('images_with_distorted_bounding_box', + image_with_distorted_box) + + + # This resizing operation may distort the images because the aspect + # ratio is not respected. We select a resize method in a round robin + # fashion based on the thread number. + # Note that ResizeMethod contains 4 enumerated resizing methods. + + # We select only 1 case for fast_mode bilinear. + num_resize_cases = 1 if fast_mode else 4 + distorted_image = apply_with_random_selector( + distorted_image, + lambda x, method: tf.image.resize_images(x, [height, width], method), + num_cases=num_resize_cases) + + if add_image_summaries: + tf.summary.image('cropped_resized_image', + tf.expand_dims(distorted_image, 0)) + + + # Randomly flip the image horizontally. + distorted_image = tf.image.random_flip_left_right(distorted_image) + # Randomly distort the colors. There are 1 or 4 ways to do it. + num_distort_cases = 1 if fast_mode else 4 + distorted_image = apply_with_random_selector( + distorted_image, + lambda x, ordering: distort_color(x, ordering, fast_mode), + num_cases=num_distort_cases) + + if add_image_summaries: + tf.summary.image('final_distorted_image', + tf.expand_dims(distorted_image, 0)) + distorted_image = tf.subtract(distorted_image, 0.5) + distorted_image = tf.multiply(distorted_image, 2.0) + return distorted_image + + +def distort_color(image, batch_position=0, distort_color_in_yiq=False, + scope=None): + """Distort the color of the image. + + Each color distortion is non-commutative and thus ordering of the color ops + matters. Ideally we would randomly permute the ordering of the color ops. + Rather then adding that level of complication, we select a distinct ordering + of color ops based on the position of the image in a batch. + + Args: + image: float32 Tensor containing single image. Tensor values should be in + range [0, 1]. + batch_position: the position of the image in a batch. NOTE: this argument + can be an integer or a tensor + distort_color_in_yiq: distort color of input images in YIQ space. + scope: Optional scope for op_scope. + Returns: + color-distorted image + """ + with tf.name_scope(scope or 'distort_color'): + + def distort_fn_0(image=image): + """Variant 0 of distort function.""" + image = tf.image.random_brightness(image, max_delta=32. / 255.) + #if distort_color_in_yiq: + # image = distort_image_ops.random_hsv_in_yiq( + # image, lower_saturation=0.5, upper_saturation=1.5, + # max_delta_hue=0.2 * math.pi) + #else: + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + return image + + def distort_fn_1(image=image): + """Variant 1 of distort function.""" + image = tf.image.random_brightness(image, max_delta=32. / 255.) 
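+      # Variant 1 applies contrast before saturation/hue; the op ordering is
+      # the only difference from distort_fn_0, since these color distortions
+      # do not commute.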
+ image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + #if distort_color_in_yiq: + # image = distort_image_ops.random_hsv_in_yiq( + # image, lower_saturation=0.5, upper_saturation=1.5, + # max_delta_hue=0.2 * math.pi) + #else: + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + return image + + image = utils.smart_cond(batch_position % 2 == 0, distort_fn_0, + distort_fn_1) + # The random_* ops do not necessarily clamp. + image = tf.clip_by_value(image, 0.0, 1.0) + return image + + +class RecordInputImagePreprocessor(object): + """Preprocessor for images with RecordInput format.""" + + def __init__(self, + height, + width, + batch_size, + num_splits, + dtype, + train, + distortions=False, + resize_method="bilinear", + shift_ratio=0, + summary_verbosity=1, + distort_color_in_yiq=False, + fuse_decode_and_crop=False): + self.height = height + self.width = width + self.batch_size = batch_size + self.num_splits = num_splits + self.dtype = dtype + self.train = train + self.resize_method = resize_method + self.shift_ratio = shift_ratio + self.distortions = distortions + self.distort_color_in_yiq = distort_color_in_yiq + self.fuse_decode_and_crop = fuse_decode_and_crop + if self.batch_size % self.num_splits != 0: + raise ValueError( + ('batch_size must be a multiple of num_splits: ' + 'batch_size %d, num_splits: %d') % + (self.batch_size, self.num_splits)) + self.batch_size_per_split = self.batch_size // self.num_splits + self.summary_verbosity = summary_verbosity + + def image_preprocess(self, image_buffer, bbox, batch_position): + """Preprocessing image_buffer as a function of its batch position.""" + if self.train: + image_buffer = tf.image.decode_jpeg( + image_buffer, channels=3, dct_method='INTEGER_FAST') + image = preprocess_for_train(image_buffer, self.height, self.width, bbox, + batch_position) + else: + image = tf.image.decode_jpeg( + image_buffer, channels=3, dct_method='INTEGER_FAST') + image = preprocess_for_eval(image, self.height, self.width) + return image + + def parse_and_preprocess(self, value, batch_position): + image_buffer, label_index, bbox, _ = parse_example_proto(value) + image = self.image_preprocess(image_buffer, bbox, batch_position) + return (label_index, image) + + def minibatch(self, dataset, subset, use_datasets, cache_data, + shift_ratio=-1): + if shift_ratio < 0: + shift_ratio = self.shift_ratio + with tf.name_scope('batch_processing'): + # Build final results per split. 
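+      # images[i] / labels[i] collect the tensors for split i; each split
+      # receives batch_size_per_split (= batch_size // num_splits) examples.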
+ images = [[] for _ in range(self.num_splits)] + labels = [[] for _ in range(self.num_splits)] + if use_datasets: + glob_pattern = dataset.tf_record_pattern(subset) + file_names = gfile.Glob(glob_pattern) + if not file_names: + raise ValueError('Found no files in --data_dir matching: {}' + .format(glob_pattern)) + ds = tf.data.TFRecordDataset.list_files(file_names) + ds = ds.apply( + interleave_ops.parallel_interleave( + tf.data.TFRecordDataset, cycle_length=10)) + if cache_data: + ds = ds.take(1).cache().repeat() + counter = tf.data.Dataset.range(self.batch_size) + counter = counter.repeat() + ds = tf.data.Dataset.zip((ds, counter)) + ds = ds.prefetch(buffer_size=self.batch_size) + ds = ds.shuffle(buffer_size=10000) + ds = ds.repeat() + ds = ds.apply( + batching.map_and_batch( + map_func=self.parse_and_preprocess, + batch_size=self.batch_size_per_split, + num_parallel_batches=self.num_splits)) + ds = ds.prefetch(buffer_size=self.num_splits) + ds_iterator = ds.make_one_shot_iterator() + for d in xrange(self.num_splits): + labels[d], images[d] = ds_iterator.get_next() + + else: + record_input = data_flow_ops.RecordInput( + file_pattern=dataset.tf_record_pattern(subset), + seed=301, + parallelism=64, + buffer_size=10000, + batch_size=self.batch_size, + shift_ratio=shift_ratio, + name='record_input') + records = record_input.get_yield_op() + records = tf.split(records, self.batch_size, 0) + records = [tf.reshape(record, []) for record in records] + for idx in xrange(self.batch_size): + value = records[idx] + (label, image) = self.parse_and_preprocess(value, idx) + split_index = idx % self.num_splits + labels[split_index].append(label) + images[split_index].append(image) + + for split_index in xrange(self.num_splits): + if not use_datasets: + images[split_index] = tf.parallel_stack(images[split_index]) + labels[split_index] = tf.concat(labels[split_index], 0) + images[split_index] = tf.cast(images[split_index], self.dtype) + depth = 3 + images[split_index] = tf.reshape( + images[split_index], + shape=[self.batch_size_per_split, self.height, self.width, depth]) + labels[split_index] = tf.reshape(labels[split_index], + [self.batch_size_per_split]) + return images, labels + diff --git a/models/image_recognition/tensorflow/resnet101/inference/eval_image_classifier_inference.py b/models/image_recognition/tensorflow/resnet101/inference/eval_image_classifier_inference.py index a65a54b08..e62b40b3d 100644 --- a/models/image_recognition/tensorflow/resnet101/inference/eval_image_classifier_inference.py +++ b/models/image_recognition/tensorflow/resnet101/inference/eval_image_classifier_inference.py @@ -200,9 +200,11 @@ def run(self): num_processed_images += self.args.batch_size num_remaining_images -= self.args.batch_size + start_time = time.time() # Compute inference on the preprocessed data predictions = infer_sess.run(output_tensor, {input_tensor: np_images}) + elapsed_time = time.time() - start_time with tf.Graph().as_default() as accu_graph: # Putting all code within this make things faster. accuracy1 = tf.reduce_sum( @@ -216,6 +218,7 @@ def run(self): np_accuracy1, np_accuracy5 = accu_sess.run([accuracy1, accuracy5]) total_accuracy1 += np_accuracy1 total_accuracy5 += np_accuracy5 + print("Iteration time: %0.4f ms" % elapsed_time) print("Processed %d images. 
(Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \ % (num_processed_images, total_accuracy1 / num_processed_images, total_accuracy5 / num_processed_images)) diff --git a/models/image_recognition/tensorflow/resnet50/inference/eval_image_classifier_inference.py b/models/image_recognition/tensorflow/resnet50/inference/eval_image_classifier_inference.py index 198509a23..21a1b465e 100644 --- a/models/image_recognition/tensorflow/resnet50/inference/eval_image_classifier_inference.py +++ b/models/image_recognition/tensorflow/resnet50/inference/eval_image_classifier_inference.py @@ -165,12 +165,12 @@ def run(self): input_tensor = infer_graph.get_tensor_by_name('input:0') output_tensor = infer_graph.get_tensor_by_name('predict:0') - data_sess = tf.Session(graph=data_graph, config=data_config) + data_sess = tf.Session(graph=data_graph, config=data_config) infer_sess = tf.Session(graph=infer_graph, config=infer_config) num_processed_images = 0 num_remaining_images = dataset.num_examples_per_epoch(subset=subset) - num_processed_images \ - if self.args.data_location else datasets.IMAGENET_NUM_VAL_IMAGES + if self.args.data_location else (self.args.batch_size * self.args.steps) if (not self.args.accuracy_only): iteration = 0 @@ -230,9 +230,11 @@ def run(self): num_processed_images += self.args.batch_size num_remaining_images -= self.args.batch_size + start_time = time.time() # Compute inference on the preprocessed data predictions = infer_sess.run(output_tensor, {input_tensor: np_images}) + elapsed_time = time.time() - start_time # Write out the file name, expected label, and top prediction self.write_results_output(predictions, tf_filenames, np_labels) @@ -251,6 +253,7 @@ def run(self): total_accuracy1 += np_accuracy1 total_accuracy5 += np_accuracy5 + print("Iteration time: %0.4f ms" % elapsed_time) print("Processed %d images. (Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \ % (num_processed_images, total_accuracy1 / num_processed_images, total_accuracy5 / num_processed_images)) diff --git a/models/image_recognition/tensorflow/resnet50v1_5/__init__.py b/models/image_recognition/tensorflow/resnet50v1_5/__init__.py new file mode 100644 index 000000000..159180624 --- /dev/null +++ b/models/image_recognition/tensorflow/resnet50v1_5/__init__.py @@ -0,0 +1,20 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + diff --git a/models/image_recognition/tensorflow/resnet50v1_5/inference/__init__.py b/models/image_recognition/tensorflow/resnet50v1_5/inference/__init__.py new file mode 100644 index 000000000..159180624 --- /dev/null +++ b/models/image_recognition/tensorflow/resnet50v1_5/inference/__init__.py @@ -0,0 +1,20 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + diff --git a/models/image_recognition/tensorflow/resnet50v1_5/inference/datasets.py b/models/image_recognition/tensorflow/resnet50v1_5/inference/datasets.py new file mode 100644 index 000000000..cb848e467 --- /dev/null +++ b/models/image_recognition/tensorflow/resnet50v1_5/inference/datasets.py @@ -0,0 +1,96 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Benchmark dataset utilities. 
+""" + +from abc import abstractmethod +import os + +import tensorflow as tf + +import preprocessing + +IMAGENET_NUM_TRAIN_IMAGES = 1281167 +IMAGENET_NUM_VAL_IMAGES = 50000 +IMAGENET_NUM_CLASSES = 1000 + +class Dataset(object): + """Abstract class for cnn benchmarks dataset.""" + + def __init__(self, name, data_dir=None): + self.name = name + if data_dir is None: + raise ValueError('Data directory not specified') + self.data_dir = data_dir + + def tf_record_pattern(self, subset): + return os.path.join(self.data_dir, '%s-*-of-*' % subset) + + def reader(self): + return tf.TFRecordReader() + + @abstractmethod + def num_classes(self): + pass + + @abstractmethod + def num_examples_per_epoch(self, subset): + pass + + def __str__(self): + return self.name + + +class ImagenetData(Dataset): + + def __init__(self, data_dir=None): + super(ImagenetData, self).__init__('ImageNet', data_dir) + + def num_classes(self): + return IMAGENET_NUM_CLASSES + + def num_examples_per_epoch(self, subset='train'): + if subset == 'train': + return IMAGENET_NUM_TRAIN_IMAGES + elif subset == 'validation': + return IMAGENET_NUM_VAL_IMAGES + elif subset == 'calibrate' or subset == 'calibration': + return 100 + else: + raise ValueError('Invalid data subset "%s"' % subset) + + def get_image_preprocessor(self): + return preprocessing.RecordInputImagePreprocessor diff --git a/models/image_recognition/tensorflow/resnet50v1_5/inference/eval_image_classifier_inference.py b/models/image_recognition/tensorflow/resnet50v1_5/inference/eval_image_classifier_inference.py new file mode 100644 index 000000000..e1e6133e1 --- /dev/null +++ b/models/image_recognition/tensorflow/resnet50v1_5/inference/eval_image_classifier_inference.py @@ -0,0 +1,271 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +import time +from argparse import ArgumentParser + +import tensorflow as tf +import tensorflow.tools.graph_transforms as graph_transforms + +import datasets +import numpy as np + +INPUTS = 'input_tensor:0' +OUTPUTS = 'softmax_tensor:0' +OPTIMIZATION = 'strip_unused_nodes remove_nodes(op=Identity, op=CheckNumerics) fold_constants(ignore_errors=true) fold_batch_norms fold_old_batch_norms' + +RESNET_IMAGE_SIZE = 224 + + +class eval_classifier_optimized_graph: + """Evaluate image classifier with optimized TensorFlow graph""" + + def __init__(self): + + arg_parser = ArgumentParser(description='Parse args') + + arg_parser.add_argument('-b', "--batch-size", + help="Specify the batch size. 
If this " \ + "parameter is not specified or is -1, the " \ + "largest ideal batch size for the model will " \ + "be used.", + dest="batch_size", type=int, default=-1) + + arg_parser.add_argument('-e', "--num-inter-threads", + help='The number of inter-thread.', + dest='num_inter_threads', type=int, default=0) + + arg_parser.add_argument('-a', "--num-intra-threads", + help='The number of intra-thread.', + dest='num_intra_threads', type=int, default=0) + + arg_parser.add_argument('-m', "--model-name", + help='Specify the model name to run benchmark for', + dest='model_name') + + arg_parser.add_argument('-g', "--input-graph", + help='Specify the input graph for the transform tool', + dest='input_graph') + + arg_parser.add_argument('-d', "--data-location", + help='Specify the location of the data. ' + 'If this parameter is not specified, ' + 'the benchmark will use random/dummy data.', + dest="data_location", default=None) + + arg_parser.add_argument('-r', "--accuracy-only", + help='For accuracy measurement only.', + dest='accuracy_only', action='store_true') + arg_parser.add_argument('--calibrate', dest='calibrate', + help='Run accuracy with calibration data,' + 'to generate min_max ranges, calibrate=[True/False]', + type=bool, default=False) + arg_parser.add_argument("--results-file-path", + help="File path for the inference results", + dest="results_file_path", default=None) + arg_parser.add_argument("--warmup-steps", type=int, default=10, + help="number of warmup steps") + arg_parser.add_argument("--steps", type=int, default=50, + help="number of steps") + + arg_parser.add_argument( + '--data-num-inter-threads', dest='data_num_inter_threads', + help='number threads across operators', + type=int, default=32) + arg_parser.add_argument( + '--data-num-intra-threads', dest='data_num_intra_threads', + help='number threads for data layer operator', + type=int, default=14) + arg_parser.add_argument( + '--num-cores', dest='num_cores', + help='number of cores', + type=int, default=28) + + self.args = arg_parser.parse_args() + # validate the arguements + self.validate_args() + + def write_results_output(self, predictions, filenames, labels): + # If a results_file_path is provided, write the predictions to the file + if self.args.results_file_path: + top_predictions = np.argmax(predictions, 1) + with open(self.args.results_file_path, "a") as fp: + for filename, expected_label, top_prediction in zip(filenames, labels, top_predictions): + fp.write("{},{},{}\n".format(filename, expected_label, top_prediction)) + + def run(self): + """run benchmark with optimized graph""" + + print("Run inference") + + data_config = tf.ConfigProto() + data_config.intra_op_parallelism_threads = self.args.data_num_intra_threads + data_config.inter_op_parallelism_threads = self.args.data_num_inter_threads + data_config.use_per_session_threads = 1 + + infer_config = tf.ConfigProto() + infer_config.intra_op_parallelism_threads = self.args.num_intra_threads + infer_config.inter_op_parallelism_threads = self.args.num_inter_threads + infer_config.use_per_session_threads = 1 + + data_graph = tf.Graph() + with data_graph.as_default(): + if (self.args.data_location): + print("Inference with real data.") + if self.args.calibrate: + subset = 'calibration' + else: + subset = 'validation' + dataset = datasets.ImagenetData(self.args.data_location) + preprocessor = dataset.get_image_preprocessor()( + RESNET_IMAGE_SIZE, RESNET_IMAGE_SIZE, self.args.batch_size, + num_cores=self.args.num_cores, + resize_method='crop') + + images, labels, 
filenames = preprocessor.minibatch(dataset, subset=subset) + + # If a results file path is provided, then start the prediction output file + if self.args.results_file_path: + with open(self.args.results_file_path, "w+") as fp: + fp.write("filename,actual,prediction\n") + else: + print("Inference with dummy data.") + input_shape = [self.args.batch_size, RESNET_IMAGE_SIZE, RESNET_IMAGE_SIZE, 3] + images = tf.random.uniform(input_shape, 0.0, 255.0, dtype=tf.float32, name='synthetic_images') + + infer_graph = tf.Graph() + with infer_graph.as_default(): + graph_def = tf.GraphDef() + with tf.gfile.FastGFile(self.args.input_graph, 'rb') as input_file: + input_graph_content = input_file.read() + graph_def.ParseFromString(input_graph_content) + + output_graph = graph_transforms.TransformGraph(graph_def, + [INPUTS], [OUTPUTS], [OPTIMIZATION]) + tf.import_graph_def(output_graph, name='') + + # Definite input and output Tensors for detection_graph + input_tensor = infer_graph.get_tensor_by_name('input_tensor:0') + output_tensor = infer_graph.get_tensor_by_name('softmax_tensor:0') + + data_sess = tf.Session(graph=data_graph, config=data_config) + infer_sess = tf.Session(graph=infer_graph, config=infer_config) + + num_processed_images = 0 + num_remaining_images = dataset.num_examples_per_epoch(subset=subset) - num_processed_images \ + if self.args.data_location else datasets.IMAGENET_NUM_VAL_IMAGES + + if (not self.args.accuracy_only): + iteration = 0 + warm_up_iteration = self.args.warmup_steps + total_run = self.args.steps + total_time = 0 + + while num_remaining_images >= self.args.batch_size and iteration < total_run: + iteration += 1 + tf_filenames = None + np_labels = None + data_load_start = time.time() + if self.args.results_file_path: + image_np, np_labels, tf_filenames = data_sess.run([images, labels, filenames]) + else: + image_np = data_sess.run(images) + + data_load_time = time.time() - data_load_start + + num_processed_images += self.args.batch_size + num_remaining_images -= self.args.batch_size + + start_time = time.time() + predictions = infer_sess.run(output_tensor, feed_dict={input_tensor: image_np}) + time_consume = time.time() - start_time + + # Write out the file name, expected label, and top prediction + self.write_results_output(predictions, tf_filenames, np_labels) + + # only add data loading time for real data, not for dummy data + if self.args.data_location: + time_consume += data_load_time + + print('Iteration %d: %.6f sec' % (iteration, time_consume)) + if iteration > warm_up_iteration: + total_time += time_consume + + time_average = total_time / (iteration - warm_up_iteration) + print('Average time: %.6f sec' % (time_average)) + + print('Batch size = %d' % self.args.batch_size) + if (self.args.batch_size == 1): + print('Latency: %.3f ms' % (time_average * 1000)) + # print throughput for both batch size 1 and 128 + print('Throughput: %.3f images/sec' % (self.args.batch_size / time_average)) + + else: # accuracy check + total_accuracy1, total_accuracy5 = (0.0, 0.0) + + while num_remaining_images >= self.args.batch_size: + # Reads and preprocess data + tf_filenames = None + if self.args.results_file_path: + np_images, np_labels, tf_filenames = data_sess.run([images, labels, filenames]) + else: + np_images, np_labels = data_sess.run([images, labels]) + num_processed_images += self.args.batch_size + num_remaining_images -= self.args.batch_size + + start_time = time.time() + # Compute inference on the preprocessed data + predictions = infer_sess.run(output_tensor, + {input_tensor: 
np_images}) + elapsed_time = time.time() - start_time + + # Write out the file name, expected label, and top prediction + self.write_results_output(predictions, tf_filenames, np_labels) + + with tf.Graph().as_default() as accu_graph: + accuracy1 = tf.reduce_sum( + tf.cast(tf.nn.in_top_k(tf.constant(predictions), + tf.constant(np_labels), 1), tf.float32)) + + accuracy5 = tf.reduce_sum( + tf.cast(tf.nn.in_top_k(tf.constant(predictions), + tf.constant(np_labels), 5), tf.float32)) + with tf.Session() as accu_sess: + np_accuracy1, np_accuracy5 = accu_sess.run([accuracy1, accuracy5]) + + total_accuracy1 += np_accuracy1 + total_accuracy5 += np_accuracy5 + + print("Iteration time: %0.4f ms" % elapsed_time) + print("Processed %d images. (Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \ + % (num_processed_images, total_accuracy1 / num_processed_images, + total_accuracy5 / num_processed_images)) + + def validate_args(self): + """validate the arguments""" + + if not self.args.data_location: + if self.args.accuracy_only: + raise ValueError("You must use real data for accuracy measurement.") + + +if __name__ == "__main__": + evaluate_opt_graph = eval_classifier_optimized_graph() + evaluate_opt_graph.run() diff --git a/models/image_recognition/tensorflow/resnet50v1_5/inference/preprocessing.py b/models/image_recognition/tensorflow/resnet50v1_5/inference/preprocessing.py new file mode 100644 index 000000000..3c6361584 --- /dev/null +++ b/models/image_recognition/tensorflow/resnet50v1_5/inference/preprocessing.py @@ -0,0 +1,177 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf +from tensorflow.data.experimental import parallel_interleave +from tensorflow.data.experimental import map_and_batch +from tensorflow.python.platform import gfile + + +def parse_example_proto(example_serialized): + """Parses an Example proto containing a training example of an image. + """ + # Dense features in Example proto. 
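+  # Only the fields this benchmark needs are kept: the encoded JPEG bytes,
+  # the integer class label, and the filename (used for the optional results
+  # CSV). The bbox coordinates are declared as sparse floats but not returned.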
+ feature_map = { + 'image/encoded': tf.FixedLenFeature([], dtype=tf.string, + default_value=''), + 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, + default_value=-1), + 'image/filename': tf.FixedLenFeature([], dtype=tf.string, + default_value="") + } + sparse_float32 = tf.VarLenFeature(dtype=tf.float32) + # Sparse features in Example proto. + feature_map.update( + {k: sparse_float32 for k in ['image/object/bbox/xmin', + 'image/object/bbox/ymin', + 'image/object/bbox/xmax', + 'image/object/bbox/ymax']}) + + features = tf.parse_single_example(example_serialized, feature_map) + label = tf.cast(features['image/class/label'], dtype=tf.int32) + filename = tf.cast(features['image/filename'], dtype=tf.string) + + return features['image/encoded'], label, filename + + +def eval_image(image, height, width, resize_method, + central_fraction=0.875, scope=None): + + with tf.name_scope('eval_image'): + if resize_method == 'crop': + shape = tf.shape(image) + image = tf.cond(tf.less(shape[0], shape[1]), + lambda: tf.image.resize_images(image, + tf.convert_to_tensor([256, 256 * shape[1] / shape[0]], + dtype=tf.int32)), + lambda: tf.image.resize_images(image, + tf.convert_to_tensor([256 * shape[0] / shape[1], 256], + dtype=tf.int32))) + + shape = tf.shape(image) + y0 = (shape[0] - height) // 2 + x0 = (shape[1] - width) // 2 + distorted_image = tf.image.crop_to_bounding_box(image, y0, x0, height, width) + distorted_image.set_shape([height, width, 3]) + means = tf.broadcast_to([123.68, 116.78, 103.94], tf.shape(distorted_image)) + return distorted_image - means + else: # bilinear + if image.dtype != tf.float32: + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + # Crop the central region of the image with an area containing 87.5% of + # the original image. + if central_fraction: + image = tf.image.central_crop(image, central_fraction=central_fraction) + + if height and width: + # Resize the image to the specified height and width. 
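+      # resize_bilinear expects a 4-D batched input, so temporarily add a
+      # batch dimension, resize, squeeze it back out, and then map the
+      # [0, 1] image into the [-1, 1] range.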
+ image = tf.expand_dims(image, 0) + image = tf.image.resize_bilinear(image, [height, width], + align_corners=False) + image = tf.squeeze(image, [0]) + image = tf.subtract(image, 0.5) + image = tf.multiply(image, 2.0) + return image + +class RecordInputImagePreprocessor(object): + """Preprocessor for images with RecordInput format.""" + + def __init__(self, + height, + width, + batch_size, + num_cores, + resize_method="bilinear"): + + self.height = height + self.width = width + self.batch_size = batch_size + self.num_cores = num_cores + self.resize_method = resize_method + + def parse_and_preprocess(self, value): + # parse + image_buffer, label_index, filename = parse_example_proto(value) + # preprocess + image = tf.image.decode_jpeg( + image_buffer, channels=3, fancy_upscaling=False, dct_method='INTEGER_FAST') + image = eval_image(image, self.height, self.width, self.resize_method) + return (image, label_index, filename) + + def minibatch(self, dataset, subset, cache_data=False): + + with tf.name_scope('batch_processing'): + + glob_pattern = dataset.tf_record_pattern(subset) + file_names = gfile.Glob(glob_pattern) + if not file_names: + raise ValueError('Found no files in --data_dir matching: {}' + .format(glob_pattern)) + ds = tf.data.TFRecordDataset.list_files(file_names) + + ds = ds.apply( + parallel_interleave( + tf.data.TFRecordDataset, cycle_length=self.num_cores, block_length=5, + sloppy=True, + buffer_output_elements=10000, prefetch_input_elements=10000)) + + if cache_data: + ds = ds.take(1).cache().repeat() + + ds = ds.prefetch(buffer_size=10000) + #ds = ds.prefetch(buffer_size=self.batch_size) + + # num of parallel batches not greater than 56 + max_num_parallel_batches = min(56, 2 * self.num_cores) + ds = ds.apply( + map_and_batch( + map_func=self.parse_and_preprocess, + batch_size=self.batch_size, + num_parallel_batches=max_num_parallel_batches, + num_parallel_calls=None)) + + ds = ds.prefetch(buffer_size=tf.contrib.data.AUTOTUNE) + + ds_iterator = ds.make_one_shot_iterator() + images, labels, filename = ds_iterator.get_next() + # reshape + labels = tf.reshape(labels, [self.batch_size]) + filename = tf.reshape(filename, [self.batch_size]) + + return images, labels, filename diff --git a/models/image_recognition/tensorflow/resnet50v1_5/int8/__init__.py b/models/image_recognition/tensorflow/resnet50v1_5/int8/__init__.py new file mode 100644 index 000000000..159180624 --- /dev/null +++ b/models/image_recognition/tensorflow/resnet50v1_5/int8/__init__.py @@ -0,0 +1,20 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: EPL-2.0 +# + diff --git a/models/image_recognition/tensorflow/resnet50v1_5/int8/benchmark.py b/models/image_recognition/tensorflow/resnet50v1_5/int8/benchmark.py new file mode 100644 index 000000000..c6d9a9e1f --- /dev/null +++ b/models/image_recognition/tensorflow/resnet50v1_5/int8/benchmark.py @@ -0,0 +1,213 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import sys +import time + +import datasets +import tensorflow as tf + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--input_graph", default=None, + help="graph/model to be executed") + parser.add_argument("--input_height", default=224, + type=int, help="input height") + parser.add_argument("--input_width", default=224, + type=int, help="input width") + parser.add_argument("--batch_size", default=32, + type=int, help="batch size") + parser.add_argument("--data_location", default=None, + help="dataset location") + parser.add_argument("--input_layer", default="input", + help="name of input layer") + parser.add_argument("--output_layer", default="predict", + help="name of output layer") + parser.add_argument("--num_cores", default=28, + type=int, help="number of physical cores") + parser.add_argument( + '--num_inter_threads', + help='number threads across operators', + type=int, default=1) + parser.add_argument( + '--num_intra_threads', + help='number threads for an operator', + type=int, default=1) + parser.add_argument( + '--data_num_inter_threads', + help='number threads across data layer operators', + type=int, default=16) + parser.add_argument( + '--data_num_intra_threads', + help='number threads for an data layer operator', + type=int, default=14) + parser.add_argument("--warmup_steps", type=int, default=10, + help="number of warmup steps") + parser.add_argument("--steps", type=int, default=50, help="number of steps") + args = parser.parse_args() + + if args.input_graph: + model_file = args.input_graph + else: + sys.exit("Please provide a graph file.") + if args.input_height: + input_height = args.input_height + else: + input_height 
= 224 + if args.input_width: + input_width = args.input_width + else: + input_width = 224 + batch_size = args.batch_size + input_layer = args.input_layer + output_layer = args.output_layer + warmup_steps = args.warmup_steps + steps = args.steps + assert steps > 10, "Benchmark steps should be at least 10." + num_inter_threads = args.num_inter_threads + num_intra_threads = args.num_intra_threads + + data_config = tf.ConfigProto() + data_config.intra_op_parallelism_threads = args.data_num_intra_threads + data_config.inter_op_parallelism_threads = args.data_num_inter_threads + data_config.use_per_session_threads = 1 + + infer_config = tf.ConfigProto() + infer_config.intra_op_parallelism_threads = num_intra_threads + infer_config.inter_op_parallelism_threads = num_inter_threads + infer_config.use_per_session_threads = 1 + + data_graph = tf.Graph() + with data_graph.as_default(): + if args.data_location: + print("inference with real data") + # get the images from dataset + dataset = datasets.ImagenetData(args.data_location) + preprocessor = dataset.get_image_preprocessor(benchmark=True)( + input_height, input_width, batch_size, + num_cores=args.num_cores, + resize_method='crop') + images = preprocessor.minibatch(dataset, subset='validation') + else: + # synthetic images + print("inference with dummy data") + input_shape = [batch_size, input_height, input_width, 3] + images = tf.random.uniform( + input_shape, 0.0, 255.0, dtype=tf.float32, name='synthetic_images') + + infer_graph = tf.Graph() + with infer_graph.as_default(): + graph_def = tf.GraphDef() + with open(model_file, "rb") as f: + graph_def.ParseFromString(f.read()) + tf.import_graph_def(graph_def, name='') + + input_tensor = infer_graph.get_tensor_by_name(input_layer + ":0") + output_tensor = infer_graph.get_tensor_by_name(output_layer + ":0") + tf.global_variables_initializer() + + data_sess = tf.Session(graph=data_graph, config=data_config) + infer_sess = tf.Session(graph=infer_graph, config=infer_config) + + print("[Running warmup steps...]") + step_total_time = 0 + step_total_images = 0 + + for t in range(warmup_steps): + data_start_time = time.time() + image_data = data_sess.run(images) + data_load_time = time.time() - data_start_time + + start_time = time.time() + infer_sess.run(output_tensor, {input_tensor: image_data}) + elapsed_time = time.time() - start_time + + # only count the data loading and processing time for real data + if args.data_location: + elapsed_time += data_load_time + + step_total_time += elapsed_time + step_total_images += batch_size + + if ((t + 1) % 10 == 0): + print("steps = {0}, {1} images/sec" + "".format(t + 1, step_total_images / step_total_time)) + step_total_time = 0 + step_total_images = 0 + + print("[Running benchmark steps...]") + total_time = 0 + total_images = 0 + + step_total_time = 0 + step_total_images = 0 + + for t in range(steps): + try: + data_start_time = time.time() + image_data = data_sess.run(images) + data_load_time = time.time() - data_start_time + + start_time = time.time() + infer_sess.run(output_tensor, {input_tensor: image_data}) + elapsed_time = time.time() - start_time + + # only count the data loading and processing time for real data + if args.data_location: + elapsed_time += data_load_time + + total_time += elapsed_time + total_images += batch_size + + step_total_time += elapsed_time + step_total_images += batch_size + + if ((t + 1) % 10 == 0): + print("steps = {0}, {1} images/sec" + "".format(t + 1, step_total_images / step_total_time)) + step_total_time = 0 + 
step_total_images = 0 + + except tf.errors.OutOfRangeError: + print("Running out of images from dataset.") + break + + print("Average throughput for batch size {0}: {1} images/sec".format(batch_size, total_images / total_time)) diff --git a/models/image_recognition/tensorflow/resnet50v1_5/int8/cnn_util.py b/models/image_recognition/tensorflow/resnet50v1_5/int8/cnn_util.py new file mode 100644 index 000000000..fb76f2971 --- /dev/null +++ b/models/image_recognition/tensorflow/resnet50v1_5/int8/cnn_util.py @@ -0,0 +1,51 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +"""Utilities for CNN benchmarks.""" + +import tensorflow as tf + + +def tensorflow_version_tuple(): + v = tf.__version__ + major, minor, patch = v.split('.') + return (int(major), int(minor), patch) + + +def tensorflow_version(): + vt = tensorflow_version_tuple() + return vt[0] * 1000 + vt[1] + diff --git a/models/image_recognition/tensorflow/resnet50v1_5/int8/datasets.py b/models/image_recognition/tensorflow/resnet50v1_5/int8/datasets.py new file mode 100644 index 000000000..1a885cb66 --- /dev/null +++ b/models/image_recognition/tensorflow/resnet50v1_5/int8/datasets.py @@ -0,0 +1,114 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +import os +from abc import abstractmethod + +import tensorflow as tf + + +IMAGENET_NUM_TRAIN_IMAGES = 1281167 +IMAGENET_NUM_VAL_IMAGES = 50000 + +class Dataset(object): + """Abstract class for cnn benchmarks dataset.""" + + def __init__(self, name, height=None, width=None, depth=None, data_dir=None, + queue_runner_required=False, num_classes=1000): + self.name = name + self.height = height + self.width = width + self.depth = depth or 3 + + self.data_dir = data_dir + self._queue_runner_required = queue_runner_required + self._num_classes = num_classes + + def tf_record_pattern(self, subset): + return os.path.join(self.data_dir, '%s-*-of-*' % subset) + + def reader(self): + return tf.TFRecordReader() + + @property + def num_classes(self): + return self._num_classes + + @num_classes.setter + def num_classes(self, val): + self._num_classes = val + + @abstractmethod + def num_examples_per_epoch(self, subset): + pass + + def __str__(self): + return self.name + + def get_image_preprocessor(self): + return None + + def queue_runner_required(self): + return self._queue_runner_required + + def use_synthetic_gpu_images(self): + return not self.data_dir + + +class ImagenetData(Dataset): + """Configuration for Imagenet dataset.""" + + def __init__(self, data_dir=None): + super(ImagenetData, self).__init__('imagenet', 300, 300, data_dir=data_dir) + + def num_examples_per_epoch(self, subset='train'): + if subset == 'train': + return IMAGENET_NUM_TRAIN_IMAGES + elif subset == 'validation': + return IMAGENET_NUM_VAL_IMAGES + elif subset == 'calibrate' or subset == 'calibration': + return 100 + else: + raise ValueError('Invalid data subset "%s"' % subset) + + def get_image_preprocessor(self, benchmark=False): + if benchmark: + import preprocessing_benchmark + return preprocessing_benchmark.RecordInputImagePreprocessor + else: + import preprocessing + return preprocessing.RecordInputImagePreprocessor + diff --git a/models/image_recognition/tensorflow/resnet50v1_5/int8/generate_calibration_data.py b/models/image_recognition/tensorflow/resnet50v1_5/int8/generate_calibration_data.py new file mode 100644 index 000000000..abf62345b --- /dev/null +++ b/models/image_recognition/tensorflow/resnet50v1_5/int8/generate_calibration_data.py @@ -0,0 +1,183 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
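With `datasets.py` in place, `ImagenetData` hard-codes the ImageNet train/validation image counts and hands back either the accuracy preprocessor (`preprocessing.py`) or the lighter benchmark preprocessor (`preprocessing_benchmark.py`) depending on the `benchmark` flag. A usage sketch under stated assumptions (TensorFlow 1.x installed, these modules on the path, and a hypothetical TFRecord directory):

```python
import datasets  # the module added above

dataset = datasets.ImagenetData('/datasets/imagenet-tfrecords')  # hypothetical path
print(dataset.tf_record_pattern('validation'))       # .../validation-*-of-*
print(dataset.num_examples_per_epoch('validation'))  # 50000

# benchmark=True selects preprocessing_benchmark.RecordInputImagePreprocessor,
# which yields images only (no labels), matching the benchmark script above.
preprocessor_cls = dataset.get_image_preprocessor(benchmark=True)
preprocessor = preprocessor_cls(224, 224, batch_size=128,
                                num_cores=28, resize_method='crop')
images = preprocessor.minibatch(dataset, subset='validation')
```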
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import argparse +import sys +import os +import time +import numpy as np +from collections import namedtuple +from operator import attrgetter + +from google.protobuf import text_format +import tensorflow as tf +import preprocessing +import datasets + +NUM_TEST_IMAGES = 50000 + +def load_graph(model_file): + graph = tf.Graph() + graph_def = tf.GraphDef() + + import os + file_ext = os.path.splitext(model_file)[1] + + with open(model_file, "rb") as f: + if file_ext == '.pbtxt': + text_format.Merge(f.read(), graph_def) + else: + graph_def.ParseFromString(f.read()) + with graph.as_default(): + tf.import_graph_def(graph_def, name='') + + return graph + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--input_graph", default=None, + help="graph/model to be executed") + parser.add_argument("--data_location", default=None, + help="full path to the validation data") + parser.add_argument("--input_height", default=224, + type=int, help="input height") + parser.add_argument("--input_width", default=224, + type=int, help="input width") + parser.add_argument("--batch_size", default=32, + type=int, help="batch size") + parser.add_argument("--input_layer", default="input", + help="name of input layer") + parser.add_argument("--output_layer", default="predict", + help="name of output layer") + parser.add_argument( + '--num_inter_threads', + help='number threads across operators', + type=int, default=1) + parser.add_argument( + '--num_intra_threads', + help='number threads for an operator', + type=int, default=1) + args = parser.parse_args() + + if args.input_graph: + model_file = args.input_graph + else: + sys.exit("Please provide a graph file.") + if args.input_height: + input_height = args.input_height + else: + input_height = 224 + if args.input_width: + input_width = args.input_width + else: + input_width = 224 + batch_size = args.batch_size + input_layer = args.input_layer + output_layer = args.output_layer + num_inter_threads = args.num_inter_threads + num_intra_threads = args.num_intra_threads + data_location = args.data_location + dataset = datasets.ImagenetData(data_location) + preprocessor = preprocessing.ImagePreprocessor( + input_height, input_width, batch_size, + 1, # device count + tf.float32, # data_type for input fed to the graph + train=False, # doing inference + resize_method='crop') + images, labels, tf_records = preprocessor.minibatch(dataset, subset='train') + graph = load_graph(model_file) + input_tensor = graph.get_tensor_by_name(input_layer + ":0") + output_tensor = graph.get_tensor_by_name(output_layer + ":0") + + config = tf.ConfigProto() + config.inter_op_parallelism_threads = num_inter_threads + config.intra_op_parallelism_threads = num_intra_threads + + total_accuracy1, total_accuracy5 = (0.0, 0.0) + 
num_processed_images = 0 + num_remaining_images = dataset.num_examples_per_epoch(subset='train') \ + - num_processed_images + + CALIBRATION_POOL_SIZE = 1000 + CALIBRATION_SET_SIZE = 100 + calibration_pool = [] + ImageWithConfidence = namedtuple('ImageWithConfidence', + ['tf_record', 'confidence']) + current_pool_size = 0 + with tf.Session() as sess: + sess_graph = tf.Session(graph=graph, config=config) + while num_remaining_images >= batch_size: + # Reads and preprocess data + np_images, np_labels, serialized_images = sess.run( + [images[0], labels[0], tf_records]) + num_processed_images += batch_size + num_remaining_images -= batch_size + # Compute inference on the preprocessed data + predictions = sess_graph.run(output_tensor, + {input_tensor: np_images}) + selected_img_indices = np.where( + predictions.argmax(axis=1) == np_labels)[0].tolist() + current_pool_size += len(selected_img_indices) + for indx in selected_img_indices: + calibration_pool.append(ImageWithConfidence( + serialized_images[indx], predictions[indx].max())) + + accuracy1 = tf.reduce_sum( + tf.cast(tf.nn.in_top_k(tf.constant(predictions), + tf.constant(np_labels), 1), tf.float32)) + + accuracy5 = tf.reduce_sum( + tf.cast(tf.nn.in_top_k(tf.constant(predictions), + tf.constant(np_labels), 5), tf.float32)) + np_accuracy1, np_accuracy5 = sess.run([accuracy1, accuracy5]) + total_accuracy1 += np_accuracy1 + total_accuracy5 += np_accuracy5 + print("Processed %d images. (Top1 accuracy, Top5 accuracy) = (%0.4f, %0.4f)" \ + % (num_processed_images, total_accuracy1/num_processed_images, + total_accuracy5/num_processed_images)) + if current_pool_size >= CALIBRATION_POOL_SIZE: + break + + writer = tf.python_io.TFRecordWriter('calibration-1-of-1') + calibration_pool = sorted(calibration_pool, + key=attrgetter('confidence'), reverse=True) + for i in range(CALIBRATION_SET_SIZE): + writer.write(calibration_pool[i].tf_record) + writer.close() diff --git a/models/image_recognition/tensorflow/resnet50v1_5/int8/preprocessing.py b/models/image_recognition/tensorflow/resnet50v1_5/int8/preprocessing.py new file mode 100644 index 000000000..c4e0a95ce --- /dev/null +++ b/models/image_recognition/tensorflow/resnet50v1_5/int8/preprocessing.py @@ -0,0 +1,419 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
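The calibration script above only admits images that the FP32 graph classifies correctly into its pool, tags each with its prediction confidence, and finally writes the 100 most confident records to `calibration-1-of-1`. The same selection rule reduced to in-memory NumPy arrays, so it can be run standalone (the toy predictions and labels are invented for illustration):

```python
import numpy as np
from collections import namedtuple
from operator import attrgetter

ImageWithConfidence = namedtuple('ImageWithConfidence', ['tf_record', 'confidence'])

def select_calibration_set(records, predictions, labels, set_size=2):
    # Keep only correctly classified records, then take the most confident
    # ones -- the rule the loop above applies batch by batch.
    pool = []
    for idx in np.where(predictions.argmax(axis=1) == labels)[0]:
        pool.append(ImageWithConfidence(records[idx], predictions[idx].max()))
    pool.sort(key=attrgetter('confidence'), reverse=True)
    return [entry.tf_record for entry in pool[:set_size]]

# Toy batch: four records, three classes; record 'b' is misclassified.
preds = np.array([[0.1, 0.8, 0.1],
                  [0.6, 0.3, 0.1],
                  [0.2, 0.2, 0.6],
                  [0.9, 0.05, 0.05]])
labels = np.array([1, 1, 2, 0])
print(select_calibration_set(['a', 'b', 'c', 'd'], preds, labels))
# ['d', 'a'] -- the two most confident correct predictions
```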
+# ============================================================================== + +"""Image pre-processing utilities. +""" +from six.moves import xrange # pylint: disable=redefined-builtin +import tensorflow as tf +from random import randint + +from tensorflow.python.ops import data_flow_ops +import cnn_util + +def parse_example_proto(example_serialized): + """Parses an Example proto containing a training example of an image. + + The output of the build_image_data.py image preprocessing script is a dataset + containing serialized Example protocol buffers. Each Example proto contains + the following fields: + + image/height: 462 + image/width: 581 + image/colorspace: 'RGB' + image/channels: 3 + image/class/label: 615 + image/class/synset: 'n03623198' + image/class/text: 'knee pad' + image/object/bbox/xmin: 0.1 + image/object/bbox/xmax: 0.9 + image/object/bbox/ymin: 0.2 + image/object/bbox/ymax: 0.6 + image/object/bbox/label: 615 + image/format: 'JPEG' + image/filename: 'ILSVRC2012_val_00041207.JPEG' + image/encoded: + + Args: + example_serialized: scalar Tensor tf.string containing a serialized + Example protocol buffer. + + Returns: + image_buffer: Tensor tf.string containing the contents of a JPEG file. + label: Tensor tf.int32 containing the label. + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged as + [ymin, xmin, ymax, xmax]. + text: Tensor tf.string containing the human-readable label. + """ + # Dense features in Example proto. + feature_map = { + 'image/encoded': tf.FixedLenFeature([], dtype=tf.string, + default_value=''), + 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, + default_value=-1), + 'image/class/text': tf.FixedLenFeature([], dtype=tf.string, + default_value=''), + } + sparse_float32 = tf.VarLenFeature(dtype=tf.float32) + # Sparse features in Example proto. + feature_map.update( + {k: sparse_float32 for k in ['image/object/bbox/xmin', + 'image/object/bbox/ymin', + 'image/object/bbox/xmax', + 'image/object/bbox/ymax']}) + + features = tf.parse_single_example(example_serialized, feature_map) + label = tf.cast(features['image/class/label'], dtype=tf.int32) + + xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0) + ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0) + xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0) + ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0) + + # Note that we impose an ordering of (y, x) just to make life difficult. + bbox = tf.concat([ymin, xmin, ymax, xmax], 0) + + # Force the variable number of bounding boxes into the shape + # [1, num_boxes, coords]. + bbox = tf.expand_dims(bbox, 0) + bbox = tf.transpose(bbox, [0, 2, 1]) + + return features['image/encoded'], label, bbox, features['image/class/text'] + + +def decode_jpeg(image_buffer, scope=None): # , dtype=tf.float32): + """Decode a JPEG string into one 3-D float image Tensor. + + Args: + image_buffer: scalar string Tensor. + scope: Optional scope for op_scope. + Returns: + 3-D float Tensor with values ranging from [0, 1). + """ + # with tf.op_scope([image_buffer], scope, 'decode_jpeg'): + # with tf.name_scope(scope, 'decode_jpeg', [image_buffer]): + with tf.name_scope(scope or 'decode_jpeg'): + # Decode the string as an RGB JPEG. + # Note that the resulting image contains an unknown height and width + # that is set dynamically by decode_jpeg. In other words, the height + # and width of image is unknown at compile-time. 
+ image = tf.image.decode_jpeg(image_buffer, channels=3, + fancy_upscaling=False, + dct_method='INTEGER_FAST') + + # image = tf.Print(image, [tf.shape(image)], 'Image shape: ') + + return image + + +def eval_image(image, height, width, bbox, thread_id, resize): + """Get the image for model evaluation.""" + with tf.name_scope('eval_image'): + if not thread_id: + tf.summary.image( + 'original_image', tf.expand_dims(image, 0)) + + if resize == 'crop': + # Note: This is much slower than crop_to_bounding_box + # It seems that the redundant pad step has huge overhead + # distorted_image = tf.image.resize_image_with_crop_or_pad(image, + # height, width) + shape = tf.shape(image) + image = tf.cond(tf.less(shape[0], shape[1]), + lambda: tf.image.resize_images(image, tf.convert_to_tensor([256, 256*shape[1]/shape[0]], dtype=tf.int32)), + lambda: tf.image.resize_images(image, tf.convert_to_tensor([256*shape[0]/shape[1], 256], dtype=tf.int32))) + shape = tf.shape(image) + + y0 = (shape[0] - height) // 2 + x0 = (shape[1] - width) // 2 + #y0=tf.random_uniform([],minval=0,maxval=(shape[0] - height + 1), dtype=tf.int32) + #x0=tf.random_uniform([],minval=0,maxval=(shape[1] - width + 1), dtype=tf.int32) + ## distorted_image = tf.slice(image, [y0,x0,0], [height,width,3]) + distorted_image = tf.image.crop_to_bounding_box(image, y0, x0, height, + width) + else: + sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( + tf.shape(image), + bounding_boxes=bbox, + min_object_covered=0.5, + aspect_ratio_range=[0.90, 1.10], + area_range=[0.10, 1.0], + max_attempts=100, + use_image_if_no_bounding_boxes=True) + bbox_begin, bbox_size, _ = sample_distorted_bounding_box + # Crop the image to the specified bounding box. + distorted_image = tf.slice(image, bbox_begin, bbox_size) + resize_method = { + 'nearest': tf.image.ResizeMethod.NEAREST_NEIGHBOR, + 'bilinear': tf.image.ResizeMethod.BILINEAR, + 'bicubic': tf.image.ResizeMethod.BICUBIC, + 'area': tf.image.ResizeMethod.AREA + }[resize] + # This resizing operation may distort the images because the aspect + # ratio is not respected. + if cnn_util.tensorflow_version() >= 11: + distorted_image = tf.image.resize_images( + distorted_image, [height, width], + resize_method, + align_corners=False) + else: + distorted_image = tf.image.resize_images( + distorted_image, height, width, resize_method, align_corners=False) + distorted_image.set_shape([height, width, 3]) + if not thread_id: + tf.summary.image( + 'cropped_resized_image', tf.expand_dims(distorted_image, 0)) + image = distorted_image + return image + + +def distort_image(image, height, width, bbox, thread_id=0, scope=None): + """Distort one image for training a network. + + Distorting images provides a useful technique for augmenting the data + set during training in order to make the network invariant to aspects + of the image that do not effect the label. + + Args: + image: 3-D float Tensor of image + height: integer + width: integer + bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] + where each coordinate is [0, 1) and the coordinates are arranged + as [ymin, xmin, ymax, xmax]. + thread_id: integer indicating the preprocessing thread. + scope: Optional scope for op_scope. + Returns: + 3-D float Tensor of distorted image used for training. 
+ """ + # with tf.op_scope([image, height, width, bbox], scope, 'distort_image'): + # with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]): + with tf.name_scope(scope or 'distort_image'): + # Each bounding box has shape [1, num_boxes, box coords] and + # the coordinates are ordered [ymin, xmin, ymax, xmax]. + + # After this point, all image pixels reside in [0,1) + # until the very end, when they're rescaled to (-1, 1). The various + # adjust_* ops all require this range for dtype float. + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + + # Display the bounding box in the first thread only. + if not thread_id: + image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), + bbox) + tf.summary.image( + 'image_with_bounding_boxes', image_with_box) + + # A large fraction of image datasets contain a human-annotated bounding + # box delineating the region of the image containing the object of interest. + # We choose to create a new bounding box for the object which is a randomly + # distorted version of the human-annotated bounding box that obeys an allowed + # range of aspect ratios, sizes and overlap with the human-annotated + # bounding box. If no box is supplied, then we assume the bounding box is + # the entire image. + sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( + tf.shape(image), + bounding_boxes=bbox, + min_object_covered=0.1, + aspect_ratio_range=[0.99, 1.01], + area_range=[0.05, 1.0], + max_attempts=100, + use_image_if_no_bounding_boxes=True) + bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box + if not thread_id: + image_with_distorted_box = tf.image.draw_bounding_boxes( + tf.expand_dims(image, 0), distort_bbox) + tf.summary.image( + 'images_with_distorted_bounding_box', + image_with_distorted_box) + + # Crop the image to the specified bounding box. + distorted_image = tf.slice(image, bbox_begin, bbox_size) + + # This resizing operation may distort the images because the aspect + # ratio is not respected. We select a resize method in a round robin + # fashion based on the thread number. + # Note that ResizeMethod contains 4 enumerated resizing methods. + resize_method = thread_id % 4 + if cnn_util.tensorflow_version() >= 11: + distorted_image = tf.image.resize_images( + distorted_image, [height, width], resize_method, align_corners=False) + else: + distorted_image = tf.image.resize_images( + distorted_image, height, width, resize_method, align_corners=False) + # Restore the shape since the dynamic slice based upon the bbox_size loses + # the third dimension. + distorted_image.set_shape([height, width, 3]) + if not thread_id: + tf.summary.image( + 'cropped_resized_image', + tf.expand_dims(distorted_image, 0)) + + # Randomly flip the image horizontally. + distorted_image = tf.image.random_flip_left_right(distorted_image) + + # Randomly distort the colors. + distorted_image = distort_color(distorted_image, thread_id) + + # Note: This ensures the scaling matches the output of eval_image + distorted_image *= 256 + + if not thread_id: + tf.summary.image( + 'final_distorted_image', + tf.expand_dims(distorted_image, 0)) + return distorted_image + + +def distort_color(image, thread_id=0, scope=None): + """Distort the color of the image. + + Each color distortion is non-commutative and thus ordering of the color ops + matters. Ideally we would randomly permute the ordering of the color ops. 
+ Rather then adding that level of complication, we select a distinct ordering + of color ops for each preprocessing thread. + + Args: + image: Tensor containing single image. + thread_id: preprocessing thread ID. + scope: Optional scope for op_scope. + Returns: + color-distorted image + """ + # with tf.op_scope([image], scope, 'distort_color'): + # with tf.name_scope(scope, 'distort_color', [image]): + with tf.name_scope(scope or 'distort_color'): + color_ordering = thread_id % 2 + + if color_ordering == 0: + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + elif color_ordering == 1: + image = tf.image.random_brightness(image, max_delta=32. / 255.) + image = tf.image.random_contrast(image, lower=0.5, upper=1.5) + image = tf.image.random_saturation(image, lower=0.5, upper=1.5) + image = tf.image.random_hue(image, max_delta=0.2) + + # The random_* ops do not necessarily clamp. + image = tf.clip_by_value(image, 0.0, 1.0) + return image + + +class ImagePreprocessor(object): + """Preprocessor for input images.""" + + def __init__(self, + height, + width, + batch_size, + device_count, + dtype=tf.float32, + train=True, + distortions=None, + resize_method=None): + self.height = height + self.width = width + self.batch_size = batch_size + self.device_count = device_count + self.dtype = dtype + self.train = train + self.resize_method = resize_method + if distortions is None: + distortions = False + self.distortions = distortions + if self.batch_size % self.device_count != 0: + raise ValueError( + ('batch_size must be a multiple of device_count: ' + 'batch_size %d, device_count: %d') % + (self.batch_size, self.device_count)) + self.batch_size_per_device = self.batch_size // self.device_count + + def preprocess(self, image_buffer, bbox, thread_id): + """Preprocessing image_buffer using thread_id.""" + # Note: Width and height of image is known only at runtime. 
+ image = tf.image.decode_jpeg(image_buffer, channels=3, + dct_method='INTEGER_FAST') + if self.train and self.distortions: + image = distort_image(image, self.height, self.width, bbox, thread_id) + else: + image = eval_image(image, self.height, self.width, bbox, thread_id, + self.resize_method) + # Note: image is now float32 [height,width,3] with range [0, 255] + + # image = tf.cast(image, tf.uint8) # HACK TESTING + + return image + + def minibatch(self, dataset, subset): + with tf.name_scope('batch_processing'): + images = [[] for i in range(self.device_count)] + labels = [[] for i in range(self.device_count)] + record_input = data_flow_ops.RecordInput( + file_pattern=dataset.tf_record_pattern(subset), + seed=randint(0, 9000), + parallelism=64, + buffer_size=10000, + batch_size=self.batch_size, + name='record_input') + records = record_input.get_yield_op() + records = tf.split(records, self.batch_size, 0) + records = [tf.reshape(record, []) for record in records] + for i in xrange(self.batch_size): + value = records[i] + image_buffer, label_index, bbox, _ = parse_example_proto(value) + image = self.preprocess(image_buffer, bbox, i % 4) + device_index = i % self.device_count + images[device_index].append(image) + labels[device_index].append(label_index) + label_index_batch = [None] * self.device_count + for device_index in xrange(self.device_count): + images[device_index] = tf.parallel_stack(images[device_index]) + label_index_batch[device_index] = tf.concat(labels[device_index], 0) + + # dynamic_pad=True) # HACK TESTING dynamic_pad=True + images[device_index] = tf.cast(images[device_index], self.dtype) + depth = 3 + images[device_index] = tf.reshape( + images[device_index], + shape=[self.batch_size_per_device, self.height, self.width, depth]) + label_index_batch[device_index] = tf.reshape( + label_index_batch[device_index], [self.batch_size_per_device]) + # Display the training images in the visualizer. + # tf.summary.image('images', images) + + return images, label_index_batch, records diff --git a/models/image_recognition/tensorflow/resnet50v1_5/int8/preprocessing_benchmark.py b/models/image_recognition/tensorflow/resnet50v1_5/int8/preprocessing_benchmark.py new file mode 100644 index 000000000..8e3556556 --- /dev/null +++ b/models/image_recognition/tensorflow/resnet50v1_5/int8/preprocessing_benchmark.py @@ -0,0 +1,173 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + + +# Copyright 2017 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
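The evaluation path in `preprocessing.py` above (and the `'crop'` branch of `preprocessing_benchmark.py` below) resizes the shorter image side to 256 while preserving the aspect ratio, then takes a central 224x224 window; the benchmark variant additionally subtracts the per-channel ImageNet means (123.68, 116.78, 103.94) after cropping. A pure-Python sketch of that geometry, using floor integer arithmetic in place of the tensor ops (the sample input size is arbitrary):

```python
def center_crop_box(src_h, src_w, out_h=224, out_w=224, short_side=256):
    # Scale the shorter side to `short_side`, preserving aspect ratio ...
    if src_h < src_w:
        new_h, new_w = short_side, short_side * src_w // src_h
    else:
        new_h, new_w = short_side * src_h // src_w, short_side
    # ... then centre an out_h x out_w window inside the resized image.
    y0 = (new_h - out_h) // 2
    x0 = (new_w - out_w) // 2
    return (new_h, new_w), (y0, x0)

print(center_crop_box(375, 500))  # ((256, 341), (16, 58))
```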
+# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================== + + +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import tensorflow as tf +from tensorflow.data.experimental import parallel_interleave +from tensorflow.data.experimental import map_and_batch +from tensorflow.python.platform import gfile + + +def parse_example_proto(example_serialized): + """Parses an Example proto containing a training example of an image. + """ + # Dense features in Example proto. + feature_map = { + 'image/encoded': tf.FixedLenFeature([], dtype=tf.string, + default_value=''), + 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, + default_value=-1), + } + sparse_float32 = tf.VarLenFeature(dtype=tf.float32) + # Sparse features in Example proto. + feature_map.update( + {k: sparse_float32 for k in ['image/object/bbox/xmin', + 'image/object/bbox/ymin', + 'image/object/bbox/xmax', + 'image/object/bbox/ymax']}) + + features = tf.parse_single_example(example_serialized, feature_map) + label = tf.cast(features['image/class/label'], dtype=tf.int32) + + return features['image/encoded'], label + + +def eval_image(image, height, width, resize_method, + central_fraction=0.875, scope=None): + with tf.name_scope('eval_image'): + if resize_method == 'crop': + shape = tf.shape(image) + image = tf.cond(tf.less(shape[0], shape[1]), + lambda: tf.image.resize_images(image, + tf.convert_to_tensor([256, 256 * shape[1] / shape[0]], + dtype=tf.int32)), + lambda: tf.image.resize_images(image, + tf.convert_to_tensor([256 * shape[0] / shape[1], 256], + dtype=tf.int32))) + shape = tf.shape(image) + y0 = (shape[0] - height) // 2 + x0 = (shape[1] - width) // 2 + distorted_image = tf.image.crop_to_bounding_box(image, y0, x0, height, width) + distorted_image.set_shape([height, width, 3]) + means = tf.broadcast_to([123.68, 116.78, 103.94], tf.shape(distorted_image)) + return distorted_image - means + else: # bilinear + if image.dtype != tf.float32: + image = tf.image.convert_image_dtype(image, dtype=tf.float32) + # Crop the central region of the image with an area containing 87.5% of + # the original image. + if central_fraction: + image = tf.image.central_crop(image, central_fraction=central_fraction) + + if height and width: + # Resize the image to the specified height and width. 
+ image = tf.expand_dims(image, 0) + image = tf.image.resize_bilinear(image, [height, width], + align_corners=False) + image = tf.squeeze(image, [0]) + image = tf.subtract(image, 0.5) + image = tf.multiply(image, 2.0) + return image + + +class RecordInputImagePreprocessor(object): + """Preprocessor for images with RecordInput format.""" + + def __init__(self, + height, + width, + batch_size, + num_cores, + resize_method): + + self.height = height + self.width = width + self.batch_size = batch_size + self.num_cores = num_cores + self.resize_method = resize_method + + def parse_and_preprocess(self, value): + # parse + image_buffer, label_index = parse_example_proto(value) + # preprocess + image = tf.image.decode_jpeg( + image_buffer, channels=3, fancy_upscaling=False, dct_method='INTEGER_FAST') + image = eval_image(image, self.height, self.width, self.resize_method) + + return (image, label_index) + + def minibatch(self, dataset, subset, cache_data=False): + + with tf.name_scope('batch_processing'): + + glob_pattern = dataset.tf_record_pattern(subset) + file_names = gfile.Glob(glob_pattern) + if not file_names: + raise ValueError('Found no files in --data_dir matching: {}' + .format(glob_pattern)) + ds = tf.data.TFRecordDataset.list_files(file_names) + + ds = ds.apply( + parallel_interleave( + tf.data.TFRecordDataset, cycle_length=self.num_cores, block_length=5, + sloppy=True, + buffer_output_elements=10000, prefetch_input_elements=10000)) + + if cache_data: + ds = ds.take(1).cache().repeat() + + ds = ds.prefetch(buffer_size=10000) + # ds = ds.prefetch(buffer_size=self.batch_size) + + # num of parallel batches not greater than 56 + max_num_parallel_batches = min(56, 2*self.num_cores) + ds = ds.apply( + map_and_batch( + map_func=self.parse_and_preprocess, + batch_size=self.batch_size, + num_parallel_batches=max_num_parallel_batches, + num_parallel_calls=None)) # this number should be tuned + + ds = ds.prefetch(buffer_size=tf.contrib.data.AUTOTUNE) # this number can be tuned + + ds_iterator = ds.make_one_shot_iterator() + images, _ = ds_iterator.get_next() + + return images diff --git a/models/object_detection/tensorflow/faster_rcnn/inference/int8/coco_int8.sh b/models/object_detection/tensorflow/faster_rcnn/inference/int8/coco_int8.sh old mode 100644 new mode 100755 diff --git a/models/object_detection/tensorflow/ssd-mobilenet/inference/int8/run_frozen_graph_ssdmob.py b/models/object_detection/tensorflow/ssd-mobilenet/inference/int8/run_frozen_graph_ssdmob.py index 89b890ab1..90a1d1fd0 100644 --- a/models/object_detection/tensorflow/ssd-mobilenet/inference/int8/run_frozen_graph_ssdmob.py +++ b/models/object_detection/tensorflow/ssd-mobilenet/inference/int8/run_frozen_graph_ssdmob.py @@ -36,10 +36,6 @@ import argparse from tensorflow.python.client import timeline -os.environ["KMP_BLOCKTIME"] = "0" -os.environ["KMP_SETTINGS"] = "1" -os.environ["KMP_AFFINITY"] = "granularity=fine,verbose,compact,1,0" - parser = argparse.ArgumentParser() parser.add_argument('-g', '--graph', help='Path to input graph to run', type=str, required=True) parser.add_argument('-d', '--dataset', help='Full Path to input dataset to run', type=str, required=True) diff --git a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/coco_metric.py b/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/coco_metric.py deleted file mode 100644 index 08f3b7e5a..000000000 --- a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/coco_metric.py +++ /dev/null @@ -1,193 +0,0 @@ -# Copyright 2018 Google. 
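`preprocessing_benchmark.py` above assembles its input pipeline by reading several TFRecord shards concurrently with `parallel_interleave` (sloppy ordering trades determinism for speed), fusing decode and batching with `map_and_batch`, and finishing with an autotuned `prefetch`. A toy, self-contained version of the same dataset shape, assuming TensorFlow 1.x (in-memory integer ranges stand in for the TFRecord shards and for `parse_and_preprocess`):

```python
import tensorflow as tf

# Three fake "shards"; each expands into five consecutive integers, the way
# each TFRecord file expands into its serialized examples.
shards = tf.data.Dataset.from_tensor_slices([0, 10, 20])
ds = shards.apply(tf.data.experimental.parallel_interleave(
    lambda start: tf.data.Dataset.from_tensor_slices(tf.range(start, start + 5)),
    cycle_length=3, sloppy=True))

# map_and_batch fuses the per-record transform (a stand-in for
# parse_and_preprocess) with batching, as minibatch() does above.
ds = ds.apply(tf.data.experimental.map_and_batch(
    map_func=lambda x: x * 2, batch_size=4, num_parallel_batches=2))
ds = ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

with tf.Session() as sess:
    batch = sess.run(ds.make_one_shot_iterator().get_next())
    print(batch)  # one batch of four transformed elements (order may vary)
```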
All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""COCO-style evaluation metrics. - -Forked from reference model implementation. - -COCO API: github.com/cocodataset/cocoapi/ -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import atexit -import tempfile - -from absl import flags - -import numpy as np -from pycocotools.coco import COCO -from pycocotools.cocoeval import COCOeval -import six - -import tensorflow as tf - -import ssd_constants - -FLAGS = flags.FLAGS - - -# https://github.com/cocodataset/cocoapi/issues/49 -if six.PY3: - import pycocotools.coco - pycocotools.coco.unicode = str - - -def async_eval_runner(queue_predictions, queue_results, val_json_file): - """Load intermediate eval results and get COCO metrics.""" - while True: - message = queue_predictions.get() - if message == 'STOP': # poison pill - break - step, predictions = message - results = compute_map(predictions, val_json_file) - queue_results.put((step, results)) - - -def compute_map(predictions, val_json_file): - """Use model predictions to compute mAP. - - Args: - predictions: a list of tuples returned by decoded_predictions function, - each containing the following elements: - image source_id, box coordinates in XYWH order, probability score, label - val_json_file: path to COCO annotation file - Returns: - A dictionary that maps all COCO metrics (keys) to their values - """ - - if val_json_file.startswith("gs://"): - _, local_val_json = tempfile.mkstemp(suffix=".json") - tf.gfile.Remove(local_val_json) - - tf.gfile.Copy(val_json_file, local_val_json) - atexit.register(tf.gfile.Remove, local_val_json) - else: - local_val_json = val_json_file - - cocoGt = COCO(local_val_json) - cocoDt = cocoGt.loadRes(np.array(predictions)) - E = COCOeval(cocoGt, cocoDt, iouType='bbox') - E.evaluate() - E.accumulate() - E.summarize() - print("Current AP: {:.5f}".format(E.stats[0])) - metric_names = ['AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'ARmax1', - 'ARmax10', 'ARmax100', 'ARs', 'ARm', 'ARl'] - - # Prefix with "COCO" to group in TensorBoard. - return {"COCO/" + key: value for key, value in zip(metric_names, E.stats)} - - -def calc_iou(target, candidates): - target_tiled = np.tile(target[np.newaxis, :], (candidates.shape[0], 1)) - # Left Top & Right Bottom - lt = np.maximum(target_tiled[:,:2], candidates[:,:2]) - - rb = np.minimum(target_tiled[:,2:], candidates[:,2:]) - - delta = np.maximum(rb - lt, 0) - - intersect = delta[:,0] * delta[:,1] - - delta1 = target_tiled[:,2:] - candidates[:,:2] - area1 = delta1[:,0] * delta1[:,1] - delta2 = target_tiled[:,2:] - candidates[:,:2] - area2 = delta2[:,0] * delta2[:,1] - - iou = intersect/(area1 + area2 - intersect) - return iou - - -# TODO(haoyuzhang): Rewrite this NumPy based implementation to TensorFlow based -# implementation under ssd_model.py accuracy_function. 
-def decode_predictions(labels_and_predictions): - """Decode predictions and remove unused boxes and labels.""" - predictions = [] - for example in labels_and_predictions: - source_id = int(example[ssd_constants.SOURCE_ID]) - pred_box = example[ssd_constants.PRED_BOXES] - pred_scores = example[ssd_constants.PRED_SCORES] - - locs, labels, probs = decode_single( - pred_box, pred_scores, ssd_constants.OVERLAP_CRITERIA, - ssd_constants.MAX_NUM_EVAL_BOXES, ssd_constants.MAX_NUM_EVAL_BOXES) - - raw_height, raw_width, _ = example[ssd_constants.RAW_SHAPE] - for loc, label, prob in zip(locs, labels, probs): - # Ordering convention differs, hence [1], [0] rather than [0], [1] - x, y = loc[1] * raw_width, loc[0] * raw_height - w, h = (loc[3] - loc[1]) * raw_width, (loc[2] - loc[0]) * raw_height - predictions.append( - [source_id, x, y, w, h, prob, ssd_constants.CLASS_INV_MAP[label]]) - return predictions - - -def decode_single(bboxes_in, scores_in, criteria, max_output, max_num=200): - # Reference to https://github.com/amdegroot/ssd.pytorch - - bboxes_out = [] - scores_out = [] - labels_out = [] - - for i, score in enumerate(np.split(scores_in, scores_in.shape[1], 1)): - score = np.squeeze(score, 1) - - # skip background - if i == 0: - continue - - mask = score > ssd_constants.MIN_SCORE - if not np.any(mask): - continue - - bboxes, score = bboxes_in[mask, :], score[mask] - - score_idx_sorted = np.argsort(score) - score_sorted = score[score_idx_sorted] - - score_idx_sorted = score_idx_sorted[-max_num:] - candidates = [] - - # perform non-maximum suppression - while len(score_idx_sorted): - idx = score_idx_sorted[-1] - bboxes_sorted = bboxes[score_idx_sorted, :] - bboxes_idx = bboxes[idx, :] - iou = calc_iou(bboxes_idx, bboxes_sorted) - - score_idx_sorted = score_idx_sorted[iou < criteria] - candidates.append(idx) - - bboxes_out.append(bboxes[candidates, :]) - scores_out.append(score[candidates]) - labels_out.extend([i]*len(candidates)) - - if len(scores_out) == 0: - tf.logging.info("No objects detected. Returning dummy values.") - return ( - np.zeros(shape=(1, 4), dtype=np.float32), - np.zeros(shape=(1,), dtype=np.int32), - np.ones(shape=(1,), dtype=np.float32) * ssd_constants.DUMMY_SCORE, - ) - - bboxes_out = np.concatenate(bboxes_out, axis=0) - scores_out = np.concatenate(scores_out, axis=0) - labels_out = np.array(labels_out) - - max_ids = np.argsort(scores_out)[-max_output:] - - return bboxes_out[max_ids, :], labels_out[max_ids], scores_out[max_ids] diff --git a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/datasets.py b/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/datasets.py deleted file mode 100644 index 58c0f0dff..000000000 --- a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/datasets.py +++ /dev/null @@ -1,251 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Benchmark dataset utilities. 
-""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -from abc import abstractmethod -import os - -import numpy as np -import six -from six.moves import cPickle -from six.moves import xrange # pylint: disable=redefined-builtin -import tensorflow as tf - -from tensorflow.python.platform import gfile -import preprocessing - -IMAGENET_NUM_TRAIN_IMAGES = 1281167 -IMAGENET_NUM_VAL_IMAGES = 50000 - -COCO_NUM_TRAIN_IMAGES = 118287 -COCO_NUM_VAL_IMAGES = 4952 - - -class Dataset(object): - """Abstract class for cnn benchmarks dataset.""" - - def __init__(self, - name, - data_dir=None, - queue_runner_required=False, - num_classes=None): - self.name = name - self.data_dir = data_dir - self._queue_runner_required = queue_runner_required - self._num_classes = num_classes - - def tf_record_pattern(self, subset): - return os.path.join(self.data_dir, '%s-*-of-*' % subset) - - def reader(self): - return tf.TFRecordReader() - - @property - def num_classes(self): - return self._num_classes - - @num_classes.setter - def num_classes(self, val): - self._num_classes = val - - @abstractmethod - def num_examples_per_epoch(self, subset): - pass - - def __str__(self): - return self.name - - def get_input_preprocessor(self, input_preprocessor='default'): - assert not self.use_synthetic_gpu_inputs() - return _SUPPORTED_INPUT_PREPROCESSORS[self.name][input_preprocessor] - - def queue_runner_required(self): - return self._queue_runner_required - - def use_synthetic_gpu_inputs(self): - return not self.data_dir - - -class LibrispeechDataset(Dataset): - """Configuration for LibriSpeech dataset.""" - - def __init__(self, data_dir=None): - super(LibrispeechDataset, self).__init__( - 'librispeech', data_dir, num_classes=29) - - def tf_record_pattern(self, subset): - if subset == 'train': - return os.path.join(self.data_dir, 'train-clean-*.tfrecords') - elif subset == 'validation': - return os.path.join(self.data_dir, 'test-clean.tfrecords') - else: - return '' - - def num_examples_per_epoch(self, subset='train'): - del subset - return 2 # TODO(laigd): currently this is an arbitrary number. - - -class ImageDataset(Dataset): - """Abstract class for image datasets.""" - - def __init__(self, - name, - height, - width, - depth=None, - data_dir=None, - queue_runner_required=False, - num_classes=1001): - super(ImageDataset, self).__init__(name, data_dir, queue_runner_required, - num_classes) - self.height = height - self.width = width - self.depth = depth or 3 - - -class ImagenetDataset(ImageDataset): - """Configuration for Imagenet dataset.""" - - def __init__(self, data_dir=None): - super(ImagenetDataset, self).__init__( - 'imagenet', 300, 300, data_dir=data_dir) - - def num_examples_per_epoch(self, subset='train'): - if subset == 'train': - return IMAGENET_NUM_TRAIN_IMAGES - elif subset == 'validation': - return IMAGENET_NUM_VAL_IMAGES - else: - raise ValueError('Invalid data subset "%s"' % subset) - - -class Cifar10Dataset(ImageDataset): - """Configuration for cifar 10 dataset. - - It will mount all the input images to memory. 
- """ - - def __init__(self, data_dir=None): - super(Cifar10Dataset, self).__init__( - 'cifar10', - 32, - 32, - data_dir=data_dir, - queue_runner_required=True, - num_classes=11) - - def read_data_files(self, subset='train'): - """Reads from data file and returns images and labels in a numpy array.""" - assert self.data_dir, ('Cannot call `read_data_files` when using synthetic ' - 'data') - if subset == 'train': - filenames = [ - os.path.join(self.data_dir, 'data_batch_%d' % i) - for i in xrange(1, 6) - ] - elif subset == 'validation': - filenames = [os.path.join(self.data_dir, 'test_batch')] - else: - raise ValueError('Invalid data subset "%s"' % subset) - - inputs = [] - for filename in filenames: - with gfile.Open(filename, 'rb') as f: - # python2 does not have the encoding parameter - encoding = {} if six.PY2 else {'encoding': 'bytes'} - inputs.append(cPickle.load(f, **encoding)) - # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the - # input format. - all_images = np.concatenate( - [each_input[b'data'] for each_input in inputs]).astype(np.float32) - all_labels = np.concatenate( - [each_input[b'labels'] for each_input in inputs]) - return all_images, all_labels - - def num_examples_per_epoch(self, subset='train'): - if subset == 'train': - return 50000 - elif subset == 'validation': - return 10000 - else: - raise ValueError('Invalid data subset "%s"' % subset) - - -class COCODataset(ImageDataset): - """COnfiguration for COCO dataset.""" - - def __init__(self, data_dir=None, image_size=300): - super(COCODataset, self).__init__( - 'coco', image_size, image_size, data_dir=data_dir, num_classes=81) - - def num_examples_per_epoch(self, subset='train'): - if subset == 'train': - return COCO_NUM_TRAIN_IMAGES - elif subset == 'validation': - return COCO_NUM_VAL_IMAGES - else: - raise ValueError('Invalid data subset "%s"' % subset) - - -_SUPPORTED_DATASETS = { - 'imagenet': ImagenetDataset, - 'cifar10': Cifar10Dataset, - 'librispeech': LibrispeechDataset, - 'coco': COCODataset, -} - -_SUPPORTED_INPUT_PREPROCESSORS = { - 'imagenet': { - 'default': preprocessing.RecordInputImagePreprocessor, - 'official_models_imagenet': preprocessing.ImagenetPreprocessor, - }, - 'cifar10': { - 'default': preprocessing.Cifar10ImagePreprocessor - }, - 'librispeech': { - 'default': preprocessing.LibrispeechPreprocessor - }, - 'coco': { - 'default': preprocessing.COCOPreprocessor - }, -} - - -def create_dataset(data_dir, data_name): - """Create a Dataset instance based on data_dir and data_name.""" - if not data_dir and not data_name: - # When using synthetic data, use synthetic imagenet images by default. - data_name = 'imagenet' - - # Infere dataset name from data_dir if data_name is not provided. - if data_name is None: - for supported_name in _SUPPORTED_DATASETS: - if supported_name in data_dir: - data_name = supported_name - break - else: # Failed to identify dataset name from data dir. - raise ValueError('Could not identify name of dataset. ' - 'Please specify with --data_name option.') - if data_name not in _SUPPORTED_DATASETS: - raise ValueError('Unknown dataset. 
Must be one of %s' % ', '.join( - [key for key in sorted(_SUPPORTED_DATASETS.keys())])) - - return _SUPPORTED_DATASETS[data_name](data_dir) diff --git a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/infer_detections.py b/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/infer_detections.py index f2666a94c..657469658 100644 --- a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/infer_detections.py +++ b/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/infer_detections.py @@ -23,9 +23,10 @@ from argparse import ArgumentParser +import benchmark_cnn import datasets import ssd_constants -import ssd_model +from models import ssd_model from preprocessing import COCOPreprocessor IMAGE_SIZE = 300 @@ -168,7 +169,8 @@ def accuracy_check(self): ds_init = tf.get_collection(tf.GraphKeys.TABLE_INITIALIZERS) ds_sess = tf.Session() - self.model = ssd_model.SSD300Model(self.args.data_location) + params = benchmark_cnn.make_params(data_dir=self.args.data_location) + self.model = ssd_model.SSD300Model(params=params) print("Inference for accuracy check.") with tf.Session(graph=self.freeze_graph, config=self.config) as sess: diff --git a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/preprocessing.py b/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/preprocessing.py deleted file mode 100644 index 6814a48cd..000000000 --- a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/preprocessing.py +++ /dev/null @@ -1,1259 +0,0 @@ -# Copyright 2017 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - -"""Image pre-processing utilities. -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import math -from six.moves import xrange # pylint: disable=redefined-builtin -import tensorflow as tf - -from tensorflow.contrib.data.python.ops import threadpool -from tensorflow.contrib.image.python.ops import distort_image_ops -from tensorflow.contrib.data.python.ops import interleave_ops -from tensorflow.contrib.data.python.ops import batching -from tensorflow.python.framework import function -from tensorflow.python.layers import utils -from tensorflow.python.ops import data_flow_ops -from tensorflow.python.platform import gfile - - -def parse_example_proto(example_serialized): - """Parses an Example proto containing a training example of an image. - - The output of the build_image_data.py image preprocessing script is a dataset - containing serialized Example protocol buffers. 
Each Example proto contains - the following fields: - - image/height: 462 - image/width: 581 - image/colorspace: 'RGB' - image/channels: 3 - image/class/label: 615 - image/class/synset: 'n03623198' - image/class/text: 'knee pad' - image/object/bbox/xmin: 0.1 - image/object/bbox/xmax: 0.9 - image/object/bbox/ymin: 0.2 - image/object/bbox/ymax: 0.6 - image/object/bbox/label: 615 - image/format: 'JPEG' - image/filename: 'ILSVRC2012_val_00041207.JPEG' - image/encoded: - - Args: - example_serialized: scalar Tensor tf.string containing a serialized - Example protocol buffer. - - Returns: - image_buffer: Tensor tf.string containing the contents of a JPEG file. - label: Tensor tf.int32 containing the label. - bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] - where each coordinate is [0, 1) and the coordinates are arranged as - [ymin, xmin, ymax, xmax]. - text: Tensor tf.string containing the human-readable label. - """ - # Dense features in Example proto. - feature_map = { - 'image/encoded': tf.FixedLenFeature([], dtype=tf.string, - default_value=''), - 'image/class/label': tf.FixedLenFeature([1], dtype=tf.int64, - default_value=-1), - 'image/class/text': tf.FixedLenFeature([], dtype=tf.string, - default_value=''), - } - sparse_float32 = tf.VarLenFeature(dtype=tf.float32) - # Sparse features in Example proto. - feature_map.update( - {k: sparse_float32 for k in ['image/object/bbox/xmin', - 'image/object/bbox/ymin', - 'image/object/bbox/xmax', - 'image/object/bbox/ymax']}) - - features = tf.parse_single_example(example_serialized, feature_map) - label = tf.cast(features['image/class/label'], dtype=tf.int32) - - xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 0) - ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 0) - xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 0) - ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 0) - - # Note that we impose an ordering of (y, x) just to make life difficult. - bbox = tf.concat([ymin, xmin, ymax, xmax], 0) - - # Force the variable number of bounding boxes into the shape - # [1, num_boxes, coords]. - bbox = tf.expand_dims(bbox, 0) - bbox = tf.transpose(bbox, [0, 2, 1]) - - return features['image/encoded'], label, bbox, features['image/class/text'] - - -_RESIZE_METHOD_MAP = { - 'nearest': tf.image.ResizeMethod.NEAREST_NEIGHBOR, - 'bilinear': tf.image.ResizeMethod.BILINEAR, - 'bicubic': tf.image.ResizeMethod.BICUBIC, - 'area': tf.image.ResizeMethod.AREA -} - - -def get_image_resize_method(resize_method, batch_position=0): - """Get tensorflow resize method. - - If resize_method is 'round_robin', return different methods based on batch - position in a round-robin fashion. NOTE: If the batch size is not a multiple - of the number of methods, then the distribution of methods will not be - uniform. - - Args: - resize_method: (string) nearest, bilinear, bicubic, area, or round_robin. - batch_position: position of the image in a batch. NOTE: this argument can - be an integer or a tensor - Returns: - one of resize type defined in tf.image.ResizeMethod. - """ - - if resize_method != 'round_robin': - return _RESIZE_METHOD_MAP[resize_method] - - # return a resize method based on batch position in a round-robin fashion. 
- resize_methods = list(_RESIZE_METHOD_MAP.values()) - def lookup(index): - return resize_methods[index] - - def resize_method_0(): - return utils.smart_cond(batch_position % len(resize_methods) == 0, - lambda: lookup(0), resize_method_1) - - def resize_method_1(): - return utils.smart_cond(batch_position % len(resize_methods) == 1, - lambda: lookup(1), resize_method_2) - - def resize_method_2(): - return utils.smart_cond(batch_position % len(resize_methods) == 2, - lambda: lookup(2), lambda: lookup(3)) - - # NOTE(jsimsa): Unfortunately, we cannot use a single recursive function here - # because TF would not be able to construct a finite graph. - - return resize_method_0() - - -def decode_jpeg(image_buffer, scope=None): # , dtype=tf.float32): - """Decode a JPEG string into one 3-D float image Tensor. - - Args: - image_buffer: scalar string Tensor. - scope: Optional scope for op_scope. - Returns: - 3-D float Tensor with values ranging from [0, 1). - """ - # with tf.op_scope([image_buffer], scope, 'decode_jpeg'): - # with tf.name_scope(scope, 'decode_jpeg', [image_buffer]): - with tf.name_scope(scope or 'decode_jpeg'): - # Decode the string as an RGB JPEG. - # Note that the resulting image contains an unknown height and width - # that is set dynamically by decode_jpeg. In other words, the height - # and width of image is unknown at compile-time. - image = tf.image.decode_jpeg(image_buffer, channels=3, - fancy_upscaling=False, - dct_method='INTEGER_FAST') - - # image = tf.Print(image, [tf.shape(image)], 'Image shape: ') - - return image - - -_R_MEAN = 123.68 -_G_MEAN = 116.78 -_B_MEAN = 103.94 -_CHANNEL_MEANS = [_R_MEAN, _G_MEAN, _B_MEAN] - - -def normalized_image(images): - # Rescale from [0, 255] to [0, 2] - images = tf.multiply(images, 1. / 127.5) - # Rescale to [-1, 1] - return tf.subtract(images, 1.0) - - -def eval_image(image, - height, - width, - batch_position, - resize_method, - summary_verbosity=0): - """Get the image for model evaluation. - - We preprocess the image simiarly to Slim, see - https://github.com/tensorflow/models/blob/master/research/slim/preprocessing/vgg_preprocessing.py - Validation images do not have bounding boxes, so to crop the image, we first - resize the image such that the aspect ratio is maintained and the resized - height and width are both at least 1.145 times `height` and `width` - respectively. Then, we do a central crop to size (`height`, `width`). - - Args: - image: 3-D float Tensor representing the image. - height: The height of the image that will be returned. - width: The width of the image that will be returned. - batch_position: position of the image in a batch, which affects how images - are distorted and resized. NOTE: this argument can be an integer or a - tensor - resize_method: one of the strings 'round_robin', 'nearest', 'bilinear', - 'bicubic', or 'area'. - summary_verbosity: Verbosity level for summary ops. Pass 0 to disable both - summaries and checkpoints. - Returns: - An image of size (output_height, output_width, 3) that is resized and - cropped as described above. - """ - # TODO(reedwm): Currently we resize then crop. Investigate if it's faster to - # crop then resize. 
- with tf.name_scope('eval_image'): - if summary_verbosity >= 3: - tf.summary.image( - 'original_image', tf.expand_dims(image, 0)) - - shape = tf.shape(image) - image_height = shape[0] - image_width = shape[1] - image_height_float = tf.cast(image_height, tf.float32) - image_width_float = tf.cast(image_width, tf.float32) - - # This value is chosen so that in resnet, images are cropped to a size of - # 256 x 256, which matches what other implementations do. The final image - # size for resnet is 224 x 224, and floor(224 * 1.145) = 256. - scale_factor = 1.145 - - # Compute resize_height and resize_width to be the minimum values such that - # 1. The aspect ratio is maintained (i.e. resize_height / resize_width is - # image_height / image_width), and - # 2. resize_height >= height * `scale_factor`, and - # 3. resize_width >= width * `scale_factor` - max_ratio = tf.maximum(height / image_height_float, - width / image_width_float) - resize_height = tf.cast(image_height_float * max_ratio * scale_factor, - tf.int32) - resize_width = tf.cast(image_width_float * max_ratio * scale_factor, - tf.int32) - - # Resize the image to shape (`resize_height`, `resize_width`) - image_resize_method = get_image_resize_method(resize_method, batch_position) - distorted_image = tf.image.resize_images(image, - [resize_height, resize_width], - image_resize_method, - align_corners=False) - - # Do a central crop of the image to size (height, width). - # MLPerf requires us to log (height, width) with two different keys. - total_crop_height = (resize_height - height) - crop_top = total_crop_height // 2 - total_crop_width = (resize_width - width) - crop_left = total_crop_width // 2 - distorted_image = tf.slice(distorted_image, [crop_top, crop_left, 0], - [height, width, 3]) - - distorted_image.set_shape([height, width, 3]) - if summary_verbosity >= 3: - tf.summary.image( - 'cropped_resized_image', tf.expand_dims(distorted_image, 0)) - image = distorted_image - return image - - -def train_image(image_buffer, - height, - width, - bbox, - batch_position, - resize_method, - distortions, - scope=None, - summary_verbosity=0, - distort_color_in_yiq=False, - fuse_decode_and_crop=False): - """Distort one image for training a network. - - Distorting images provides a useful technique for augmenting the data - set during training in order to make the network invariant to aspects - of the image that do not effect the label. - - Args: - image_buffer: scalar string Tensor representing the raw JPEG image buffer. - height: integer - width: integer - bbox: 3-D float Tensor of bounding boxes arranged [1, num_boxes, coords] - where each coordinate is [0, 1) and the coordinates are arranged - as [ymin, xmin, ymax, xmax]. - batch_position: position of the image in a batch, which affects how images - are distorted and resized. NOTE: this argument can be an integer or a - tensor - resize_method: round_robin, nearest, bilinear, bicubic, or area. - distortions: If true, apply full distortions for image colors. - scope: Optional scope for op_scope. - summary_verbosity: Verbosity level for summary ops. Pass 0 to disable both - summaries and checkpoints. - distort_color_in_yiq: distort color of input images in YIQ space. - fuse_decode_and_crop: fuse the decode/crop operation. - Returns: - 3-D float Tensor of distorted image used for training. 
- """ - # with tf.op_scope([image, height, width, bbox], scope, 'distort_image'): - # with tf.name_scope(scope, 'distort_image', [image, height, width, bbox]): - with tf.name_scope(scope or 'distort_image'): - # A large fraction of image datasets contain a human-annotated bounding box - # delineating the region of the image containing the object of interest. We - # choose to create a new bounding box for the object which is a randomly - # distorted version of the human-annotated bounding box that obeys an - # allowed range of aspect ratios, sizes and overlap with the human-annotated - # bounding box. If no box is supplied, then we assume the bounding box is - # the entire image. - min_object_covered = 0.1 - aspect_ratio_range = [0.75, 1.33] - area_range = [0.05, 1.0] - max_attempts = 100 - - sample_distorted_bounding_box = tf.image.sample_distorted_bounding_box( - tf.image.extract_jpeg_shape(image_buffer), - bounding_boxes=bbox, - min_object_covered=min_object_covered, - aspect_ratio_range=aspect_ratio_range, - area_range=area_range, - max_attempts=max_attempts, - use_image_if_no_bounding_boxes=True) - bbox_begin, bbox_size, distort_bbox = sample_distorted_bounding_box - if summary_verbosity >= 3: - image = tf.image.decode_jpeg(image_buffer, channels=3, - dct_method='INTEGER_FAST') - image = tf.image.convert_image_dtype(image, dtype=tf.float32) - image_with_distorted_box = tf.image.draw_bounding_boxes( - tf.expand_dims(image, 0), distort_bbox) - tf.summary.image( - 'images_with_distorted_bounding_box', - image_with_distorted_box) - - # Crop the image to the specified bounding box. - if fuse_decode_and_crop: - offset_y, offset_x, _ = tf.unstack(bbox_begin) - target_height, target_width, _ = tf.unstack(bbox_size) - crop_window = tf.stack([offset_y, offset_x, target_height, target_width]) - image = tf.image.decode_and_crop_jpeg( - image_buffer, crop_window, channels=3) - else: - image = tf.image.decode_jpeg(image_buffer, channels=3, - dct_method='INTEGER_FAST') - image = tf.slice(image, bbox_begin, bbox_size) - - distorted_image = tf.image.random_flip_left_right(image) - - # This resizing operation may distort the images because the aspect - # ratio is not respected. - image_resize_method = get_image_resize_method(resize_method, batch_position) - distorted_image = tf.image.resize_images( - distorted_image, [height, width], - image_resize_method, - align_corners=False) - # Restore the shape since the dynamic slice based upon the bbox_size loses - # the third dimension. - distorted_image.set_shape([height, width, 3]) - if summary_verbosity >= 3: - tf.summary.image('cropped_resized_maybe_flipped_image', - tf.expand_dims(distorted_image, 0)) - - if distortions: - distorted_image = tf.cast(distorted_image, dtype=tf.float32) - # Images values are expected to be in [0,1] for color distortion. - distorted_image /= 255. - # Randomly distort the colors. - distorted_image = distort_color(distorted_image, batch_position, - distort_color_in_yiq=distort_color_in_yiq) - - # Note: This ensures the scaling matches the output of eval_image - distorted_image *= 255 - - if summary_verbosity >= 3: - tf.summary.image( - 'final_distorted_image', - tf.expand_dims(distorted_image, 0)) - return distorted_image - - -def distort_color(image, batch_position=0, distort_color_in_yiq=False, - scope=None): - """Distort the color of the image. - - Each color distortion is non-commutative and thus ordering of the color ops - matters. Ideally we would randomly permute the ordering of the color ops. 
- Rather then adding that level of complication, we select a distinct ordering - of color ops based on the position of the image in a batch. - - Args: - image: float32 Tensor containing single image. Tensor values should be in - range [0, 1]. - batch_position: the position of the image in a batch. NOTE: this argument - can be an integer or a tensor - distort_color_in_yiq: distort color of input images in YIQ space. - scope: Optional scope for op_scope. - Returns: - color-distorted image - """ - with tf.name_scope(scope or 'distort_color'): - - def distort_fn_0(image=image): - """Variant 0 of distort function.""" - image = tf.image.random_brightness(image, max_delta=32. / 255.) - if distort_color_in_yiq: - image = distort_image_ops.random_hsv_in_yiq( - image, lower_saturation=0.5, upper_saturation=1.5, - max_delta_hue=0.2 * math.pi) - else: - image = tf.image.random_saturation(image, lower=0.5, upper=1.5) - image = tf.image.random_hue(image, max_delta=0.2) - image = tf.image.random_contrast(image, lower=0.5, upper=1.5) - return image - - def distort_fn_1(image=image): - """Variant 1 of distort function.""" - image = tf.image.random_brightness(image, max_delta=32. / 255.) - image = tf.image.random_contrast(image, lower=0.5, upper=1.5) - if distort_color_in_yiq: - image = distort_image_ops.random_hsv_in_yiq( - image, lower_saturation=0.5, upper_saturation=1.5, - max_delta_hue=0.2 * math.pi) - else: - image = tf.image.random_saturation(image, lower=0.5, upper=1.5) - image = tf.image.random_hue(image, max_delta=0.2) - return image - - image = utils.smart_cond(batch_position % 2 == 0, distort_fn_0, - distort_fn_1) - # The random_* ops do not necessarily clamp. - image = tf.clip_by_value(image, 0.0, 1.0) - return image - - -class InputPreprocessor(object): - """Base class for all model preprocessors.""" - - def __init__(self, batch_size, output_shapes): - self.batch_size = batch_size - self.output_shapes = output_shapes - - def supports_datasets(self): - """Whether this preprocessor supports dataset.""" - return False - - def minibatch(self, dataset, subset, params, shift_ratio=-1): - """Returns tensors representing a minibatch of all the input.""" - raise NotImplementedError('Must be implemented by subclass.') - - # The methods added below are only supported/used if supports_datasets() - # returns True. - # TODO(laigd): refactor benchmark_cnn.py and put the logic of - # _build_input_processing() into InputPreprocessor. 
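As a plain-Python illustration of the selection logic in `distort_color` above (non-YIQ path): even batch positions apply the color ops in the `distort_fn_0` order, odd positions in the `distort_fn_1` order, and the result is clipped back to `[0, 1]` because the `random_*` ops do not guarantee that range. The helper below is hypothetical and only names the op orderings:

```python
def color_op_order(batch_position):
    variant_0 = ('brightness', 'saturation', 'hue', 'contrast')  # distort_fn_0
    variant_1 = ('brightness', 'contrast', 'saturation', 'hue')  # distort_fn_1
    return variant_0 if batch_position % 2 == 0 else variant_1

print(color_op_order(0))  # even positions -> distort_fn_0 ordering
print(color_op_order(1))  # odd positions  -> distort_fn_1 ordering
```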
- - def parse_and_preprocess(self, value, batch_position): - """Function to parse and preprocess an Example proto in input pipeline.""" - raise NotImplementedError('Must be implemented by subclass.') - - def build_prefetch_input_processing(self, batch_size, model_input_shapes, - num_splits, cpu_device, params, - gpu_devices, model_input_data_types, - dataset, doing_eval): - """"Returns FunctionBufferingResources that do input pre(processing).""" - assert self.supports_datasets() - with tf.device(cpu_device): - if doing_eval: - subset = 'validation' - else: - subset = 'train' - - function_buffering_resources = [] - remote_fn, args = self.minibatch_fn( - batch_size=batch_size, - model_input_shapes=model_input_shapes, - num_splits=num_splits, - dataset=dataset, - subset=subset, - train=(not doing_eval), - datasets_repeat_cached_sample=params.datasets_repeat_cached_sample, - num_threads=params.datasets_num_private_threads, - datasets_use_caching=params.datasets_use_caching, - datasets_parallel_interleave_cycle_length=( - params.datasets_parallel_interleave_cycle_length), - datasets_sloppy_parallel_interleave=( - params.datasets_sloppy_parallel_interleave), - datasets_parallel_interleave_prefetch=( - params.datasets_parallel_interleave_prefetch)) - for device_num in range(len(gpu_devices)): - with tf.device(gpu_devices[device_num]): - buffer_resource_handle = prefetching_ops.function_buffering_resource( - f=remote_fn, - output_types=model_input_data_types, - target_device=cpu_device, - string_arg=args[0], - buffer_size=params.datasets_prefetch_buffer_size, - shared_name=None) - function_buffering_resources.append(buffer_resource_handle) - return function_buffering_resources - - # TODO(laigd): figure out how to remove these parameters, since the - # preprocessor itself has self.batch_size, self.num_splits, etc defined. 
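`build_prefetch_input_processing` above fills one per-GPU buffer with batches that were already preprocessed on the CPU, using the since-removed contrib `function_buffering_resource`. A rough sketch of the analogous single-device pattern with the stable `tf.data` API is shown below (assumes TF 1.13+; for the multi-GPU case this file uses `MultiDeviceIterator` instead, see `build_multi_device_iterator` below):

```python
import tensorflow as tf

def prefetch_to_gpu(ds, gpu_device, buffer_size=2):
    # `ds` is assumed to already yield fully preprocessed batches on the CPU;
    # prefetch_to_device keeps `buffer_size` of them resident on the GPU.
    return ds.apply(
        tf.data.experimental.prefetch_to_device(gpu_device, buffer_size))
```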
- def build_multi_device_iterator(self, batch_size, num_splits, cpu_device, - params, gpu_devices, dataset, doing_eval): - """Creates a MultiDeviceIterator.""" - assert self.supports_datasets() - assert num_splits == len(gpu_devices) - with tf.name_scope('batch_processing'): - if doing_eval: - subset = 'validation' - else: - subset = 'train' - batch_size_per_split = batch_size // num_splits - ds = self.create_dataset( - batch_size, - num_splits, - batch_size_per_split, - dataset, - subset, - train=(not doing_eval), - datasets_repeat_cached_sample=params.datasets_repeat_cached_sample, - num_threads=params.datasets_num_private_threads, - datasets_use_caching=params.datasets_use_caching, - datasets_parallel_interleave_cycle_length=( - params.datasets_parallel_interleave_cycle_length), - datasets_sloppy_parallel_interleave=( - params.datasets_sloppy_parallel_interleave), - datasets_parallel_interleave_prefetch=( - params.datasets_parallel_interleave_prefetch)) - multi_device_iterator = multi_device_iterator_ops.MultiDeviceIterator( - ds, - gpu_devices, - source_device=cpu_device, - max_buffer_size=params.multi_device_iterator_max_buffer_size) - tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, - multi_device_iterator.initializer) - return multi_device_iterator - - def create_dataset(self, - batch_size, - num_splits, - batch_size_per_split, - dataset, - subset, - train, - datasets_repeat_cached_sample, - num_threads=None, - datasets_use_caching=False, - datasets_parallel_interleave_cycle_length=None, - datasets_sloppy_parallel_interleave=False, - datasets_parallel_interleave_prefetch=None): - """Creates a dataset for the benchmark.""" - raise NotImplementedError('Must be implemented by subclass.') - - def create_iterator(self, ds): - ds_iterator = ds.make_initializable_iterator() - tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, - ds_iterator.initializer) - return ds_iterator - - def minibatch_fn(self, batch_size, model_input_shapes, num_splits, - dataset, subset, train, datasets_repeat_cached_sample, - num_threads, datasets_use_caching, - datasets_parallel_interleave_cycle_length, - datasets_sloppy_parallel_interleave, - datasets_parallel_interleave_prefetch): - """Returns a function and list of args for the fn to create a minibatch.""" - assert self.supports_datasets() - batch_size_per_split = batch_size // num_splits - assert batch_size_per_split == model_input_shapes[0][0] - with tf.name_scope('batch_processing'): - ds = self.create_dataset(batch_size, num_splits, batch_size_per_split, - dataset, subset, train, - datasets_repeat_cached_sample, num_threads, - datasets_use_caching, - datasets_parallel_interleave_cycle_length, - datasets_sloppy_parallel_interleave, - datasets_parallel_interleave_prefetch) - ds_iterator = self.create_iterator(ds) - - ds_iterator_string_handle = ds_iterator.string_handle() - - @function.Defun(tf.string) - def _fn(h): - remote_iterator = tf.data.Iterator.from_string_handle( - h, ds_iterator.output_types, ds_iterator.output_shapes) - input_list = remote_iterator.get_next() - reshaped_input_list = [ - tf.reshape(input_list[i], shape=model_input_shapes[i]) - for i in range(len(input_list)) - ] - return reshaped_input_list - - return _fn, [ds_iterator_string_handle] - - -class BaseImagePreprocessor(InputPreprocessor): - """Base class for all image model preprocessors.""" - - def __init__(self, - batch_size, - output_shapes, - num_splits, - dtype, - train, - distortions, - resize_method, - shift_ratio=-1, - summary_verbosity=0, - distort_color_in_yiq=True, - 
fuse_decode_and_crop=True, - match_mlperf=False): - super(BaseImagePreprocessor, self).__init__(batch_size, output_shapes) - image_shape = output_shapes[0] - # image_shape is in form (batch_size, height, width, depth) - self.height = image_shape[1] - self.width = image_shape[2] - self.depth = image_shape[3] - self.num_splits = num_splits - self.dtype = dtype - self.train = train - self.resize_method = resize_method - self.shift_ratio = shift_ratio - self.distortions = distortions - self.distort_color_in_yiq = distort_color_in_yiq - self.fuse_decode_and_crop = fuse_decode_and_crop - if self.batch_size % self.num_splits != 0: - raise ValueError( - ('batch_size must be a multiple of num_splits: ' - 'batch_size %d, num_splits: %d') % - (self.batch_size, self.num_splits)) - self.batch_size_per_split = self.batch_size // self.num_splits - self.summary_verbosity = summary_verbosity - self.match_mlperf = match_mlperf - - def parse_and_preprocess(self, value, batch_position): - assert self.supports_datasets() - image_buffer, label_index, bbox, _ = parse_example_proto(value) - if self.match_mlperf: - bbox = tf.zeros((1, 0, 4), dtype=bbox.dtype) - image = self.preprocess(image_buffer, bbox, batch_position) - return (image, label_index) - - def preprocess(self, image_buffer, bbox, batch_position): - raise NotImplementedError('Must be implemented by subclass.') - - def create_dataset(self, - batch_size, - num_splits, - batch_size_per_split, - dataset, - subset, - train, - datasets_repeat_cached_sample, - num_threads=None, - datasets_use_caching=False, - datasets_parallel_interleave_cycle_length=None, - datasets_sloppy_parallel_interleave=False, - datasets_parallel_interleave_prefetch=None): - """Creates a dataset for the benchmark.""" - assert self.supports_datasets() - glob_pattern = dataset.tf_record_pattern(subset) - file_names = gfile.Glob(glob_pattern) - if not file_names: - raise ValueError('Found no files in --data_dir matching: {}' - .format(glob_pattern)) - ds = tf.data.TFRecordDataset.list_files(file_names) - ds = ds.apply( - interleave_ops.parallel_interleave( - tf.data.TFRecordDataset, - cycle_length=datasets_parallel_interleave_cycle_length or 10, - sloppy=datasets_sloppy_parallel_interleave, - prefetch_input_elements=datasets_parallel_interleave_prefetch)) - if datasets_repeat_cached_sample: - # Repeat a single sample element indefinitely to emulate memory-speed IO. 
- ds = ds.take(1).cache().repeat() - counter = tf.data.Dataset.range(batch_size) - counter = counter.repeat() - ds = tf.data.Dataset.zip((ds, counter)) - ds = ds.prefetch(buffer_size=batch_size) - if datasets_use_caching: - ds = ds.cache() - if train: - buffer_size = 10000 - ds = ds.apply( - tf.data.experimental.shuffle_and_repeat(buffer_size=buffer_size)) - else: - ds = ds.repeat() - ds = ds.apply( - batching.map_and_batch( - map_func=self.parse_and_preprocess, - batch_size=batch_size_per_split, - num_parallel_batches=num_splits)) - ds = ds.prefetch(buffer_size=num_splits) - if num_threads: - ds = threadpool.override_threadpool( - ds, - threadpool.PrivateThreadPool( - num_threads, display_name='input_pipeline_thread_pool')) - return ds - - -class RecordInputImagePreprocessor(BaseImagePreprocessor): - """Preprocessor for images with RecordInput format.""" - - def preprocess(self, image_buffer, bbox, batch_position): - """Preprocessing image_buffer as a function of its batch position.""" - if self.train: - image = train_image(image_buffer, self.height, self.width, bbox, - batch_position, self.resize_method, self.distortions, - None, summary_verbosity=self.summary_verbosity, - distort_color_in_yiq=self.distort_color_in_yiq, - fuse_decode_and_crop=self.fuse_decode_and_crop) - else: - image = tf.image.decode_jpeg( - image_buffer, channels=3, dct_method='INTEGER_FAST') - image = eval_image(image, self.height, self.width, batch_position, - self.resize_method, - summary_verbosity=self.summary_verbosity) - # Note: image is now float32 [height,width,3] with range [0, 255] - - # image = tf.cast(image, tf.uint8) # HACK TESTING - - if self.match_mlperf: - normalized = image - _CHANNEL_MEANS - else: - normalized = normalized_image(image) - return tf.cast(normalized, self.dtype) - - def minibatch(self, - dataset, - subset, - params, - shift_ratio=-1): - if shift_ratio < 0: - shift_ratio = self.shift_ratio - with tf.name_scope('batch_processing'): - # Build final results per split. - images = [[] for _ in range(self.num_splits)] - labels = [[] for _ in range(self.num_splits)] - if params.use_datasets: - ds = self.create_dataset( - self.batch_size, self.num_splits, self.batch_size_per_split, - dataset, subset, self.train, - datasets_repeat_cached_sample=params.datasets_repeat_cached_sample, - num_threads=params.datasets_num_private_threads, - datasets_use_caching=params.datasets_use_caching, - datasets_parallel_interleave_cycle_length=( - params.datasets_parallel_interleave_cycle_length), - datasets_sloppy_parallel_interleave=( - params.datasets_sloppy_parallel_interleave), - datasets_parallel_interleave_prefetch=( - params.datasets_parallel_interleave_prefetch)) - ds_iterator = self.create_iterator(ds) - for d in xrange(self.num_splits): - images[d], labels[d] = ds_iterator.get_next() - - # TODO(laigd): consider removing the --use_datasets option, it should - # always use datasets. 
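The two normalization conventions used by `preprocess()` above behave quite differently on a `[0, 255]` image; a small NumPy sketch (illustrative only, not part of the benchmark code) makes the contrast explicit:

```python
import numpy as np

_CHANNEL_MEANS = np.array([123.68, 116.78, 103.94])  # R, G, B means

def normalize_mlperf(image):
    # MLPerf path: subtract per-channel means, keep the [0, 255] scale.
    return image - _CHANNEL_MEANS

def normalize_default(image):
    # Default path (normalized_image): rescale [0, 255] -> [-1, 1].
    return image / 127.5 - 1.0

img = np.full((2, 2, 3), 255.0)
print(normalize_mlperf(img)[0, 0])   # [131.32, 138.22, 151.06]
print(normalize_default(img)[0, 0])  # [1., 1., 1.]
```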
- else: - record_input = data_flow_ops.RecordInput( - file_pattern=dataset.tf_record_pattern(subset), - seed=301, - parallelism=64, - buffer_size=10000, - batch_size=self.batch_size, - shift_ratio=shift_ratio, - name='record_input') - records = record_input.get_yield_op() - records = tf.split(records, self.batch_size, 0) - records = [tf.reshape(record, []) for record in records] - for idx in xrange(self.batch_size): - value = records[idx] - (image, label) = self.parse_and_preprocess(value, idx) - split_index = idx % self.num_splits - labels[split_index].append(label) - images[split_index].append(image) - - for split_index in xrange(self.num_splits): - if not params.use_datasets: - images[split_index] = tf.parallel_stack(images[split_index]) - labels[split_index] = tf.concat(labels[split_index], 0) - images[split_index] = tf.reshape( - images[split_index], - shape=[self.batch_size_per_split, self.height, self.width, - self.depth]) - labels[split_index] = tf.reshape(labels[split_index], - [self.batch_size_per_split]) - return images, labels - - def supports_datasets(self): - return True - - -class ImagenetPreprocessor(RecordInputImagePreprocessor): - - def preprocess(self, image_buffer, bbox, batch_position): - # pylint: disable=g-import-not-at-top - try: - from official.resnet.imagenet_preprocessing import preprocess_image - except ImportError: - tf.logging.fatal('Please include tensorflow/models to the PYTHONPATH.') - raise - if self.train: - image = preprocess_image( - image_buffer, bbox, self.height, self.width, self.depth, - is_training=True) - else: - image = preprocess_image( - image_buffer, bbox, self.height, self.width, self.depth, - is_training=False) - return tf.cast(image, self.dtype) - - -class Cifar10ImagePreprocessor(BaseImagePreprocessor): - """Preprocessor for Cifar10 input images.""" - - def _distort_image(self, image): - """Distort one image for training a network. - - Adopted the standard data augmentation scheme that is widely used for - this dataset: the images are first zero-padded with 4 pixels on each side, - then randomly cropped to again produce distorted images; half of the images - are then horizontally mirrored. - - Args: - image: input image. - Returns: - distorted image. - """ - image = tf.image.resize_image_with_crop_or_pad( - image, self.height + 8, self.width + 8) - distorted_image = tf.random_crop(image, - [self.height, self.width, self.depth]) - # Randomly flip the image horizontally. 
- distorted_image = tf.image.random_flip_left_right(distorted_image) - if self.summary_verbosity >= 3: - tf.summary.image('distorted_image', tf.expand_dims(distorted_image, 0)) - return distorted_image - - def _eval_image(self, image): - """Get the image for model evaluation.""" - distorted_image = tf.image.resize_image_with_crop_or_pad( - image, self.width, self.height) - if self.summary_verbosity >= 3: - tf.summary.image('cropped.image', tf.expand_dims(distorted_image, 0)) - return distorted_image - - def preprocess(self, raw_image): - """Preprocessing raw image.""" - if self.summary_verbosity >= 3: - tf.summary.image('raw.image', tf.expand_dims(raw_image, 0)) - if self.train and self.distortions: - image = self._distort_image(raw_image) - else: - image = self._eval_image(raw_image) - normalized = normalized_image(image) - return tf.cast(normalized, self.dtype) - - def minibatch(self, - dataset, - subset, - params, - shift_ratio=-1): - # TODO(jsimsa): Implement datasets code path - del shift_ratio, params - with tf.name_scope('batch_processing'): - all_images, all_labels = dataset.read_data_files(subset) - all_images = tf.constant(all_images) - all_labels = tf.constant(all_labels) - input_image, input_label = tf.train.slice_input_producer( - [all_images, all_labels]) - input_image = tf.cast(input_image, self.dtype) - input_label = tf.cast(input_label, tf.int32) - # Ensure that the random shuffling has good mixing properties. - min_fraction_of_examples_in_queue = 0.4 - min_queue_examples = int(dataset.num_examples_per_epoch(subset) * - min_fraction_of_examples_in_queue) - raw_images, raw_labels = tf.train.shuffle_batch( - [input_image, input_label], batch_size=self.batch_size, - capacity=min_queue_examples + 3 * self.batch_size, - min_after_dequeue=min_queue_examples) - - images = [[] for i in range(self.num_splits)] - labels = [[] for i in range(self.num_splits)] - - # Create a list of size batch_size, each containing one image of the - # batch. Without the unstack call, raw_images[i] would still access the - # same image via a strided_slice op, but would be slower. - raw_images = tf.unstack(raw_images, axis=0) - raw_labels = tf.unstack(raw_labels, axis=0) - for i in xrange(self.batch_size): - split_index = i % self.num_splits - # The raw image read from data has the format [depth, height, width] - # reshape to the format returned by minibatch. - raw_image = tf.reshape(raw_images[i], - [dataset.depth, dataset.height, dataset.width]) - raw_image = tf.transpose(raw_image, [1, 2, 0]) - image = self.preprocess(raw_image) - images[split_index].append(image) - - labels[split_index].append(raw_labels[i]) - - for split_index in xrange(self.num_splits): - images[split_index] = tf.parallel_stack(images[split_index]) - labels[split_index] = tf.parallel_stack(labels[split_index]) - return images, labels - - -class COCOPreprocessor(BaseImagePreprocessor): - """Preprocessor for COCO dataset input images, boxes, and labels.""" - - def minibatch(self, - dataset, - subset, - params, - shift_ratio=-1): - del shift_ratio # Not used when using datasets instead of data_flow_ops - with tf.name_scope('batch_processing'): - ds = self.create_dataset( - self.batch_size, self.num_splits, self.batch_size_per_split, - dataset, subset, self.train, params.datasets_repeat_cached_sample) - ds_iterator = self.create_iterator(ds) - - # Training data: 4 tuple - # Validation data: 5 tuple - # See get_input_shapes in models/ssd_model.py for details. 
- input_len = 4 if subset == 'train' else 5 - input_lists = [[None for _ in range(self.num_splits)] - for _ in range(input_len)] - for d in xrange(self.num_splits): - input_list = ds_iterator.get_next() - for i in range(input_len): - input_lists[i][d] = input_list[i] - return input_lists - - def preprocess(self, data): - try: - import ssd_dataloader # pylint: disable=g-import-not-at-top - import ssd_constants # pylint: disable=g-import-not-at-top - from object_detection.core import preprocessor # pylint: disable=g-import-not-at-top - except ImportError: - raise ImportError('To use the COCO dataset, you must clone the ' - 'repo https://github.com/tensorflow/models and add ' - 'tensorflow/models and tensorflow/models/research to ' - 'the PYTHONPATH, and compile the protobufs by ' - 'following https://github.com/tensorflow/models/blob/' - 'master/research/object_detection/g3doc/installation.md' - '#protobuf-compilation') - image_buffer = data['image_buffer'] - boxes = data['groundtruth_boxes'] - classes = tf.reshape(data['groundtruth_classes'], [-1, 1]) - source_id = tf.string_to_number(data['source_id']) - raw_shape = data['raw_shape'] - - ssd_encoder = ssd_dataloader.Encoder() - - # Only 80 of the 90 COCO classes are used. - class_map = tf.convert_to_tensor(ssd_constants.CLASS_MAP) - classes = tf.gather(class_map, classes) - classes = tf.cast(classes, dtype=tf.float32) - - if self.train: - image, boxes, classes = ssd_dataloader.ssd_decode_and_crop( - image_buffer, boxes, classes, raw_shape) - # ssd_crop resizes and returns image of dtype float32 and does not change - # its range (i.e., value in between 0--255). Divide by 255. converts it - # to [0, 1] range. Not doing this before cropping to avoid dtype cast - # (which incurs additional memory copy). - image /= 255. - - image, boxes = preprocessor.random_horizontal_flip( - image=image, boxes=boxes) - # Random horizontal flip probability is 50% - # See https://github.com/tensorflow/models/blob/master/research/object_detection/core/preprocessor.py # pylint: disable=line-too-long - - image = ssd_dataloader.color_jitter( - image, brightness=0.125, contrast=0.5, saturation=0.5, hue=0.05) - image = ssd_dataloader.normalize_image(image) - image = tf.cast(image, self.dtype) - - encoded_returns = ssd_encoder.encode_labels(boxes, classes) - encoded_classes, encoded_boxes, num_matched_boxes = encoded_returns - - # Shape of image: [width, height, channel] - # Shape of encoded_boxes: [NUM_SSD_BOXES, 4] - # Shape of encoded_classes: [NUM_SSD_BOXES, 1] - # Shape of num_matched_boxes: [1] - return (image, encoded_boxes, encoded_classes, num_matched_boxes) - - else: - image = tf.image.decode_jpeg(image_buffer) - image = tf.image.resize_images( - image, size=(ssd_constants.IMAGE_SIZE, ssd_constants.IMAGE_SIZE)) - # resize_image returns image of dtype float32 and does not change its - # range. Divide by 255 to convert image to [0, 1] range. - image /= 255. 
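The `tf.gather(class_map, classes)` step above maps raw COCO category ids (which contain gaps) to contiguous class indices using the `CLASS_MAP` table built in `ssd_constants.py`. A truncated pure-Python illustration of that construction (only the first 13 entries of `CLASS_INV_MAP` are kept here):

```python
CLASS_INV_MAP = (0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13)  # truncated
_MAP = {j: i for i, j in enumerate(CLASS_INV_MAP)}
CLASS_MAP = tuple(_MAP.get(i, -1) for i in range(max(CLASS_INV_MAP) + 1))

print(CLASS_MAP[13])  # raw COCO id 13 -> contiguous index 12
print(CLASS_MAP[12])  # raw id 12 is unused -> -1
```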
- - image = ssd_dataloader.normalize_image(image) - image = tf.cast(image, self.dtype) - - def trim_and_pad(inp_tensor): - """Limit the number of boxes, and pad if necessary.""" - inp_tensor = inp_tensor[:ssd_constants.MAX_NUM_EVAL_BOXES] - num_pad = ssd_constants.MAX_NUM_EVAL_BOXES - tf.shape(inp_tensor)[0] - inp_tensor = tf.pad(inp_tensor, [[0, num_pad], [0, 0]]) - return tf.reshape(inp_tensor, [ssd_constants.MAX_NUM_EVAL_BOXES, - inp_tensor.get_shape()[1]]) - - boxes, classes = trim_and_pad(boxes), trim_and_pad(classes) - - # Shape of boxes: [MAX_NUM_EVAL_BOXES, 4] - # Shape of classes: [MAX_NUM_EVAL_BOXES, 1] - # Shape of source_id: [] (scalar tensor) - # Shape of raw_shape: [3] - return (image, boxes, classes, source_id, raw_shape) - - def create_dataset(self, - batch_size, - num_splits, - batch_size_per_split, - dataset, - subset, - train, - datasets_repeat_cached_sample, - num_threads=None, - datasets_use_caching=False, - datasets_parallel_interleave_cycle_length=None, - datasets_sloppy_parallel_interleave=False, - datasets_parallel_interleave_prefetch=None): - """Creates a dataset for the benchmark.""" - try: - import ssd_dataloader # pylint: disable=g-import-not-at-top - except ImportError: - raise ImportError('To use the COCO dataset, you must clone the ' - 'repo https://github.com/tensorflow/models and add ' - 'tensorflow/models and tensorflow/models/research to ' - 'the PYTHONPATH, and compile the protobufs by ' - 'following https://github.com/tensorflow/models/blob/' - 'master/research/object_detection/g3doc/installation.md' - '#protobuf-compilation') - assert self.supports_datasets() - - glob_pattern = dataset.tf_record_pattern(subset) - file_names = gfile.Glob(glob_pattern) - if not file_names: - raise ValueError('Found no files in --data_dir matching: {}' - .format(glob_pattern)) - - ds = tf.data.TFRecordDataset.list_files(file_names) - # TODO(haoyuzhang): Enable map+filter fusion after cl/218399112 in release - # options = tf.data.Options() - # options.experimental_map_and_filter_fusion = True - # ds = ds.with_options(options) - - ds = ds.apply( - interleave_ops.parallel_interleave( - tf.data.TFRecordDataset, - cycle_length=datasets_parallel_interleave_cycle_length or 10, - sloppy=datasets_sloppy_parallel_interleave)) - if datasets_repeat_cached_sample: - # Repeat a single sample element indefinitely to emulate memory-speed IO. 
- ds = ds.take(1).cache().repeat() - ds = ds.prefetch(buffer_size=batch_size) - if datasets_use_caching: - ds = ds.cache() - if train: - ds = ds.apply(tf.data.experimental.shuffle_and_repeat(buffer_size=10000)) - else: - ds = ds.repeat() - - ds = ds.map(ssd_dataloader.ssd_parse_example_proto, num_parallel_calls=64) - ds = ds.filter( - lambda data: tf.greater(tf.shape(data['groundtruth_boxes'])[0], 0)) - ds = ds.apply( - batching.map_and_batch( - map_func=self.preprocess, - batch_size=batch_size_per_split, - num_parallel_batches=num_splits, - drop_remainder=train)) - ds = ds.prefetch(buffer_size=num_splits) - if num_threads: - ds = threadpool.override_threadpool( - ds, - threadpool.PrivateThreadPool( - num_threads, display_name='input_pipeline_thread_pool')) - return ds - - def supports_datasets(self): - return True - - -class LibrispeechPreprocessor(InputPreprocessor): - """Preprocessor for librispeech class for all image model preprocessors.""" - - def __init__(self, batch_size, output_shapes, num_splits, dtype, train, - **kwargs): - del kwargs - super(LibrispeechPreprocessor, self).__init__(batch_size, output_shapes) - self.num_splits = num_splits - self.dtype = dtype - self.is_train = train - if self.batch_size % self.num_splits != 0: - raise ValueError(('batch_size must be a multiple of num_splits: ' - 'batch_size %d, num_splits: %d') % (self.batch_size, - self.num_splits)) - self.batch_size_per_split = self.batch_size // self.num_splits - - def create_dataset(self, - batch_size, - num_splits, - batch_size_per_split, - dataset, - subset, - train, - datasets_repeat_cached_sample, - num_threads=None, - datasets_use_caching=False, - datasets_parallel_interleave_cycle_length=None, - datasets_sloppy_parallel_interleave=False, - datasets_parallel_interleave_prefetch=None): - """Creates a dataset for the benchmark.""" - # TODO(laigd): currently the only difference between this and the one in - # BaseImagePreprocessor is, this uses map() and padded_batch() while the - # latter uses tf.data.experimental.map_and_batch(). Try to merge them. - assert self.supports_datasets() - glob_pattern = dataset.tf_record_pattern(subset) - file_names = gfile.Glob(glob_pattern) - if not file_names: - raise ValueError('Found no files in --data_dir matching: {}' - .format(glob_pattern)) - ds = tf.data.TFRecordDataset.list_files(file_names) - ds = ds.apply( - tf.data.experimental.parallel_interleave( - tf.data.TFRecordDataset, - cycle_length=datasets_parallel_interleave_cycle_length or 10, - sloppy=datasets_sloppy_parallel_interleave, - prefetch_input_elements=datasets_parallel_interleave_prefetch)) - if datasets_repeat_cached_sample: - # Repeat a single sample element indefinitely to emulate memory-speed IO. 
- ds = ds.take(1).cache().repeat() - counter = tf.data.Dataset.range(batch_size) - counter = counter.repeat() - ds = tf.data.Dataset.zip((ds, counter)) - ds = ds.prefetch(buffer_size=batch_size) - if datasets_use_caching: - ds = ds.cache() - if train: - ds = ds.apply(tf.data.experimental.shuffle_and_repeat(buffer_size=10000)) - else: - ds = ds.repeat() - ds = ds.map(map_func=self.parse_and_preprocess, - num_parallel_calls=batch_size_per_split*num_splits) - ds = ds.padded_batch( - batch_size=batch_size_per_split, - padded_shapes=tuple([ - tf.TensorShape(output_shape[1:]) - for output_shape in self.output_shapes - ]), - drop_remainder=True) - ds = ds.prefetch(buffer_size=num_splits) - if num_threads: - ds = threadpool.override_threadpool( - ds, - threadpool.PrivateThreadPool( - num_threads, display_name='input_pipeline_thread_pool')) - return ds - - def minibatch(self, dataset, subset, params, shift_ratio=-1): - assert params.use_datasets - # TODO(laigd): unify this with CNNModel's minibatch() - # TODO(laigd): in distributed mode we use shift_ratio so different workers - # won't work on same inputs, so we should respect that. - del shift_ratio - with tf.name_scope('batch_processing'): - ds = self.create_dataset( - self.batch_size, - self.num_splits, - self.batch_size_per_split, - dataset, - subset, - self.is_train, - datasets_repeat_cached_sample=params.datasets_repeat_cached_sample, - num_threads=params.datasets_num_private_threads, - datasets_use_caching=params.datasets_use_caching, - datasets_parallel_interleave_cycle_length=( - params.datasets_parallel_interleave_cycle_length), - datasets_sloppy_parallel_interleave=( - params.datasets_sloppy_parallel_interleave), - datasets_parallel_interleave_prefetch=( - params.datasets_parallel_interleave_prefetch)) - ds_iterator = self.create_iterator(ds) - - # The four lists are: input spectrogram feature, labels, input lengths, - # label lengths - input_lists = [[None for _ in range(self.num_splits)] for _ in range(4)] - for d in xrange(self.num_splits): - input_list = ds_iterator.get_next() - for i in range(4): - input_lists[i][d] = input_list[i] - - assert self.output_shapes == [ - input_lists[i][0].shape.as_list() for i in range(4) - ] - return tuple(input_lists) - - def supports_datasets(self): - return True - - def parse_and_preprocess(self, value, batch_position): - """Parse an TFRecord.""" - del batch_position - assert self.supports_datasets() - context_features = { - 'labels': tf.VarLenFeature(dtype=tf.int64), - 'input_length': tf.FixedLenFeature([], dtype=tf.int64), - 'label_length': tf.FixedLenFeature([], dtype=tf.int64), - } - sequence_features = { - 'features': tf.FixedLenSequenceFeature([161], dtype=tf.float32) - } - context_parsed, sequence_parsed = tf.parse_single_sequence_example( - serialized=value, - context_features=context_features, - sequence_features=sequence_features, - ) - - return [ - # Input - tf.expand_dims(sequence_parsed['features'], axis=2), - # Label - tf.cast( - tf.reshape( - tf.sparse_tensor_to_dense(context_parsed['labels']), [-1]), - dtype=tf.int32), - # Input length - tf.cast( - tf.reshape(context_parsed['input_length'], [1]), - dtype=tf.int32), - # Label length - tf.cast( - tf.reshape(context_parsed['label_length'], [1]), - dtype=tf.int32), - ] diff --git a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/ssd_constants.py b/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/ssd_constants.py deleted file mode 100644 index 77fa0149b..000000000 --- 
a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/ssd_constants.py +++ /dev/null @@ -1,118 +0,0 @@ -# Copyright 2018 Google. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Central location for all constants related to MLPerf SSD.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -# ============================================================================== -# == Model ===================================================================== -# ============================================================================== -IMAGE_SIZE = 300 - -# TODO(taylorrobie): MLPerf uses 80, but COCO documents 90. (RetinaNet uses 90) -# Update(taylorrobie): Labels > 81 show up in the pipeline. This will need to -# be resolved. -NUM_CLASSES = 81 # Including "no class". Not all COCO classes are used. - -# Note: Zero is special. (Background class) CLASS_INV_MAP[0] must be zero. -CLASS_INV_MAP = ( - 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, - 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, - 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, - 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, - 88, 89, 90) -_MAP = {j: i for i, j in enumerate(CLASS_INV_MAP)} -CLASS_MAP = tuple(_MAP.get(i, -1) for i in range(max(CLASS_INV_MAP) + 1)) - -NUM_SSD_BOXES = 8732 - -RESNET_DEPTH = 34 - -"""SSD specific""" -MIN_LEVEL = 3 -MAX_LEVEL = 8 - -FEATURE_SIZES = (38, 19, 10, 5, 3, 1) -STEPS = (8, 16, 32, 64, 100, 300) - -# https://github.com/amdegroot/ssd.pytorch/blob/master/data/config.py -SCALES = (21, 45, 99, 153, 207, 261, 315) -ASPECT_RATIOS = ((2,), (2, 3), (2, 3), (2, 3), (2,), (2,)) -NUM_DEFAULTS = (4, 6, 6, 6, 4, 4) -NUM_DEFAULTS_BY_LEVEL = {3: 4, 4: 6, 5: 6, 6: 6, 7: 4, 8: 4} -SCALE_XY = 0.1 -SCALE_HW = 0.2 -BOX_CODER_SCALES = (1 / SCALE_XY, 1 / SCALE_XY, 1 / SCALE_HW, 1 / SCALE_HW) -MATCH_THRESHOLD = 0.5 - -# https://discuss.pytorch.org/t/how-to-preprocess-input-for-pre-trained-networks/683 -NORMALIZATION_MEAN = (0.485, 0.456, 0.406) -NORMALIZATION_STD = (0.229, 0.224, 0.225) - -# SSD Cropping -NUM_CROP_PASSES = 50 -CROP_MIN_IOU_CHOICES = (0, 0.1, 0.3, 0.5, 0.7, 0.9) -P_NO_CROP_PER_PASS = 1 / (len(CROP_MIN_IOU_CHOICES) + 1) - -# Hard example mining -NEGS_PER_POSITIVE = 3 - -# Batch normalization -BATCH_NORM_DECAY = 0.997 -BATCH_NORM_EPSILON = 1e-4 - - -# ============================================================================== -# == Optimizer ================================================================= -# ============================================================================== -LEARNING_RATE_SCHEDULE = ( - (0, 1e-3), - (160000, 1e-4), - (200000, 1e-5), -) -MOMENTUM = 0.9 -WEIGHT_DECAY = 5e-4 - - -# ============================================================================== -# == Keys 
====================================================================== -# ============================================================================== -BOXES = "boxes" -CLASSES = "classes" -NUM_MATCHED_BOXES = "num_matched_boxes" -IMAGE = "image" -SOURCE_ID = "source_id" -RAW_SHAPE = "raw_shape" -PRED_BOXES = "pred_boxes" -PRED_SCORES = "pred_scores" - - -# ============================================================================== -# == Evaluation ================================================================ -# ============================================================================== - -# Note: This is based on a batch size of 32 -# https://github.com/mlperf/reference/blob/master/single_stage_detector/ssd/train.py#L21-L37 -CHECKPOINT_FREQUENCY = 20000 -MAX_NUM_EVAL_BOXES = 200 -OVERLAP_CRITERIA = 0.5 # Used for nonmax supression -MIN_SCORE = 0.05 # Minimum score to be considered during evaluation. -DUMMY_SCORE = -1e5 # If no boxes are matched. - -ANNOTATION_FILE = "annotations/instances_val2017.json" -COCO_NUM_TRAIN_IMAGES = 118287 -COCO_NUM_VAL_IMAGES = 4952 diff --git a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/ssd_dataloader.py b/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/ssd_dataloader.py deleted file mode 100644 index 2f291fd85..000000000 --- a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/ssd_dataloader.py +++ /dev/null @@ -1,382 +0,0 @@ -# Copyright 2018 Google. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== -"""Data loader and processing.""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import itertools as it -import math - -import numpy as np -import tensorflow as tf - -from object_detection.box_coders import faster_rcnn_box_coder -from object_detection.core import box_list -from object_detection.core import region_similarity_calculator -from object_detection.core import target_assigner -from object_detection.matchers import argmax_matcher -import ssd_constants - - -class DefaultBoxes(object): - """Default bounding boxes for 300x300 5 layer SSD. - - Default bounding boxes generation follows the order of (W, H, anchor_sizes). - Therefore, the tensor converted from DefaultBoxes has a shape of - [anchor_sizes, H, W, 4]. The last dimension is the box coordinates; 'ltrb' - is [ymin, xmin, ymax, xmax] while 'xywh' is [cy, cx, h, w]. 
- """ - - def __init__(self): - fk = ssd_constants.IMAGE_SIZE / np.array(ssd_constants.STEPS) - - self.default_boxes = [] - # size of feature and number of feature - for idx, feature_size in enumerate(ssd_constants.FEATURE_SIZES): - sk1 = ssd_constants.SCALES[idx] / ssd_constants.IMAGE_SIZE - sk2 = ssd_constants.SCALES[idx+1] / ssd_constants.IMAGE_SIZE - sk3 = math.sqrt(sk1*sk2) - all_sizes = [(sk1, sk1), (sk3, sk3)] - - for alpha in ssd_constants.ASPECT_RATIOS[idx]: - w, h = sk1 * math.sqrt(alpha), sk1 / math.sqrt(alpha) - all_sizes.append((w, h)) - all_sizes.append((h, w)) - - assert len(all_sizes) == ssd_constants.NUM_DEFAULTS[idx] - - for w, h in all_sizes: - for i, j in it.product(range(feature_size), repeat=2): - cx, cy = (j + 0.5) / fk[idx], (i + 0.5) / fk[idx] - box = tuple(np.clip(k, 0, 1) for k in (cy, cx, h, w)) - self.default_boxes.append(box) - - assert len(self.default_boxes) == ssd_constants.NUM_SSD_BOXES - - def to_ltrb(cy, cx, h, w): - return cy - h / 2, cx - w / 2, cy + h / 2, cx + w / 2 - - # For IoU calculation - self.default_boxes_ltrb = tuple(to_ltrb(*i) for i in self.default_boxes) - - def __call__(self, order='ltrb'): - if order == 'ltrb': return self.default_boxes_ltrb - if order == 'xywh': return self.default_boxes - - -def calc_iou_tensor(boxes1, boxes2): - """Calculation of IoU based on two boxes tensor. - - Reference to https://github.com/kuangliu/pytorch-ssd - - Args: - boxes1: shape (N, 4), four coordinates of N boxes - boxes2: shape (M, 4), four coordinates of M boxes - Returns: - IoU: shape (N, M), IoU of the i-th box in `boxes1` and j-th box in `boxes2` - """ - b1_left, b1_top, b1_right, b1_bottom = tf.split(boxes1, 4, axis=1) - b2_left, b2_top, b2_right, b2_bottom = tf.split(boxes2, 4, axis=1) - - # Shape of intersect_* (N, M) - intersect_left = tf.maximum(b1_left, tf.transpose(b2_left)) - intersect_top = tf.maximum(b1_top, tf.transpose(b2_top)) - intersect_right = tf.minimum(b1_right, tf.transpose(b2_right)) - intersect_bottom = tf.minimum(b1_bottom, tf.transpose(b2_bottom)) - - boxes1_area = (b1_right - b1_left) * (b1_bottom - b1_top) - boxes2_area = (b2_right - b2_left) * (b2_bottom - b2_top) - - intersect = tf.multiply(tf.maximum((intersect_right - intersect_left), 0), - tf.maximum((intersect_bottom - intersect_top), 0)) - union = boxes1_area + tf.transpose(boxes2_area) - intersect - iou = intersect / union - - return iou - - -def ssd_parse_example_proto(example_serialized): - """Parses an Example proto containing a training example of an image. - - Each Example proto contains the following fields that we care about: - - image/encoded: - image/source_id: tf.string - image/height: tf.int64 - image/width: tf.int64 - image/object/bbox/xmin: tf.VarLenFeature(tf.float32) - image/object/bbox/xmax: tf.VarLenFeature(tf.float32) - image/object/bbox/ymin: tf.VarLenFeature(tf.float32 - image/object/bbox/ymax: tf.VarLenFeature(tf.float32) - image/object/class/label: tf.VarLenFeature(tf.int64) - image/object/class/text: tf.VarLenFeature(tf.string) - - Complete decoder can be found in: - https://github.com/tensorflow/models/blob/master/research/object_detection/data_decoders/tf_example_decoder.py - - Args: - example_serialized: scalar Tensor tf.string containing a serialized - Example protocol buffer. - - Returns: - A dictionary with the following key-values: - image_buffer: Tensor tf.string containing the contents of a JPEG file. - groundtruth_boxes: Tensor tf.float32 of shape [num_boxes, 4], containing - coordinates of object bounding boxes. 
- groundtruth_classeS: Tensor tf.int64 of shape [num_boxes, 1], containing - class labels of objects. - source_id: unique image identifier. - raw_shape: [height, width, 3]. - """ - feature_map = { - 'image/encoded': tf.FixedLenFeature( - (), dtype=tf.string, default_value=''), - 'image/source_id': tf.FixedLenFeature((), tf.string, default_value=''), - 'image/height': tf.FixedLenFeature((), tf.int64, default_value=1), - 'image/width': tf.FixedLenFeature((), tf.int64, default_value=1), - 'image/object/bbox/xmin': tf.VarLenFeature(dtype=tf.float32), - 'image/object/bbox/ymin': tf.VarLenFeature(dtype=tf.float32), - 'image/object/bbox/xmax': tf.VarLenFeature(dtype=tf.float32), - 'image/object/bbox/ymax': tf.VarLenFeature(dtype=tf.float32), - 'image/object/class/label': tf.VarLenFeature(dtype=tf.int64), - } - features = tf.parse_single_example(example_serialized, feature_map) - - xmin = tf.expand_dims(features['image/object/bbox/xmin'].values, 1) - ymin = tf.expand_dims(features['image/object/bbox/ymin'].values, 1) - xmax = tf.expand_dims(features['image/object/bbox/xmax'].values, 1) - ymax = tf.expand_dims(features['image/object/bbox/ymax'].values, 1) - - image_buffer = features['image/encoded'] - # Bounding box coordinates should be in ltrb order - boxes = tf.concat([ymin, xmin, ymax, xmax], 1) - classes = tf.expand_dims(features['image/object/class/label'].values, 1) - source_id = features['image/source_id'] - raw_shape = tf.stack([features['image/height'], features['image/width'], 3]) - - return {'image_buffer': image_buffer, - 'groundtruth_boxes': boxes, - 'groundtruth_classes': classes, - 'source_id': source_id, - 'raw_shape': raw_shape} - - -def ssd_decode_and_crop(image_buffer, boxes, classes, raw_shape): - """Crop image randomly and decode the cropped region. - - This function will crop an image to meet the following requirements: - 1. height to width ratio between 0.5 and 2; - 2. IoUs of some boxes exceed specified threshold; - 3. At least one box center is in the cropped region. - We defer the jpeg decoding task until after the crop to avoid wasted work. - - Reference: https://github.com/chauhan-utk/ssd.DomainAdaptation - - Args: - image_buffer: Tensor tf.string containing the contents of a JPEG file. - boxes: Tensor tf.float32 of shape [num_boxes, 4], containing coordinates of - object bounding boxes. - classes: Tensor tf.int64 of shape [num_boxes, 1], containing class labels - of objects. - raw_shape: [height, width, 3]. - - Returns: - resized_image: decoded, cropped, and resized image Tensor tf.float32 of - shape [ssd_constants.IMAGE_SIZE, ssd_constants.IMAGE_SIZE, 3], value - range 0--255. - cropped_boxes: box coordinates for objects in the cropped region. - cropped_classes: class labels for objects in the cropped region. 
- """ - - num_boxes = tf.shape(boxes)[0] - - def no_crop_check(): - return (tf.random_uniform(shape=(), minval=0, maxval=1, dtype=tf.float32) - < ssd_constants.P_NO_CROP_PER_PASS) - - def no_crop_proposal(): - return ( - tf.ones((), tf.bool), - tf.convert_to_tensor([0, 0, 1, 1], dtype=tf.float32), - tf.ones((num_boxes,), tf.bool), - ) - - def crop_proposal(): - rand_vec = lambda minval, maxval: tf.random_uniform( - shape=(ssd_constants.NUM_CROP_PASSES, 1), minval=minval, maxval=maxval, - dtype=tf.float32) - - width, height = rand_vec(0.3, 1), rand_vec(0.3, 1) - left, top = rand_vec(0, 1-width), rand_vec(0, 1-height) - - right = left + width - bottom = top + height - - ltrb = tf.concat([left, top, right, bottom], axis=1) - - min_iou = tf.random_shuffle(ssd_constants.CROP_MIN_IOU_CHOICES)[0] - ious = calc_iou_tensor(ltrb, boxes) - - # discard any bboxes whose center not in the cropped image - xc, yc = [tf.tile(0.5 * (boxes[:, i + 0] + boxes[:, i + 2])[tf.newaxis, :], - (ssd_constants.NUM_CROP_PASSES, 1)) for i in range(2)] - - masks = tf.reduce_all(tf.stack([ - tf.greater(xc, tf.tile(left, (1, num_boxes))), - tf.less(xc, tf.tile(right, (1, num_boxes))), - tf.greater(yc, tf.tile(top, (1, num_boxes))), - tf.less(yc, tf.tile(bottom, (1, num_boxes))), - ], axis=2), axis=2) - - # Checks of whether a crop is valid. - valid_aspect = tf.logical_and(tf.less(height/width, 2), - tf.less(width/height, 2)) - valid_ious = tf.reduce_all(tf.greater(ious, min_iou), axis=1, keepdims=True) - valid_masks = tf.reduce_any(masks, axis=1, keepdims=True) - - valid_all = tf.cast(tf.reduce_all(tf.concat( - [valid_aspect, valid_ious, valid_masks], axis=1), axis=1), tf.int32) - - # One indexed, as zero is needed for the case of no matches. - index = tf.range(1, 1 + ssd_constants.NUM_CROP_PASSES, dtype=tf.int32) - - # Either one-hot, or zeros if there is no valid crop. - selection = tf.equal(tf.reduce_max(index * valid_all), index) - - use_crop = tf.reduce_any(selection) - output_ltrb = tf.reduce_sum(tf.multiply(ltrb, tf.tile(tf.cast( - selection, tf.float32)[:, tf.newaxis], (1, 4))), axis=0) - output_masks = tf.reduce_any(tf.logical_and(masks, tf.tile( - selection[:, tf.newaxis], (1, num_boxes))), axis=0) - - return use_crop, output_ltrb, output_masks - - def proposal(*args): - return tf.cond( - pred=no_crop_check(), - true_fn=no_crop_proposal, - false_fn=crop_proposal, - ) - - _, crop_bounds, box_masks = tf.while_loop( - cond=lambda x, *_: tf.logical_not(x), - body=proposal, - loop_vars=[tf.zeros((), tf.bool), tf.zeros((4,), tf.float32), tf.zeros((num_boxes,), tf.bool)], - ) - - filtered_boxes = tf.boolean_mask(boxes, box_masks, axis=0) - - # Clip boxes to the cropped region. - filtered_boxes = tf.stack([ - tf.maximum(filtered_boxes[:, 0], crop_bounds[0]), - tf.maximum(filtered_boxes[:, 1], crop_bounds[1]), - tf.minimum(filtered_boxes[:, 2], crop_bounds[2]), - tf.minimum(filtered_boxes[:, 3], crop_bounds[3]), - ], axis=1) - - left = crop_bounds[0] - top = crop_bounds[1] - width = crop_bounds[2] - left - height = crop_bounds[3] - top - - cropped_boxes = tf.stack([ - (filtered_boxes[:, 0] - left) / width, - (filtered_boxes[:, 1] - top) / height, - (filtered_boxes[:, 2] - left) / width, - (filtered_boxes[:, 3] - top) / height, - ], axis=1) - - # crop_window containing integer coordinates of cropped region. A normalized - # coordinate value of y should be mapped to the image coordinate at - # y * (height - 1). 
- raw_shape = tf.cast(raw_shape, tf.float32) - crop_window = tf.stack([left * (raw_shape[0] - 1), - top * (raw_shape[1] - 1), - width * raw_shape[0], - height * raw_shape[1]]) - crop_window = tf.cast(crop_window, tf.int32) - - # Fused op only decodes the cropped portion of an image - cropped_image = tf.image.decode_and_crop_jpeg( - image_buffer, crop_window, channels=3) - - # Resize converts image dtype from uint8 to float32, without rescaling values. - resized_image = tf.image.resize_images( - cropped_image, [ssd_constants.IMAGE_SIZE, ssd_constants.IMAGE_SIZE]) - - cropped_classes = tf.boolean_mask(classes, box_masks, axis=0) - - return resized_image, cropped_boxes, cropped_classes - - -def color_jitter(image, brightness=0, contrast=0, saturation=0, hue=0): - """Distort the color of the image.""" - with tf.name_scope('distort_color'): - if brightness > 0: - image = tf.image.random_brightness(image, max_delta=brightness) - if contrast > 0: - image = tf.image.random_contrast( - image, lower=1-contrast, upper=1+contrast) - if saturation > 0: - image = tf.image.random_saturation( - image, lower=1-saturation, upper=1+saturation) - if hue > 0: - image = tf.image.random_hue(image, max_delta=hue) - return image - - -def normalize_image(image): - """Normalize the image to zero mean and unit variance. - - Args: - image: 3D tensor of type float32, value in [0, 1] - Returns: - image normalized by mean and stdev. - """ - image = tf.subtract(image, ssd_constants.NORMALIZATION_MEAN) - image = tf.divide(image, ssd_constants.NORMALIZATION_STD) - - return image - - -class Encoder(object): - """Encoder for SSD boxes and labels.""" - - def __init__(self): - similarity_calc = region_similarity_calculator.IouSimilarity() - matcher = argmax_matcher.ArgMaxMatcher( - matched_threshold=ssd_constants.MATCH_THRESHOLD, - unmatched_threshold=ssd_constants.MATCH_THRESHOLD, - negatives_lower_than_unmatched=True, - force_match_for_each_row=True) - - box_coder = faster_rcnn_box_coder.FasterRcnnBoxCoder( - scale_factors=ssd_constants.BOX_CODER_SCALES) - - self.default_boxes = DefaultBoxes()('ltrb') - self.default_boxes = box_list.BoxList( - tf.convert_to_tensor(self.default_boxes)) - self.assigner = target_assigner.TargetAssigner( - similarity_calc, matcher, box_coder) - - def encode_labels(self, gt_boxes, gt_labels): - target_boxes = box_list.BoxList(gt_boxes) - encoded_classes, _, encoded_boxes, _, matches = self.assigner.assign( - self.default_boxes, target_boxes, gt_labels) - num_matched_boxes = tf.reduce_sum( - tf.cast(tf.not_equal(matches.match_results, -1), tf.float32)) - return encoded_classes, encoded_boxes, num_matched_boxes diff --git a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/ssd_model.py b/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/ssd_model.py deleted file mode 100644 index c8d67c24d..000000000 --- a/models/object_detection/tensorflow/ssd-resnet34/inference/fp32/ssd_model.py +++ /dev/null @@ -1,171 +0,0 @@ -# -# -*- coding: utf-8 -*- -# -# Copyright (c) 2019 Intel Corporation -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-# See the License for the specific language governing permissions and -# limitations under the License. -# -# SPDX-License-Identifier: EPL-2.0 -# - -# Copyright 2018 The TensorFlow Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# ============================================================================== - - -"""SSD300 Model Configuration. - -References: - Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy, Scott Reed, - Cheng-Yang Fu, Alexander C. Berg - SSD: Single Shot MultiBox Detector - arXiv:1512.02325 - -Ported from MLPerf reference implementation: - https://github.com/mlperf/reference/tree/ssd/single_stage_detector/ssd - -""" - -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - -import multiprocessing -import os -import re -import threading -import tensorflow as tf - -import ssd_constants - - -class SSD300Model(): - """Single Shot Multibox Detection (SSD) model for 300x300 image datasets.""" - - def __init__(self, data_dir, label_num=ssd_constants.NUM_CLASSES): - # For COCO dataset, 80 categories + 1 background = 81 labels - self.label_num = label_num - self.data_dir = data_dir - - # Collected predictions for eval stage. It maps each image id in eval - # dataset to a dict containing the following information: - # source_id: raw ID of image - # raw_shape: raw shape of image - # pred_box: encoded box coordinates of prediction - # pred_scores: scores of classes in prediction - self.predictions = {} - - # Global step when predictions are collected. - self.eval_global_step = 0 - - # Average precision. In asynchronous eval mode, this is the latest AP we - # get so far and may not be the results at current eval step. - self.eval_coco_ap = 0 - - # Process, queues, and thread for asynchronous evaluation. When enabled, - # create a separte process (async_eval_process) that continously pull - # intermediate results from the predictions queue (a multiprocessing queue), - # process them, and push final results into results queue (another - # multiprocessing queue). The main thread is responsible to push message - # into predictions queue, and start a separate thread to continuously pull - # messages from results queue to update final results. - # Message in predictions queue should be a tuple of two elements: - # (evaluation step, predictions) - # Message in results queue should be a tuple of two elements: - # (evaluation step, final results) - self.async_eval_process = None - self.async_eval_predictions_queue = None - self.async_eval_results_queue = None - self.async_eval_results_getter_thread = None - - # The MLPerf reference uses a starting lr of 1e-3 at bs=32. 
- self.base_lr_batch_size = 32 - - def skip_final_affine_layer(self): - return True - - def postprocess(self, results): - """Postprocess results returned from model.""" - try: - import coco_metric # pylint: disable=g-import-not-at-top - except ImportError: - raise ImportError('To use the COCO dataset, you must clone the ' - 'repo https://github.com/tensorflow/models and add ' - 'tensorflow/models and tensorflow/models/research to ' - 'the PYTHONPATH, and compile the protobufs by ' - 'following https://github.com/tensorflow/models/blob/' - 'master/research/object_detection/g3doc/installation.md' - '#protobuf-compilation ; To evaluate using COCO' - 'metric, download and install Python COCO API from' - 'https://github.com/cocodataset/cocoapi') - - pred_boxes = results[ssd_constants.PRED_BOXES] - pred_scores = results[ssd_constants.PRED_SCORES] - # TODO(haoyuzhang): maybe use these values for visualization. - # gt_boxes = results['gt_boxes'] - # gt_classes = results['gt_classes'] - source_id = results[ssd_constants.SOURCE_ID] - raw_shape = results[ssd_constants.RAW_SHAPE] - - # COCO evaluation requires processing COCO_NUM_VAL_IMAGES exactly once. Due - # to rounding errors (i.e., COCO_NUM_VAL_IMAGES % batch_size != 0), setting - # `num_eval_epochs` to 1 is not enough and will often miss some images. We - # expect user to set `num_eval_epochs` to >1, which will leave some unused - # images from previous steps in `predictions`. Here we check if we are doing - # eval at a new global step. - if results['global_step'] > self.eval_global_step: - self.eval_global_step = results['global_step'] - self.predictions.clear() - - for i, sid in enumerate(source_id): - self.predictions[int(sid)] = { - ssd_constants.PRED_BOXES: pred_boxes[i], - ssd_constants.PRED_SCORES: pred_scores[i], - ssd_constants.SOURCE_ID: source_id[i], - ssd_constants.RAW_SHAPE: raw_shape[i] - } - - # COCO metric calculates mAP only after a full epoch of evaluation. Return - # dummy results for top_N_accuracy to be compatible with benchmar_cnn.py. - if len(self.predictions) >= ssd_constants.COCO_NUM_VAL_IMAGES: - print('Got results for all {:d} eval examples. Calculate mAP...'.format( - ssd_constants.COCO_NUM_VAL_IMAGES)) - - annotation_file = os.path.join(self.data_dir, - ssd_constants.ANNOTATION_FILE) - # Size of predictions before decoding about 15--30GB, while size after - # decoding is 100--200MB. When using async eval mode, decoding takes - # 20--30 seconds of main thread time but is necessary to avoid OOM during - # inter-process communication. - decoded_preds = coco_metric.decode_predictions(self.predictions.values()) - self.predictions.clear() - - eval_results = coco_metric.compute_map(decoded_preds, annotation_file) - self.eval_coco_ap = eval_results['COCO/AP'] - ret = {'top_1_accuracy': self.eval_coco_ap, 'top_5_accuracy': 0.} - return ret - print('Got {:d} out of {:d} eval examples.' 
- ' Waiting for the remaining to calculate mAP...'.format( - len(self.predictions), ssd_constants.COCO_NUM_VAL_IMAGES)) - return {'top_1_accuracy': self.eval_coco_ap, 'top_5_accuracy': 0.} diff --git a/models/object_detection/tensorflow/ssd-resnet34/inference/int8/__init__.py b/models/object_detection/tensorflow/ssd-resnet34/inference/int8/__init__.py new file mode 100644 index 000000000..159180624 --- /dev/null +++ b/models/object_detection/tensorflow/ssd-resnet34/inference/int8/__init__.py @@ -0,0 +1,20 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + diff --git a/models/object_detection/tensorflow/ssd-resnet34/inference/int8/infer_detections.py b/models/object_detection/tensorflow/ssd-resnet34/inference/int8/infer_detections.py new file mode 100644 index 000000000..657469658 --- /dev/null +++ b/models/object_detection/tensorflow/ssd-resnet34/inference/int8/infer_detections.py @@ -0,0 +1,211 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +import tensorflow as tf +import time + +from argparse import ArgumentParser + +import benchmark_cnn +import datasets +import ssd_constants +from models import ssd_model +from preprocessing import COCOPreprocessor + +IMAGE_SIZE = 300 + +import os + +class ssd_resnet34_infer: + + def __init__(self): + arg_parser = ArgumentParser(description='Parse args') + + arg_parser.add_argument('-b', "--batch-size", + help="Specify the batch size. If this " \ + "parameter is not specified or is -1, the " \ + "largest ideal batch size for the model will " \ + "be used.", + dest="batch_size", type=int, default=-1) + + arg_parser.add_argument('-e', "--inter-op-parallelism-threads", + help='The number of inter-thread.', + dest='num_inter_threads', type=int, default=0) + + arg_parser.add_argument('-a', "--intra-op-parallelism-threads", + help='The number of intra-thread.', + dest='num_intra_threads', type=int, default=0) + + arg_parser.add_argument('-g', "--input-graph", + help='Specify the input graph.', + dest='input_graph') + + arg_parser.add_argument('-d', "--data-location", + help='Specify the location of the data. 
' + 'If this parameter is not specified, ' + 'the benchmark will use random/dummy data.', + dest="data_location", default=None) + + arg_parser.add_argument('-r', "--accuracy-only", + help='For accuracy measurement only.', + dest='accuracy_only', action='store_true') + + arg_parser.add_argument("--results-file-path", + help="File path for the inference results", + dest="results_file_path", default=None) + + # parse the arguments + self.args = arg_parser.parse_args() + + self.freeze_graph = self.load_graph(self.args.input_graph) + self.config = tf.ConfigProto() + self.config.intra_op_parallelism_threads = self.args.num_intra_threads + self.config.inter_op_parallelism_threads = self.args.num_inter_threads + + if self.args.batch_size == -1: + self.args.batch_size = 64 + + self.num_batches = (ssd_constants.COCO_NUM_VAL_IMAGES // self.args.batch_size) + \ + (ssd_constants.COCO_NUM_VAL_IMAGES % self.args.batch_size > 0) + + input_layer = 'input' + output_layers = ['v/stack', 'v/Softmax'] + self.input_tensor = self.freeze_graph.get_tensor_by_name(input_layer + ":0") + self.output_tensors = [self.freeze_graph.get_tensor_by_name(x + ":0") for x in output_layers] + + + def load_graph(self, frozen_graph_filename): + print('load graph from: ' + frozen_graph_filename) + with tf.gfile.GFile(frozen_graph_filename, "rb") as f: + graph_def = tf.GraphDef() + graph_def.ParseFromString(f.read()) + + # Then, we import the graph_def into a new Graph and returns it + with tf.Graph().as_default() as graph: + # Since we load everything in a new graph, this is not needed + tf.import_graph_def(graph_def, name='') + return graph + + def run_benchmark(self): + print("Inference with dummy data.") + with tf.Session(graph=self.freeze_graph, config=self.config) as sess: + + input_images = sess.run(tf.truncated_normal( + [self.args.batch_size, IMAGE_SIZE, IMAGE_SIZE, 3], + dtype=tf.float32, + stddev=10, + name='synthetic_images')) + + total_iter = 1000 + warmup_iter = 200 + ttime = 0.0 + + print('total iteration is {0}'.format(str(total_iter))) + print('warm up iteration is {0}'.format(str(warmup_iter))) + + for step in range(total_iter): + start_time = time.time() + _ = sess.run(self.output_tensors, {self.input_tensor: input_images}) + end_time = time.time() + + duration = end_time - start_time + if (step + 1) % 10 == 0: + print('steps = {0}, {1} sec'.format(str(step), str(duration))) + + if step + 1 > warmup_iter: + ttime += duration + + total_batches = total_iter - warmup_iter + print ('Batchsize: {0}'.format(str(self.args.batch_size))) + print ('Time spent per BATCH: {0:10.4f} ms'.format(ttime / total_batches * 1000)) + print ('Total samples/sec: {0:10.4f} samples/s'.format(total_batches * self.args.batch_size / ttime)) + + + def __get_input(self): + preprocessor = COCOPreprocessor( + batch_size=self.args.batch_size, + output_shapes=[[self.args.batch_size, IMAGE_SIZE, IMAGE_SIZE, 3]], + num_splits=1, + dtype=tf.float32, + train=False, + distortions=True, + resize_method=None, + shift_ratio=0 + ) + + class params: + datasets_repeat_cached_sample = False + + self.params = params() + self.dataset = datasets.create_dataset(self.args.data_location, 'coco') + + return preprocessor.minibatch( + self.dataset, + subset='validation', + params=self.params, + shift_ratio=0) + + + def accuracy_check(self): + print(self.args) + input_list = self.__get_input() + ds_init = tf.get_collection(tf.GraphKeys.TABLE_INITIALIZERS) + + ds_sess = tf.Session() + params = benchmark_cnn.make_params(data_dir=self.args.data_location) + self.model = 
ssd_model.SSD300Model(params=params) + + print("Inference for accuracy check.") + with tf.Session(graph=self.freeze_graph, config=self.config) as sess: + ds_sess.run(ds_init) + global_step = 0 + + for _ in range(self.num_batches): + results = {} + input_lists = ds_sess.run(input_list) + input_images = input_lists[0][0] + input_ids = input_lists[3][0] + input_raw_shapes = input_lists[4][0] + + result = sess.run(self.output_tensors, {self.input_tensor: input_images}) + # Make global_step available in results for postprocessing. + results['global_step'] = global_step + results[ssd_constants.SOURCE_ID] = input_ids + results[ssd_constants.RAW_SHAPE] = input_raw_shapes + + results[ssd_constants.PRED_BOXES] = result[0] + results[ssd_constants.PRED_SCORES] = result[1] + + results = self.model.postprocess(results) + + + + def run(self): + if self.args.accuracy_only: + self.accuracy_check() + else: + self.run_benchmark() + + + +if __name__ == "__main__": + infer = ssd_resnet34_infer() + infer.run() + diff --git a/models/object_detection/tensorflow/ssd_vgg16/__init__.py b/models/object_detection/tensorflow/ssd_vgg16/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/models/object_detection/tensorflow/ssd_vgg16/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/models/object_detection/tensorflow/ssd_vgg16/inference/__init__.py b/models/object_detection/tensorflow/ssd_vgg16/inference/__init__.py new file mode 100644 index 000000000..d9c4123de --- /dev/null +++ b/models/object_detection/tensorflow/ssd_vgg16/inference/__init__.py @@ -0,0 +1,19 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# diff --git a/models/object_detection/tensorflow/ssd_vgg16/inference/anchor_manipulator.py b/models/object_detection/tensorflow/ssd_vgg16/inference/anchor_manipulator.py new file mode 100644 index 000000000..f52acdc08 --- /dev/null +++ b/models/object_detection/tensorflow/ssd_vgg16/inference/anchor_manipulator.py @@ -0,0 +1,353 @@ +# Copyright 2018 Changan Wang + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= + +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# +import math + +import tensorflow as tf +import numpy as np + +from tensorflow.contrib.image.python.ops import image_ops + +def areas(gt_bboxes): + with tf.name_scope('bboxes_areas', values=[gt_bboxes]): + ymin, xmin, ymax, xmax = tf.split(gt_bboxes, 4, axis=1) + return (xmax - xmin) * (ymax - ymin) + +def intersection(gt_bboxes, default_bboxes): + with tf.name_scope('bboxes_intersection', values=[gt_bboxes, default_bboxes]): + # num_anchors x 1 + ymin, xmin, ymax, xmax = tf.split(gt_bboxes, 4, axis=1) + # 1 x num_anchors + gt_ymin, gt_xmin, gt_ymax, gt_xmax = [tf.transpose(b, perm=[1, 0]) for b in tf.split(default_bboxes, 4, axis=1)] + # broadcast here to generate the full matrix + int_ymin = tf.maximum(ymin, gt_ymin) + int_xmin = tf.maximum(xmin, gt_xmin) + int_ymax = tf.minimum(ymax, gt_ymax) + int_xmax = tf.minimum(xmax, gt_xmax) + h = tf.maximum(int_ymax - int_ymin, 0.) + w = tf.maximum(int_xmax - int_xmin, 0.) 
+ + return h * w +def iou_matrix(gt_bboxes, default_bboxes): + with tf.name_scope('iou_matrix', values = [gt_bboxes, default_bboxes]): + inter_vol = intersection(gt_bboxes, default_bboxes) + # broadcast + union_vol = areas(gt_bboxes) + tf.transpose(areas(default_bboxes), perm=[1, 0]) - inter_vol + + return tf.where(tf.equal(union_vol, 0.0), + tf.zeros_like(inter_vol), tf.truediv(inter_vol, union_vol)) + +def do_dual_max_match(overlap_matrix, low_thres, high_thres, ignore_between=True, gt_max_first=True): + ''' + overlap_matrix: num_gt * num_anchors + ''' + with tf.name_scope('dual_max_match', values=[overlap_matrix]): + # first match from anchors' side + anchors_to_gt = tf.argmax(overlap_matrix, axis=0) + # the matching degree + match_values = tf.reduce_max(overlap_matrix, axis=0) + + #positive_mask = tf.greater(match_values, high_thres) + less_mask = tf.less(match_values, low_thres) + between_mask = tf.logical_and(tf.less(match_values, high_thres), tf.greater_equal(match_values, low_thres)) + negative_mask = less_mask if ignore_between else between_mask + ignore_mask = between_mask if ignore_between else less_mask + # fill all negative positions with -1, all ignore positions is -2 + match_indices = tf.where(negative_mask, -1 * tf.ones_like(anchors_to_gt), anchors_to_gt) + match_indices = tf.where(ignore_mask, -2 * tf.ones_like(match_indices), match_indices) + + # negtive values has no effect in tf.one_hot, that means all zeros along that axis + # so all positive match positions in anchors_to_gt_mask is 1, all others are 0 + anchors_to_gt_mask = tf.one_hot(tf.clip_by_value(match_indices, -1, tf.cast(tf.shape(overlap_matrix)[0], tf.int64)), + tf.shape(overlap_matrix)[0], on_value=1, off_value=0, axis=0, dtype=tf.int32) + # match from ground truth's side + gt_to_anchors = tf.argmax(overlap_matrix, axis=1) + + if gt_max_first: + # the max match from ground truth's side has higher priority + left_gt_to_anchors_mask = tf.one_hot(gt_to_anchors, tf.shape(overlap_matrix)[1], on_value=1, off_value=0, axis=1, dtype=tf.int32) + else: + # the max match from anchors' side has higher priority + # use match result from ground truth's side only when the the matching degree from anchors' side is lower than position threshold + left_gt_to_anchors_mask = tf.cast(tf.logical_and(tf.reduce_max(anchors_to_gt_mask, axis=1, keep_dims=True) < 1, + tf.one_hot(gt_to_anchors, tf.shape(overlap_matrix)[1], + on_value=True, off_value=False, axis=1, dtype=tf.bool) + ), tf.int64) + # can not use left_gt_to_anchors_mask here, because there are many ground truthes match to one anchor, we should pick the highest one even when we are merging matching from ground truth side + left_gt_to_anchors_scores = overlap_matrix * tf.to_float(left_gt_to_anchors_mask) + # merge matching results from ground truth's side with the original matching results from anchors' side + # then select all the overlap score of those matching pairs + selected_scores = tf.gather_nd(overlap_matrix, tf.stack([tf.where(tf.reduce_max(left_gt_to_anchors_mask, axis=0) > 0, + tf.argmax(left_gt_to_anchors_scores, axis=0), + anchors_to_gt), + tf.range(tf.cast(tf.shape(overlap_matrix)[1], tf.int64))], axis=1)) + # return the matching results for both foreground anchors and background anchors, also with overlap scores + return tf.where(tf.reduce_max(left_gt_to_anchors_mask, axis=0) > 0, + tf.argmax(left_gt_to_anchors_scores, axis=0), + match_indices), selected_scores + +# def save_anchors(bboxes, labels, anchors_point): +# if not hasattr(save_image_with_bbox, 
"counter"): +# save_image_with_bbox.counter = 0 # it doesn't exist yet, so initialize it +# save_image_with_bbox.counter += 1 + +# np.save('./debug/bboxes_{}.npy'.format(save_image_with_bbox.counter), np.copy(bboxes)) +# np.save('./debug/labels_{}.npy'.format(save_image_with_bbox.counter), np.copy(labels)) +# np.save('./debug/anchors_{}.npy'.format(save_image_with_bbox.counter), np.copy(anchors_point)) +# return save_image_with_bbox.counter + +class AnchorEncoder(object): + def __init__(self, allowed_borders, positive_threshold, ignore_threshold, prior_scaling, clip=False): + super(AnchorEncoder, self).__init__() + self._all_anchors = None + self._allowed_borders = allowed_borders + self._positive_threshold = positive_threshold + self._ignore_threshold = ignore_threshold + self._prior_scaling = prior_scaling + self._clip = clip + + def center2point(self, center_y, center_x, height, width): + return center_y - height / 2., center_x - width / 2., center_y + height / 2., center_x + width / 2., + + def point2center(self, ymin, xmin, ymax, xmax): + height, width = (ymax - ymin), (xmax - xmin) + return ymin + height / 2., xmin + width / 2., height, width + + def encode_all_anchors(self, labels, bboxes, all_anchors, all_num_anchors_depth, all_num_anchors_spatial, debug=False): + # y, x, h, w are all in range [0, 1] relative to the original image size + # shape info: + # y_on_image, x_on_image: layers_shapes[0] * layers_shapes[1] + # h_on_image, w_on_image: num_anchors + assert (len(all_num_anchors_depth)==len(all_num_anchors_spatial)) and (len(all_num_anchors_depth)==len(all_anchors)), 'inconsist num layers for anchors.' + with tf.name_scope('encode_all_anchors'): + num_layers = len(all_num_anchors_depth) + list_anchors_ymin = [] + list_anchors_xmin = [] + list_anchors_ymax = [] + list_anchors_xmax = [] + tiled_allowed_borders = [] + for ind, anchor in enumerate(all_anchors): + anchors_ymin_, anchors_xmin_, anchors_ymax_, anchors_xmax_ = self.center2point(anchor[0], anchor[1], anchor[2], anchor[3]) + + list_anchors_ymin.append(tf.reshape(anchors_ymin_, [-1])) + list_anchors_xmin.append(tf.reshape(anchors_xmin_, [-1])) + list_anchors_ymax.append(tf.reshape(anchors_ymax_, [-1])) + list_anchors_xmax.append(tf.reshape(anchors_xmax_, [-1])) + + tiled_allowed_borders.extend([self._allowed_borders[ind]] * all_num_anchors_depth[ind] * all_num_anchors_spatial[ind]) + + anchors_ymin = tf.concat(list_anchors_ymin, 0, name='concat_ymin') + anchors_xmin = tf.concat(list_anchors_xmin, 0, name='concat_xmin') + anchors_ymax = tf.concat(list_anchors_ymax, 0, name='concat_ymax') + anchors_xmax = tf.concat(list_anchors_xmax, 0, name='concat_xmax') + + if self._clip: + anchors_ymin = tf.clip_by_value(anchors_ymin, 0., 1.) + anchors_xmin = tf.clip_by_value(anchors_xmin, 0., 1.) + anchors_ymax = tf.clip_by_value(anchors_ymax, 0., 1.) + anchors_xmax = tf.clip_by_value(anchors_xmax, 0., 1.) + + anchor_allowed_borders = tf.stack(tiled_allowed_borders, 0, name='concat_allowed_borders') + + inside_mask = tf.logical_and(tf.logical_and(anchors_ymin > -anchor_allowed_borders * 1., + anchors_xmin > -anchor_allowed_borders * 1.), + tf.logical_and(anchors_ymax < (1. + anchor_allowed_borders * 1.), + anchors_xmax < (1. 
+ anchor_allowed_borders * 1.))) + + anchors_point = tf.stack([anchors_ymin, anchors_xmin, anchors_ymax, anchors_xmax], axis=-1) + + # save_anchors_op = tf.py_func(save_anchors, + # [bboxes, + # labels, + # anchors_point], + # tf.int64, stateful=True) + + # with tf.control_dependencies([save_anchors_op]): + overlap_matrix = iou_matrix(bboxes, anchors_point) * tf.cast(tf.expand_dims(inside_mask, 0), tf.float32) + matched_gt, gt_scores = do_dual_max_match(overlap_matrix, self._ignore_threshold, self._positive_threshold) + # get all positive matching positions + matched_gt_mask = matched_gt > -1 + matched_indices = tf.clip_by_value(matched_gt, 0, tf.int64.max) + # the labels here maybe chaos at those non-positive positions + gt_labels = tf.gather(labels, matched_indices) + # filter the invalid labels + gt_labels = gt_labels * tf.cast(matched_gt_mask, tf.int64) + # set those ignored positions to -1 + gt_labels = gt_labels + (-1 * tf.cast(matched_gt < -1, tf.int64)) + + gt_ymin, gt_xmin, gt_ymax, gt_xmax = tf.unstack(tf.gather(bboxes, matched_indices), 4, axis=-1) + + # transform to center / size. + gt_cy, gt_cx, gt_h, gt_w = self.point2center(gt_ymin, gt_xmin, gt_ymax, gt_xmax) + anchor_cy, anchor_cx, anchor_h, anchor_w = self.point2center(anchors_ymin, anchors_xmin, anchors_ymax, anchors_xmax) + # encode features. + # the prior_scaling (in fact is 5 and 10) is use for balance the regression loss of center and with(or height) + gt_cy = (gt_cy - anchor_cy) / anchor_h / self._prior_scaling[0] + gt_cx = (gt_cx - anchor_cx) / anchor_w / self._prior_scaling[1] + gt_h = tf.log(gt_h / anchor_h) / self._prior_scaling[2] + gt_w = tf.log(gt_w / anchor_w) / self._prior_scaling[3] + # now gt_localizations is our regression object, but also maybe chaos at those non-positive positions + if debug: + gt_targets = tf.stack([anchors_ymin, anchors_xmin, anchors_ymax, anchors_xmax], axis=-1) + else: + gt_targets = tf.stack([gt_cy, gt_cx, gt_h, gt_w], axis=-1) + # set all targets of non-positive positions to 0 + gt_targets = tf.expand_dims(tf.cast(matched_gt_mask, tf.float32), -1) * gt_targets + self._all_anchors = (anchor_cy, anchor_cx, anchor_h, anchor_w) + return gt_targets, gt_labels, gt_scores + + # return a list, of which each is: + # shape: [feature_h, feature_w, num_anchors, 4] + # order: ymin, xmin, ymax, xmax + def decode_all_anchors(self, pred_location, num_anchors_per_layer): + assert self._all_anchors is not None, 'no anchors to decode.' + with tf.name_scope('decode_all_anchors', values=[pred_location]): + anchor_cy, anchor_cx, anchor_h, anchor_w = self._all_anchors + + pred_h = tf.exp(pred_location[:, -2] * self._prior_scaling[2]) * anchor_h + pred_w = tf.exp(pred_location[:, -1] * self._prior_scaling[3]) * anchor_w + pred_cy = pred_location[:, 0] * self._prior_scaling[0] * anchor_h + anchor_cy + pred_cx = pred_location[:, 1] * self._prior_scaling[1] * anchor_w + anchor_cx + + return tf.split(tf.stack(self.center2point(pred_cy, pred_cx, pred_h, pred_w), axis=-1), num_anchors_per_layer, axis=0) + + def ext_decode_all_anchors(self, pred_location, all_anchors, all_num_anchors_depth, all_num_anchors_spatial): + assert (len(all_num_anchors_depth)==len(all_num_anchors_spatial)) and (len(all_num_anchors_depth)==len(all_anchors)), 'inconsist num layers for anchors.' 
+ with tf.name_scope('ext_decode_all_anchors', values=[pred_location]): + num_anchors_per_layer = [] + for ind in range(len(all_anchors)): + num_anchors_per_layer.append(all_num_anchors_depth[ind] * all_num_anchors_spatial[ind]) + + num_layers = len(all_num_anchors_depth) + list_anchors_ymin = [] + list_anchors_xmin = [] + list_anchors_ymax = [] + list_anchors_xmax = [] + tiled_allowed_borders = [] + for ind, anchor in enumerate(all_anchors): + anchors_ymin_, anchors_xmin_, anchors_ymax_, anchors_xmax_ = self.center2point(anchor[0], anchor[1], anchor[2], anchor[3]) + + list_anchors_ymin.append(tf.reshape(anchors_ymin_, [-1])) + list_anchors_xmin.append(tf.reshape(anchors_xmin_, [-1])) + list_anchors_ymax.append(tf.reshape(anchors_ymax_, [-1])) + list_anchors_xmax.append(tf.reshape(anchors_xmax_, [-1])) + + anchors_ymin = tf.concat(list_anchors_ymin, 0, name='concat_ymin') + anchors_xmin = tf.concat(list_anchors_xmin, 0, name='concat_xmin') + anchors_ymax = tf.concat(list_anchors_ymax, 0, name='concat_ymax') + anchors_xmax = tf.concat(list_anchors_xmax, 0, name='concat_xmax') + + anchor_cy, anchor_cx, anchor_h, anchor_w = self.point2center(anchors_ymin, anchors_xmin, anchors_ymax, anchors_xmax) + + pred_h = tf.exp(pred_location[:,-2] * self._prior_scaling[2]) * anchor_h + pred_w = tf.exp(pred_location[:, -1] * self._prior_scaling[3]) * anchor_w + pred_cy = pred_location[:, 0] * self._prior_scaling[0] * anchor_h + anchor_cy + pred_cx = pred_location[:, 1] * self._prior_scaling[1] * anchor_w + anchor_cx + + return tf.split(tf.stack(self.center2point(pred_cy, pred_cx, pred_h, pred_w), axis=-1), num_anchors_per_layer, axis=0) + +class AnchorCreator(object): + def __init__(self, img_shape, layers_shapes, anchor_scales, extra_anchor_scales, anchor_ratios, layer_steps): + super(AnchorCreator, self).__init__() + # img_shape -> (height, width) + self._img_shape = img_shape + self._layers_shapes = layers_shapes + self._anchor_scales = anchor_scales + self._extra_anchor_scales = extra_anchor_scales + self._anchor_ratios = anchor_ratios + self._layer_steps = layer_steps + self._anchor_offset = [0.5] * len(self._layers_shapes) + + def get_layer_anchors(self, layer_shape, anchor_scale, extra_anchor_scale, anchor_ratio, layer_step, offset = 0.5): + ''' assume layer_shape[0] = 6, layer_shape[1] = 5 + x_on_layer = [[0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4], + [0, 1, 2, 3, 4]] + y_on_layer = [[0, 0, 0, 0, 0], + [1, 1, 1, 1, 1], + [2, 2, 2, 2, 2], + [3, 3, 3, 3, 3], + [4, 4, 4, 4, 4], + [5, 5, 5, 5, 5]] + ''' + with tf.name_scope('get_layer_anchors'): + x_on_layer, y_on_layer = tf.meshgrid(tf.range(layer_shape[1]), tf.range(layer_shape[0])) + + y_on_image = (tf.cast(y_on_layer, tf.float32) + offset) * layer_step / self._img_shape[0] + x_on_image = (tf.cast(x_on_layer, tf.float32) + offset) * layer_step / self._img_shape[1] + + num_anchors_along_depth = len(anchor_scale) * len(anchor_ratio) + len(extra_anchor_scale) + num_anchors_along_spatial = layer_shape[1] * layer_shape[0] + + list_h_on_image = [] + list_w_on_image = [] + + global_index = 0 + # for square anchors + for _, scale in enumerate(extra_anchor_scale): + list_h_on_image.append(scale) + list_w_on_image.append(scale) + global_index += 1 + # for other aspect ratio anchors + for scale_index, scale in enumerate(anchor_scale): + for ratio_index, ratio in enumerate(anchor_ratio): + list_h_on_image.append(scale / math.sqrt(ratio)) + list_w_on_image.append(scale * math.sqrt(ratio)) + global_index += 1 + # 
shape info: + # y_on_image, x_on_image: layers_shapes[0] * layers_shapes[1] + # h_on_image, w_on_image: num_anchors_along_depth + return tf.expand_dims(y_on_image, axis=-1), tf.expand_dims(x_on_image, axis=-1), \ + tf.constant(list_h_on_image, dtype=tf.float32), \ + tf.constant(list_w_on_image, dtype=tf.float32), num_anchors_along_depth, num_anchors_along_spatial + + def get_all_anchors(self): + all_anchors = [] + all_num_anchors_depth = [] + all_num_anchors_spatial = [] + for layer_index, layer_shape in enumerate(self._layers_shapes): + anchors_this_layer = self.get_layer_anchors(layer_shape, + self._anchor_scales[layer_index], + self._extra_anchor_scales[layer_index], + self._anchor_ratios[layer_index], + self._layer_steps[layer_index], + self._anchor_offset[layer_index]) + all_anchors.append(anchors_this_layer[:-2]) + all_num_anchors_depth.append(anchors_this_layer[-2]) + all_num_anchors_spatial.append(anchors_this_layer[-1]) + return all_anchors, all_num_anchors_depth, all_num_anchors_spatial + diff --git a/models/object_detection/tensorflow/ssd_vgg16/inference/eval_ssd.py b/models/object_detection/tensorflow/ssd_vgg16/inference/eval_ssd.py new file mode 100644 index 000000000..fdbb4a44d --- /dev/null +++ b/models/object_detection/tensorflow/ssd_vgg16/inference/eval_ssd.py @@ -0,0 +1,316 @@ +# Copyright 2018 Changan Wang + +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# ============================================================================= +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: EPL-2.0 +# +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import os +import numpy as np +import time +from argparse import ArgumentParser +import sys +from google.protobuf import text_format +import tensorflow as tf + +from dataset import dataset_common +from preprocessing import ssd_preprocessing +import anchor_manipulator + +SSD_VGG16_IMAGE_SIZE = 300 +NUM_CLASSES = 81 +NEGATIVE_RATIO = 1.0 +SELECT_THRESHOLD = 0.1 +MATCH_THRESHOLD = 0.5 +NEG_THRESHOLD = 0.5 +DATA_FORMAT = 'channels_last' +NUM_READERS = 10 +NUM_PREPROCESSING_THREADS = 28 + + +def input_fn(dataset_pattern='val-*', batch_size=1, data_location=None): + out_shape = [SSD_VGG16_IMAGE_SIZE] * 2 + anchor_creator = anchor_manipulator.AnchorCreator(out_shape, + layers_shapes=[(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), + (1, 1)], + anchor_scales=[(0.1,), (0.2,), (0.375,), (0.55,), (0.725,), + (0.9,)], + extra_anchor_scales=[(0.1414,), (0.2739,), (0.4541,), (0.6315,), + (0.8078,), (0.9836,)], + anchor_ratios=[(1., 2., .5), (1., 2., 3., .5, 0.3333), + (1., 2., 3., .5, 0.3333), (1., 2., 3., .5, 0.3333), + (1., 2., .5), (1., 2., .5)], + layer_steps=[8, 16, 32, 64, 100, 300]) + all_anchors, all_num_anchors_depth, all_num_anchors_spatial = anchor_creator.get_all_anchors() + + num_anchors_per_layer = [] + for ind in range(len(all_anchors)): + num_anchors_per_layer.append(all_num_anchors_depth[ind] * all_num_anchors_spatial[ind]) + + anchor_encoder_decoder = anchor_manipulator.AnchorEncoder(allowed_borders=[1.0] * 6, + positive_threshold=MATCH_THRESHOLD, + ignore_threshold=NEG_THRESHOLD, + prior_scaling=[0.1, 0.1, 0.2, 0.2]) + + image_preprocessing_fn = lambda image_, labels_, bboxes_: ssd_preprocessing.preprocess_image(image_, labels_, + bboxes_, out_shape, + is_training=False, + data_format=DATA_FORMAT, + output_rgb=False) + anchor_encoder_fn = lambda glabels_, gbboxes_: anchor_encoder_decoder.encode_all_anchors(glabels_, gbboxes_, + all_anchors, + all_num_anchors_depth, + all_num_anchors_spatial) + + image, filename, shape, loc_targets, cls_targets, match_scores = \ + dataset_common.slim_get_batch(NUM_CLASSES, + batch_size, + 'val', + os.path.join( + data_location, + dataset_pattern), + NUM_READERS, + NUM_PREPROCESSING_THREADS, + image_preprocessing_fn, + anchor_encoder_fn, + num_epochs=1, + is_training=False) + return image, filename, shape + + +class EvaluateSSDModel(): + def __init__(self): + + arg_parser = ArgumentParser(description='Parse args') + + arg_parser.add_argument('-b', "--batch-size", + help="Specify the batch size. 
If this " \ + "parameter is not specified or is -1, the " \ + "largest ideal batch size for the model will " \ + "be used.", + dest="batch_size", type=int, default=1) + + arg_parser.add_argument('-e', "--num-inter-threads", + help='The number of inter-thread.', + dest='num_inter_threads', type=int, default=0) + + arg_parser.add_argument('-a', "--num-intra-threads", + help='The number of intra-thread.', + dest='num_intra_threads', type=int, default=0) + + arg_parser.add_argument('--data-num-inter-threads', dest='data_num_inter_threads', + help='number threads across operators', + type=int, default=21) + + arg_parser.add_argument('--data-num-intra-threads', dest='data_num_intra_threads', + help='number threads for data layer operator', + type=int, default=28) + + arg_parser.add_argument('--kmp-blocktime', dest='kmp_blocktime', + help='number of kmp blocktime', + type=int, default=1) + + arg_parser.add_argument('-g', "--input-graph", + help='Specify the input graph for the transform tool', + dest='input_graph') + + arg_parser.add_argument('-d', "--data-location", + help='Specify the location of the data. ' + 'If this parameter is not specified, ' + 'the benchmark will use random/dummy data.', + dest="data_location", default=None) + + arg_parser.add_argument('-r', "--accuracy-only", + help='For accuracy measurement only.', + dest='accuracy_only', action='store_true') + + arg_parser.add_argument("--warmup-steps", type=int, default=10, + help="number of warmup steps") + + arg_parser.add_argument("--steps", type=int, default=50, + help="number of steps") + + self.args = arg_parser.parse_args() + + os.environ["KMP_BLOCKTIME"] = str(self.args.kmp_blocktime) + + def eval(self): + + data_config = tf.ConfigProto() + data_config.inter_op_parallelism_threads = self.args.data_num_inter_threads + data_config.intra_op_parallelism_threads = self.args.data_num_intra_threads + data_config.use_per_session_threads = 1 + + infer_config = tf.ConfigProto() + infer_config.inter_op_parallelism_threads = self.args.num_inter_threads # self.args.num_inter_threads + infer_config.intra_op_parallelism_threads = self.args.num_intra_threads # self.args.num_intra_threads + infer_config.use_per_session_threads = 1 + + data_graph = tf.Graph() + with data_graph.as_default(): + if self.args.data_location: # real data + image, filename, shape = \ + input_fn(dataset_pattern='val-*', batch_size=self.args.batch_size, data_location=self.args.data_location) + else: # dummy data + input_shape = [self.args.batch_size, SSD_VGG16_IMAGE_SIZE, SSD_VGG16_IMAGE_SIZE, 3] + image = tf.random.uniform(input_shape, -123.68, 151.06, dtype=tf.float32, name='synthetic_images') + + infer_graph = tf.Graph() + model_file = self.args.input_graph + with infer_graph.as_default(): + graph_def = tf.GraphDef() + file_ext = os.path.splitext(model_file)[1] + with open(model_file, "rb") as f: + if file_ext == '.pbtxt': + text_format.Merge(f.read(), graph_def) + else: + graph_def.ParseFromString(f.read()) + tf.import_graph_def(graph_def, name='') + + # Define input and output Tensors for inference graph + output_names = ["ExpandDims"] + for i in range(1, 160): + output_names.append("ExpandDims_" + str(i)) + + input_operation = infer_graph.get_operation_by_name("input") + output_operations = [] + for name in output_names: + output_operations.append(infer_graph.get_operation_by_name(name).outputs[0]) + + infer_sess = tf.Session(graph=infer_graph, config=infer_config) + + if not self.args.accuracy_only: # benchmark + step = 0 + total_steps = self.args.warmup_steps + 
self.args.steps + + total_images = 0 + total_duration = 0 + + if not self.args.data_location: # inference with dummy data + print("Inference with dummy data") + data_sess = tf.Session(graph=data_graph, config=data_config) + + while step < total_steps: + step += 1 + image_np = data_sess.run(image) + start_time = time.time() + + infer_sess.run(output_operations, {input_operation.outputs[0]: image_np}) + duration = time.time() - start_time + + if step > self.args.warmup_steps: + total_duration += duration + total_images += self.args.batch_size + print('Iteration %d: %.6f sec' % (step, duration)) + sys.stdout.flush() + + else: # benchmark with real data + print("Inference with real data") + with data_graph.as_default(): + with tf.train.MonitoredTrainingSession(config=data_config) as data_sess: + while not data_sess.should_stop() and step < total_steps: + step += 1 + start_time = time.time() + image_np, _, _ = data_sess.run([image, filename, shape]) + infer_sess.run(output_operations, {input_operation.outputs[0]: image_np}) + duration = time.time() - start_time + + if step > self.args.warmup_steps: + total_duration += duration + total_images += self.args.batch_size + print('Iteration %d: %.6f sec' % (step, duration)) + sys.stdout.flush() + + print('Batch size = %d' % self.args.batch_size) + print('Throughput: %.3f images/sec' % (total_images / total_duration)) + if (self.args.batch_size == 1): + latency = (total_duration / total_images) * 1000 + print('Latency: %.3f ms' % (latency)) + + else: # accuracy only + results = [] + filenames = [] + shapes = [] + total_processed_images = 0 + with data_graph.as_default(): + with tf.train.MonitoredTrainingSession(config=data_config) as data_sess: + while not data_sess.should_stop(): + image_np, filename_np, shape_np = data_sess.run([image, filename, shape]) + total_processed_images += self.args.batch_size + predict = infer_sess.run(output_operations, {input_operation.outputs[0]: image_np}) + if (total_processed_images % 30 == 0): + print("Predicting results for {} images...".format(total_processed_images)) + sys.stdout.flush() + results.append(predict) + filenames.append(filename_np[0]) + shapes.append(shape_np[0]) + + log_dir = os.path.join('./', 'logs') + # if it doesn't exist, create. + if not os.path.exists(log_dir): + os.makedirs(log_dir) + for class_ind in range(1, NUM_CLASSES): + with open(os.path.join(log_dir, 'results_{}.txt'.format(class_ind)), 'wt') as f: + for image_ind, pred in enumerate(results): + shape = shapes[image_ind] + filename = filenames[image_ind] + # parsing prediction results and calculate bbox + scores = pred[(class_ind * 2) - 2][0] + bboxes = pred[(class_ind * 2) - 1][0] + bboxes[:, 0] = (bboxes[:, 0] * shape[0]).astype(np.int32, copy=False) + 1 + bboxes[:, 1] = (bboxes[:, 1] * shape[1]).astype(np.int32, copy=False) + 1 + bboxes[:, 2] = (bboxes[:, 2] * shape[0]).astype(np.int32, copy=False) + 1 + bboxes[:, 3] = (bboxes[:, 3] * shape[1]).astype(np.int32, copy=False) + 1 + + valid_mask = np.logical_and((bboxes[:, 2] - bboxes[:, 0] > 0), + (bboxes[:, 3] - bboxes[:, 1] > 0)) + + for det_ind in range(valid_mask.shape[0]): + if not valid_mask[det_ind]: + continue + f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'. 
+ format(filename.decode('utf8')[:-4], scores[det_ind], + bboxes[det_ind, 1], bboxes[det_ind, 0], + bboxes[det_ind, 3], bboxes[det_ind, 2])) + + coco_eval = os.path.join( + os.path.dirname(os.path.realpath(__file__)), "validate_ssd_vgg16.py") + cmd_prefix = "python " + coco_eval + cmd_prefix += " --detections_path ./logs" + cmd_prefix += " --annotations_file {}/instances_val2017.json".format(self.args.data_location) + cmd = cmd_prefix + os.system(cmd) + +if __name__ == "__main__": + obj = EvaluateSSDModel() + obj.eval() diff --git a/models/object_detection/tensorflow/ssd_vgg16/inference/generate_coco_records.py b/models/object_detection/tensorflow/ssd_vgg16/inference/generate_coco_records.py new file mode 100755 index 000000000..5cc72cf7a --- /dev/null +++ b/models/object_detection/tensorflow/ssd_vgg16/inference/generate_coco_records.py @@ -0,0 +1,205 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# SPDX-License-Identifier: EPL-2.0 +# + +import argparse +import os +import json +import numpy as np +from tqdm import tqdm +import tensorflow as tf +from convert_tfrecords import ImageCoder, _process_image, _int64_feature, _float_feature, _bytes_feature, _bytes_list_feature + + +def load_annotation_data(annotations_filename): + + # Load annotation data + with open(annotations_filename, 'r') as annotations_file: + data = json.load(annotations_file) + + # Create map of category IDs to category names + category_map = {} + for category_datum in data['categories']: + category_map[category_datum['id']] = category_datum['name'] + + # Create map of file IDs to annotation data + annotation_map = {} + for annotation_datum in data['annotations']: + image_id = annotation_datum['image_id'] + if (image_id not in annotation_map): + annotation_map[image_id] = [] + + # Add annotation datum for current image ID + annotation_map[image_id].append(annotation_datum) + + # Create map of file IDs to image data + image_map = {} + for image_datum in data['images']: + image_id = image_datum['id'] + if (image_id in annotation_map): + image_map[image_id] = image_datum + + return image_map, annotation_map, category_map + + +def get_annotation_data(image_data, annotation_data, category_map): + + LABEL_MAP = {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, + 13: 12, 14: 13, 15: 14, 16: 15, 17: 16, 18: 17, 19: 18, 20: 19, 21: 20, 22: 21, + 23: 22, 24: 23, 25: 24, 27: 25, 28: 26, 31: 27, 32: 28, 33: 29, 34: 30, 35: 31, + 36: 32, 37: 33, 38: 34, 39: 35, 40: 36, 41: 37, 42: 38, 43: 39, 44: 40, 46: 41, + 47: 42, 48: 43, 49: 44, 50: 45, 51: 46, 52: 47, 53: 48, 54: 49, 55: 50, 56: 51, + 57: 52, 58: 53, 59: 54, 60: 55, 61: 56, 62: 57, 63: 58, 64: 59, 65: 60, 67: 61, + 70: 62, 72: 63, 73: 64, 74: 65, 75: 66, 76: 67, 77: 68, 78: 69, 79: 70, 80: 71, + 81: 72, 82: 73, 84: 74, 85: 75, 86: 76, 87: 77, 88: 78, 89: 79, 90: 80} + + # Retrieve image width and height + image_width = image_data['width'] + image_height = image_data['height'] + + bboxes 
= [] + labels = [] + label_names = [] + difficult = [] + truncated = [] + for annotation_datum in annotation_data: + # Scale bounding box coordinates + # COCO bounding boxes are [x, y, width, height] but https://github.com/HiKapok/SSD.TensorFlow.git expects [ymin, xmin, ymax, xmax] + bbox = annotation_datum['bbox'] + ymin = bbox[1] / image_height + xmin = bbox[0] / image_width + ymax = (bbox[1] + bbox[3]) / image_height + xmax = (bbox[0] + bbox[2]) / image_width + bboxes.append([ymin, xmin, ymax, xmax]) + + labels.append(LABEL_MAP[annotation_datum['category_id']]) + label_names.append(category_map[annotation_datum['category_id']].encode('ascii')) + + # Append difficult and truncated flags + difficult.append(0) + truncated.append(0) + + return bboxes, labels, label_names, difficult, truncated + + +def get_record(filename, buffer, width, height, bboxes, labels, label_names, difficult, truncated): + + CHANNEL_COUNT = 3 + IMAGE_FORMAT = 'JPEG' + + # Extract bounding box coordinates + ymin = [] + xmin = [] + ymax = [] + xmax = [] + for bbox in bboxes: + ymin.append(bbox[0]) + xmin.append(bbox[1]) + ymax.append(bbox[2]) + xmax.append(bbox[3]) + + # Create record features + features = { + 'image/width': _int64_feature(width), + 'image/height': _int64_feature(height), + 'image/channels': _int64_feature(CHANNEL_COUNT), + 'image/shape': _int64_feature([height, width, CHANNEL_COUNT]), + 'image/object/bbox/xmin': _float_feature(xmin), + 'image/object/bbox/xmax': _float_feature(xmax), + 'image/object/bbox/ymin': _float_feature(ymin), + 'image/object/bbox/ymax': _float_feature(ymax), + 'image/object/bbox/label': _int64_feature(labels), + 'image/object/bbox/label_text': _bytes_list_feature(label_names), + 'image/object/bbox/difficult': _int64_feature(difficult), + 'image/object/bbox/truncated': _int64_feature(truncated), + 'image/format': _bytes_feature(IMAGE_FORMAT), + 'image/filename': _bytes_feature(filename.encode('utf8')), + 'image/encoded': _bytes_feature(buffer)} + + return tf.train.Example(features = tf.train.Features(feature = features)) + + +def check_for_link(value): + """ + Throws an error if the specified path is a link. os.islink returns + True for sym links. For files, we also look at the number of links in + os.stat() to determine if it's a hard link. + """ + if os.path.islink(value) or \ + (os.path.isfile(value) and os.stat(value).st_nlink > 1): + raise argparse.ArgumentTypeError("{} cannot be a link.".format(value)) + +def check_valid_file_or_folder(value): + """verifies filename exists and isn't a link""" + if value is not None: + if not os.path.isfile(value) and not os.path.isdir(value): + raise argparse.ArgumentTypeError("{} does not exist or is not a file/folder.". 
+ format(value)) + check_for_link(value) + return value + + +def main(): + + RECORDS_PER_FILE = 1024 + RECORD_FILENAME_FORMAT = '%s-%.5d-of-%.5d' + + parser = argparse.ArgumentParser() + parser.add_argument('--image_path', type=check_valid_file_or_folder, required=True, help='path to the input validation image files') + parser.add_argument('--annotations_file', type=check_valid_file_or_folder, required=True, help='name of the input validation annotations file') + parser.add_argument('--output_prefix', type=str, required=True, help='prefix of the output TensorFlow record files') + parser.add_argument('--output_path', type=check_valid_file_or_folder, required=True, help='path to the output TensorFlow record files') + + args = parser.parse_args() + + # Load annotation data + image_map, annotation_map, category_map = load_annotation_data(args.annotations_file) + + # Create output path if necessary + if (not os.path.exists(args.output_path)): + os.makedirs(args.output_path) + + # Create image coder + image_coder = ImageCoder() + + record_file_index = 0 + record_file_count = np.ceil(len(image_map) / RECORDS_PER_FILE).astype(int) + for index, image_id in tqdm(enumerate(image_map), desc = 'Generating', total = len(image_map), unit = ' file'): + # Create record writer + if (index % RECORDS_PER_FILE == 0): + output_filename = os.path.join(args.output_path, RECORD_FILENAME_FORMAT % (args.output_prefix, record_file_index, record_file_count)) + writer = tf.python_io.TFRecordWriter(output_filename) + record_file_index += 1 + + # Extract image data from current image file + image_filename = image_map[image_id]['file_name'] + image_buffer, _, _ = _process_image(os.path.join(args.image_path, image_filename), image_coder) + + # Retrieve annotation data associated with current image file + bboxes, labels, label_names, difficult, truncated = get_annotation_data(image_map[image_id], annotation_map[image_id], category_map) + + # Write TF record for current image file + image_width, image_height = image_map[image_id]['width'], image_map[image_id]['height'] + record = get_record(image_filename, image_buffer, image_width, image_height, bboxes, labels, label_names, difficult, truncated) + writer.write(record.SerializeToString()) + + +if __name__ == '__main__': + + main() \ No newline at end of file diff --git a/models/object_detection/tensorflow/ssd_vgg16/inference/validate_ssd_vgg16.py b/models/object_detection/tensorflow/ssd_vgg16/inference/validate_ssd_vgg16.py new file mode 100644 index 000000000..c580fc022 --- /dev/null +++ b/models/object_detection/tensorflow/ssd_vgg16/inference/validate_ssd_vgg16.py @@ -0,0 +1,111 @@ +# +# -*- coding: utf-8 -*- +# +# Copyright (c) 2019 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# SPDX-License-Identifier: EPL-2.0 +# +import argparse +import os +import json +from pycocotools.coco import COCO +from pycocotools.cocoeval import COCOeval + + +def convert_detection(label, detection): + + ID_INDEX = 0 + SCORE_INDEX = 1 + XMIN_INDEX = 2 + YMIN_INDEX = 3 + XMAX_INDEX = 4 + YMAX_INDEX = 5 + LABEL_MAP = {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 11, + 12: 13, 13: 14, 14: 15, 15: 16, 16: 17, 17: 18, 18: 19, 19: 20, 20: 21, 21: 22, + 22: 23, 23: 24, 24: 25, 25: 27, 26: 28, 27: 31, 28: 32, 29: 33, 30: 34, 31: 35, + 32: 36, 33: 37, 34: 38, 35: 39, 36: 40, 37: 41, 38: 42, 39: 43, 40: 44, 41: 46, + 42: 47, 43: 48, 44: 49, 45: 50, 46: 51, 47: 52, 48: 53, 49: 54, 50: 55, 51: 56, + 52: 57, 53: 58, 54: 59, 55: 60, 56: 61, 57: 62, 58: 63, 59: 64, 60: 65, 61: 67, + 62: 70, 63: 72, 64: 73, 65: 74, 66: 75, 67: 76, 68: 77, 69: 78, 70: 79, 71: 80, + 72: 81, 73: 82, 74: 84, 75: 85, 76: 86, 77: 87, 78: 88, 79: 89, 80: 90} + + # Extract image ID and bounding box score from detection + image_id = int(detection[ID_INDEX]) + score = float(detection[SCORE_INDEX]) + + # Convert bounding box coordinates [xmin, ymin, xmax, ymax] to [x, y, width, height] + x = float(detection[XMIN_INDEX]) + y = float(detection[YMIN_INDEX]) + width = float(detection[XMAX_INDEX]) - x + height = float(detection[YMAX_INDEX]) - y + bbox = [x, y, width, height] + + return {'category_id': LABEL_MAP[label], 'image_id': image_id, 'score': score, 'bbox': bbox} + + +def generate_results_file(detections_path, results_filename): + + DETECTIONS_EXTENSION = '.txt' + + # Retrieve detections filenames + filenames = [filename for filename in os.listdir(detections_path) if filename.endswith(DETECTIONS_EXTENSION)] + + results = [] + for filename in filenames: + # Read detections from current file + with open(os.path.join(detections_path, filename), 'r') as detections_file: + lines = detections_file.readlines() + + # Convert detections from current file + label = int(os.path.splitext(filename)[0].split('_')[1]) + for line in lines: + results.append(convert_detection(label, line.strip().split())) + + # Write results to file + with open(os.path.join(detections_path, results_filename), 'w') as results_file: + json.dump(results, results_file) + + +def main(): + + RESULTS_FILENAME = 'results.json' + ANNOTATION_TYPE = 'bbox' + + parser = argparse.ArgumentParser() + parser.add_argument('--detections_path', type = str, required = True, help = 'path to the input detected bounding box files') + parser.add_argument('--annotations_file', type = str, required = True, help = 'name of the input validation annotations file') + + args = parser.parse_args() + + # Generate COCO results file + print('Generating COCO results...') + generate_results_file(args.detections_path, RESULTS_FILENAME) + + # Create COCO instance + cocoGt = COCO(args.annotations_file) + + # Load COCO results + cocoDt = cocoGt.loadRes(os.path.join(args.detections_path, RESULTS_FILENAME)) + + # Evaluate results + cocoEval = COCOeval(cocoGt, cocoDt, ANNOTATION_TYPE) + cocoEval.evaluate() + cocoEval.accumulate() + cocoEval.summarize() + + +if __name__ == '__main__': + + main() \ No newline at end of file diff --git a/models_directory_structure.png b/models_directory_structure.png new file mode 100644 index 000000000..906cfdf02 Binary files /dev/null and b/models_directory_structure.png differ diff --git a/requirements-test.txt b/requirements-test.txt index 5102c19b3..fe0bf31ab 100644 --- a/requirements-test.txt +++ b/requirements-test.txt @@ -1,6 +1,6 @@ 
conditional flake8==3.7.5 -pytest +pytest==4.6.3 pytest-cov pytest-xdist mock diff --git a/tests/test_utils/io.py b/tests/test_utils/io.py index 50f8e5e61..5ec580f94 100644 --- a/tests/test_utils/io.py +++ b/tests/test_utils/io.py @@ -18,19 +18,21 @@ # SPDX-License-Identifier: EPL-2.0 # -import csv +import os +import json -def parse_csv_file(file_path, expected_num_columns): +def parse_json_files(json_dir_path): """ - Reads the specified csv file. Checks for a value number of columns in - each row. Returns the csv file values as a list of tuples. + Reads the JSON files in the specified directory. Checks for a value number of columns in + each row. Returns the JSON files values as a list of tuples. """ values = [] - with open(file_path) as csv_file: - csv_reader = csv.reader(csv_file, delimiter=',', - skipinitialspace=True) - for row in csv_reader: - assert len(row) == expected_num_columns - values.append(tuple(row)) + for model_file in os.listdir(json_dir_path): + file_path = os.path.join(json_dir_path, model_file) + with open(file_path) as f: + data = json.load(f) + for x in data: + values.append( + tuple((x['input'], x['output'], model_file + " :: " + x['_comment']))) return values diff --git a/tests/unit/common/tensorflow/test_run_tf_benchmarks.py b/tests/unit/common/tensorflow/test_run_tf_benchmarks.py index b5407f00e..0b58ac411 100644 --- a/tests/unit/common/tensorflow/test_run_tf_benchmarks.py +++ b/tests/unit/common/tensorflow/test_run_tf_benchmarks.py @@ -28,19 +28,19 @@ from benchmarks.common.tensorflow.run_tf_benchmark import ModelBenchmarkUtil from test_utils import platform_config -from test_utils.io import parse_csv_file +from test_utils.io import parse_json_files def parse_model_args_file(): """ - Gets test args from the tf_model_args.txt file to use as parameters + Gets test args from the models files in the specified directory to use as parameters for testing model benchmarking scripts. 
The file has a run_tf_benchmarks.py command with args with the corresponding run command that should get called from model_init.py """ current_dir = os.path.dirname(os.path.realpath(__file__)) - csv_file_path = os.path.join(current_dir, "tf_model_args.txt") - return parse_csv_file(csv_file_path, 2) + models_args_path = os.path.join(current_dir, "tf_model_args") + return parse_json_files(models_args_path) def delete_env_var(env_var): @@ -63,7 +63,7 @@ def clear_kmp_env_vars(): test_arg_values = parse_model_args_file() -@pytest.mark.parametrize("test_args,expected_cmd", test_arg_values) +@pytest.mark.parametrize("test_args,expected_cmd,comment", test_arg_values) @patch("os.mkdir") @patch("shutil.rmtree") @patch("os.listdir") @@ -73,18 +73,21 @@ def clear_kmp_env_vars(): @patch("os.stat") @patch("os.chdir") @patch("os.remove") +@patch("glob.glob") @patch("common.platform_util.os") @patch("common.platform_util.system_platform") @patch("common.platform_util.subprocess") @patch("common.base_model_init.BaseModelInitializer.run_command") -def test_run_benchmark(mock_run_command, mock_subprocess, mock_platform, - mock_os, mock_remove, mock_chdir, mock_stat, mock_path_exists, mock_is_file, mock_is_dir, - mock_listdir, mock_rmtree, mock_mkdir, test_args, expected_cmd): +def test_run_benchmark(mock_run_command, mock_subprocess, mock_platform, mock_os, + mock_glob, mock_remove, mock_chdir, mock_stat, mock_path_exists, + mock_is_file, mock_is_dir, mock_listdir, mock_rmtree, mock_mkdir, + test_args, expected_cmd, comment): """ Runs through executing the specified run_tf_benchmarks.py command from the test_args and verifying that the model_init file calls run_command with the expected_cmd string. """ + print("****** Running The {} test ******".format(comment)) os.environ["PYTHON_EXE"] = "python" mock_path_exists.return_value = True mock_is_dir.return_value = True @@ -92,6 +95,7 @@ def test_run_benchmark(mock_run_command, mock_subprocess, mock_platform, mock_stat.return_value = MagicMock(st_nlink=0) parse_model_args_file() mock_listdir.return_value = True + mock_glob.return_value = ["/usr/lib/libtcmalloc.so.4.2.6"] clear_kmp_env_vars() platform_config.set_mock_system_type(mock_platform) platform_config.set_mock_os_access(mock_os) diff --git a/tests/unit/common/tensorflow/tf_model_args.txt b/tests/unit/common/tensorflow/tf_model_args.txt deleted file mode 100755 index 6381db35a..000000000 --- a/tests/unit/common/tensorflow/tf_model_args.txt +++ /dev/null @@ -1,82 +0,0 @@ -run_tf_benchmark.py --framework tensorflow --use-case recommendation --precision fp32 --mode inference --model-name wide_deep --batch-size 1024 --data-location /dataset --checkpoint /checkpoints --intelai-models . --verbose, OMP_NUM_THREADS=1 numactl --cpunodebind=0 --membind=0 python inference/fp32/wide_deep_inference.py --data_dir=/dataset --model_dir=/checkpoints --batch_size=1024 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 100 --in-graph /final_int8_inceptionv3.pb --intelai-models . --accuracy-only --verbose,python ./int8/accuracy.py --num_intra_threads=56 --num_inter_threads=2 --batch_size=100 --input_graph=/final_int8_inceptionv3.pb -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 1 --in-graph /final_int8_inceptionv3.pb --intelai-models . 
--benchmark-only --socket-id 0 --verbose,numactl --cpunodebind=0 --membind=0 python ./int8/benchmark.py --warmup_steps=10 --num_intra_threads=28 --num_inter_threads=1 --batch_size=1 --input_graph=/final_int8_inceptionv3.pb --steps=50 --num_cores=28 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /final_int8_inceptionv3.pb --intelai-models . --benchmark-only --socket-id 0 --verbose,numactl --cpunodebind=0 --membind=0 python ./int8/benchmark.py --warmup_steps=10 --num_intra_threads=28 --num_inter_threads=1 --batch_size=128 --input_graph=/final_int8_inceptionv3.pb --steps=50 --num_cores=28 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inceptionv3 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/inception_frozen_max_min.pb --steps=200 --warmup-steps=20,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/int8/benchmark.py --warmup_steps=20 --num_intra_threads=28 --num_inter_threads=1 --batch_size=128 --input_graph=/in_graph/inception_frozen_max_min.pb --steps=200 --num_cores=28 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inceptionv3 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/inception_frozen_max_min.pb --steps=200 --warmup-steps=20,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/int8/benchmark.py --warmup_steps=20 --num_intra_threads=28 --num_inter_threads=1 --batch_size=1 --input_graph=/in_graph/inception_frozen_max_min.pb --steps=200 --num_cores=28 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 1 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --socket-id 0 --verbose,numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --socket-id 0 --verbose,numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . 
--socket-id 0 --verbose --num-inter-threads 4 --num-intra-threads 16,numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=4 --num-intra-threads=16 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 100 --accuracy-only --data-location /dataset --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --verbose,python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=2 --num-intra-threads=56 --num-cores=28 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/accuracy.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28 --data_location=/dataset -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=fp32 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/inceptionv4_fp32_pretrained_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/accuracy.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_fp32_pretrained_model.pb --num_intra_threads=28 --data_location=/dataset -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/benchmark.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/benchmark.py --batch_size=1 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose 
--in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --data-location=/dataset,python /workspace/intelai_models/eval_image_classifier_accuracy.py --input_graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --data_location=/dataset --input_height=299 --input_width=299 --num_inter_threads=2 --num_intra_threads=56 --output_layer=InceptionResnetV2/Logits/Predictions --batch_size=100 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier.py --dataset_name=imagenet --checkpoint_path=/checkpoints --eval_dir=/checkpoints --dataset_dir=/dataset --dataset_split_name=validation --clone_on_cpu=True --model_name=inception_resnet_v2 --inter_op_parallelism_threads=2 --intra_op_parallelism_threads=28 --batch_size=1 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier.py --dataset_name=imagenet --checkpoint_path=/checkpoints --eval_dir=/checkpoints --dataset_dir=/dataset --dataset_split_name=validation --clone_on_cpu=True --model_name=inception_resnet_v2 --inter_op_parallelism_threads=2 --intra_op_parallelism_threads=28 --batch_size=128 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset,python /workspace/intelai_models/eval_image_classifier_accuracy.py --input_graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data_location=/dataset --input_height=299 --input_width=299 --num_inter_threads=2 --num_intra_threads=56 --output_layer=InceptionResnetV2/Logits/Predictions --batch_size=100 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --inter-op-parallelism-threads=1 --intra-op-parallelism-threads=28 --batch-size=1 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --benchmark-only --verbose --in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset,numactl 
--cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --inter-op-parallelism-threads=1 --intra-op-parallelism-threads=28 --batch-size=128 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/resnet50_int8_pretrained_model.pb --data-location=/dataset --calibration-only,python /workspace/intelai_models/int8/generate_calibration_data.py --num_intra_threads=56 --num_inter_threads=2 --batch_size=100 --input_graph=/in_graph/resnet50_int8_pretrained_model.pb --data_location=/dataset -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name resnet50 --batch-size 100 --data-location /dataset --in-graph /final_int8_resnet50.pb --intelai-models . --accuracy-only --verbose,python ./inference/eval_image_classifier_inference.py --input-graph=/final_int8_resnet50.pb --num-inter-threads=2 --num-intra-threads=56 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name resnet50 --batch-size 128 --in-graph /final_int8_resnet50.pb --intelai-models . --benchmark-only --verbose,python ./inference/eval_image_classifier_inference.py --input-graph=/final_int8_resnet50.pb --num-inter-threads=2 --num-intra-threads=56 --batch-size=128 --warmup-steps=10 --steps=50 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/resnet50_int8_pretrained_model.pb --steps=200 --warmup-steps=20,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/resnet50_int8_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=20 --steps=200 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 1 --in-graph /freezed_resnet50.pb --intelai-models . --socket-id 0 --verbose,numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 128 --in-graph /freezed_resnet50.pb --intelai-models . 
--socket-id 0 --verbose,numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28 -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 1 --in-graph /freezed_resnet50.pb --intelai-models . --socket-id 0 --verbose --num-inter-threads 4 --num-intra-threads 16,numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=4 --num-intra-threads=16 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --batch-size 100 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/freezed_resnet50.pb --accuracy-only --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=100 --data-location=/dataset --accuracy-only --num-cores=28 --warmup-steps=10 --steps=50 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --output-results --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/resnet50_fp32_pretrained_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/resnet50_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --num-cores=28 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --results-file-path /workspace/benchmarks/common/tensorflow/logs/resnet50_fp32_inference_results*.txt -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name squeezenet --batch-size 64 --checkpoint /checkpoints --intelai-models . --socket-id 0 --verbose,taskset -c 0-27 python ./fp32/train_squeezenet.py --data_location None --batch_size 64 --num_inter_threads 1 --num_intra_threads 28 --model_dir /checkpoints --inference-only --verbose -run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name squeezenet --batch-size 1 --checkpoint /checkpoints --intelai-models . --socket-id 0 --verbose,taskset -c 0-27 python ./fp32/train_squeezenet.py --data_location None --batch_size 1 --num_inter_threads 1 --num_intra_threads 28 --model_dir /checkpoints --inference-only --verbose -run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name faster_rcnn --checkpoint /checkpoints --intelai-models . --model-source-dir . 
--socket-id 0 --verbose --config_file=pipeline.config,numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --num_inter_threads 1 --num_intra_threads 28 --pipeline_config_path /checkpoints/pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/log/eval -run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name faster_rcnn --checkpoint /checkpoints --intelai-models . --model-source-dir . --socket-id 0 --verbose --config_file=pipeline.config --num-inter-threads 4 --num-intra-threads 16,numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --num_inter_threads 4 --num_intra_threads 16 --pipeline_config_path /checkpoints/pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/log/eval -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=-1 --accuracy-only --verbose --checkpoint=/checkpoints --data-location=/dataset --in-graph=/in_graph/frozen_inference_graph.pb,sh /workspace/intelai_models/inference/fp32/coco_accuracy.sh /in_graph/frozen_inference_graph.pb /dataset/coco_val.record /workspace/models -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --data-location=/dataset --verbose --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb --accuracy-only --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb,sh /workspace/intelai_models/inference/int8/coco_int8.sh /in_graph/ssdmobilenet_int8_pretrained_model.pb /dataset/coco_val.record -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --batch-size=1 --socket-id 0 --data-location=/dataset --verbose --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb --benchmark-only --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/run_frozen_graph_ssdmob.py -g /in_graph/ssdmobilenet_int8_pretrained_model.pb -n 5000 -d /dataset -x --num-inter-threads 2 --num-intra-threads 28 -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 --benchmark-only --verbose --in-graph=/in_graph/frozen_inference_graph.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/infer_detections.py --input_tfrecord_paths=/dataset --output_tfrecord_path=/SSD-mobilenet-out.tfrecord --inference_graph=/in_graph/frozen_inference_graph.pb --discard_image_pixels=True --num_inter_threads=2 --num_intra_threads=28 -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 
--accuracy-only --verbose --in-graph=/in_graph/frozen_inference_graph.pb --benchmark-dir=/workspace/benchmarks --data-location=/dataset,sh /workspace/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/ssdmobilenet_accuracy.sh /in_graph/frozen_inference_graph.pb /dataset -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-resnet34 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssd_resnet34_bs1.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/infer_detections.py --input-graph /in_graph/ssd_resnet34_bs1.pb --batch-size 1 --inter-op-parallelism-threads 1 --intra-op-parallelism-threads 28 --accuracy-only --data-location /dataset -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-resnet34 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssd_resnet34_bs1.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/infer_detections.py --input-graph /in_graph/ssd_resnet34_bs1.pb --batch-size 1 --inter-op-parallelism-threads 1 --intra-op-parallelism-threads 28 -run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name rfcn --checkpoint /checkpoints --intelai-models . --model-source-dir . 
--socket-id 0 --verbose --config_file=rfcn_pipeline.config,numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --inter_op 1 --intra_op 28 --omp 28 --pipeline_config_path /checkpoints/rfcn_pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/models/rfcn/eval --logtostderr --blocktime=0 --run_once=True -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=rfcn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --verbose --in-graph=/in_graph/frozen_inference_graph.pb --data-location=/dataset --accuracy-only --split=accuracy_message,FROZEN_GRAPH=/in_graph/frozen_inference_graph.pb TF_RECORD_FILE=/dataset SPLIT=accuracy_message TF_MODELS_ROOT=/workspace/models /workspace/intelai_models/inference/fp32/coco_mAP.sh -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=rfcn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --verbose --in-graph=/in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb --data-location=/dataset --benchmark-only --number_of_steps=500,python /workspace/intelai_models/inference/int8/run_rfcn_inference.py -m /workspace/models -g /in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb -x 500 -d /dataset --num-inter-threads 2 --num-intra-threads 56 -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=rfcn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --verbose --in-graph=/in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb --data-location=/dataset --accuracy-only --split=accuracy_message,FROZEN_GRAPH=/in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb TF_RECORD_FILE=/dataset SPLIT=accuracy_message TF_MODELS_ROOT=/workspace/models /workspace/intelai_models/inference/int8/coco_mAP.sh -run_tf_benchmark.py --framework tensorflow --use-case text_to_speech --precision fp32 --mode inference --model-name wavenet --num-cores 1 --checkpoint /checkpoints --intelai-models . --model-source-dir . 
--socket-id 0 --verbose --checkpoint_name=model.ckpt-99 --sample=8510,numactl --physcpubind=0-0 --membind=0 python generate.py /checkpoints/model.ckpt-99 --num_inter_threads=1 --num_intra_threads=1 --sample=8510 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=128 --num-inter-threads=1 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=28 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --accuracy-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb --data-location=/dataset,python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=128 --num-inter-threads=2 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=56 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=128 --num-inter-threads=1 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=28 --warmup-steps=10 --steps=50 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=1 --num-inter-threads=1 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=28 --warmup-steps=10 --steps=50 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/resnet101_int8_model.pb,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=1 --num-intra-threads=28 --num-inter-threads=1 --input-graph=/in_graph/resnet101_int8_model.pb --warmup-steps=40 --steps=100 -"run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_language --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints 
--data-location=/dataset --decode_from_file=/checkpoints/newstest2015.en --reference=/checkpoints/newstest2015.de","numactl --cpunodebind=0 --membind=0 python /workspace/models/tensor2tensor/bin/t2t_decoder.py --problem=translate_ende_wmt32k --model=transformer --hparams_set=transformer_base_single_gpu --decode_hparams=beam_size=4,alpha=0.6,batch_size=1 --data_dir=/dataset --output_dir=/checkpoints --decode_from_file=/checkpoints/newstest2015.en --decode_to_file=/workspace/models/out_dir/output_infer --reference=/checkpoints/newstest2015.de --inter_op_parallelism_threads=1 --intra_op_parallelism_threads=28" -"run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_language --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=32 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --decode_from_file=/checkpoints/newstest2015.en --reference=/checkpoints/newstest2015.de","numactl --cpunodebind=0 --membind=0 python /workspace/models/tensor2tensor/bin/t2t_decoder.py --problem=translate_ende_wmt32k --model=transformer --hparams_set=transformer_base_single_gpu --decode_hparams=beam_size=4,alpha=0.6,batch_size=32 --data_dir=/dataset --output_dir=/checkpoints --decode_from_file=/checkpoints/newstest2015.en --decode_to_file=/workspace/models/out_dir/output_infer --reference=/checkpoints/newstest2015.de --inter_op_parallelism_threads=1 --intra_op_parallelism_threads=28" -run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_lt_official --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --file=/dataset/newstest2014.en --reference=/dataset/newstest2014.de --vocab_file=/dataset/vocab.txt --in_graph=/in_graph/fp32_graphdef.pb,numactl --cpunodebind=0 --membind=0 python /workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=1 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt -run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_lt_official --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=64 --socket-id=0 --benchmark-only --verbose --file=/dataset/newstest2014.en --reference=/dataset/newstest2014.de --vocab_file=/dataset/vocab.txt --in_graph=/in_graph/fp32_graphdef.pb,numactl --cpunodebind=0 --membind=0 python /workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=64 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 -l python 
/workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_dir /dataset --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 1 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 -l python /workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_dir /dataset --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 100 -run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --checkpoint=/checkpoints --in-graph=/in_graph/mobilenet_v1_1.0_224_frozen.pb --data-location=/dataset,python /workspace/intelai_models/inference/fp32/accuracy.py --batch_size=100 --num_inter_threads=2 --input_graph=/in_graph/mobilenet_v1_1.0_224_frozen.pb --num_intra_threads=56 --data_location=/dataset -run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=256 --socket-id 0 --accuracy-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=256 --inference_only --accuracy_only -run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=256 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=256 --inference_only --benchmark_only -run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=1 --inference_only --benchmark_only -run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=maskrcnn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 
--batch-size=1 --socket-id=0 --benchmark-only --verbose --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/coco.py evaluate --dataset=/dataset --num_inter_threads 1 --num_intra_threads 28 --nw 5 --nb 50 --model=coco --infbs 1 -run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=maskrcnn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --data-location=/dataset --num-inter-threads 4 --num-intra-threads 16,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/coco.py evaluate --dataset=/dataset --num_inter_threads 4 --num_intra_threads 16 --nw 5 --nb 50 --model=coco --infbs 1 -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=-1 --accuracy-only --verbose --in-graph=/in_graph/pretrained_int8_faster_rcnn_model.pb --data-location=/dataset,/workspace/intelai_models/inference/int8/coco_int8.sh /in_graph/pretrained_int8_faster_rcnn_model.pb /dataset /workspace/models -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=-1 --benchmark-only --verbose --in-graph=/in_graph/pretrained_int8_faster_rcnn_model.pb --data-location=/dataset, python /workspace/intelai_models/inference/int8/run_frozen_graph_rcnn.py -g /in_graph/pretrained_int8_faster_rcnn_model.pb -d /dataset --num-inter-threads 2 --num-intra-threads 56 -run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=-1 --benchmark-only --verbose --in-graph=/in_graph/pretrained_int8_faster_rcnn_model.pb --data-location=/dataset --number-of-steps=500, python /workspace/intelai_models/inference/int8/run_frozen_graph_rcnn.py -g /in_graph/pretrained_int8_faster_rcnn_model.pb -n 500 -d /dataset --num-inter-threads 2 --num-intra-threads 56 -run_tf_benchmark.py --framework=tensorflow --use-case=adversarial_networks --model-name=dcgan --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/inference_bench.py -ckpt /checkpoints -dl /dataset --num_inter_threads 1 --num_intra_threads 28 -nw 100 -nb 500 --bs 100 --kmp_blocktime 1 --kmp_settings 1 -run_tf_benchmark.py --framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 1 --num_intra_threads 28 --bs 1 --dl /dataset --nw 100 --nb 200 -run_tf_benchmark.py 
--framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 1 --num_intra_threads 28 --bs 100 --dl /dataset --nw 100 --nb 200 -run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=gnmt --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --infer_mode=beam_search,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/fp32/nmt.py --src=de --tgt=en --hparams_path=/workspace/intelai_models/fp32/standard_hparams/wmt16_gnmt_4_layer_internal.json --out_dir=/workspace/benchmarks/common/tensorflow/logs --vocab_prefix=/dataset/vocab.bpe.32000 --ckpt=/checkpoints/translate.ckpt --infer_batch_size=1 --inference_input_file=/dataset/newstest2015.tok.bpe.32000.de --inference_output_file=/workspace/benchmarks/common/tensorflow/logs/output_infer --inference_ref_file=/dataset/newstest2015.tok.bpe.32000.en --num_inter_threads=1 --num_intra_threads=28 --infer_mode=beam_search -run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=gnmt --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=32 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --infer_mode=beam_search,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/fp32/nmt.py --src=de --tgt=en --hparams_path=/workspace/intelai_models/fp32/standard_hparams/wmt16_gnmt_4_layer_internal.json --out_dir=/workspace/benchmarks/common/tensorflow/logs --vocab_prefix=/dataset/vocab.bpe.32000 --ckpt=/checkpoints/translate.ckpt --infer_batch_size=32 --inference_input_file=/dataset/newstest2015.tok.bpe.32000.de --inference_output_file=/workspace/benchmarks/common/tensorflow/logs/output_infer --inference_ref_file=/dataset/newstest2015.tok.bpe.32000.en --num_inter_threads=1 --num_intra_threads=28 --infer_mode=beam_search -run_tf_benchmark.py --framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --num-inter-threads 4 --num-intra-threads 16,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 4 --num_intra_threads 16 --bs 100 --dl /dataset --nw 100 --nb 200 -run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=unet --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose 
--checkpoint=/checkpoints --checkpoint_name=model.ckpt,numactl --cpunodebind=0 --membind=0 python /workspace/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/unet_infer.py -bs 1 -cp /checkpoints/model.ckpt --num_inter_threads 1 --num_intra_threads 28 -nw 80 -nb 400 -run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14 -run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=1 --input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14 -run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=512 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=512 --input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14 -run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_int8_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --input_graph=/in_graph/wide_deep_int8_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14 -run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 
--output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_int8_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=1 --input_graph=/in_graph/wide_deep_int8_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14 -run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=512 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_int8_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=512 --input_graph=/in_graph/wide_deep_int8_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14 -run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=mtcc --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset,numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/one_image_test.py --num_inter_threads 1 --num_intra_threads 28 -ckpt /checkpoints -dl /dataset -run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset, numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=1 --num_intra_threads=28 --lfw_batch_size=1 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=200 --max_steps=1000 -run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset, numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=2 --num_intra_threads=28 --lfw_batch_size=100 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=40 --max_steps=1000 -run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference 
--benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset, numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=2 --num_intra_threads=28 --lfw_batch_size=100 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=40 --max_steps=1000 diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_dcgan_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_dcgan_args.json new file mode 100644 index 000000000..e5802f700 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_dcgan_args.json @@ -0,0 +1,5 @@ +[ + { "_comment": "FP32 benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=adversarial_networks --model-name=dcgan --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/inference_bench.py -ckpt /checkpoints -dl /dataset --num_inter_threads 1 --num_intra_threads 28 -nw 100 -nb 500 --bs 100 --kmp_blocktime 1 --kmp_settings 1"} +] diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_densenet169_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_densenet169_args.json new file mode 100644 index 000000000..a5d665547 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_densenet169_args.json @@ -0,0 +1,15 @@ +[ + { "_comment": "FP32 latency benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/benchmark.py --num_intra_threads=28 --num_inter_threads=1 --batch_size=1 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb"}, + + { "_comment": "Fp32 accuracy", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb --data-location=/dataset", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/accuracy.py --num_intra_threads=28 --num_inter_threads=1 --batch_size=100 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb --data_location=/dataset"}, + + { "_comment": "FP32 Throughput benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=densenet169 --precision=fp32 
--mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/densenet169_fp32_pretrained_model.pb", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/benchmark.py --num_intra_threads=28 --num_inter_threads=1 --batch_size=100 --input_graph=/in_graph/densenet169_fp32_pretrained_model.pb"} +] + + diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_draw_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_draw_args.json new file mode 100644 index 000000000..d638d7492 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_draw_args.json @@ -0,0 +1,15 @@ +[ + { "_comment": "FP32 latency benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 1 --num_intra_threads 28 --bs 1 --dl /dataset --nw 100 --nb 200"}, + + { "_comment": "FP32 throughput benchmark with --num-inter-threads 4 --num-intra-threads 16", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --num-inter-threads 4 --num-intra-threads 16", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 4 --num_intra_threads 16 --bs 100 --dl /dataset --nw 100 --nb 200"}, + + { "_comment": "FP32 Throughput benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=content_creation --model-name=draw --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/draw_inf.py --cp /checkpoints --num_inter_threads 1 --num_intra_threads 28 --bs 100 --dl /dataset --nw 100 --nb 200"} +] + + diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_facenet_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_facenet_args.json new file mode 100644 index 000000000..34b5af1fe --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_facenet_args.json @@ -0,0 +1,13 @@ +[ + { "_comment": "FP32 latency benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset", + 
"output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=1 --num_intra_threads=28 --lfw_batch_size=1 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=200 --max_steps=1000"}, + + { "_comment": "Fp32 accuracy", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=2 --num_intra_threads=28 --lfw_batch_size=100 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=40 --max_steps=1000"}, + + { "_comment": "FP32 Throughput benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=facenet --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints --data-location=/dataset", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/src/validate_on_lfw.py /dataset /checkpoints --distance_metric 1 --use_flipped_images --subtract_mean --use_fixed_image_standardization --num_inter_threads=2 --num_intra_threads=28 --lfw_batch_size=100 --lfw_pairs=/workspace/models/data/pairs.txt --warmup_steps=40 --max_steps=1000"} +] diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_faster_rcnn_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_faster_rcnn_args.json new file mode 100644 index 000000000..ea6c0a75a --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_faster_rcnn_args.json @@ -0,0 +1,28 @@ +[ + { "_comment": "FP32 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=-1 --accuracy-only --verbose --checkpoint=/checkpoints --data-location=/dataset --in-graph=/in_graph/frozen_inference_graph.pb", + "output": "sh /workspace/intelai_models/inference/fp32/coco_accuracy.sh /in_graph/frozen_inference_graph.pb /dataset/coco_val.record /workspace/models"}, + + { "_comment": "FP32 benchmark command", + "input": "run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name faster_rcnn --checkpoint /checkpoints --intelai-models . --model-source-dir . 
--socket-id 0 --verbose --config_file=pipeline.config", + "output": "numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --num_inter_threads 1 --num_intra_threads 28 --pipeline_config_path /checkpoints/pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/log/eval"}, + + { "_comment": "FP32 benchmark command with custom --num_inter_threads 4 --num_intra_threads 16", + "input": "run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name faster_rcnn --checkpoint /checkpoints --intelai-models . --model-source-dir . --socket-id 0 --verbose --config_file=pipeline.config --num-inter-threads 4 --num-intra-threads 16", + "output": "numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --num_inter_threads 4 --num_intra_threads 16 --pipeline_config_path /checkpoints/pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/log/eval"}, + + { "_comment": "Int8 command for throughput benchmark with --number-of-steps enabled.", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=-1 --benchmark-only --verbose --in-graph=/in_graph/pretrained_int8_faster_rcnn_model.pb --data-location=/dataset --number-of-steps=500", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/int8/run_frozen_graph_rcnn.py -g /in_graph/pretrained_int8_faster_rcnn_model.pb -n 500 -d /dataset --num-inter-threads 2 --num-intra-threads 56"}, + + { "_comment": "Int8 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=-1 --accuracy-only --verbose --in-graph=/in_graph/pretrained_int8_faster_rcnn_model.pb --data-location=/dataset", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 /workspace/intelai_models/inference/int8/coco_int8.sh /in_graph/pretrained_int8_faster_rcnn_model.pb /dataset /workspace/models"}, + + { "_comment": "FP32 benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=-1 --benchmark-only --verbose --in-graph=/in_graph/pretrained_int8_faster_rcnn_model.pb --data-location=/dataset", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/int8/run_frozen_graph_rcnn.py -g /in_graph/pretrained_int8_faster_rcnn_model.pb -d /dataset --num-inter-threads 2 --num-intra-threads 56" + } +] + + diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_gnmt_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_gnmt_args.json new file mode 100644 index 000000000..7fe7db376 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_gnmt_args.json @@ -0,0 +1,11 @@ +[ + { "_comment": "FP32 latency benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=gnmt --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks 
--intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --infer_mode=beam_search", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/fp32/nmt.py --src=de --tgt=en --hparams_path=/workspace/intelai_models/fp32/standard_hparams/wmt16_gnmt_4_layer_internal.json --out_dir=/workspace/benchmarks/common/tensorflow/logs --vocab_prefix=/dataset/vocab.bpe.32000 --ckpt=/checkpoints/translate.ckpt --infer_batch_size=1 --inference_input_file=/dataset/newstest2015.tok.bpe.32000.de --inference_output_file=/workspace/benchmarks/common/tensorflow/logs/output_infer --inference_ref_file=/dataset/newstest2015.tok.bpe.32000.en --num_inter_threads=1 --num_intra_threads=28 --infer_mode=beam_search"}, + + { "_comment": "FP32 Throughput benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=gnmt --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=32 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --infer_mode=beam_search", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/fp32/nmt.py --src=de --tgt=en --hparams_path=/workspace/intelai_models/fp32/standard_hparams/wmt16_gnmt_4_layer_internal.json --out_dir=/workspace/benchmarks/common/tensorflow/logs --vocab_prefix=/dataset/vocab.bpe.32000 --ckpt=/checkpoints/translate.ckpt --infer_batch_size=32 --inference_input_file=/dataset/newstest2015.tok.bpe.32000.de --inference_output_file=/workspace/benchmarks/common/tensorflow/logs/output_infer --inference_ref_file=/dataset/newstest2015.tok.bpe.32000.en --num_inter_threads=1 --num_intra_threads=28 --infer_mode=beam_search"} +] + + diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_inception_resnet_v2_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_inception_resnet_v2_args.json new file mode 100644 index 000000000..c1a59e0b5 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_inception_resnet_v2_args.json @@ -0,0 +1,27 @@ +[ + { "_comment": "Int8 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/eval_image_classifier_accuracy.py --input_graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data_location=/dataset --input_height=299 --input_width=299 --num_inter_threads=2 --num_intra_threads=56 --output_layer=InceptionResnetV2/Logits/Predictions --batch_size=100"}, + + { "_comment": "Int8 command for latency benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose 
--in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --inter-op-parallelism-threads=1 --intra-op-parallelism-threads=28 --batch-size=1"}, + + { "_comment": "Int8 command for throughput benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --benchmark-only --verbose --in-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --data-location=/dataset", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_int8_pretrained_model.pb --inter-op-parallelism-threads=1 --intra-op-parallelism-threads=28 --batch-size=128"}, + + { "_comment": "Fp32 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --data-location=/dataset", + "output": "python /workspace/intelai_models/eval_image_classifier_accuracy.py --input_graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --data_location=/dataset --input_height=299 --input_width=299 --num_inter_threads=2 --num_intra_threads=56 --output_layer=InceptionResnetV2/Logits/Predictions --batch_size=100"}, + + { "_comment": "FP32 command for latency benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --inter-op-parallelism-threads=2 --intra-op-parallelism-threads=28 --batch-size=1"}, + + { "_comment": "FP32 command for throughput benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inception_resnet_v2 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/eval_image_classifier_benchmark.py --input-graph=/in_graph/inception_resnet_v2_fp32_pretrained_model.pb --inter-op-parallelism-threads=2 --intra-op-parallelism-threads=28 --batch-size=128"} +] + + diff --git 
a/tests/unit/common/tensorflow/tf_model_args/tf_inceptionv3_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_inceptionv3_args.json new file mode 100644 index 000000000..733b691ee --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_inceptionv3_args.json @@ -0,0 +1,44 @@ +[ + { "_comment": "Int8 accuracy command", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 100 --in-graph /final_int8_inceptionv3.pb --intelai-models . --accuracy-only --verbose", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python ./int8/accuracy.py --num_intra_threads=56 --num_inter_threads=2 --batch_size=100 --input_graph=/final_int8_inceptionv3.pb"}, + + { "_comment": "Int8 command for latency benchmark with default --num-inter-threads, --num-intra-threads.", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 1 --in-graph /final_int8_inceptionv3.pb --intelai-models . --benchmark-only --socket-id 0 --verbose", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./int8/benchmark.py --warmup_steps=10 --num_intra_threads=28 --num_inter_threads=1 --batch_size=1 --input_graph=/final_int8_inceptionv3.pb --steps=50 --num_cores=28"}, + + { "_comment": "Int8 command for throughput benchmark", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /final_int8_inceptionv3.pb --intelai-models . --benchmark-only --socket-id 0 --verbose", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python ./int8/benchmark.py --warmup_steps=10 --num_intra_threads=28 --num_inter_threads=1 --batch_size=128 --input_graph=/final_int8_inceptionv3.pb --steps=50 --num_cores=28"}, + + { "_comment": "Int8 command for throughput benchmark with --steps=200 --warmup-steps=20", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inceptionv3 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/inception_frozen_max_min.pb --steps=200 --warmup-steps=20", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/int8/benchmark.py --warmup_steps=20 --num_intra_threads=28 --num_inter_threads=1 --batch_size=128 --input_graph=/in_graph/inception_frozen_max_min.pb --steps=200 --num_cores=28"}, + + { "_comment": "Int8 command for latency benchmark with --steps=200 --warmup-steps=20", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=inceptionv3 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/inception_frozen_max_min.pb --steps=200 --warmup-steps=20", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python 
/workspace/intelai_models/int8/benchmark.py --warmup_steps=20 --num_intra_threads=28 --num_inter_threads=1 --batch_size=1 --input_graph=/in_graph/inception_frozen_max_min.pb --steps=200 --num_cores=28"}, + + { "_comment": "Int8 command for throughput benchmark with --disable-tcmalloc=True", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /final_int8_inceptionv3.pb --intelai-models . --benchmark-only --socket-id 0 --disable-tcmalloc=True", + "output": "numactl --cpunodebind=0 --membind=0 python ./int8/benchmark.py --warmup_steps=10 --num_intra_threads=28 --num_inter_threads=1 --batch_size=128 --input_graph=/final_int8_inceptionv3.pb --steps=50 --num_cores=28"}, + + { "_comment": "Fp32 accuracy command", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 100 --accuracy-only --data-location /dataset --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --verbose", + "output": "python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=2 --num-intra-threads=56 --num-cores=28 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only"}, + + { "_comment": "FP32 command for latency benchmark", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 1 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --socket-id 0 --verbose", + "output": "numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28" + }, + + { "_comment": "FP32 command for throughput benchmark", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . --socket-id 0 --verbose", + "output": "numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28"}, + + { "_comment": "FP32 command for throughput benchmark with --num-inter-threads 4 --num-intra-threads 16", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name inceptionv3 --batch-size 128 --in-graph /inceptionv3_fp32_pretrained_model.pb --intelai-models . 
--socket-id 0 --verbose --num-inter-threads 4 --num-intra-threads 16", + "output": "numactl --cpunodebind=0 --membind=0 python ./fp32/eval_image_classifier_inference.py --input-graph=/inceptionv3_fp32_pretrained_model.pb --num-inter-threads=4 --num-intra-threads=16 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28"} +] + + diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_inceptionv4_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_inceptionv4_args.json new file mode 100644 index 000000000..0535c2eef --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_inceptionv4_args.json @@ -0,0 +1,19 @@ +[ + { "_comment": "Int8 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/accuracy.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28 --data_location=/dataset"}, + + { "_comment": "Int8 command for latency benchmark with default --num-inter-threads, --num-intra-threads.", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/benchmark.py --batch_size=1 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28"}, + + { "_comment": "Int8 command for throughput benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=int8 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/inceptionv4_int8_pretrained_model.pb --data-location=/dataset", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/benchmark.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_int8_pretrained_model.pb --num_intra_threads=28"}, + + { "_comment": "Fp32 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --precision=fp32 --mode=inference --model-name=inceptionv4 --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/inceptionv4_fp32_pretrained_model.pb --data-location=/dataset", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/accuracy.py --batch_size=240 --num_inter_threads=2 --input_graph=/in_graph/inceptionv4_fp32_pretrained_model.pb --num_intra_threads=28 --data_location=/dataset"} +] + + diff --git 
a/tests/unit/common/tensorflow/tf_model_args/tf_lm_1b_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_lm_1b_args.json new file mode 100644 index 000000000..26d11e1c3 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_lm_1b_args.json @@ -0,0 +1,7 @@ +[ + { "_comment": "FP32 benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=language_modeling --model-name=lm-1b --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/benchmark.py -b=1 -I=100 --inter=1 --intra=28"} +] + + diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_maskrcnn_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_maskrcnn_args.json new file mode 100644 index 000000000..5900877d3 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_maskrcnn_args.json @@ -0,0 +1,11 @@ +[ + { "_comment": "FP32 benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=maskrcnn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --data-location=/dataset", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/coco.py evaluate --dataset=/dataset --num_inter_threads 1 --num_intra_threads 28 --nw 5 --nb 50 --model=coco --infbs 1"}, + + { "_comment": "FP32 benchmark with --num-inter-threads 4 --num-intra-threads 16", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=maskrcnn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --data-location=/dataset --num-inter-threads 4 --num-intra-threads 16", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/coco.py evaluate --dataset=/dataset --num_inter_threads 4 --num_intra_threads 16 --nw 5 --nb 50 --model=coco --infbs 1"} +] + + diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_mobilenet_v1_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_mobilenet_v1_args.json new file mode 100644 index 000000000..c98ada086 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_mobilenet_v1_args.json @@ -0,0 +1,36 @@ +[ + { "_comment": "FP32 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --accuracy-only --verbose --checkpoint=/checkpoints --in-graph=/in_graph/mobilenet_v1_1.0_224_frozen.pb --data-location=/dataset", + "output": "python /workspace/intelai_models/inference/fp32/accuracy.py --batch_size=100 --num_inter_threads=2 --input_graph=/in_graph/mobilenet_v1_1.0_224_frozen.pb --num_intra_threads=56 --data_location=/dataset"}, + + { "_comment": "FP32 latency benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models 
--intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset", + "output": "numactl --cpunodebind=0 -l python /workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_dir /dataset --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 1"}, + + { "_comment": "FP32 throughput benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset", + "output": "numactl --cpunodebind=0 -l python /workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_dir /dataset --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 100"}, + + { "_comment": "FP32 benchmark command with dummy data and --output-dir specified", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --checkpoint=/checkpoints", + "output": "numactl --cpunodebind=0 -l python /workspace/intelai_models/inference/fp32/eval_image_classifier.py --dataset_name imagenet --checkpoint_path /checkpoints --dataset_split_name=validation --clone_on_cpu=True --model_name mobilenet_v1 --inter_op_parallelism_threads 2 --intra_op_parallelism_threads 28 --batch_size 100"}, + + { "_comment": "Int8 command for throughput benchmark with --number-of-steps enabled.", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=faster_rcnn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=-1 --benchmark-only --verbose --in-graph=/in_graph/pretrained_int8_faster_rcnn_model.pb --data-location=/dataset --number-of-steps=500", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/int8/run_frozen_graph_rcnn.py -g /in_graph/pretrained_int8_faster_rcnn_model.pb -n 500 -d /dataset --num-inter-threads 2 --num-intra-threads 56"}, + + { "_comment": "Int8 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --data-location=/dataset --input_height=224 --input_width=224", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python 
/workspace/intelai_models/inference/int8/accuracy.py --input_height=224 --input_width=224 --num_intra_threads=28 --output_layer=MobilenetV1/Predictions/Reshape_1 --num_inter_threads=1 --batch_size=100 --input_graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --data_location=/dataset --input_layer=input"}, + + { "_comment": "Int8 latency benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_height=224 --input_width=224 --warmup_steps=10 --steps=50", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/benchmark.py --input_height=224 --input_width=224 --warmup_steps=10 --num_intra_threads=28 --output_layer=MobilenetV1/Predictions/Reshape_1 --num_inter_threads=1 --batch_size=1 --input_graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_layer=input --steps=50"}, + + + { "_comment": "Int8 throughput benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=mobilenet_v1 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=240 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_height=224 --input_width=224 --warmup_steps=10 --steps=50", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/benchmark.py --input_height=224 --input_width=224 --warmup_steps=10 --num_intra_threads=28 --output_layer=MobilenetV1/Predictions/Reshape_1 --num_inter_threads=1 --batch_size=240 --input_graph=/in_graph/models_mobilenetv1_int8_pretrained_model.pb --input_layer=input --steps=50"} +] + + diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_mtcc_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_mtcc_args.json new file mode 100644 index 000000000..b0093db93 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_mtcc_args.json @@ -0,0 +1,5 @@ +[ + { "_comment": "FP32 benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=face_detection_and_alignment --model-name=mtcc --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/one_image_test.py --num_inter_threads 1 --num_intra_threads 28 -ckpt /checkpoints -dl /dataset"} +] diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_ncf_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_ncf_args.json new file mode 100644 index 000000000..67fa8402c --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_ncf_args.json @@ -0,0 
+1,15 @@ +[ + { "_comment": "FP32 latency benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=1 --inference_only --benchmark_only"}, + + { "_comment": "Fp32 accuracy", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=256 --socket-id 0 --accuracy-only --verbose --checkpoint=/checkpoints --data-location=/dataset", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=256 --inference_only --accuracy_only"}, + + { "_comment": "FP32 Throughput benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=ncf --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=256 --socket-id 0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/ncf_main.py --data_dir=/dataset --model_dir=/checkpoints --intra_op_parallelism_threads=28 --inter_op_parallelism_threads=1 --batch_size=256 --inference_only --benchmark_only"} +] + + diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_resnet101_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_resnet101_args.json new file mode 100644 index 000000000..4c9132a79 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_resnet101_args.json @@ -0,0 +1,17 @@ +[ + { "_comment": "FP32 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --accuracy-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb --data-location=/dataset", + "output": "python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=128 --num-inter-threads=2 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=56 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only"}, + + { "_comment": "FP32 command for throughput benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=128 --num-inter-threads=1 
--input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=28 --warmup-steps=10 --steps=50"}, + + { "_comment": "Int8 latency benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/resnet101_int8_model.pb", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=1 --num-intra-threads=28 --num-inter-threads=1 --input-graph=/in_graph/resnet101_int8_model.pb --warmup-steps=40 --steps=100"}, + + { "_comment": "FP32 command for latency benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet101 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id 0 --benchmark-only --verbose --in-graph=/in_graph/resnet101_fp32_model.pb", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --batch-size=1 --num-inter-threads=1 --input-graph=/in_graph/resnet101_fp32_model.pb --num-intra-threads=28 --warmup-steps=10 --steps=50"} +] diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_resnet50_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_resnet50_args.json new file mode 100644 index 000000000..199ae2c0f --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_resnet50_args.json @@ -0,0 +1,40 @@ +[ + { "_comment": "FP32 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --batch-size 100 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/freezed_resnet50.pb --accuracy-only --data-location=/dataset", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=100 --data-location=/dataset --accuracy-only --num-cores=28 --warmup-steps=10 --steps=50"}, + + { "_comment": "FP32 command for latency benchmark with default --num-inter-threads, --num-intra-threads.", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 128 --in-graph /freezed_resnet50.pb --intelai-models . --socket-id 0 --verbose", + "output": "numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28"}, + + { "_comment": "FP32 command for latency benchmark with --num-inter-threads 4 --num-intra-threads 16", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 1 --in-graph /freezed_resnet50.pb --intelai-models . 
--socket-id 0 --verbose --num-inter-threads 4 --num-intra-threads 16", + "output": "numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=4 --num-intra-threads=16 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28"}, + + { "_comment": "FP32 command for throughput benchmark with --num-inter-threads=1 --num-intra-threads=28", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50 --batch-size 128 --in-graph /freezed_resnet50.pb --intelai-models . --socket-id 0 --verbose", + "output": "numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28"}, + + { "_comment": "Int8 command for throughput benchmark with --output-dir enabled.", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/resnet50_int8_pretrained_model.pb --steps=200 --warmup-steps=20", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/resnet50_int8_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=20 --steps=200"}, + + { "_comment": "Int8 command for data calibration with --calibration-only", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/resnet50_int8_pretrained_model.pb --data-location=/dataset --calibration-only", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/int8/generate_calibration_data.py --num_intra_threads=56 --num_inter_threads=2 --batch_size=100 --input_graph=/in_graph/resnet50_int8_pretrained_model.pb --data_location=/dataset"}, + + { "_comment": "Fp32 command for throughput benchmark with --output-results enabled.", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --output-results --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/resnet50_fp32_pretrained_model.pb --data-location=/dataset", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/resnet50_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --num-cores=28 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --results-file-path 
/workspace/benchmarks/common/tensorflow/logs/resnet50_fp32_inference_results*.txt"}, + + { "_comment": "Int8 accuracy command", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name resnet50 --batch-size 100 --data-location /dataset --in-graph /final_int8_resnet50.pb --intelai-models . --accuracy-only --verbose", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python ./inference/eval_image_classifier_inference.py --input-graph=/final_int8_resnet50.pb --num-inter-threads=2 --num-intra-threads=56 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only"}, + + { "_comment": "Int8 command for throughput benchmark with --steps=200 --warmup-steps=20", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/resnet50_int8_pretrained_model.pb --steps=200 --warmup-steps=20", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/resnet50_int8_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=20 --steps=200" + } +] + + diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_resnet50v1_5_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_resnet50v1_5_args.json new file mode 100644 index 000000000..271813ed7 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_resnet50v1_5_args.json @@ -0,0 +1,40 @@ +[ + { "_comment": "FP32 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50v1_5 --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --batch-size 100 --socket-id 0 --accuracy-only --verbose --in-graph=/in_graph/freezed_resnet50v1_5.pb --accuracy-only --data-location=/dataset", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/freezed_resnet50v1_5.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=100 --data-location=/dataset --accuracy-only --num-cores=28 --warmup-steps=10 --steps=50"}, + + { "_comment": "FP32 command for latency benchmark with default --num-inter-threads, --num-intra-threads.", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50v1_5 --batch-size 128 --in-graph /freezed_resnet50v1_5.pb --intelai-models . --socket-id 0 --verbose", + "output": "numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50v1_5.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28"}, + + { "_comment": "FP32 command for latency benchmark with --num-inter-threads 4 --num-intra-threads 16", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50v1_5 --batch-size 1 --in-graph /freezed_resnet50v1_5.pb --intelai-models . 
--socket-id 0 --verbose --num-inter-threads 4 --num-intra-threads 16", + "output": "numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50v1_5.pb --num-inter-threads=4 --num-intra-threads=16 --batch-size=1 --warmup-steps=10 --steps=50 --num-cores=28"}, + + { "_comment": "FP32 command for throughput benchmark with --num-inter-threads=1 --num-intra-threads=28", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name resnet50v1_5 --batch-size 128 --in-graph /freezed_resnet50v1_5.pb --intelai-models . --socket-id 0 --verbose", + "output": "numactl --cpunodebind=0 --membind=0 python ./inference/eval_image_classifier_inference.py --input-graph=/freezed_resnet50v1_5.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=10 --steps=50 --num-cores=28"}, + + { "_comment": "Int8 command for throughput benchmark with --output-dir enabled.", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50v1_5 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/resnet50v1_5_int8_pretrained_model.pb --steps=200 --warmup-steps=20", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/resnet50v1_5_int8_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=20 --steps=200"}, + + { "_comment": "Int8 command for data calibration with --calibration-only", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50v1_5 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=-1 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/resnet50v1_5_int8_pretrained_model.pb --data-location=/dataset --calibration-only", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/int8/generate_calibration_data.py --num_intra_threads=56 --num_inter_threads=2 --batch_size=100 --input_graph=/in_graph/resnet50v1_5_int8_pretrained_model.pb --data_location=/dataset"}, + + { "_comment": "Fp32 command for throughput benchmark with --output-results enabled.", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50v1_5 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=100 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --output-results --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/resnet50v1_5_fp32_pretrained_model.pb --data-location=/dataset", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/resnet50v1_5_fp32_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --num-cores=28 --batch-size=100 --warmup-steps=10 --steps=50 
--data-location=/dataset --results-file-path /workspace/benchmarks/common/tensorflow/logs/resnet50v1_5_fp32_inference_results*.txt"}, + + { "_comment": "Int8 accuracy command", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision int8 --mode inference --model-name resnet50v1_5 --batch-size 100 --data-location /dataset --in-graph /final_int8_resnet50v1_5.pb --intelai-models . --accuracy-only --verbose", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python ./inference/eval_image_classifier_inference.py --input-graph=/final_int8_resnet50v1_5.pb --num-inter-threads=2 --num-intra-threads=56 --batch-size=100 --warmup-steps=10 --steps=50 --data-location=/dataset --accuracy-only"}, + + { "_comment": "Int8 command for throughput benchmark with --steps=200 --warmup-steps=20", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_recognition --model-name=resnet50v1_5 --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=128 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/resnet50v1_5_int8_pretrained_model.pb --steps=200 --warmup-steps=20", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_image_classifier_inference.py --input-graph=/in_graph/resnet50v1_5_int8_pretrained_model.pb --num-inter-threads=1 --num-intra-threads=28 --batch-size=128 --warmup-steps=20 --steps=200" + } +] + + diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_rfcn_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_rfcn_args.json new file mode 100644 index 000000000..f8dc9b0a0 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_rfcn_args.json @@ -0,0 +1,17 @@ +[ + { "_comment": "FP32 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=rfcn --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --verbose --in-graph=/in_graph/frozen_inference_graph.pb --data-location=/dataset --accuracy-only --split=accuracy_message", + "output": "FROZEN_GRAPH=/in_graph/frozen_inference_graph.pb TF_RECORD_FILE=/dataset SPLIT=accuracy_message TF_MODELS_ROOT=/workspace/models /workspace/intelai_models/inference/fp32/coco_mAP.sh"}, + + { "_comment": "FP32 command for benchmark", + "input": "run_tf_benchmark.py --framework tensorflow --use-case object_detection --precision fp32 --mode inference --model-name rfcn --checkpoint /checkpoints --intelai-models . --model-source-dir . 
--socket-id 0 --verbose --config_file=rfcn_pipeline.config", + "output": "numactl --cpunodebind=0 --membind=0 python ./inference/fp32/eval.py --inter_op 1 --intra_op 28 --omp 28 --pipeline_config_path /checkpoints/rfcn_pipeline.config --checkpoint_dir /checkpoints --eval_dir ./research/object_detection/models/rfcn/eval --logtostderr --blocktime=0 --run_once=True"}, + + { "_comment": "Int8 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=rfcn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --verbose --in-graph=/in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb --data-location=/dataset --accuracy-only --split=accuracy_message", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 FROZEN_GRAPH=/in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb TF_RECORD_FILE=/dataset SPLIT=accuracy_message TF_MODELS_ROOT=/workspace/models /workspace/intelai_models/inference/int8/coco_mAP.sh"}, + + { "_comment": "Int8 benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=rfcn --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --verbose --in-graph=/in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb --data-location=/dataset --benchmark-only --number_of_steps=500", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/int8/run_rfcn_inference.py -m /workspace/models -g /in_graph/rfcn_resnet101_int8_coco_pretrained_model.pb -x 500 -d /dataset --num-inter-threads 2 --num-intra-threads 56"} +] diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_squeezenet_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_squeezenet_args.json new file mode 100644 index 000000000..9232b10fe --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_squeezenet_args.json @@ -0,0 +1,11 @@ +[ + { "_comment": "FP32 command for latency benchmark", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name squeezenet --batch-size 1 --checkpoint /checkpoints --intelai-models . --socket-id 0 --verbose", + "output": "taskset -c 0-27 python ./fp32/train_squeezenet.py --data_location None --batch_size 1 --num_inter_threads 1 --num_intra_threads 28 --model_dir /checkpoints --inference-only --verbose"}, + + { "_comment": "FP32 command for throughput benchmark", + "input": "run_tf_benchmark.py --framework tensorflow --use-case image_recognition --precision fp32 --mode inference --model-name squeezenet --batch-size 64 --checkpoint /checkpoints --intelai-models . 
--socket-id 0 --verbose", + "output": "taskset -c 0-27 python ./fp32/train_squeezenet.py --data_location None --batch_size 64 --num_inter_threads 1 --num_intra_threads 28 --model_dir /checkpoints --inference-only --verbose"} +] + + diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_ssd_mobilenet_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_ssd_mobilenet_args.json new file mode 100644 index 000000000..fc4a7b1d9 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_ssd_mobilenet_args.json @@ -0,0 +1,17 @@ +[ + { "_comment": "FP32 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=fp32 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 --accuracy-only --verbose --in-graph=/in_graph/frozen_inference_graph.pb --benchmark-dir=/workspace/benchmarks --data-location=/dataset", + "output": "sh /workspace/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/ssdmobilenet_accuracy.sh /in_graph/frozen_inference_graph.pb /dataset"}, + + { "_comment": "FP32 benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=-1 --socket-id=0 --benchmark-only --verbose --in-graph=/in_graph/frozen_inference_graph.pb --data-location=/dataset", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/infer_detections.py --input_tfrecord_paths=/dataset --output_tfrecord_path=/SSD-mobilenet-out.tfrecord --inference_graph=/in_graph/frozen_inference_graph.pb --discard_image_pixels=True --num_inter_threads=2 --num_intra_threads=28"}, + + { "_comment": "Int8 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb --data-location=/dataset", + "output": "sh /workspace/intelai_models/inference/int8/coco_int8.sh /in_graph/ssdmobilenet_int8_pretrained_model.pb /dataset"}, + + { "_comment": "Int8 benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-mobilenet --precision=int8 --mode=inference --model-source-dir=/workspace/models --intelai-models=/workspace/intelai_models --batch-size=1 --socket-id 0 --data-location=/dataset --verbose --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb --benchmark-only --in-graph=/in_graph/ssdmobilenet_int8_pretrained_model.pb", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/int8/run_frozen_graph_ssdmob.py -g /in_graph/ssdmobilenet_int8_pretrained_model.pb -n 5000 -d /dataset -x --num-inter-threads 2 --num-intra-threads 28"} +] diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_ssd_resnet34_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_ssd_resnet34_args.json new file mode 
100644 index 000000000..0aa2ca495 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_ssd_resnet34_args.json @@ -0,0 +1,11 @@ +[ + { "_comment": "FP32 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-resnet34 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssd_resnet34_bs1.pb --data-location=/dataset", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/infer_detections.py --input-graph /in_graph/ssd_resnet34_bs1.pb --batch-size 1 --inter-op-parallelism-threads 1 --intra-op-parallelism-threads 28 --accuracy-only --data-location /dataset"}, + + { "_comment": "FP32 benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd-resnet34 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssd_resnet34_bs1.pb --data-location=/dataset", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/fp32/infer_detections.py --input-graph /in_graph/ssd_resnet34_bs1.pb --batch-size 1 --inter-op-parallelism-threads 1 --intra-op-parallelism-threads 28"} +] + + diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_ssd_vgg16_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_ssd_vgg16_args.json new file mode 100644 index 000000000..37d478e1f --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_ssd_vgg16_args.json @@ -0,0 +1,17 @@ +[ + { "_comment": "FP32 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd_vgg16 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=-1 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdvgg16_fp32_pretrained_model.pb --data-location=/dataset", + "output": "python /workspace/intelai_models/inference/eval_ssd.py --input-graph=/in_graph/ssdvgg16_fp32_pretrained_model.pb --num-inter-threads=2 --num-intra-threads=56 --data-location=/dataset --accuracy-only"}, + + { "_comment": "FP32 benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd_vgg16 --precision=fp32 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --data-location=/dataset --num-inter-threads=11 --num-intra-threads=21 --data-num-inter-threads=21 --data-num-intra-threads=28 --steps=500 --warmup-steps=100", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_ssd.py --input-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --num-inter-threads=11 
--num-intra-threads=21 --data-num-inter-threads=21 --data-num-intra-threads=28 --warmup-steps=100 --steps=500 --data-location=/dataset"}, + + { "_comment": "Int8 accuracy command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd_vgg16 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=-1 --output-dir=/workspace/benchmarks/common/tensorflow/logs --accuracy-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --data-location=/dataset", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 python /workspace/intelai_models/inference/eval_ssd.py --input-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --num-inter-threads=2 --num-intra-threads=56 --data-location=/dataset --accuracy-only"}, + + { "_comment": "Int8 benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=object_detection --model-name=ssd_vgg16 --precision=int8 --mode=inference --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --model-source-dir=/workspace/models --in-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --data-location=/dataset --num-inter-threads=11 --num-intra-threads=21 --data-num-inter-threads=21 --data-num-intra-threads=28 --steps=500 --warmup-steps=100", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/eval_ssd.py --input-graph=/in_graph/ssdvgg16_int8_pretrained_model.pb --num-inter-threads=11 --num-intra-threads=21 --data-num-inter-threads=21 --data-num-intra-threads=28 --warmup-steps=100 --steps=500 --data-location=/dataset"} +] diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_transformer_language_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_transformer_language_args.json new file mode 100644 index 000000000..bf5759531 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_transformer_language_args.json @@ -0,0 +1,14 @@ +[ + { "_comment": "FP32 latency benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_language --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --decode_from_file=/checkpoints/newstest2015.en --reference=/checkpoints/newstest2015.de", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/tensor2tensor/bin/t2t_decoder.py --problem=translate_ende_wmt32k --model=transformer --hparams_set=transformer_base_single_gpu --decode_hparams=beam_size=4,alpha=0.6,batch_size=1 --data_dir=/dataset --output_dir=/checkpoints --decode_from_file=/checkpoints/newstest2015.en --decode_to_file=/workspace/models/out_dir/output_infer --inter_op_parallelism_threads=1 --intra_op_parallelism_threads=28"}, + + { "_comment": "Fp32 throughput", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_language --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks 
--intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=32 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --decode_from_file=/checkpoints/newstest2015.en --reference=/checkpoints/newstest2015.de", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/tensor2tensor/bin/t2t_decoder.py --problem=translate_ende_wmt32k --model=transformer --hparams_set=transformer_base_single_gpu --decode_hparams=beam_size=4,alpha=0.6,batch_size=32 --data_dir=/dataset --output_dir=/checkpoints --decode_from_file=/checkpoints/newstest2015.en --decode_to_file=/workspace/models/out_dir/output_infer --inter_op_parallelism_threads=1 --intra_op_parallelism_threads=28" + }, + { "_comment": "Fp32 benchmarking with no reference file", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_language --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=32 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --data-location=/dataset --decode_from_file=/checkpoints/newstest2015.en", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/tensor2tensor/bin/t2t_decoder.py --problem=translate_ende_wmt32k --model=transformer --hparams_set=transformer_base_single_gpu --decode_hparams=beam_size=4,alpha=0.6,batch_size=32 --data_dir=/dataset --output_dir=/checkpoints --decode_from_file=/checkpoints/newstest2015.en --decode_to_file=/workspace/models/out_dir/output_infer --inter_op_parallelism_threads=1 --intra_op_parallelism_threads=28" + } +] diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_transformer_lt_official_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_transformer_lt_official_args.json new file mode 100644 index 000000000..079f99abd --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_transformer_lt_official_args.json @@ -0,0 +1,9 @@ +[ + { "_comment": "FP32 latency benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_lt_official --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --file=/dataset/newstest2014.en --reference=/dataset/newstest2014.de --vocab_file=/dataset/vocab.txt --in_graph=/in_graph/fp32_graphdef.pb", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=1 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt --num_inter=1 --num_intra=28"}, + + { "_comment": "FP32 throughput benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=language_translation --model-name=transformer_lt_official --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=64 --socket-id=0 --benchmark-only --verbose --file=/dataset/newstest2014.en --reference=/dataset/newstest2014.de --vocab_file=/dataset/vocab.txt --in_graph=/in_graph/fp32_graphdef.pb", + "output": "numactl --cpunodebind=0 --membind=0 python 
/workspace/models/official/transformer/infer_ab.py --param_set=big --in_graph=/in_graph/fp32_graphdef.pb --batch_size=64 --file=/dataset/newstest2014.en --file_out=/models/benchmarks/common/tensorflow/logs/translate.txt --vocab_file=/dataset/vocab.txt --num_inter=1 --num_intra=28"} +] diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_unet_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_unet_args.json new file mode 100644 index 000000000..cbbe2f3f4 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_unet_args.json @@ -0,0 +1,7 @@ +[ + { "_comment": "FP32 benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=image_segmentation --model-name=unet --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --benchmark-only --verbose --checkpoint=/checkpoints --checkpoint_name=model.ckpt", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/benchmarks/image_segmentation/tensorflow/unet/inference/fp32/unet_infer.py -bs 1 -cp /checkpoints/model.ckpt --num_inter_threads 1 --num_intra_threads 28 -nw 80 -nb 400"} +] + + diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_wavenet_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_wavenet_args.json new file mode 100644 index 000000000..49ea2e09e --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_wavenet_args.json @@ -0,0 +1,7 @@ +[ + { "_comment": "FP32 benchmark command", + "input": "run_tf_benchmark.py --framework tensorflow --use-case text_to_speech --precision fp32 --mode inference --model-name wavenet --num-cores 1 --checkpoint /checkpoints --intelai-models . --model-source-dir . --socket-id 0 --verbose --checkpoint_name=model.ckpt-99 --sample=8510", + "output": "numactl --physcpubind=0-0 --membind=0 python generate.py /checkpoints/model.ckpt-99 --num_inter_threads=1 --num_intra_threads=1 --sample=8510"} +] + + diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_wide_deep_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_wide_deep_args.json new file mode 100644 index 000000000..64fddac5b --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_wide_deep_args.json @@ -0,0 +1,5 @@ +[ + { "_comment": "FP32 benchmark", + "input": "run_tf_benchmark.py --framework tensorflow --use-case recommendation --precision fp32 --mode inference --model-name wide_deep --batch-size 1024 --data-location /dataset --checkpoint /checkpoints --intelai-models . 
--verbose", + "output": "OMP_NUM_THREADS=1 numactl --cpunodebind=0 --membind=0 python inference/fp32/wide_deep_inference.py --data_dir=/dataset --model_dir=/checkpoints --batch_size=1024"} +] diff --git a/tests/unit/common/tensorflow/tf_model_args/tf_wide_deep_large_ds_args.json b/tests/unit/common/tensorflow/tf_model_args/tf_wide_deep_large_ds_args.json new file mode 100644 index 000000000..3d2297515 --- /dev/null +++ b/tests/unit/common/tensorflow/tf_model_args/tf_wide_deep_large_ds_args.json @@ -0,0 +1,27 @@ +[ + { "_comment": "Int8 benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_int8_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --input_graph=/in_graph/wide_deep_int8_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14"}, + + { "_comment": "Int8 latency benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_int8_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=1 --input_graph=/in_graph/wide_deep_int8_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14"}, + + { "_comment": "Int8 command for throughput benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=int8 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=512 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_int8_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14", + "output": "LD_PRELOAD=/usr/lib/libtcmalloc.so.4.2.6 numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=512 --input_graph=/in_graph/wide_deep_int8_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14"}, + + { "_comment": "FP32 benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose 
--in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14"}, + + { "_comment": "Fp32 command for throughput benchmark", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=512 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=512 --input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14"}, + + { "_comment": "Fp32 latency benchmark command", + "input": "run_tf_benchmark.py --framework=tensorflow --use-case=recommendation --model-name=wide_deep_large_ds --precision=fp32 --mode=inference --model-source-dir=/workspace/models --benchmark-dir=/workspace/benchmarks --intelai-models=/workspace/intelai_models --num-cores=-1 --batch-size=1 --socket-id=0 --output-dir=/workspace/benchmarks/common/tensorflow/logs --benchmark-only --verbose --in-graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data-location=/dataset --num-parallel-batches=14", + "output": "numactl --cpunodebind=0 --membind=0 python /workspace/intelai_models/inference/inference.py --num_intra_threads=1 --num_inter_threads=28 --batch_size=1 --input_graph=/in_graph/wide_deep_fp32_pretrained_model.pb --data_location=/dataset --num_parallel_batches=14"} +] + + diff --git a/tests/unit/common/test_base_model_init.py b/tests/unit/common/test_base_model_init.py index 43f3076f1..7a376fb35 100644 --- a/tests/unit/common/test_base_model_init.py +++ b/tests/unit/common/test_base_model_init.py @@ -17,8 +17,22 @@ # # SPDX-License-Identifier: EPL-2.0 # - +from contextlib import contextmanager import os +import pytest +import sys +import tempfile + +try: + # python 2 + from cStringIO import StringIO +except ImportError: + # python 3 + # only supports unicode so can't be used in python 2 for sys.stdout + # because (from `print` documentation) + # "All non-keyword arguments are converted to strings like str() does" + from io import StringIO + from mock import MagicMock, patch @@ -26,6 +40,27 @@ from benchmarks.common.base_model_init import set_env_var +@contextmanager +def catch_stdout(): + _stdout = sys.stdout + sys.stdout = caught_output = StringIO() + try: + yield caught_output + finally: + sys.stdout = _stdout + caught_output.close() + + +@pytest.fixture +def mock_json(patch): + return patch('json') + + +@pytest.fixture +def mock_glob(patch): + return patch('glob.glob') + + # Example args and output strings for testing mocks test_model_name = "resnet50" test_framework = "tensorflow" @@ -109,3 +144,101 @@ def test_env_var_not_already_set(): finally: if os.environ.get(env_var): del os.environ[env_var] + + +def test_set_kmp_vars_config_json_does_not_exists(): + """Test config.json does not exist""" + # Setup base model init with test settings 
+ platform_util = MagicMock() + args = MagicMock(verbose=True, model_name=test_model_name) + os.environ["PYTHON_EXE"] = "python" + base_model_init = BaseModelInitializer(args, [], platform_util) + + config_file_path = '/test/foo/config.json' + + with catch_stdout() as caught_output: + base_model_init.set_kmp_vars(config_file_path) + output = caught_output.getvalue() + + assert "Warning: File {} does not exist and \ + cannot be used to set KMP environment variables".format(config_file_path) == output.strip() + + +def test_set_kmp_vars_config_json_exists(mock_json): + """Test set_kmp_vars when config.json exists""" + # Setup base model init with test settings + platform_util = MagicMock() + args = MagicMock(verbose=True, model_name=test_model_name) + os.environ["PYTHON_EXE"] = "python" + base_model_init = BaseModelInitializer(args, [], platform_util) + + file_descriptor, config_file_path = tempfile.mkstemp(suffix=".json") + + base_model_init.set_kmp_vars(config_file_path) + + +@pytest.mark.parametrize('precision', ['int8']) +def test_command_prefix_tcmalloc_int8(precision, mock_glob): + """ For Int8 models, TCMalloc should be enabled by default and models should include + LD_PRELOAD in the command prefix, unless disable_tcmalloc=True is set """ + platform_util = MagicMock() + args = MagicMock(verbose=True, model_name=test_model_name) + test_tcmalloc_lib = "/usr/lib/libtcmalloc.so.4.2.6" + mock_glob.return_value = [test_tcmalloc_lib] + os.environ["PYTHON_EXE"] = "python" + args.socket_id = 0 + args.precision = precision + + # If tcmalloc is not disabled, we should have LD_PRELOAD in the prefix + args.disable_tcmalloc = False + base_model_init = BaseModelInitializer(args, [], platform_util) + command_prefix = base_model_init.get_command_prefix(args.socket_id) + assert "LD_PRELOAD={}".format(test_tcmalloc_lib) in command_prefix + assert "numactl --cpunodebind=0 --membind=0" in command_prefix + + # If tcmalloc is disabled, LD_PRELOAD should not be in the prefix + args.disable_tcmalloc = True + base_model_init = BaseModelInitializer(args, [], platform_util) + command_prefix = base_model_init.get_command_prefix(args.socket_id) + assert "LD_PRELOAD={}".format(test_tcmalloc_lib) not in command_prefix + assert "numactl --cpunodebind=0 --membind=0" in command_prefix + + # If numactl is set to false, we should not have numactl in the prefix + args.disable_tcmalloc = False + base_model_init = BaseModelInitializer(args, [], platform_util) + command_prefix = base_model_init.get_command_prefix(args.socket_id, numactl=False) + assert "LD_PRELOAD={}".format(test_tcmalloc_lib) in command_prefix + assert "numactl" not in command_prefix + + +@pytest.mark.parametrize('precision', ['fp32']) +def test_command_prefix_tcmalloc_fp32(precision, mock_glob): + """ FP32 models should have TCMalloc disabled by default, but models should + include LD_PRELOAD in the command prefix if disable_tcmalloc=False is explicitly set. 
""" + platform_util = MagicMock() + args = MagicMock(verbose=True, model_name=test_model_name) + test_tcmalloc_lib = "/usr/lib/libtcmalloc.so.4.2.6" + mock_glob.return_value = [test_tcmalloc_lib] + os.environ["PYTHON_EXE"] = "python" + args.socket_id = 0 + args.precision = precision + + # By default, TCMalloc should not be used + base_model_init = BaseModelInitializer(args, [], platform_util) + command_prefix = base_model_init.get_command_prefix(args.socket_id) + assert "LD_PRELOAD={}".format(test_tcmalloc_lib) not in command_prefix + assert "numactl --cpunodebind=0 --membind=0" in command_prefix + + # If tcmalloc is disabled, LD_PRELOAD shouild not be in the prefix + args.disable_tcmalloc = False + base_model_init = BaseModelInitializer(args, [], platform_util) + command_prefix = base_model_init.get_command_prefix(args.socket_id) + assert "LD_PRELOAD={}".format(test_tcmalloc_lib) in command_prefix + assert "numactl --cpunodebind=0 --membind=0" in command_prefix + + # If numactl is set to false, we should not have numactl in the prefix + args.disable_tcmalloc = True + base_model_init = BaseModelInitializer(args, [], platform_util) + command_prefix = base_model_init.get_command_prefix(args.socket_id, numactl=False) + assert "LD_PRELOAD={}".format(test_tcmalloc_lib) not in command_prefix + assert "numactl" not in command_prefix diff --git a/tests/unit/common/utils/test_validators.py b/tests/unit/common/utils/test_validators.py index 369ddfd76..2f590a23e 100644 --- a/tests/unit/common/utils/test_validators.py +++ b/tests/unit/common/utils/test_validators.py @@ -26,7 +26,7 @@ from common.utils.validators import (check_for_link, check_no_spaces, check_positive_number, check_positive_number_or_equal_to_negative_one, check_valid_filename, - check_valid_folder, check_valid_file_or_dir) + check_valid_folder, check_valid_file_or_dir, check_volume_mount) @pytest.fixture() @@ -152,3 +152,28 @@ def test_check_valid_file_or_dir(mock_link, mock_exists): def test_check_valid_file_or_dir_bad(): with pytest.raises(ArgumentTypeError): check_valid_file_or_dir('3245jlnsdfnsfd234ofds') + + +@pytest.mark.parametrize("volume_mount_str", + ["foo", + "foo:foo:foo:foo", + "foo,foo"]) +def test_bad_volume_mount_strings(volume_mount_str): + with pytest.raises(ArgumentTypeError): + check_volume_mount(volume_mount_str) + + +def test_valid_volume_mount(): + # create temp directory + temp_dir = tempfile.mkdtemp() + + try: + # test string that mounts local directory with mount path + volume_mount = temp_dir + ":/mount_path" + check_volume_mount(volume_mount) + + # test string that mounts local directory with mount path and specifies read only + volume_mount = temp_dir + ":/mount_path:ro" + check_volume_mount(volume_mount) + finally: + os.rmdir(temp_dir) diff --git a/tests/unit/test_launch_benchmark.py b/tests/unit/test_launch_benchmark.py index 608adc464..6145cf614 100644 --- a/tests/unit/test_launch_benchmark.py +++ b/tests/unit/test_launch_benchmark.py @@ -37,6 +37,9 @@ test_docker_image = "foo" test_batch_size = "100" test_num_cores = "1" +# need a valid file for tests to work, see conftest.py for where this is managed +test_input_graph = "test.pb" +test_tfserving_framework = "tensorflow_serving" @pytest.fixture @@ -66,8 +69,35 @@ def mock_system_platform(patch): return patch("common.base_benchmark_util.platform_util.system_platform") +@pytest.fixture +def mock_path_exists(patch): + return patch("os.path.exists", MagicMock(return_value=True)) + + +@pytest.fixture +def mock_isfile(patch): + return patch("os.path.isfile", 
MagicMock(return_value=True)) + + +@pytest.fixture +def mock_isdir(patch): + return patch("os.path.isdir", MagicMock(return_value=True)) + + +@pytest.fixture +def mock_islink(patch): + return patch("os.path.islink", MagicMock(return_value=False)) + + +@pytest.fixture +def mock_stat(patch): + stat = MagicMock() + stat.return_value.st_nlink = 0 + return patch("os.stat", stat) + + @pytest.fixture(autouse=True) -def launch_benchmark(mock_platform_util, request): +def launch_benchmark(mock_platform_util, request, mock_isdir, mock_isfile, mock_islink, mock_stat, mock_path_exists): """sets up launch_benchmark obj for every test case and handles catching errors if we wanna test that To catch errors called when running launch_benchmark, use something like: ['catch_error', SystemExit, [{args}], {error_message}] in parametrize @@ -113,8 +143,10 @@ def launch_benchmark(mock_platform_util, request): req_args = request.param[2] error_message = request.param[3] if len(request.param) == 4 else '' else: + # add extra arguments to the default ones when calling LaunchBenchmark req_args = request.param + example_req_args else: + # only use default arguments when calling LaunchBenchmark req_args = example_req_args with mock_patch.object(sys, "argv", ['run_tf_benchmark.py'] + req_args): @@ -167,7 +199,14 @@ def test_launch_benchmark_parse_unknown_args(launch_benchmark): "--accuracy-only", "--output-results"], "--output-results can only be used when running " - "inference with a dataset"] + "inference with a dataset"], + ['catch_error_override_all_params', SystemExit, + ["--model-name", test_model_name, + "--framework", test_framework, + "--mode", test_mode, + "--precision", test_precision, + "--volume", "~:test"], + "Volume mounts can only be used when running in a docker container"] ], indirect=True) def test_launch_benchmark_parse_bad_args(launch_benchmark): """ @@ -216,3 +255,76 @@ def test_bare_metal(launch_benchmark, mock_popen): # ensure env vars are set assert os.environ["TEST_ENV_VAR_1"] == test_env_vars["TEST_ENV_VAR_1"] assert os.environ["TEST_ENV_VAR_2"] == test_env_vars["TEST_ENV_VAR_2"] + + +@pytest.mark.parametrize('launch_benchmark', [["--in-graph", test_input_graph]], indirect=True) +def test_launch_benchmark_tensorflow_serving_framework(launch_benchmark, mock_popen): + """ + Tests that the launch script works for tensorflow serving framework + """ + test_env_vars = {"TEST_ENV_VAR_1": "a", "TEST_ENV_VAR_2": "b"} + # Override framework and docker image. 
+ launch_benchmark.args.framework = test_tfserving_framework + launch_benchmark.args.docker_image = None + launch_benchmark.run_bare_metal("/foo", "/bar", test_env_vars) + assert mock_popen.called + args, kwargs = mock_popen.call_args + + assert launch_benchmark.args.input_graph == test_input_graph + assert launch_benchmark.args.framework == test_tfserving_framework + + # make sure that the start script is run + assert "bash" == args[0][0] + assert "start.sh" in args[0][1] + + # ensure env vars are set + assert os.environ["TEST_ENV_VAR_1"] == test_env_vars["TEST_ENV_VAR_1"] + assert os.environ["TEST_ENV_VAR_2"] == test_env_vars["TEST_ENV_VAR_2"] + + +def test_help(mock_platform_util, capsys): + """ Tests `launch_benchmark.py --help` output and ensures there is no error """ + with mock_patch.object(sys, 'argv', ["launch_benchmark.py", "--help"]): + with pytest.raises(SystemExit) as e: + LaunchBenchmark(mock_platform_util) + assert e.value.code == 0 + + # get the stdout and check the output + captured = capsys.readouterr() + assert "usage: launch_benchmark.py [-h] " in captured.out + + # check for an arg that is only in launch_benchmark.py + assert "--docker-image DOCKER_IMAGE" in captured.out + + # check for an arg that's in base_benchmark_util.py + assert "-f FRAMEWORK, --framework FRAMEWORK" in captured.out + + # make sure there were no errors printed + assert "error" not in captured.out.lower() + + +def test_launch_benchmark_custom_volume(launch_benchmark, mock_popen): + """ + Verifies the docker run command includes custom volumes + """ + custom_volumes = ["~:/foo1", "~:/foo2"] + launch_benchmark.args.custom_volumes = custom_volumes + launch_benchmark.main() + assert mock_popen.called + args, _ = mock_popen.call_args + # convert the run command args to a string and then check for the custom volume mounts + docker_run_cmd = " ".join(args[0]) + for custom_volume in custom_volumes: + assert "--volume {}".format(custom_volume) in docker_run_cmd + + +@pytest.mark.parametrize("precision,expected_disable_tcmalloc", [["int8", "False"], + ["fp32", "True"]]) +def test_disable_tcmalloc(launch_benchmark, mock_popen, precision, expected_disable_tcmalloc): + launch_benchmark.args.precision = precision + launch_benchmark.main() + assert mock_popen.called + args, _ = mock_popen.call_args + # convert the run command args to a string and then check for the DISABLE_TCMALLOC env var + docker_run_cmd = " ".join(args[0]) + assert "--env DISABLE_TCMALLOC={}".format(expected_disable_tcmalloc) in docker_run_cmd diff --git a/tox.ini b/tox.ini index 90ac004f4..20ae07d16 100644 --- a/tox.ini +++ b/tox.ini @@ -37,6 +37,7 @@ omit = .tox/* .pytest_cache/* __pycache__/* + benchmarks/image_recognition/tensorflow_serving/* benchmarks/image_segmentation/tensorflow/unet/inference/fp32/unet_infer.py benchmarks/object_detection/tensorflow/ssd-mobilenet/inference/fp32/infer_detections.py benchmarks/recommendation/tensorflow/wide_deep/inference/fp32/data_download.py