From 275ae9b911d33a5830b5eed089113f89582f0b7a Mon Sep 17 00:00:00 2001 From: shawn Date: Wed, 31 Jan 2024 01:09:45 +0000 Subject: [PATCH 01/40] Added starter files --- configs/fine-tune-llm/aws.yaml | 3 + configs/fine-tune-llm/gce.yaml | 10 ++++ templates/fine-tune-llm/README.md | 18 ++++++ .../deepspeed_configs/zero_3_llama_2_13b.json | 35 ++++++++++++ .../deepspeed_configs/zero_3_llama_2_70b.json | 28 ++++++++++ .../deepspeed_configs/zero_3_llama_2_7b.json | 35 ++++++++++++ templates/fine-tune-llm/job_configs/aws.yaml | 55 +++++++++++++++++++ .../llama-2-13b-4k-4xg5_12xlarge.yaml | 16 ++++++ .../llama-2-13b-4k-4xg5_48xlarge.yaml | 17 ++++++ .../llama-2-70b-2k-4xg5_48xlarge.yaml | 17 ++++++ .../llama-2-7b-512-16xg5_4xlarge.yaml | 16 ++++++ 11 files changed, 250 insertions(+) create mode 100644 configs/fine-tune-llm/aws.yaml create mode 100644 configs/fine-tune-llm/gce.yaml create mode 100644 templates/fine-tune-llm/README.md create mode 100644 templates/fine-tune-llm/deepspeed_configs/zero_3_llama_2_13b.json create mode 100644 templates/fine-tune-llm/deepspeed_configs/zero_3_llama_2_70b.json create mode 100644 templates/fine-tune-llm/deepspeed_configs/zero_3_llama_2_7b.json create mode 100644 templates/fine-tune-llm/job_configs/aws.yaml create mode 100644 templates/fine-tune-llm/training_configs/llama-2-13b-4k-4xg5_12xlarge.yaml create mode 100644 templates/fine-tune-llm/training_configs/llama-2-13b-4k-4xg5_48xlarge.yaml create mode 100644 templates/fine-tune-llm/training_configs/llama-2-70b-2k-4xg5_48xlarge.yaml create mode 100644 templates/fine-tune-llm/training_configs/llama-2-7b-512-16xg5_4xlarge.yaml diff --git a/configs/fine-tune-llm/aws.yaml b/configs/fine-tune-llm/aws.yaml new file mode 100644 index 000000000..952b7a686 --- /dev/null +++ b/configs/fine-tune-llm/aws.yaml @@ -0,0 +1,3 @@ +head_node_type: + name: head-node-type + instance_type: m5.xlarge diff --git a/configs/fine-tune-llm/gce.yaml b/configs/fine-tune-llm/gce.yaml new file mode 100644 index 000000000..4d67b7a48 --- /dev/null +++ b/configs/fine-tune-llm/gce.yaml @@ -0,0 +1,10 @@ +head_node_type: + name: head_node_type + instance_type: n1-standard-4 + +worker_node_types: +- name: gpu_worker + instance_type: g2-standard-16-nvidia-l4-1 + min_workers: 0 + max_workers: 100 + use_spot: false \ No newline at end of file diff --git a/templates/fine-tune-llm/README.md b/templates/fine-tune-llm/README.md new file mode 100644 index 000000000..7b78f5d46 --- /dev/null +++ b/templates/fine-tune-llm/README.md @@ -0,0 +1,18 @@ +# Fine-tuning Llama-2/Mistral models with Anyscale + +Anyscale currently offers a simple CLI command to fine-tune LLM models via `anyscale fine-tuning submit` +and you can take a look at the documentation [here](https://docs.anyscale.com/endpoints/fine-tuning/get-started). +This guide provides starter configurations if you would like to customize the fine-tuning process. 
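+
+For orientation, the starter files added by this template are laid out as follows:
+
+```shell
+templates/fine-tune-llm/
+  README.md            # this guide
+  deepspeed_configs/   # ZeRO-3 configs for the 7b, 13b, and 70b models
+  job_configs/         # Anyscale job compute config (aws.yaml)
+  training_configs/    # per-model, per-instance-type training configs
+```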
+ +### Supported base models + +- mistralai/Mistral-7B-Instruct-v0.1 +- meta-llama/Llama-2-7b-hf +- meta-llama/Llama-2-7b-chat-hf +- meta-llama/Llama-2-13b-hf +- meta-llama/Llama-2-13b-chat-hf +- meta-llama/Llama-2-70b-hf +- meta-llama/Llama-2-70b-chat-hf +- codellama/CodeLlama-34b-Instruct-hf + +### diff --git a/templates/fine-tune-llm/deepspeed_configs/zero_3_llama_2_13b.json b/templates/fine-tune-llm/deepspeed_configs/zero_3_llama_2_13b.json new file mode 100644 index 000000000..6b69e1b2d --- /dev/null +++ b/templates/fine-tune-llm/deepspeed_configs/zero_3_llama_2_13b.json @@ -0,0 +1,35 @@ +{ + "fp16": { + "enabled": "auto" + }, + "bf16": { + "enabled": true + }, + "zero_optimization": { + "stage": 3, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "offload_param": { + "device": "cpu", + "pin_memory": true + }, + "overlap_comm": true, + "contiguous_gradients": true, + "sub_group_size": 1e9, + "reduce_bucket_size": 5e8, + "stage3_prefetch_bucket_size": 5e8, + "stage3_param_persistence_threshold": 1e6, + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_gather_16bit_weights_on_model_save": true, + "round_robin_gradients": true + }, + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "steps_per_print": 10, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "wall_clock_breakdown": false +} diff --git a/templates/fine-tune-llm/deepspeed_configs/zero_3_llama_2_70b.json b/templates/fine-tune-llm/deepspeed_configs/zero_3_llama_2_70b.json new file mode 100644 index 000000000..aee680d62 --- /dev/null +++ b/templates/fine-tune-llm/deepspeed_configs/zero_3_llama_2_70b.json @@ -0,0 +1,28 @@ +{ + "fp16": { + "enabled": false + }, + "bf16": { + "enabled": true + }, + "zero_optimization": { + "stage": 3, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "overlap_comm": true, + "contiguous_gradients": true, + "reduce_bucket_size": "auto", + "stage3_prefetch_bucket_size": "auto", + "stage3_param_persistence_threshold": "auto", + "gather_16bit_weights_on_model_save": true, + "round_robin_gradients": true + }, + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "steps_per_print": 10, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "wall_clock_breakdown": false +} diff --git a/templates/fine-tune-llm/deepspeed_configs/zero_3_llama_2_7b.json b/templates/fine-tune-llm/deepspeed_configs/zero_3_llama_2_7b.json new file mode 100644 index 000000000..6b69e1b2d --- /dev/null +++ b/templates/fine-tune-llm/deepspeed_configs/zero_3_llama_2_7b.json @@ -0,0 +1,35 @@ +{ + "fp16": { + "enabled": "auto" + }, + "bf16": { + "enabled": true + }, + "zero_optimization": { + "stage": 3, + "offload_optimizer": { + "device": "cpu", + "pin_memory": true + }, + "offload_param": { + "device": "cpu", + "pin_memory": true + }, + "overlap_comm": true, + "contiguous_gradients": true, + "sub_group_size": 1e9, + "reduce_bucket_size": 5e8, + "stage3_prefetch_bucket_size": 5e8, + "stage3_param_persistence_threshold": 1e6, + "stage3_max_live_parameters": 1e9, + "stage3_max_reuse_distance": 1e9, + "stage3_gather_16bit_weights_on_model_save": true, + "round_robin_gradients": true + }, + "gradient_accumulation_steps": "auto", + "gradient_clipping": "auto", + "steps_per_print": 10, + "train_batch_size": "auto", + "train_micro_batch_size_per_gpu": "auto", + "wall_clock_breakdown": false +} diff --git a/templates/fine-tune-llm/job_configs/aws.yaml 
b/templates/fine-tune-llm/job_configs/aws.yaml new file mode 100644 index 000000000..0346aafeb --- /dev/null +++ b/templates/fine-tune-llm/job_configs/aws.yaml @@ -0,0 +1,55 @@ +compute_config: + allowed_azs: + - any + cloud: my-cloud # You may specify `cloud_id` instead + head_node_type: + instance_type: m5.4xlarge + name: head_node + worker_node_types: + - instance_type: g5.4xlarge + max_workers: 16 + min_workers: 0 + name: g5_4x_worker_node + resources: + custom_resources: + g5.4xlarge: 1 + use_spot: false + - instance_type: g5.12xlarge + max_workers: 4 + min_workers: 0 + name: g5_12x_worker_node + resources: + custom_resources: + g5.12xlarge: 4 + - instance_type: g5.48xlarge + max_workers: 4 + min_workers: 0 + name: g5_48x_worker_node + resources: + custom_resources: + g5.48xlarge: 8 + - instance_type: p4d.24xlarge + max_workers: 7 + min_workers: 0 + name: p4d_worker_node + resources: + custom_resources: + p4d.24xlarge: 8 + - instance_type: p4de.24xlarge + max_workers: 8 + min_workers: 0 + name: p4de_worker_node + resources: + custom_resources: + p4de.24xlarge: 8 + aws_advanced_configurations_json: + TagSpecifications: + - ResourceType: instance + Tags: + - Key: as-feature-multi-zone + Value: True +runtime_env: + env_vars: + HF_HOME: /mnt/local_storage/.cache/huggingface + TUNE_RESULT_DIR: /mnt/local_storage +max_retries: 0 diff --git a/templates/fine-tune-llm/training_configs/llama-2-13b-4k-4xg5_12xlarge.yaml b/templates/fine-tune-llm/training_configs/llama-2-13b-4k-4xg5_12xlarge.yaml new file mode 100644 index 000000000..3d05ed5f6 --- /dev/null +++ b/templates/fine-tune-llm/training_configs/llama-2-13b-4k-4xg5_12xlarge.yaml @@ -0,0 +1,16 @@ +model_id: meta-llama/Llama-2-13b-hf +train_path: s3://air-example-data/gsm8k/train.jsonl +valid_path: s3://air-example-data/gsm8k/test.jsonl +context_length: 4096 +num_devices: 8 +num_epochs: 10 +train_batch_size_per_device: 8 +eval_batch_size_per_device: 8 +learning_rate: 5e-6 +num_checkpoints_to_keep: 1 +output_dir: /mnt/local_storage +deepspeed: + config_path: deepspeed_configs/zero_3_llama_2_13b.json +flash_attention_2: true +worker_resources: + g5.12xlarge: 1 diff --git a/templates/fine-tune-llm/training_configs/llama-2-13b-4k-4xg5_48xlarge.yaml b/templates/fine-tune-llm/training_configs/llama-2-13b-4k-4xg5_48xlarge.yaml new file mode 100644 index 000000000..441d31bf2 --- /dev/null +++ b/templates/fine-tune-llm/training_configs/llama-2-13b-4k-4xg5_48xlarge.yaml @@ -0,0 +1,17 @@ +model_id: meta-llama/Llama-2-13b-hf # <-- change this to the model you want to fine-tune +train_path: s3://air-example-data/gsm8k/train.jsonl # <-- change this to the path to your training data +valid_path: s3://air-example-data/gsm8k/test.jsonl # <-- change this to the path to your validation data. 
This is optional +context_length: 4096 # <-- change this to the context length you want to use +num_devices: 32 +num_epochs: 10 # <-- change this to the number of epochs that you want to train for +train_batch_size_per_device: 16 +eval_batch_size_per_device: 16 +learning_rate: 5e-6 +num_checkpoints_to_keep: 1 +output_dir: /mnt/local_storage +dataset_size_scaling_factor: 1000 +deepspeed: + config_path: deepspeed_configs/zero_3_llama_2_13b.json +flash_attention_2: true +worker_resources: + g5.48xlarge: 1 diff --git a/templates/fine-tune-llm/training_configs/llama-2-70b-2k-4xg5_48xlarge.yaml b/templates/fine-tune-llm/training_configs/llama-2-70b-2k-4xg5_48xlarge.yaml new file mode 100644 index 000000000..ca4961bee --- /dev/null +++ b/templates/fine-tune-llm/training_configs/llama-2-70b-2k-4xg5_48xlarge.yaml @@ -0,0 +1,17 @@ +model_id: meta-llama/Llama-2-13b-hf # <-- change this to the model you want to fine-tune +train_path: s3://air-example-data/gsm8k/train.jsonl # <-- change this to the path to your training data +valid_path: s3://air-example-data/gsm8k/test.jsonl # <-- change this to the path to your validation data. This is optional +context_length: 2048 # <-- change this to the context length you want to use +num_devices: 32 +num_epochs: 10 # <-- change this to the number of epochs that you want to train for +train_batch_size_per_device: 16 +eval_batch_size_per_device: 16 +learning_rate: 5e-6 +num_checkpoints_to_keep: 1 +output_dir: /mnt/local_storage +dataset_size_scaling_factor: 1000 +deepspeed: + config_path: deepspeed_configs/zero_3_llama_2_70b.json +flash_attention_2: true +worker_resources: + g5.48xlarge: 1 diff --git a/templates/fine-tune-llm/training_configs/llama-2-7b-512-16xg5_4xlarge.yaml b/templates/fine-tune-llm/training_configs/llama-2-7b-512-16xg5_4xlarge.yaml new file mode 100644 index 000000000..7c04d1a80 --- /dev/null +++ b/templates/fine-tune-llm/training_configs/llama-2-7b-512-16xg5_4xlarge.yaml @@ -0,0 +1,16 @@ +model_id: meta-llama/Llama-2-7b-hf +train_path: s3://air-example-data/gsm8k/train.jsonl +valid_path: s3://air-example-data/gsm8k/test.jsonl +context_length: 512 +num_devices: 16 +num_epochs: 10 +train_batch_size_per_device: 16 +eval_batch_size_per_device: 16 +learning_rate: 5e-6 +num_checkpoints_to_keep: 1 +output_dir: /mnt/local_storage +deepspeed: + config_path: deepspeed_configs/zero_3_llama_2_7b.json +flash_attention_2: true +worker_resources: + g5.4xlarge: 1 From 174acf8240988e32b9a9383c3e8c1e5177c8a829 Mon Sep 17 00:00:00 2001 From: shawn Date: Thu, 1 Feb 2024 01:15:43 +0000 Subject: [PATCH 02/40] Updated readme --- templates/fine-tune-llm/README.md | 29 +++++++++++++++++++++++++++-- 1 file changed, 27 insertions(+), 2 deletions(-) diff --git a/templates/fine-tune-llm/README.md b/templates/fine-tune-llm/README.md index 7b78f5d46..3decd9545 100644 --- a/templates/fine-tune-llm/README.md +++ b/templates/fine-tune-llm/README.md @@ -2,7 +2,7 @@ Anyscale currently offers a simple CLI command to fine-tune LLM models via `anyscale fine-tuning submit` and you can take a look at the documentation [here](https://docs.anyscale.com/endpoints/fine-tuning/get-started). -This guide provides starter configurations if you would like to customize the fine-tuning process. +This guide provides starter configurations if you would like to further customize the fine-tuning process. 
### Supported base models @@ -15,4 +15,29 @@ This guide provides starter configurations if you would like to customize the fi - meta-llama/Llama-2-70b-chat-hf - codellama/CodeLlama-34b-Instruct-hf -### +# Step 1 - Launch a fine-tuning job + +We have provided different example configurations under the `training_configs` +directory for different base models and instance types. You can use these as a +starting point for your own fine-tuning jobs. + +```shell +# Launch a fine-tuning job for Llama 7b with 16 g5.4xlarge instances + +llmforge dev launch job_configs/aws.yaml training_configs/llama-2-7b-512-16xg5_4xlarge.yaml.yaml +``` + +Once you submit the command, you can monitor the progress of the job in +the provided job link. Generally a full-param fine-tuning job will take a few hours. + +# Step 2 - Import the model + +Once the fine-tuning job is complete, you can view the stored model weight at the very end of the job logs. Here is an example finetuning job output: + +```shell + +Best checkpoint is stored in: +anyscale-data-cld-id/org_id/cloud_id/artifact_storage/username/llmforge-finetuning/meta-llama/Llama-2-70b-hf/TorchTrainer_2024-01-25_18-07-48/TorchTrainer_b3de9_00000_0_2024-01-25_18-07-48/checkpoint_000000 +``` + +You can go to models page and import this model by clicking the `Import` button. From 14e35260f4a213a711f70bbe5da3828d6013daec Mon Sep 17 00:00:00 2001 From: shawn Date: Thu, 1 Feb 2024 01:32:09 +0000 Subject: [PATCH 03/40] Updated template --- templates/fine-tune-llm/README.md | 12 ++++++- templates/fine-tune-llm/job_configs/gcp.yaml | 32 +++++++++++++++++++ .../llama-2-13b-4k-4xg5_12xlarge.yaml | 10 +++--- .../llama-2-70b-2k-4xg5_48xlarge.yaml | 2 +- .../llama-2-7b-512-16xg5_4xlarge.yaml | 10 +++--- 5 files changed, 54 insertions(+), 12 deletions(-) create mode 100644 templates/fine-tune-llm/job_configs/gcp.yaml diff --git a/templates/fine-tune-llm/README.md b/templates/fine-tune-llm/README.md index 3decd9545..27978223f 100644 --- a/templates/fine-tune-llm/README.md +++ b/templates/fine-tune-llm/README.md @@ -13,7 +13,6 @@ This guide provides starter configurations if you would like to further customiz - meta-llama/Llama-2-13b-chat-hf - meta-llama/Llama-2-70b-hf - meta-llama/Llama-2-70b-chat-hf -- codellama/CodeLlama-34b-Instruct-hf # Step 1 - Launch a fine-tuning job @@ -41,3 +40,14 @@ anyscale-data-cld-id/org_id/cloud_id/artifact_storage/username/llmforge-finetuni ``` You can go to models page and import this model by clicking the `Import` button. +When entering the remote uri, please make sure to add the +prefix `s3://` or `gs://`. + +For the generation config, you can reference example configs +[here](https://docs.anyscale.com/endpoints/model-serving/import-model#generation-configuration-examples). + +# Step 3 - Deploy the model on Endpoints + +Once the model is imported, you can deploy it on Endpoints by creating a +new endpoint or adding it to an existing endpoint. You can follow the +endpoints page guide to query the endpoint. 
diff --git a/templates/fine-tune-llm/job_configs/gcp.yaml b/templates/fine-tune-llm/job_configs/gcp.yaml new file mode 100644 index 000000000..eaf1258d2 --- /dev/null +++ b/templates/fine-tune-llm/job_configs/gcp.yaml @@ -0,0 +1,32 @@ +compute_config: + allowed_azs: + - any + cloud: my-cloud # You may specify `cloud_id` instead + head_node_type: + instance_type: n2-standard-4 + name: head_node + worker_node_types: + - instance_type: g2-standard-24-nvidia-l4-2 + max_workers: 16 + min_workers: 0 + name: g2_l4_worker_node + resources: + custom_resources: + g2-standard-24-nvidia-l4-2: 1 + use_spot: false + - instance_type: a2-highgpu-8g-nvidia-a100-40gb-8 + max_workers: 4 + min_workers: 0 + name: a2_a100_worker_node + resources: + custom_resources: + a2-highgpu-8g-nvidia-a100-40gb-8: 8 + gcp_advanced_configurations_json: + instance_properties: + labels: + as-feature-multi-zone: "true" +runtime_env: + env_vars: + HF_HOME: /mnt/local_storage/.cache/huggingface + TUNE_RESULT_DIR: /mnt/local_storage +max_retries: 0 diff --git a/templates/fine-tune-llm/training_configs/llama-2-13b-4k-4xg5_12xlarge.yaml b/templates/fine-tune-llm/training_configs/llama-2-13b-4k-4xg5_12xlarge.yaml index 3d05ed5f6..87aeb5cba 100644 --- a/templates/fine-tune-llm/training_configs/llama-2-13b-4k-4xg5_12xlarge.yaml +++ b/templates/fine-tune-llm/training_configs/llama-2-13b-4k-4xg5_12xlarge.yaml @@ -1,9 +1,9 @@ -model_id: meta-llama/Llama-2-13b-hf -train_path: s3://air-example-data/gsm8k/train.jsonl -valid_path: s3://air-example-data/gsm8k/test.jsonl -context_length: 4096 +model_id: meta-llama/Llama-2-13b-hf # <-- change this to the model you want to fine-tune +train_path: s3://air-example-data/gsm8k/train.jsonl # <-- change this to the path to your training data +valid_path: s3://air-example-data/gsm8k/test.jsonl # <-- change this to the path to your validation data. This is optional +context_length: 4096 # <-- change this to the context length you want to use num_devices: 8 -num_epochs: 10 +num_epochs: 10 # <-- change this to the number of epochs that you want to train for train_batch_size_per_device: 8 eval_batch_size_per_device: 8 learning_rate: 5e-6 diff --git a/templates/fine-tune-llm/training_configs/llama-2-70b-2k-4xg5_48xlarge.yaml b/templates/fine-tune-llm/training_configs/llama-2-70b-2k-4xg5_48xlarge.yaml index ca4961bee..c19300ac8 100644 --- a/templates/fine-tune-llm/training_configs/llama-2-70b-2k-4xg5_48xlarge.yaml +++ b/templates/fine-tune-llm/training_configs/llama-2-70b-2k-4xg5_48xlarge.yaml @@ -1,4 +1,4 @@ -model_id: meta-llama/Llama-2-13b-hf # <-- change this to the model you want to fine-tune +model_id: meta-llama/Llama-2-70b-hf # <-- change this to the model you want to fine-tune train_path: s3://air-example-data/gsm8k/train.jsonl # <-- change this to the path to your training data valid_path: s3://air-example-data/gsm8k/test.jsonl # <-- change this to the path to your validation data. 
This is optional
 context_length: 2048 # <-- change this to the context length you want to use
 num_devices: 32
 num_epochs: 10 # <-- change this to the number of epochs that you want to train for
 train_batch_size_per_device: 16
 eval_batch_size_per_device: 16
 learning_rate: 5e-6
 num_checkpoints_to_keep: 1
 output_dir: /mnt/local_storage
 dataset_size_scaling_factor: 1000
 deepspeed:
   config_path: deepspeed_configs/zero_3_llama_2_70b.json
 flash_attention_2: true
 worker_resources:
   g5.48xlarge: 1
diff --git a/templates/fine-tune-llm/training_configs/llama-2-7b-512-16xg5_4xlarge.yaml b/templates/fine-tune-llm/training_configs/llama-2-7b-512-16xg5_4xlarge.yaml
index 7c04d1a80..8d03b59a7 100644
--- a/templates/fine-tune-llm/training_configs/llama-2-7b-512-16xg5_4xlarge.yaml
+++ b/templates/fine-tune-llm/training_configs/llama-2-7b-512-16xg5_4xlarge.yaml
@@ -1,9 +1,9 @@
-model_id: meta-llama/Llama-2-7b-hf
-train_path: s3://air-example-data/gsm8k/train.jsonl
-valid_path: s3://air-example-data/gsm8k/test.jsonl
-context_length: 512
+model_id: meta-llama/Llama-2-7b-hf # <-- change this to the model you want to fine-tune
+train_path: s3://air-example-data/gsm8k/train.jsonl # <-- change this to the path to your training data
+valid_path: s3://air-example-data/gsm8k/test.jsonl # <-- change this to the path to your validation data. This is optional
+context_length: 4096 # <-- change this to the context length you want to use
 num_devices: 16
-num_epochs: 10
+num_epochs: 10 # <-- change this to the number of epochs that you want to train for
 train_batch_size_per_device: 16
 eval_batch_size_per_device: 16
 learning_rate: 5e-6

From 478577e4d5c685897507ac52cb22eba4411607c4 Mon Sep 17 00:00:00 2001
From: shawn
Date: Thu, 1 Feb 2024 01:45:53 +0000
Subject: [PATCH 04/40] Updated the template

---
 templates/fine-tune-llm/README.md            | 28 ++++++++++++++++++++
 templates/fine-tune-llm/job_configs/gcp.yaml |  2 +-
 2 files changed, 29 insertions(+), 1 deletion(-)

diff --git a/templates/fine-tune-llm/README.md b/templates/fine-tune-llm/README.md
index 27978223f..9754e02cb 100644
--- a/templates/fine-tune-llm/README.md
+++ b/templates/fine-tune-llm/README.md
@@ -20,6 +20,9 @@ We have provided different example configurations under the `training_configs`
 directory for different base models and instance types. You can use these as a
 starting point for your own fine-tuning jobs.
 
+Please go to `job_configs/aws.yaml` or `job_configs/gcp.yaml`
+and specify your cloud name under the `cloud` field.
+
 ```shell
 # Launch a fine-tuning job for Llama 7b with 16 g5.4xlarge instances
 
@@ -51,3 +54,28 @@ For the generation config, you can reference example configs
 
 Once the model is imported, you can deploy it on Endpoints by creating a
 new endpoint or adding it to an existing endpoint. You can follow the
 endpoints page guide to query the endpoint.
+
+# Frequently asked questions
+
+### How can I fine-tune using my own data?
+
+You can open the file under `training_configs` and update
+`train_path` and `valid_path` to your training and evaluation files.
+
+### How do I customize the fine-tuning job?
+
+You can edit the values, such as `context_length`, `num_epoched`,
+`train_batch_size_per_device` and `eval_batch_size_per_device`
+to customize the fine-tuning job.
+
+In addition, the deepspeed configs are provided in case you would
+like to customize them.
+
+### What if I want to use a different instance type?
+
+You can edit both job and training configs to use
+a different instance type. Note that the `num_devices` field
+under the `training_configs` file would need
+to be updated to the total number of GPUs that you expect to use.
+For instance, if you expect to fine-tune a model with 16 g5.4xlarge,
+the `num_devices` should be 16.
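+
+To make the arithmetic concrete, here is a sketch; the per-instance GPU counts below follow the `custom_resources` entries in the job configs:
+
+```shell
+# num_devices = (number of worker nodes) x (GPUs per node)
+# 16 x g5.4xlarge  (1 GPU each)  -> num_devices: 16
+#  4 x g5.12xlarge (4 GPUs each) -> num_devices: 16
+#  4 x g5.48xlarge (8 GPUs each) -> num_devices: 32
+```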
diff --git a/templates/fine-tune-llm/job_configs/gcp.yaml b/templates/fine-tune-llm/job_configs/gcp.yaml index eaf1258d2..652ff409d 100644 --- a/templates/fine-tune-llm/job_configs/gcp.yaml +++ b/templates/fine-tune-llm/job_configs/gcp.yaml @@ -12,7 +12,7 @@ compute_config: name: g2_l4_worker_node resources: custom_resources: - g2-standard-24-nvidia-l4-2: 1 + g2-standard-24-nvidia-l4-2: 2 use_spot: false - instance_type: a2-highgpu-8g-nvidia-a100-40gb-8 max_workers: 4 From 1b18b28fc38519b641711692c36ade9878f35c66 Mon Sep 17 00:00:00 2001 From: shawn Date: Thu, 1 Feb 2024 01:47:24 +0000 Subject: [PATCH 05/40] Updated --- templates/fine-tune-llm/README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/templates/fine-tune-llm/README.md b/templates/fine-tune-llm/README.md index 9754e02cb..5054b1775 100644 --- a/templates/fine-tune-llm/README.md +++ b/templates/fine-tune-llm/README.md @@ -21,7 +21,8 @@ directory for different base models and instance types. You can use these as a starting point for your own fine-tuning jobs. Please go to `job_configs/aws.yaml` or `job_configs/gcp.yaml` -and specify your cloud name under the `cloud` field. +and specify your cloud name under the `cloud` field. Next, you can +launch a fine-tuning job. ```shell # Launch a fine-tuning job for Llama 7b with 16 g5.4xlarge instances From 1497ea417c9ecbdd40b3a650d30d55907f4a0b54 Mon Sep 17 00:00:00 2001 From: shawn Date: Thu, 1 Feb 2024 01:49:05 +0000 Subject: [PATCH 06/40] Updated gce --- configs/fine-tune-llm/gce.yaml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/configs/fine-tune-llm/gce.yaml b/configs/fine-tune-llm/gce.yaml index 4d67b7a48..68de52efe 100644 --- a/configs/fine-tune-llm/gce.yaml +++ b/configs/fine-tune-llm/gce.yaml @@ -1,10 +1,3 @@ head_node_type: name: head_node_type instance_type: n1-standard-4 - -worker_node_types: -- name: gpu_worker - instance_type: g2-standard-16-nvidia-l4-1 - min_workers: 0 - max_workers: 100 - use_spot: false \ No newline at end of file From f6173032cbd3d470066d5d159da73839378e0482 Mon Sep 17 00:00:00 2001 From: Edward Oakes Date: Fri, 2 Feb 2024 14:46:22 -0600 Subject: [PATCH 07/40] fix Signed-off-by: Edward Oakes --- templates/serve-stable-diffusion/README.ipynb | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/templates/serve-stable-diffusion/README.ipynb b/templates/serve-stable-diffusion/README.ipynb index 55c9c71b8..105bdc522 100644 --- a/templates/serve-stable-diffusion/README.ipynb +++ b/templates/serve-stable-diffusion/README.ipynb @@ -96,7 +96,7 @@ "source": [ "### Step 4: Deploy the model to production as a service\n", "\n", - "Deploy the model to production using the `serve publish` command.\n", + "Deploy the model to production using the `serve deploy` command.\n", "\n", "This creates a long-running [service](https://docs.anyscale.com/services/get-started) with a stable endpoint to query the application.\n", "\n", @@ -109,7 +109,7 @@ "metadata": {}, "outputs": [], "source": [ - "!serve publish main:stable_diffusion_app" + "!serve deploy --name stable_diffusion_service main:stable_diffusion_app" ] }, { @@ -122,7 +122,7 @@ "1. Update the `HOST` to the service endpoint.\n", "2. Add the authorization token (will be set as a header in the HTTP request).\n", "\n", - "Both of these values are printed when you run `serve publish`. You can also find them on the service page. For example, if the output looks like:\n", + "Both of these values are printed when you run `serve deploy`. 
You can also find them on the service page. For example, if the output looks like:\n", "```bash\n", "(anyscale +4.0s) You can query the service endpoint using the curl request below:\n", "(anyscale +4.0s) curl -H 'Authorization: Bearer 26hTWi2kZwEz0Tdi1_CKRep4NLXbuuaSTDb3WMXK9DM' https://stable_diffusion_app-4rq8m.cld-ltw6mi8dxaebc3yf.s.anyscaleuserdata-staging.com\n", From 557286c00d96d16a3d63556aa417693bb09d55bf Mon Sep 17 00:00:00 2001 From: Edward Oakes Date: Fri, 2 Feb 2024 14:48:18 -0600 Subject: [PATCH 08/40] WIP Signed-off-by: Edward Oakes --- templates/intro-services/README.ipynb | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/templates/intro-services/README.ipynb b/templates/intro-services/README.ipynb index f37e1fb01..40e19362f 100644 --- a/templates/intro-services/README.ipynb +++ b/templates/intro-services/README.ipynb @@ -100,14 +100,14 @@ "metadata": {}, "outputs": [], "source": [ - "!serve publish main:my_app --name=my_service" + "!serve deploy main:my_app --name=my_service" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "**Tip**: if your app has PyPI dependencies added from the workspace, `serve publish` will automatically compile these dependencies into a Docker image prior to deploying to optimize startup time." + "**Tip**: if your app has PyPI dependencies added from the workspace, `serve deploy` will automatically compile these dependencies into a Docker image prior to deploying to optimize startup time." ] }, { @@ -133,7 +133,7 @@ "1. Update the `HOST` to the service endpoint.\n", "2. Add the authorization token as a header in the HTTP request.\n", "\n", - "Both of these values are printed when you run `serve publish`. You can also find them on the service page. For example, if the output looks like:\n", + "Both of these values are printed when you run `serve deploy`. You can also find them on the service page. 
For example, if the output looks like:\n", "```bash\n", "(anyscale +4.0s) You can query the service endpoint using the curl request below:\n", "(anyscale +4.0s) curl -H 'Authorization: Bearer 26hTWi2kZwEz0Tdi1_CKRep4NLXbuuaSTDb3WMXK9DM' https://stable_diffusion_app-4rq8m.cld-ltw6mi8dxaebc3yf.s.anyscaleuserdata-staging.com\n", @@ -231,7 +231,7 @@ "metadata": {}, "outputs": [], "source": [ - "!serve publish main:my_app --name=my_service" + "!serve deploy main:my_app --name=my_service" ] }, { From dc59153f947b6cf2aecfbbddfaeb61381eb7e31e Mon Sep 17 00:00:00 2001 From: shawn Date: Sat, 3 Feb 2024 02:09:15 +0000 Subject: [PATCH 09/40] Updated --- templates/fine-tune-llm/README.md | 19 ++++++++++++------- .../aws.yaml | 3 ++- .../gcp.yaml | 3 ++- .../llama-2-13b-4k-4xg5_12xlarge.yaml | 4 ++-- .../llama-2-13b-4k-4xg5_48xlarge.yaml | 17 ----------------- .../llama-2-70b-2k-4xg5_48xlarge.yaml | 17 ----------------- .../llama-2-7b-512-16xg5_4xlarge.yaml | 4 ++-- 7 files changed, 20 insertions(+), 47 deletions(-) rename templates/fine-tune-llm/{job_configs => job_compute_configs}/aws.yaml (97%) rename templates/fine-tune-llm/{job_configs => job_compute_configs}/gcp.yaml (95%) delete mode 100644 templates/fine-tune-llm/training_configs/llama-2-13b-4k-4xg5_48xlarge.yaml delete mode 100644 templates/fine-tune-llm/training_configs/llama-2-70b-2k-4xg5_48xlarge.yaml diff --git a/templates/fine-tune-llm/README.md b/templates/fine-tune-llm/README.md index 5054b1775..e29bf8331 100644 --- a/templates/fine-tune-llm/README.md +++ b/templates/fine-tune-llm/README.md @@ -20,14 +20,17 @@ We have provided different example configurations under the `training_configs` directory for different base models and instance types. You can use these as a starting point for your own fine-tuning jobs. -Please go to `job_configs/aws.yaml` or `job_configs/gcp.yaml` -and specify your cloud name under the `cloud` field. Next, you can -launch a fine-tuning job. +First, please go to `job_compute_configs/aws.yaml` or `job_compute_configs/gcp.yaml` +and specify your cloud name under the `cloud` field. + +Then, please get an WandB API key from [WandB](https://wandb.ai/authorize). + +Next, you can launch a fine-tuning job where the WandB API key is passed as an environment variable. ```shell # Launch a fine-tuning job for Llama 7b with 16 g5.4xlarge instances -llmforge dev launch job_configs/aws.yaml training_configs/llama-2-7b-512-16xg5_4xlarge.yaml.yaml +WANDB_API_KEY={YOUR_WANDB_API_KEY} llmforge dev launch job_compute_configs/aws.yaml training_configs/llama-2-7b-512-16xg5_4xlarge.yaml ``` Once you submit the command, you can monitor the progress of the job in @@ -44,6 +47,8 @@ anyscale-data-cld-id/org_id/cloud_id/artifact_storage/username/llmforge-finetuni ``` You can go to models page and import this model by clicking the `Import` button. +The checkpoint uri points to a remote bucket location where the model +configs, weights and tokenizers are stored. When entering the remote uri, please make sure to add the prefix `s3://` or `gs://`. @@ -54,7 +59,7 @@ For the generation config, you can reference example configs Once the model is imported, you can deploy it on Endpoints by creating a new endpoint or adding it to an existing endpoint. You can follow the -endpoints page guide to query the endpoint. +endpoints page guide to query the endpoint ([docs](https://docs.anyscale.com/endpoints/model-serving/get-started#deploy-an-anyscale-private-endpoint)). 
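+
+As a rough sketch, querying the resulting endpoint can look like the following, assuming the endpoint exposes the OpenAI-compatible chat completions route; the URL, bearer token, and model id below are placeholders, so substitute the values shown on your endpoint's page:
+
+```shell
+# Placeholder URL, token, and model id -- use the values from your endpoint's page.
+curl "$ENDPOINT_URL/v1/chat/completions" \
+  -H "Authorization: Bearer $ENDPOINT_TOKEN" \
+  -H "Content-Type: application/json" \
+  -d '{
+        "model": "meta-llama/Llama-2-7b-chat-hf:<your-model-suffix>",
+        "messages": [{"role": "user", "content": "Hello!"}]
+      }'
+```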
# Frequently asked questions
 
 ### How can I fine-tune using my own data?
 
 You can open the file under `training_configs` and update
 `train_path` and `valid_path` to your training and evaluation files.
 
 ### How do I customize the fine-tuning job?
 
-You can edit the values, such as `context_length`, `num_epoched`,
+You can edit the values, such as `context_length`, `num_epochs`,
 `train_batch_size_per_device` and `eval_batch_size_per_device`
 to customize the fine-tuning job.
 
 In addition, the deepspeed configs are provided in case you would
 like to customize them.
 
 ### What if I want to use a different instance type?
 
 You can edit both job and training configs to use
 a different instance type. Note that the `num_devices` field
 under the `training_configs` file would need
 to be updated to the total number of GPUs that you expect to use.
-For instance, if you expect to fine-tune a model with 16 g5.4xlarge,
+For instance, if you expect to fine-tune a model with 4 g5.12xlarge,
 the `num_devices` should be 16.
diff --git a/templates/fine-tune-llm/job_configs/aws.yaml b/templates/fine-tune-llm/job_compute_configs/aws.yaml
similarity index 97%
rename from templates/fine-tune-llm/job_configs/aws.yaml
rename to templates/fine-tune-llm/job_compute_configs/aws.yaml
index 0346aafeb..19140cf72 100644
--- a/templates/fine-tune-llm/job_configs/aws.yaml
+++ b/templates/fine-tune-llm/job_compute_configs/aws.yaml
@@ -2,6 +2,7 @@ compute_config:
   allowed_azs:
   - any
   cloud: my-cloud # You may specify `cloud_id` instead
+  region: any
   head_node_type:
     instance_type: m5.4xlarge
     name: head_node
@@ -42,7 +43,7 @@ compute_config:
       resources:
         custom_resources:
           p4de.24xlarge: 8
-  aws_advanced_configurations_json:
+  aws:
     TagSpecifications:
     - ResourceType: instance
       Tags:
diff --git a/templates/fine-tune-llm/job_configs/gcp.yaml b/templates/fine-tune-llm/job_compute_configs/gcp.yaml
similarity index 95%
rename from templates/fine-tune-llm/job_configs/gcp.yaml
rename to templates/fine-tune-llm/job_compute_configs/gcp.yaml
index 652ff409d..2eca37168 100644
--- a/templates/fine-tune-llm/job_configs/gcp.yaml
+++ b/templates/fine-tune-llm/job_compute_configs/gcp.yaml
@@ -2,6 +2,7 @@ compute_config:
   allowed_azs:
   - any
   cloud: my-cloud # You may specify `cloud_id` instead
+  region: any
   head_node_type:
     instance_type: n2-standard-4
     name: head_node
@@ -21,7 +22,7 @@ compute_config:
       resources:
         custom_resources:
           a2-highgpu-8g-nvidia-a100-40gb-8: 8
-  gcp_advanced_configurations_json:
+  gcp:
     instance_properties:
       labels:
         as-feature-multi-zone: "true"
diff --git a/templates/fine-tune-llm/training_configs/llama-2-13b-4k-4xg5_12xlarge.yaml b/templates/fine-tune-llm/training_configs/llama-2-13b-4k-4xg5_12xlarge.yaml
index 87aeb5cba..504ac144e 100644
--- a/templates/fine-tune-llm/training_configs/llama-2-13b-4k-4xg5_12xlarge.yaml
+++ b/templates/fine-tune-llm/training_configs/llama-2-13b-4k-4xg5_12xlarge.yaml
@@ -2,7 +2,7 @@ model_id: meta-llama/Llama-2-13b-hf # <-- change this to the model you want to f
 train_path: s3://air-example-data/gsm8k/train.jsonl # <-- change this to the path to your training data
 valid_path: s3://air-example-data/gsm8k/test.jsonl # <-- change this to the path to your validation data. 
This is optional context_length: 4096 # <-- change this to the context length you want to use -num_devices: 8 +num_devices: 16 # <-- change this to total number of GPUs that you want to use num_epochs: 10 # <-- change this to the number of epochs that you want to train for train_batch_size_per_device: 8 eval_batch_size_per_device: 8 @@ -13,4 +13,4 @@ deepspeed: config_path: deepspeed_configs/zero_3_llama_2_13b.json flash_attention_2: true worker_resources: - g5.12xlarge: 1 + g5.12xlarge: 1 # <-- this maps to job_compute_configs file's custom_resources so the appropriate nodes can scale up diff --git a/templates/fine-tune-llm/training_configs/llama-2-13b-4k-4xg5_48xlarge.yaml b/templates/fine-tune-llm/training_configs/llama-2-13b-4k-4xg5_48xlarge.yaml deleted file mode 100644 index 441d31bf2..000000000 --- a/templates/fine-tune-llm/training_configs/llama-2-13b-4k-4xg5_48xlarge.yaml +++ /dev/null @@ -1,17 +0,0 @@ -model_id: meta-llama/Llama-2-13b-hf # <-- change this to the model you want to fine-tune -train_path: s3://air-example-data/gsm8k/train.jsonl # <-- change this to the path to your training data -valid_path: s3://air-example-data/gsm8k/test.jsonl # <-- change this to the path to your validation data. This is optional -context_length: 4096 # <-- change this to the context length you want to use -num_devices: 32 -num_epochs: 10 # <-- change this to the number of epochs that you want to train for -train_batch_size_per_device: 16 -eval_batch_size_per_device: 16 -learning_rate: 5e-6 -num_checkpoints_to_keep: 1 -output_dir: /mnt/local_storage -dataset_size_scaling_factor: 1000 -deepspeed: - config_path: deepspeed_configs/zero_3_llama_2_13b.json -flash_attention_2: true -worker_resources: - g5.48xlarge: 1 diff --git a/templates/fine-tune-llm/training_configs/llama-2-70b-2k-4xg5_48xlarge.yaml b/templates/fine-tune-llm/training_configs/llama-2-70b-2k-4xg5_48xlarge.yaml deleted file mode 100644 index c19300ac8..000000000 --- a/templates/fine-tune-llm/training_configs/llama-2-70b-2k-4xg5_48xlarge.yaml +++ /dev/null @@ -1,17 +0,0 @@ -model_id: meta-llama/Llama-2-70b-hf # <-- change this to the model you want to fine-tune -train_path: s3://air-example-data/gsm8k/train.jsonl # <-- change this to the path to your training data -valid_path: s3://air-example-data/gsm8k/test.jsonl # <-- change this to the path to your validation data. This is optional -context_length: 2048 # <-- change this to the context length you want to use -num_devices: 32 -num_epochs: 10 # <-- change this to the number of epochs that you want to train for -train_batch_size_per_device: 16 -eval_batch_size_per_device: 16 -learning_rate: 5e-6 -num_checkpoints_to_keep: 1 -output_dir: /mnt/local_storage -dataset_size_scaling_factor: 1000 -deepspeed: - config_path: deepspeed_configs/zero_3_llama_2_70b.json -flash_attention_2: true -worker_resources: - g5.48xlarge: 1 diff --git a/templates/fine-tune-llm/training_configs/llama-2-7b-512-16xg5_4xlarge.yaml b/templates/fine-tune-llm/training_configs/llama-2-7b-512-16xg5_4xlarge.yaml index 8d03b59a7..be8283040 100644 --- a/templates/fine-tune-llm/training_configs/llama-2-7b-512-16xg5_4xlarge.yaml +++ b/templates/fine-tune-llm/training_configs/llama-2-7b-512-16xg5_4xlarge.yaml @@ -2,7 +2,7 @@ model_id: meta-llama/Llama-2-7b-hf # <-- change this to the model you want to fi train_path: s3://air-example-data/gsm8k/train.jsonl # <-- change this to the path to your training data valid_path: s3://air-example-data/gsm8k/test.jsonl # <-- change this to the path to your validation data. 
This is optional context_length: 4096 # <-- change this to the context length you want to use -num_devices: 16 +num_devices: 16 # <-- change this to total number of GPUs that you want to use num_epochs: 10 # <-- change this to the number of epochs that you want to train for train_batch_size_per_device: 16 eval_batch_size_per_device: 16 @@ -13,4 +13,4 @@ deepspeed: config_path: deepspeed_configs/zero_3_llama_2_7b.json flash_attention_2: true worker_resources: - g5.4xlarge: 1 + g5.4xlarge: 1 # <-- this maps to job_compute_configs file's custom_resources so the appropriate nodes can scale up From d001c0c85836832d8d4cf57059ecf6c7efdb283f Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Sat, 3 Feb 2024 16:27:45 -0800 Subject: [PATCH 10/40] fix hang --- configs/intro-workspaces/aws.yaml | 1 + configs/intro-workspaces/gce.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/configs/intro-workspaces/aws.yaml b/configs/intro-workspaces/aws.yaml index 5913550fc..16a1311fe 100644 --- a/configs/intro-workspaces/aws.yaml +++ b/configs/intro-workspaces/aws.yaml @@ -1,3 +1,4 @@ head_node_type: name: head_node_type instance_type: m5.xlarge +worker_node_types: [] diff --git a/configs/intro-workspaces/gce.yaml b/configs/intro-workspaces/gce.yaml index c9c1d0a46..b946af734 100644 --- a/configs/intro-workspaces/gce.yaml +++ b/configs/intro-workspaces/gce.yaml @@ -1,3 +1,4 @@ head_node_type: name: head_node_type instance_type: n2-standard-4 +worker_node_types: [] From 1053ff2cb0f04f72995caf6640f70a3673fd2cf6 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Sun, 4 Feb 2024 14:22:21 -0800 Subject: [PATCH 11/40] fix import Signed-off-by: Eric Liang --- templates/intro-workspaces/README.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/templates/intro-workspaces/README.ipynb b/templates/intro-workspaces/README.ipynb index a5af50281..e500a478d 100644 --- a/templates/intro-workspaces/README.ipynb +++ b/templates/intro-workspaces/README.ipynb @@ -175,7 +175,7 @@ "You can click on the \"File Explorer\" in the left pane of VSCode to create the new file. Copy paste the following program into the file:\n", "\n", "```python\n", - "import ray\n", + "import ray, time\n", "\n", "@ray.remote\n", "def do_some_work():\n", From 5d8d627f601a048fc2dfd4c0f91f89bfb702c140 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Sun, 4 Feb 2024 15:19:16 -0800 Subject: [PATCH 12/40] fix Signed-off-by: Eric Liang --- templates/fine-tune-llama2/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/templates/fine-tune-llama2/README.md b/templates/fine-tune-llama2/README.md index c8eee47ad..6dff3bed1 100644 --- a/templates/fine-tune-llama2/README.md +++ b/templates/fine-tune-llama2/README.md @@ -11,7 +11,7 @@ python train.py --size=7b --as-test The flag `--as-test` is for testing purpose as it runs through only one forward and backward pass and checkpoints the model. It takes ~7 mins. Without this flag, it takes ~42 mins (3 epochs for optimal model quality). -Model checkpoints will be stored under `{user's first name}/ft_llms_with_deepspeed/` in the cloud stroage bucket created for your Anyscale account. The full path will be printed in the output after the training is completed. +Model checkpoints will be stored under `{user's first name}/ft_llms_with_deepspeed/` in the cloud storage bucket created for your Anyscale account. The full path will be printed in the output after the training is completed. 
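+
+For instance, if your Anyscale cloud uses S3 for storage, you can sanity-check that the checkpoint landed with a listing like the following; the bucket name is a placeholder, so use the full path printed at the end of training:
+
+```shell
+# Placeholder bucket -- substitute the path printed by the training run.
+aws s3 ls s3://<your-anyscale-bucket>/<first-name>/ft_llms_with_deepspeed/ --recursive
+```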
## (Optional) Step 2: Switch to a different model size @@ -45,4 +45,4 @@ Use the same command to train with your own data. Voila! You have fine-tuned your own Llama-2 models. Want more than this? Check out advanced tutorials below - [Walkthrough of this template](./tutorials/walkthrough.md) -- [Fine-tune Llama-2 with LoRA adatpers](./tutorials/lora.md) \ No newline at end of file +- [Fine-tune Llama-2 with LoRA adatpers](./tutorials/lora.md) From 8f2285f6771eeef8fceac3184db32f667d1d31a5 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Sun, 4 Feb 2024 15:20:02 -0800 Subject: [PATCH 13/40] fix Signed-off-by: Eric Liang --- templates/fine-tune-llama2/README.ipynb | 2 +- templates/fine-tune-llama2/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/templates/fine-tune-llama2/README.ipynb b/templates/fine-tune-llama2/README.ipynb index b983fc104..1988889bd 100644 --- a/templates/fine-tune-llama2/README.ipynb +++ b/templates/fine-tune-llama2/README.ipynb @@ -70,7 +70,7 @@ "Voila! You have fine-tuned your own Llama-2 models. Want more than this? Check out advanced tutorials below \n", "\n", "- [Walkthrough of this template](./tutorials/walkthrough.md)\n", - "- [Fine-tune Llama-2 with LoRA adatpers](./tutorials/lora.md)" + "- [Fine-tune Llama-2 with LoRA adapters](./tutorials/lora.md)" ] } ], diff --git a/templates/fine-tune-llama2/README.md b/templates/fine-tune-llama2/README.md index 6dff3bed1..35709c502 100644 --- a/templates/fine-tune-llama2/README.md +++ b/templates/fine-tune-llama2/README.md @@ -45,4 +45,4 @@ Use the same command to train with your own data. Voila! You have fine-tuned your own Llama-2 models. Want more than this? Check out advanced tutorials below - [Walkthrough of this template](./tutorials/walkthrough.md) -- [Fine-tune Llama-2 with LoRA adatpers](./tutorials/lora.md) +- [Fine-tune Llama-2 with LoRA adapters](./tutorials/lora.md) From 5ac0f1f0182dbf732af0a5aa9914258f6080b430 Mon Sep 17 00:00:00 2001 From: Huaiwei Sun Date: Mon, 5 Feb 2024 09:46:57 -0800 Subject: [PATCH 14/40] Fix gpu instance type name to match the internal names --- configs/serve-stable-diffusion/gce.yaml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/configs/serve-stable-diffusion/gce.yaml b/configs/serve-stable-diffusion/gce.yaml index 623be2bba..961d4f4f7 100644 --- a/configs/serve-stable-diffusion/gce.yaml +++ b/configs/serve-stable-diffusion/gce.yaml @@ -1,10 +1,10 @@ -# n1-standard-8-nvidia-tesla-t4-1 --> 8 CPUs, 1 GPU +# n1-standard-8-nvidia-t4-16gb-1 --> 8 CPUs, 1 GPU head_node_type: name: head_node_type - instance_type: n1-standard-8-nvidia-tesla-t4-1 + instance_type: n1-standard-8-nvidia-t4-16gb-1 worker_node_types: - name: gpu_worker - instance_type: n1-standard-8-nvidia-tesla-t4-1 + instance_type: n1-standard-8-nvidia-t4-16gb-1 min_workers: 0 max_workers: 100 use_spot: false From 55e62be4f0c61232c2568a87b11ec4f02c4f8006 Mon Sep 17 00:00:00 2001 From: Shomil Jain Date: Mon, 5 Feb 2024 18:19:11 -0800 Subject: [PATCH 15/40] Add auto_select_worker_config --- configs/serve-stable-diffusion/aws.yaml | 1 + configs/serve-stable-diffusion/gce.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/configs/serve-stable-diffusion/aws.yaml b/configs/serve-stable-diffusion/aws.yaml index 6c1097375..32540af86 100644 --- a/configs/serve-stable-diffusion/aws.yaml +++ b/configs/serve-stable-diffusion/aws.yaml @@ -7,3 +7,4 @@ worker_node_types: min_workers: 0 max_workers: 100 use_spot: false +auto_select_worker_config: true \ No newline at end of file diff 
--git a/configs/serve-stable-diffusion/gce.yaml b/configs/serve-stable-diffusion/gce.yaml index 961d4f4f7..2becc1f2f 100644 --- a/configs/serve-stable-diffusion/gce.yaml +++ b/configs/serve-stable-diffusion/gce.yaml @@ -8,3 +8,4 @@ worker_node_types: min_workers: 0 max_workers: 100 use_spot: false +auto_select_worker_config: true \ No newline at end of file From f458c5bdcd28b0ce2d70d1cccb7b32f556be9790 Mon Sep 17 00:00:00 2001 From: Shomil Jain Date: Mon, 5 Feb 2024 18:19:32 -0800 Subject: [PATCH 16/40] Remove default worker node types --- configs/serve-stable-diffusion/aws.yaml | 7 +------ configs/serve-stable-diffusion/gce.yaml | 7 +------ 2 files changed, 2 insertions(+), 12 deletions(-) diff --git a/configs/serve-stable-diffusion/aws.yaml b/configs/serve-stable-diffusion/aws.yaml index 32540af86..6db8ca4c6 100644 --- a/configs/serve-stable-diffusion/aws.yaml +++ b/configs/serve-stable-diffusion/aws.yaml @@ -1,10 +1,5 @@ head_node_type: name: head_node_type instance_type: g5.xlarge -worker_node_types: -- name: gpu_worker - instance_type: g5.xlarge - min_workers: 0 - max_workers: 100 - use_spot: false +worker_node_types: [] auto_select_worker_config: true \ No newline at end of file diff --git a/configs/serve-stable-diffusion/gce.yaml b/configs/serve-stable-diffusion/gce.yaml index 2becc1f2f..f282a1551 100644 --- a/configs/serve-stable-diffusion/gce.yaml +++ b/configs/serve-stable-diffusion/gce.yaml @@ -2,10 +2,5 @@ head_node_type: name: head_node_type instance_type: n1-standard-8-nvidia-t4-16gb-1 -worker_node_types: -- name: gpu_worker - instance_type: n1-standard-8-nvidia-t4-16gb-1 - min_workers: 0 - max_workers: 100 - use_spot: false +worker_node_types: [] auto_select_worker_config: true \ No newline at end of file From 5a29e17411908e140d2a910a10a8a681fb8319f2 Mon Sep 17 00:00:00 2001 From: Shomil Jain Date: Mon, 5 Feb 2024 18:20:04 -0800 Subject: [PATCH 17/40] Put back defaults --- configs/serve-stable-diffusion/aws.yaml | 7 ++++++- configs/serve-stable-diffusion/gce.yaml | 7 ++++++- 2 files changed, 12 insertions(+), 2 deletions(-) diff --git a/configs/serve-stable-diffusion/aws.yaml b/configs/serve-stable-diffusion/aws.yaml index 6db8ca4c6..32540af86 100644 --- a/configs/serve-stable-diffusion/aws.yaml +++ b/configs/serve-stable-diffusion/aws.yaml @@ -1,5 +1,10 @@ head_node_type: name: head_node_type instance_type: g5.xlarge -worker_node_types: [] +worker_node_types: +- name: gpu_worker + instance_type: g5.xlarge + min_workers: 0 + max_workers: 100 + use_spot: false auto_select_worker_config: true \ No newline at end of file diff --git a/configs/serve-stable-diffusion/gce.yaml b/configs/serve-stable-diffusion/gce.yaml index f282a1551..2becc1f2f 100644 --- a/configs/serve-stable-diffusion/gce.yaml +++ b/configs/serve-stable-diffusion/gce.yaml @@ -2,5 +2,10 @@ head_node_type: name: head_node_type instance_type: n1-standard-8-nvidia-t4-16gb-1 -worker_node_types: [] +worker_node_types: +- name: gpu_worker + instance_type: n1-standard-8-nvidia-t4-16gb-1 + min_workers: 0 + max_workers: 100 + use_spot: false auto_select_worker_config: true \ No newline at end of file From 9d3e4d48797576d4502f3885c598ec90bc2c7314 Mon Sep 17 00:00:00 2001 From: Shomil Jain Date: Mon, 5 Feb 2024 18:20:25 -0800 Subject: [PATCH 18/40] Add whitepsace --- configs/serve-stable-diffusion/aws.yaml | 2 +- configs/serve-stable-diffusion/gce.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/configs/serve-stable-diffusion/aws.yaml b/configs/serve-stable-diffusion/aws.yaml index 
32540af86..459756a1b 100644
--- a/configs/serve-stable-diffusion/aws.yaml
+++ b/configs/serve-stable-diffusion/aws.yaml
@@ -7,4 +7,4 @@ worker_node_types:
   min_workers: 0
   max_workers: 100
   use_spot: false
-auto_select_worker_config: true
\ No newline at end of file
+auto_select_worker_config: true
diff --git a/configs/serve-stable-diffusion/gce.yaml b/configs/serve-stable-diffusion/gce.yaml
index 2becc1f2f..27537a439 100644
--- a/configs/serve-stable-diffusion/gce.yaml
+++ b/configs/serve-stable-diffusion/gce.yaml
@@ -8,4 +8,4 @@ worker_node_types:
   min_workers: 0
   max_workers: 100
   use_spot: false
-auto_select_worker_config: true
\ No newline at end of file
+auto_select_worker_config: true

From 8298c3696aebcdd619a57b7b9c30b1d909daa189 Mon Sep 17 00:00:00 2001
From: Huaiwei Sun
Date: Tue, 6 Feb 2024 15:19:14 -0800
Subject: [PATCH 19/40] update the outdated screenshot

---
 .../assets/edit_worker_nodes.png | Bin 0 -> 107451 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 create mode 100644 templates/fine-tune-llama2/assets/edit_worker_nodes.png

diff --git a/templates/fine-tune-llama2/assets/edit_worker_nodes.png b/templates/fine-tune-llama2/assets/edit_worker_nodes.png
new file mode 100644
index 0000000000000000000000000000000000000000..9bf1ea3612ae953198c27fff5794dee02b104aab
GIT binary patch
literal 107451
[binary image data omitted]
zV>oEK#!_8`S(^I?kp1!~DC+$5p2%{GSb3uiqu3np9SaK1OEkm*8+__WP|an=q6jm; zBv4`YHNZju5Jj)XtwaxMI;86%zGrhf)>CDOYr+Do+U{s#QRu@?D!g>3Ox z^+2>kb|6aCGZ`4;WeclOH!9_Iw@2d&^gTOs4y^-8B@XlhPA@jUFccFsEys;wKo0GR z!1_Ds_>w3%I^?O#+1jf_Ygh^08`m}py(%~7W5^7PhlnC6tIIOzv`z*k#X(4Uq#om9 z@F{vCLCcy20p?E`KP& z3f%_H7uMm5(`U9l^9tv1P(YaHSwBq@&?6LRG z!!Dr{Z^^DkFkO>^I<~QE`{iSU=*#cw0JtA9t+Q$+nheL@>>q)>TpkYHM~yTV$2gCN zJ?0XF&r&E=`Q?viGr5c_N>bfkAG)@M{qDXXR3*T_A}P$~OIP#_T>}I>Q_g%a@ z336Xa#4gn$i(BIG=g(PMekOiI@`))vZ~gv}Grd(b^;ER+oKIUdbq=c##)X_Gpu=p~5(;$v=@45Ukn zdyZOfx8L+?x@*LltaSwNa&H407Hf{qmF_YLS}-6vp54G9iS?t-CaPsly&~b)0OD_K zqF+3mzI9mfh$nFdX?U1i-A+Pd*cRev*KImF<%XY9m9+^Y;WceG4X5yXcGB()OXdj3 z^KWchjc!%i^&G6NP*RykQg3L^-BSXcJhOzXs}mO(u<)+`NzWyf;( znNqyps^%vO$o#6IHP3v&2nEi1pjyBBGssKf&D4A(P~uK_DLv|eXO0h4BjJ^9$=rqB z7A$UyaUcZ1;!+ofAnc=gL^R=Gqt0C(E~pDWKREhgZc+6Gb>{;myaSK~!Ha`JN31Vg z9>D>G3k>H<{VdV0gLutd%EO)>Oyq=pC*=5qei<;ujPl~uB7u-jYjT|WqX+8wT*ch5 zG%!|N!2~bve=oP9emDsOSkd|S7WEc)a+97O*_Ye%UP-J=iit3SqD&zV`NU&IndhUK zTRHQhK>eYETw!a#0|34DG4yhc=^4m;P@1dt%uA!QNVE1wX^`sv0$Hwc1cIunb}^W- zEV%_w|4>G-q+*=!ZHB~v_Hl7X#Azc!aR#K*mz#gtt#$Y|5XAD&YGyQ*VVdp$-f({e zG2hQ?&+mE+Wxwx0lD0g>xc7@d9D&2vg={klvhN?9w~~Dj{&rG*5y|pbpI{Jb(3OyI z;-HwwX?%|-!yfe}nKIf*QBfiO`%7rWcL-DUh0Zvy%JYu1pn!7r_CC*;_rcw6qmnMyS5mpbx{#exA$3bJ0);asU{OfQPEP^=6R9p$UcA)QvEZ3q*%aX z!sv-3{+N~|Lw@N>g{DvbpY5%Su-yLf^~X$Q5&!Cd|ECuLa^BJXU=ig%%vdl8kO38a z2B`mSW@Y69jwIecok9N93bTX(?G8JmQtDqdZ%7t6vfa|!#QX0dS`47$7B1f^`rn`L z2;hIQ|9wjTPgjXd0(;&#=0%gm!1=E@oFWgxo0mUQASD67HneK35{vbk=WWv+w}znrde14>O1(}X*Y;-!KZMC@60vI)a zi6oxBSibl!wfu}0n6LyY8!57B#Frqwv)CeIzyNXBJ$X&1^t)UoUqKGor%40EpI_R} zpNb7XQb}){CF-q#xF7QzmiaC=6+V}0C5of>r&vlY9z1mVrB-c~<9$v5y7TowgMisU zc+65pQZnl4@%|;;4YRtKt{fThbjH{!iA{@GywqT2>x_(7j|b9o-m5+21Ei|BNAQLT8-pZ<2)ecD9a#*mg>jwb7;%Wmveb38SwKRnz+sn7Nm$Ui^Me8v_ zR)3gvXK5&fI{+Wmf=(VflqC$7^{sDW)pj2qr8q$2sTtE!{w*h#WRg(CcHGF@8 za^8oACqQ=HcPOWK9Ynh zj^E>NiZU2Jr}>TB^!tF6R_jGT-Wd2$lcQAp#P`Axo(Kjx_!Xlox&rbi5|nO$gEj7>>4fdHrj&?`4I9j4!?hJizCO(It>i{ec)~2t! zd&ZYPtRk8LUH}L&3lql2yG(IfW9eG`Eq>_tsSt!b>YcYuY^2bm&K>{6V75>$$ZOc8 zX^Xl)U8slZ*2;1^pFC3V!3W6pIAKsLQ5$Og(P@!j($;ppwN$5%ZXVp7$3kuF3c|m8 zI@yq~(WiK7IdzFJTSQFTcR_eHGfFt{3f!Eowk|7JOY8Wtz8;%SNkh9ZO0D99>s+~c zm6ZyqMjd22T%z*re<2vv+_a8$qGxEnUwFX92D$>tbpziC*s7h8wf4=@J#nk+0Wf$F zfIFS*BZZzcY@cs7&^22HZ)dH>;H}^S(TFYhkem2`%txM8gF2HRbL_`` zj7}x_>%BvOn1B#+^Zl}^@gD! zQv|>QG_CuQ$JViv2E(eS+x3}3+89ZiCBG&3NC9oD*#y9#c?+~+jR&jy%d!F!$@#ug z@9e`|uKa^X?zSe!qi4{?s4bvo3lorug<}7BNjfEcuTf66i;-9%$nj-N*zM|oE|&gn z^0sV)IoDXN#dIO>ecg&z4B$<}(pzj1m=uN1ihjmfMME-H0x{Q%()qPjZm??~CC!vj zQB$X@Rln%D@h*4hBRNr@DsW;)JK>~pfyQC}vsKT#Fv2H+%)VvMd*j1~S6bwam$_df za1$2zsISPBHh3kahH16E7`@{Jco0F_vP=`a$pj9+)GV2bP;C+ zXd~oADVDb$iPa0R8M?#X^7WOil=thu&ix^*0Af>mve_t9{Xu9y{Sen%gro)61UihWo9C?39 z$G>-5sCP7LCg!kEQP`5kQ+aORK@Pi_Sk`yY z2}`GSp!%j$lZe?6B;o`qcwf;kWcAD*JA2P6wK4>*+0TOxn!#!`ju9)S)C^Xa`_%b! 
zA4b@WG9Lsk!M1z%UD zEZ;0XlupzOzp!dBceuY7XE44lnq@OWv$90H=EAbS203?JE>I-{Bcct_YmTRXROp!z z$SWOhW#w~*{0buwv&dsb0nEh7f_P;x;W}iXBmL+Q#QXiWAoG@G zm+JoJR8i(g!oGfO?e#AxXc-n8gp6+*&oZNx!N|AJ=-m5*fJ}d+G55Ya2PGP&{*E6L zCN>f(4H-66Aa*!rK8pCs3feU8d|zpc(0fb#`Em+57@E_ybkAa;Y1Q9BUl4PX-t>Zp z`m$f7bxyji<%jSWcvNHQAvOC2Yb~z)y)JZ3K6$z1m}5nllB-gfS4%q%d8PnF2}p+yHpAQUsUMI7$FkQ=s44LUx3Grd?-q zsIo*!-098K`cSFXXD|&J&kmAk;c5F$;kc~V{J`g%R0-(9rc^#mA0%nlkV11%Dc8a- z0mNBpX8ViHpSFH%7DI<3DKgWqDg9wi?jKW8{O0WN9Uk^}`rW|LauwnkP|I1262nRG zPtMi@?zY3Ib$>!}E);THyxfR8D>-f^Ki^&0&6LEkGO#w^b)K8bfVPi`MD64{FSZ8` zPN!r9Vh~`uV?5pVnc)%L{m^0A#7W(6M6BxrnAX(wy^F%hudfeCoM;!Ti4U!T9;ugL zSD;#-&?)`Bjw~Sv1dRfQ9CoU0v0N^5M5eGO5FH5Wb?EwmNR5~kLI4Kj(l|f^08qCT zkMr=mN}=419^w8@{d_RNC8l8hb>d2^Np|FwAE3;MdU z0-&#xULocDvFg1wKWpmq&s=*}_IElZAZE4ytNyN>!MwRm+ymA2I)`Wt1&Gre-2Bo* zpW6>~J#6+NR;1ZgVL+jaupj!Z@!wF3)VEvJm+CgoR}o*60ts{_!tb=ICF9=f(%Z>g z}Vu^mX)i>_8 z3)Ozd%;>}9;9*<5Qmz;vkx-bYKuHAS^@_61gxio&!IzutY3d28B7vEoK;UNfk=D+W9FZ=$y?KYQ7qf`&>VP%+Wr@D+qU9gup^6p*rq;kBc1rL*ZNqB!{9IR zJLfT+dJab$=uFH|s_*SQfCL0wA$F1eW9=pv_)u}7kg|XOwTLl{fc*r%o1N$ z$|yDbzJGH~O&ak&7Lhk*@L3b4#Fz6i^d}H7<%~kREAZfDuD;y%UYVSHc+FLiiEL>Q zX<{;OH{WrM1PU0E`dYNs|3nX!A5&d_WwGZJw1Y03S9#?`tA{fXTSe2~D)kIW;0s1> z73Qg9x-$xKYPzJZuNZbF*#ZlLGBKFF%QAtFfAcy01xDA@B!bA(LSCHr-Pc9fN0{w) zclVpFC}btT*=kwQP!!tgPKEl6iqVqMG|aZr*bQWC_OEw`Iagkvq|$}WffX_`f)AIQ zwKG%#C=|Yvc-?#^9`;ym^CcMoo|Nsqh31ndUL}DJHgV*>--XG_SocH$R~x&!v3Ke_ z;om$knQvj)6- zqVRnW(`JHJ5ktvaIaz~KJV~ra9~^@ru9ZF7{T0vWZ(aFwzKC2RFD`XLt2%1XMi5$Y zHGqijkmc%SRQ7%AxTFgln?{cUc5)U0*KPk2#G)a85ya%Wfmt!$6mX5v>_fd%rl{_7 zLiim;stm5Rs9wbI=XA_7G)`|s%~@Dj1k>-28s+tIKn@*cbSl0z^sTHMS@IebJ&1uM zYtOKmm;DNBoD7)Elb2Yc3K<6|D?I>P{MEGQxu3*yF^#m}YndO+eACK+*dN4J@L6A99w0AlagdrShVr18<)YMBsL!5DW!8aNaStC6^1Pg zWo2s6l;i~k!rl36?;aGs{ZwfA=?mNGmLnZIA=Yte<{!feTeZ9zvq{Ar^jM`E+kK4- zJE;9pbd^8S8Hqe$ZBu}ao^Z!FIWY`ilf}4MSX{SDCUKHl9XA*Dd158FUl%j`ND(JE zn+47WMG|J?ywwP-U>x${x|2?v$Z>LVDArD#S?7ftKx&iaTA~8(q*AULssbTX}* zQr&)iH6ZW35J0bcPtP}FzfJro77Akbyp{FfEpn)oI9t6O9r)1C+<&X%7dFX z$mzZFH?aKe1y~L`S$$S-0-vEX6nh5!G0|v-@(R1rYhzr#qiYA(S#2ezL7IV3Eo8%(C@-D}TpR&o3Q9rtOvrhh^FG(^8Z_-|U(mMOEXvj&k3Ace z0b~!x6yvHprJfU*E|S-C1x*6PYNdn9hLV73BEf03Xf=A(h;BJnHfaXoe^5fdsytng z3Gsm>_9IR z`34jNQCAglB%sjsw`Wfhq8Ef;kF{Y|@HY6q8Vy9hm)l~ZCN4!=@!-3J7N-$5()A48^KLobGP z$lmSy1MCblf5UME%*_XpZvp0y^*d?jyV!=jA|Za_RxitSZDA96}$t(lV8lPfJo}K+7fJf zD}3sQju7~MmGX83;oDX=dHUX#;ZRf;sm0e7S{#$xX|Cm&cE0gETy-51dswHbXX#Xw z&b%x1Kuxt$t#_?5&$3VFXAEw*Bm*s-C`g~G>}vwo_SzVbHzwiqef>3NpHO%dcs^v3Cz)O z0~+@Luq-Cs>rTpaH0>R*Ga1w6?6fNfO&8(Kmx*bFV{|}eqAIikGC#SlV6nQ!ayKeN z9HZt-69a~lE-r=dkQ`K1Z<^OSOEW)A+^2Rzbwn*BlP(fGq%gnQ`XfS8BK+k}gspJ- zSozE_nJ*5SqB#Z!KLHG5`V%%bT?+aql9mMWlX$nA_WLOeqs>o54|QqB_x>Iy;|j#k z#B9MQr!@FUzn_w39Nc@2w?_066(*P;ZqFEOVc*RI$X@+r?pVvYA7>9pGg`O|Vai=W zQ?4R#>8E+?uDskOOt&DrUcFLrh7K7)$i6R*xkVss*roo4)`{ftA=z6 zzUBy^#Tu*ARhDk%1)Z}}qt+M|pe9b*o}jA2dZg67kYJKiw0GXm&8SD5e1|YK)OWcjfH)@piAxq)L(XbGh_A zHIEuLIGg(_UB2BM0V_yZ@}=FLGa)_L9gYOQkA>z3TA3?7TBF=$;he&hR@&?qg~FL}MfANhuLZO&ok8!!I4MvtUeAJ_x-!Dh%& z(KBGKQ&8v`Rb+|r6SXMs&#w|f0fvs>04Y|C)^Okw+OuJ&cn>o0^+c- z5Rs#kPSgD^3Dh#ajJQ>$leP|3)ZzuaJc%@T!tfXhg~prSYlImofB5n7oWFg3BuW(A z{yy|2Y?UDXKcM&Y?=@-GjcDUhz<$5fv1_0IKQ|6Acpb} zMg@aGyPvMeLGx$ZbVIzxKYaB0w6tGeqxfM%i@6;KUo0MiQgaeLK&Vv8- zq|Oq6ws`Z1-4eT+dBa#k`b(sYnW!==sT0 zrPfQevI5T&MHaKAx{$d%5?SyP9130mDLlF&fDnatxR)mJ zxJS_I{Eo7iD)+U}XicM6%<@;aA9XFr@2oP6Oe|6>Q#;>bQaQ(E#5k>&RV~w#oz7R2 zoi6kCp|2ekB(eNBO31&de%UE6s*?N-=+b%LLT0h{t6zB<7$udpoygQJjjisyMWR(< z2*(zdP8G!p`XTx8k8?87uI_ZRk~I%Tf?e6KRp7c)Vxlz@mhi?v$O$1?>sJs zR{+#{?s2`Z9@`N}Re+oY6@X@GcoB3g{*-5$%^90U5b-GK0`3jALcg%lTr~yrLF5m} 
zITL)p=vSlKM+?BeXeeIjF4U=itbBK##NxQk`0nMVUreuBysJvJ&@`M}m%roQT4+~A zBw(5XvFots!BuEw9LC>3I|L%8(jO)2pW3!2`X2pZDZ=l^34JMacC)zmEJw<(HfT2M< zWv=Jj7JxdWo`C)aNK0q{WD`}OKS6Bo{eYhDkccKCqXq!FZVs2Wq-_gf()PgyqTP!& zUyJC!O9PzITrE!9TsWC*c+J*GBrKXDGripsPV4!f7`gq*dHW70RBtWPUp**=<$WBy zj|KGCJh!dzy6KbG*-k$QISPx6B5lg%iS2*;W%!KUd_Eb~GN|v4EnvfOPH0&7?6AD} zqddyHUf@yhP$;z_K97QMid1$rU$cL4>)JHyXt7Xxn5OzcqekFCp&1pO3XI-dY}}7| z_l^Xe=^WubVAHDZcZ*(*`b$CVT@BvpZok`Zf^70*o=`A*w|}@i2rK(?>c5YlX4QmW z!;_!mg)2(*nPru*LcyqC>(PB_8<5p2>vwOq=V7T)v|`gWKLAc;T!r8OK7qveQ!aS|3o2`q&+ zOAT^7Y^U4FwuL~bJL4L)P!)-g6xny!IL2Ff&#`MP&o6c^D=uc}s+Go4I|{vz0(#9) z+l>%?y6exaPmqQpCYQKdS(!v$Stg5_>I(fI`L|yT?>%o2-{C%dl$}CmY4KR}_PKL^ zfITdzzYCRCUFbv$p?_RL|2FNz)H<_JZZ4o4 z2h%C1rBh+3f%HA^zdfa9B<@CN+kD5A=iU~buU6Y};t?f%O0f4Xc>DY=JOs3i8T=0X zF^LFlFjT1#{KlHp+@=$T$PnJPV16^4L$h;%Ch5>1uRpm^oB#7u0ga1_M702FjK%sd zy1U7z_)y-1yNBG<)tj|mayOQblQ1qTKV6&s!&(8vl5cKW zGYwWr4(h#KFa>^Bn7(a2R-};HVWX}2JBPq0L7n*P`yc~(d{CQo?EHTJ8N>4ux{$eI zpmn;-m8MrWW>Vzp_2EKQ((i|LN`==+w4aspMim-=CAQV7Onsi+;SyLKEStI_=ELUS zpVf0t?6Sr^XDHn1Z1fL8v&skWH}N~4PnRe3+cT=s( zuFeWvhls!0FX{WxelWshH6=Gf|1(#)4vs#k`7JyP9eiM-9X_o37*RU0*J;k$= zl1<3|C1WG6Rw~N0GVsq2WjP)(TRawxV7AO3!d_TDZx;E3Be@S0R+i=@+awxzpw@(tfOWk-W$x*AJHP&6|x}L5skLpa~=z^2kuM8R1F%=5(DQ5%? z=PI`trt*b)A%&*I;ytKJVrL%Ef{*Lx^~x9?ih95oc8?~(p~rZHN}>z?#5ree0hxTlFyq+Aro z_Qn^oKU=2WOX^;oM>9)@KlX9HTllTpqCyD z3r(#8oGE7m_VWv0eSlD}=go={G6X|(;>>#s?x=fL4eu5 zeu`T0u^i$sgqT+Q4#)Y=rK>E{&s-!T@7Lzr{1>a7dSd)LCW3Ne!$g7n9u9;m*Nb>Ps)1iZaOToNSH_ROc0Qj z7W)3W_6aAt%)67GD9b1XH(P$=eMPYE3(ANfu3DbL(D$Wo{8$2X?r8C6(qY)E?9N>S zoof~>SJUeHdl!f&29Ll5KP1%^i8&600WEp8i$+hp#wCu zwqPzd7^$KqRRzm(FcRIBdL5vW@)bC%Of+N{2C!w6r<+g ziV^;EnM3;zy1|~jyBR4<B;RTe-E*7ximdfwf5YS3n1wvs=bB2QFJt*kFG+N(5acz-R>GbcC(rI^gB;p;a)znv4^gWNRFbN+ntQE))<-njmxj^ zTUMKhnoDxC&^CNSyNRK<2Msq3KiD>=Z2lI(T9Sw^Y#SZ>2p+%P$^ZDVMxQuEzZ@yy zUX)@Mqe%tltt_+`mZ<=jz|=PlIxHrWxaYHlV=WDGfsVvfd5)^db+&9cY)rdEC`n^V z*)EKD{|opp4UHQSeAOo(4CN3rj8Q+`Ejo^>0@ijjuD#IXMa+nFtPC-uG)M%4wt)ue zFz%i9hK;>*C~m)(yRc(D60_}l(RVjKS!f4PhUo^s)o zHmQyXn!6IxcJ~Pt-UU^oyaK%>yp1+;c>KVxGOEuL&PhgIqe_pWYYcj;3R)k6-(y+& zn_ZjHh{Ac(B=NwduW3;$bYHK=B9`P6XqsY|@AbD{w)q6d zmEoD*?REzqtmp+Y&XZmZpLrQ@<8-}54kMt6>{b}b9#XR`vi2E(!s`OBcbUP0>dS{ibsNJusgh75dZA0Gdq6N^XyFg;dVvFl;lbfs-f$8Zrq-u8uS{MAc=49?iPDf#$WLi8 zVmTAmij}mrw@)#6973Y!+xNE3gqPPN`^~9b)=eil^M$GQo0swypA))EfHG zWKe6b1|fEp)Z4E&at2Z5jc3B@{TLtQl+rV3NCK01>o-CgxC6-`kC5bs5Rz{JOi^o} zqbT6AX*0y=zYKnX4j^%42B$N7>9N8yP2%Z$WX27)r2(UBOB#04+*8sVm2+jrm-E*; zA7hX@i~@c$7)t~#fj7b%@bp>l+bXFrV11J~Dlyqgf7tBfW75SX{wBU~C{Urhm)won zKwa^>KoF8jqtGxyVkNzv1us3~>qZ?4v8}#K8XS7~2%9$BLTPz--mE{FRxyuBW4|9h z%LD2vYMO0QnOZBw6Pahea1)h@PA3=adeSB{8COB>Ji&GSj6Vmx5!*i2*}ud_&s5vb z+bL$?JqXk6V4^0k-%81Tjl)gY`Pny;O@AeCVgu7g;GyL{oNqK zbqIhvLyqx33Zd-P@zvnV&H-Q-Xs>B3g!jaz$$QEeQVL?5?hF*-{U$km$$3`yKJ+hox@Z`{8!2@9}|2=PCp zFR?+f@-w(x{O;#+iPcvHq=6zE;}5+s21OTBV({BXx=a`Jan{R>1Z-mo=C#Z#%D3iu zOd90|iW$vR%C1Dk4Bm*Vf_}#1C+|g0*sUEhjw2H8kq3Gko-;sQs4cfPq{M8~vg^LYGHI{&!z=?0Et&evHM9gF_fua4Vn1w$vXnWd5B_PNlU8Hk9xWt`x89@fl4RA9Kt zXLVh4mA=^dt-7970zK3PrMgv%&yvM^CSae0j7y*~s1Oa#-p-lqCg3;fU@nWXd zIczN_{C>Q?jxdw1ejmpGhF$Gu*HhD30gIfo4v`8_Gf)Z*7&U8fD!@bc+KEYyxE_oj zre+Q1Y~U3)RC5zVs?1y$Z?-W|IZKSi*^|&Bk{XjqMw1ViL1eDL5G*7N3u=La5R{MstiWap*yNWs(U4{UltSdp_! 
zdrO1%lVUAdXIs~kTr$@VUCK>k_o@Lm6c^l$MZLpP?g~nZLGG=KE2D=Fxi)%j$>(8Z zO{F$XSD&Vf&9Piq9*JS@1%XPN*9(=6nan|tpvgb|sDC^F4O(@NW0YYkvlNeKhWX@@ z)xYC`S;JW{N4a)G>^(O)(%{sgGBb!Nkp9Es_SOslfK*N9#~9jd)1WB8*OdLl?~NZ51VE zGaVX}bpZO<$5ko*@GFYy0w1IO4>H6KedLh!uxuuukNbu^n35d6F;vTNwjn1o7hadI zoZIWoiaOB*G!7}(E#fYU;c+J>|H%TlR|Bz_9hvh&JS=F;;Te8d<6ZR#xuW=3F>D(4 zRy4P%Y;pdMykOKU8BW>_YIF z!l4{E1s*RZfsx{+pYHzr@X7!%Dqb&S7L}V$pAPQVVYu%cAfgd8h}FgiK&d&qu}6*u zffSX^G~Z%g8osR}j(h8CjFjPefOa~;e|q7FJgl~R^!ya`=u}>lnuqw!=_|y}uog1j z6eI+$l95rZ{Xm!Uxk)$QVd58^A_y7gLN1AQdTzeav(k_}m4E~bpKtJDvwa=7S62{> zY6jyqUXPGyeasx>iuOqQn0|ya#y+mtGR+4jZbb(+p0?3A+-%b%P<7ZHGo6%7{_#VDo?forfg_796q~ z0s&xQr*AqLxbJoHi#k|0of@{2EL9>&1fokx?%9DF{LoX2?y%y|D3D=G&EID~@n#d5 zDhd`F%ufO+P{^9KQac{u>byw{bQDJDXVR zsGT-sUaw?-_lZZmUuVbcdj$;+II7PMVsC!T>Mif=BNOLe>wd-bo>-ord^;FwSBy)i zr@a%Ly7a!K&BbLT1}_Xq!YX*3-J758Ov3BE5N6N^h~5(`zPsP{@zgVJ+2&Rceh+8$ziMtZM~n$^MY?)UC&$FU3P?LA2r}K z-;eo1PZZY~+}vRb?js9!4`bI}%~TZW8Asi)4?Ju-1sh`dq-fBc%Dv1s+!#_T)F7u{ z<-3{>VG@g$1*b7zs3dw`a}Lw!iCTQZ{Q@0J=wA#pq{~8ab-%G~Rt_r(I%?&UcMl9m zriujmg~()JSy{tH66ByF17Igop&W$TmEn%9mUwF>JzeU(UmO+;L}<4RQ2S6g5b%g8 ztjy)sf*Xx5(#1FSw;}U0%V%BnZ$Ia+uTq!>SbEWI8Y%zyK0dIt8Q@nh5g>p66OINB zM8o#1|7+h+2}5^V{u3__ylXUYAeM?5_dj!pUK*B=Y4u?K=j)@Wppo;bd@F$TuYcZF zLHLyL?->2R@?5C^ujwt)_wIjtnE9n9RJ&uX83%<#1H$|^NNCk!!G-$y$U!D$!Nf+`tPN9S-}4< zm%`wW5W|#I-7CsqkPO)r$YT2L5bkE`Z0r%Sc&=PmjKWKr=da<{hFHw&{T@B;AM*)hxt$@UStG_c9pb<~Dm23~{=* zkILdgYJWjacp&lie?BEz0A0lxYRy+TYE zOVp*0^jl#?cN|yK_q*E6Fu07`&8m+Nvks#I-NzML0ho9Amg?_fPi&W~Dxu7t_-zuk zYE6@uDUP^)A5&R!#y>K|s`S>DrdZ)*GNFI2{L+o9duwEsQ-2mDF21MeRk902myU8p-d3M7bE)BaSjyrgA*bPhBX8Cb z7W=(bFb%%o=_B{;>j`8|-**dN3zpMImH1r5ru=e%iFoO_`DyW7DA)rwiVE>UJe--< zBs0JK+%5*T3GRJTdA&V9^lIg*C^|3O*G$ExqAOSv#>xlFdf`B(Z37)`39d&%C^D|3 z-PEn#r=pd`RqSx>!N?m1#KR9MtwcP?mz`RdU76Elki}WO4lqM)QU3VWK3Y_Pokd-q zgyc)Xy4X~r#Nn~k+IePJ>YfJHm$dt>vgWiR6#2qKY@uRvIMsR``$yvR8#zkfXor$i zJIT1H_gcTAAXBwkTA~DO)-}A3X+l%i*_9+z=FL_0e|Vbso9~zSr;i%+rzt&i>4Rkm zk$&+(2fqcUeJ6Nr(sIuB0DHCs1J{L@s-f-44))D`Ol@+u!j*^iS7m&|{-A#-C7cLT z7_TP5UbKtNq??tWblcx=vH2^uP&e$8{l0_R=Eq$mkK3Cc7B^iukTxw76;<;O1Ge`M zKPabK)|~@HI3E~>aj|xar-$d+aBUP8z8Vxa^S_uNeV=E|xu6nbk5igl}h zXWGm+D7V^E;?l=?UhPwuod!^}Ey6>8T_;yvs=96V!J?(gv?k+IQ`MhImkwbECkmR-%0E0d0g zhV-vs%P79`>pftB=6XLl_WPM6t7_JdbCqbTaTF+v$I@;e!ik>TDIrA#w8tn`eUhru z+;KdLmbF;+PQGw~9pvQ8D6SfkF=zzVZ&H6SevYQ#2Hnx)OPs|l|KNQfe!38K>6Gmi z7>ZnB-kM?7q(ZyVzVTqv3o=H%g>yq===ijhe~qUX{YV9_)q8dN&|hm*V(SQ{4*QLw z?|HPK=(5GnafhGp#5HeRvrC%@|Vr|2eB zmOqt>)vdV%-{aJ3Dj%ud2+Txf+{v(q=F9L~*01aUf0x{->>C8o9N;0=8n1J-mE@ha z6+AwB%pf8SJ&ucADd&OIP7dOSlUa1Z!5D0k?*e*H5}y3VnKtwlD48@(ktFBrIKC2SB;7vvt^=H z?pCe1LxQ&;ofPv}H7hd7J_1Z7?IQMzT>3x+eR}M{r!FvWmec_=Wqo2N;HWDNXH(mJ zwMBH9!=BnsQlFLBaKGMjWoYKs5-c9G58s*-03+`hm*bYzdmD~zNcNmqqvk>neO*cn zFEL-x_5#WO@^+=mDLN5A|q8-jo*O1A2)v> zA<=>HZC&?Bi4ja^eHHxoM+(bE<%{*Ywgs>HN5iIO4jT{XFHkzBC)&4vFeGgUDd1PyPVo& zf1DczyYHXUU{_nHng=nMj26JI7|!li-*FjI-{RjMCyuWpnMlZLzGdOeP64yJd&+$4 zEO2=w?@iR@|IFi7rTetWec64iiEcD>UZuKUR}e;eJNa5(Z>w#uy0NNur-lf9HoNp$ z_G2f1SvOs3&~oPQnMIw`?e9Uyk>2lR_pX}g8LV=4u`X9$Z=PHMVi9?Ju+5lt}E zjo;{Ld?$zRA8lDY$Mo6Cs~yy=^K5NcIeSN-O*tLCc83(ruf@_^(1kpk=}mpk!9K~Lo&F7)t`-@VK`~$ z!S?6XeA3w!O_$`X#PifO3#G=v);{Oqm~6I76YqE$F^7QD1M>9U8M)$me@md9wZQ2( zyK^HLZetEHgs=HNJR}0_5Z}w;(b9Es3E>h*27j0n6dF=j7vvae*6Q&H3N|!w?MW%s zIsP(_NJ|v*E>Q5<+V*>l=g*U0-&0biJ%Z9yc8JrLM2+LGlaEf%U=KCh9Gp@EU5wjz z@+@r8mVLhOKVbFG87)uA>LSd%&xV;jB`)>8UZByLKf%ZElqKb&Tfo@x(^F$+S8lsZ z#e#{{c7M)J=Jy6~({gMEWUF4wIjdG=Y35yVof2{!b84WZ=5=G8UN_Pdh{zKa2#9Z6 zW6DPFP8@SyM0{Xkp2VKh1D)x&HisuMrL@Fbx8K)BS|M;=^Jf8-ZbZhF#GUV@SkKytN8Y6&qI9=rLVS8jUG0w 
z3u&Q9XX|UVi$rV?zdoO9m|2X{C9_i7g52xPSzYVt zDXueiY`)}(H`_P0h_Cf`dLqF`lKX#P6va{#I`!#XfRn$fZb_ZW6Ic*;by~ym>m3`k zar@jfxkM@pk#uL-`-V2Z;aT>ew<;CXz5v6r&B+aBIwS9v70DT?xga!vW(d^@dkn8~^_8h?LHgI+V;`)ZW6RJ#DN7Z@kR>)CW-L&|6uR0 zg5p}cHf%Ila1R!OG;YBqK;sbHJ;6P}Ei@Y39fCUq3j_;J;{=!B1Zmuz;F3MbS}R}G zKH4X{>i?_Wsy={%p0nrEkB)JV>n^;A0C$hAn_}4h{!Xb@*=F@no$eK^tW!eK$q@4O zydia~`&d-8MF;kq?66@5mnI51aA&UJO}Ae5Etad@00hVMcbxNHx&7wu@G7WODS8Qk z8@)3EsY?JBvCP4z-aSh>f>tFCq1;(A!H(8)77^^JpIQ5G5)1l@oYy^g7q*yPfsly_c^wzSX|?c}sUlU|pvch)ezo-=adDLc|54Q3fN4moM*lxGm+?#`w`3 zOe~@o@|s)1XswB3i$Z*d_nmkRCi!#f(;tx&CzUNni9f0zmRjVPyKlqxW~1E)hdI_} zkLLql`MVxzNUw<5V+?<`s=HT~+zHJi6>`6Fp!09vg}3wdrO&3g!!}PP3i|O*9IOmNwmFvQTV_{2BlU+z z#eP3G;`7{#diz^0nIW$=A(^@26(=`TnCcSEw865Kj?L`*?$j$$eHJ!t+lqUs)ob3A zIX|yX_1ofKPllPf7_lh7TCISVy)FBS*1l*;!4tmKn$E&#S;2nZDyjcLZm}3z#s#uQVt9t4bwP~2s$H3juDRFOqQz1 zeCzT`utHGD5ZE=x1`N7| zBMnYjUo`TdA9K1crkH+zrLP8PywxSXr<;qHgQ1VhJmm)jK4xlT%Ilg*0W? z+G=&^24*qVw`D40@j& z1#m#Z_Uj>B6!r_>>ueUy>CWlFVkqk&Q)PNgOeh+o3uoB#^_Q^9+QF0^Qxgnb4yC2Z zlUUzcTLaK$*YhP};ue(;FCwy1^FHEG^u->SpTDfbp>M28r+xJsFHD{5(1MaV|cFug>0l;=HMo=D6+4+Po$cG)xRxsm5B$kS4iCz1)8TO|wm z?B^MWZJIOle1wdhJ%+ZW-eH`4v=m`i>%9~!qqY8U$nUA&VwFN(dCdM&w@Ull+y;A; zCyM_x6vL!bsnPh5nIYmfh0M}Dt-I)2ck6?vXJhBvLrc3t96`u7HEaK@9PYQO{n>yE z-kHY1xm%b)TJrGyggzFCpvuFrY4A+&F-dzwFX?j-Wm6x~4W%qZCXa zoIqaS%gSa@NX3QKf%~nHjvXPLQU_y7H{$9`g_Z=^`=Ay*exIPrAw!dMCC-j}pD(k6 zzdt1+_*i(pSunQ&{Yv=N$uhAb?}oe8d|#Q~>HxKTj#}h^MPJ%O>^XSReXw*}HoB5z zQgIOwxn|{w(IQ8^iKRF%bIq>!RUKS;U_E0^#)WP0jHaUdbF@J|9UbP(cApR%{BiY_ z(7K{f$xQ~>4O(*qX}<}Z%^>GWL#arb6`pVd}b9 ze_)^`o^~18BR~{I8zo(qT_pS2uW5+bE9LbWT_7jMWrx62bzo)I%8li3;R9H$KE_FO zs2~}~yH9Cg`%G~(+=ceY&l#=E;VEno;X!Lx3 z(9K-nR$<~CY(hzN1>_lyF&P=(t5x^uEOsLc)!{YD+}^d?NBRqT6+4(>Cp@a;aY{M%YW4=xt3eY2dwBbdrw6 zQk|;BK`;xOTG^?=V5BvSMgCU{OI=`=E?@!zffI3ZlT< zxyu-}ZMh?Jh(qU>5bztH;3Wu-3j43`T<~bnqoDwiqBfp3+wW=i$kAQSyLB2bxK=aL z!b48xlr3Kw@P#5l;!kGIpo|`RiY_UAdp75NxI}|Cpf-cm)yVQHA4{I~OA`6O72@N( zAYvLw-w7~*HaQIEsy_uc*#-F8vR*5b47kB*+2?H@DkN`MI8osob}PoFVj(N9M#Lm_?v+lx-gG`}^K@R;U?LixNesIT zYkzAR%IG0)GJ(fI!#QjwF_CeQ+U2^a;h`vA*>xKZsR&`UPCLLkaq?R>!tV_b7|Smy z=N{yVH*#BlEirQ!U5_2Xn1jQIkQ=jX#9h@c?~DBP`hKOurU=|uN6nbUVXRz)q~ST9 ztC{~^d5U8#1qWz&v}jVOv~+~1Xw0jL$h*w$(@dWu6rR=EwM+w7Rl$TjwwiJa?+T_B zR8Fm`{UuoFz2IAh6*wH}!pm$%d*L`cxIts9=QTa}EjN6v$MyLBzFFyH8)=ewT9Cm# zEw;}>ar6tP%_Xe8*#u?x^+4lxJ~pmwvik*P*DQT_kqbXp2`uK@GnK*!o)$a@#x=~J zcCB3jouLV4aXEart4s1i|XRq$cHu9KVOh$B}>&&H&J3l{}0RxH^ z$HIfsb<~TvW$JnOjJ5=9F^LTd8lfuGCTZcKkZBpUjbBA%8RJ-`M_mn;23KHkytj*q zg}%J455i*n&Xn}>t0xk57~OO>bEaqu{7==TuU!xNrgKGYEH2#>Yw?&5(dgLS`wa6; z^D;qmy5CI~)hRD5WMAI{(UBq`I>Hazf?}TYS5R+QuVU`(3z@F0yyjJj=l%(|(dbV7 zU2(#v#Fyn?DHom?Eyje zveFVpO<}|S+9rLo=2^{Qh1hF=F)zk^EWY4+JKFfY;NV^m>(FuQ6$zey8u@!4b53{R zYWwtABJd=|PEGw@JwNM2Dm)CgW{prMCC0UbDJwT5^Tc3TFAw!ccWQ2t0Zwo-E#i23 z18L}Uw|#A2s37Y4V%}z(%ZDlXY{+G*_ZyV8wJ2dF_&xTf$JE4H25aAi57mcG8sk8E#KuM^ z6Ns7jS$sfgHrE@>9eP!6QuF`I8T3I+iMs+mn6rN&6{sReItF`kclv4BSO%- zpvoC*>eVrEM6!~)jt4&&od9-H=2WY}c0UQrO0nbcBdQwAe!j=eNaeS;qFC*bo#tJs zUX)WZbIdh(f*M1jiQlU9Mc{HAW;ECw#JaNG<^)Jhu`s}u2`<=T%sxwC{KRbC3`U-y$6HJUJsz8M zUl^8fRbr(1J^zyLDsDXEHquO;7aFttvN9ok(=2_b39IQdwJaTZIHY0m_q18&Nc+D1 zTivI*aJeyo;h}y4icMemlCJ10B>;W9YbeK>BIllmHAO*5rqbI!MT<9=Fm=@MJ00!06@h47vQDs0kSDL-A>R zoS`i(lyryvtrIS+tm?w`*`tfIa;|Yz?8dXK1}dZRP(-g-VikLTEdr^ucnoQHo&6=Dx`)-SzYq9~^uV#qD+9AL>Gwzp1|3VR ztMReAOM_#M=3ywRcXTUNLB0La)LeU_4G?#HwjB-aWPnWdz|I|M+t4QDYK!ijId|qP zfaw=vR;+x`SdriO(2wMKbW(izAfILxli?X!mSM-mHj@t-kZ;;Cbo0!J2nR*7j^ z1q!e&-M=QqRwJgslAv$ERCK_0vCr#mIDKQ6T(6ExnXMog<9NW15lSMBxwpx!)wg7a zGjV+Eb!{f-Dv|tjE`DBYZ1p9vM9VBHnsbfe>qxN|jZg|4m*r4o(%_8BGmZ@lpsTZj 
zRX+H4-!2V3-|X9G3kG#GygO_+auEOiA(A?DM-pS$Ri7qlAjNLFX7y_5PQzOqW98D< zG?4yBB<&V`*3buM&mkN{dzg16x!Z{RaXuG9i{r;uck#(S1dChu6L-UUI`RyH8b_&Ym^>Hx6~jO&dfEBV@{0zU$@M?Of9h~uN2j>wpNj^ZM!6r<+`ps z`h22a=+qh8d{16CKktxF|5-EfA#Ih>R$0+VP>X?zaCUm!MS{}zYb!c!G43eo`sm5? zC0a!{k;+B!QK@dh4tz}|yAQ4#FBcJgwtAE(eUR;QtyV>etQH&=`L0`L?dYb%ay+@A znu(ic+-n;J@6N(v3g0QCL{sF^C2jGL5pP_ioHQFUk67Vr zZ=Ms?u&>$K8w^>e2|hPA>F`Y6BS@Elh^J9cOyb`d+$8v1nb1E#4jS64W47kSoV#dZ zCE!Ez148{dM%A%E!A7;;>^jlm49Ve93tqxOo){7H?f@BI!Bten4Kd8c_*&#UW%yL7 z_E5UBR9(+es3HL%vrv8rC4rY4(b#OMQCi=B^C)z_Bue7yU;C?|K#6=3(1#!~jQQ8UJunCm zGw6)a#02~&?4V=^6cT>9o56oY6P@CKsDi~ZzE|u&RRs<)pwwtrahCWi-Y^DrwJa^$~vHv+B<|G(z>BX;>;^Zc*({ED0%~A;S@uR^2C-L*c z15oqv4fv_laqYV>?d6C;rIBA6zjI-hYM}zj6hOU(qI(xkm%lA~^tDUY0_xL&`q70V zGxi!limhDhbMFDzPoug{s!Ai(sM-Yq9$)NUR}QtVds>c2W{SepcD_n*7DeQi0FI|oYdcAY-UCu;%nPSxA}8s(w$M^8Cd zz?N!{^U(tKu}H)uX>>0rgxsU}IiFpIzm;BtgRy~q^m&44Cn7*aC?|aRrT~k79R~nW z&j!wziaoAu3^ItPP*nTJqW9r6o{1spsOO(yUTc}YhCbIljMeWZ=#cD{z%@a-Ay{Oq z1Y1u3t%h)T$F#9W@MxYEh?r@ZlzAp|3yscGch_h6;j{is@BPj<0J`b~j7j@z89+6# zeYryO-x>*QQ=}=&5ubfnym;nL7M`qgnr2xM0b%4O=u`rqNNN_VfIK;h{!gGu$q4@r zloKs{W2M;wafGggp)rK!0FScmIu;mG2o|Z?<6dHItIqg~4-thwL0R`?1MeG^sC}M{ zlO`{LDxCUe2@Os12J2?91= zoJmhSRz3#^olmN7V;2ARxmrOU)yMN&PHenf@}!<9RCI2IIrfre?d>l?1*x#*^p))P zv&;$E3|{a9Z=qhDa@ys(^&bV0@sb|ryBm#PQyHDY%3h|(4AQt~;*7W~%uW;@~G{?eFEn1T%$mC?n`~ z)<+?!-}!qRLsrtdy_^+O&KI*!Mut)8GjLLcy_-CFd>*CDekwS=MN~j~f^H=0enjx- z*Fq7I0i=VG%Vk?d|2qouiH!SK1Y8qxJiuD8PwQ!(f(N{S`8uWm)lw*mWXP?bCxHEs z0)}*4Io42R5K4E_93A;cvEkBT{xsLdJ>p~{qdL_p+1Pn#FYPJd<0xHj-}-B3f}UzP zd*lMZRuIOy;NYGJNYTW^Z~^du8{CuF)Ts>R8j7nspkUzzF6s*1buASR!)z-*IoRqR zk(G&nO|`)4N1p12qg}Tkv7ax&QEH8Xn(V9F6`m8o6H}Uuv>Ro&v-M~1Q^)7l**3-j zYrZOWQ>CA%m4>UrF4<3k0!;SRQK}{f4VTXh&?x)&L<7jv@f%??N<+0 zhhdFwom=;H zmis1&^zi}E2XVs!xSChJ#EsHyWm~KIkSV;DGfXoQPS$#*M?`*m6)7aM>SwqnWL6?Q z>n4_D7$LiuO5wJCP4*jLDVh;Vc^97k>~8Gj=X&A5I_w%2wW=r2mEZb7bp*wt4VLR| zciibm^mKlL8-O67_|RK;0`p;*&1@xT_v$L>@odMLDGCU3d;>#}`Sek6+?>(6p9JD3 z0bsTrEVmZabkcKQIE0LAi`7W3k+H!O_75UPKQOGOgJ!0&DY&dHf_R*0Z#fU8cc{s5V zzr3vHw1d<_mWt}C%{h290}eetgWx^n z8QePhDm?=!s*)C7$FDY#62hU?N}h^83wZ1I@O` z0O=-cBpth~B5s`MF$&nA4tnyPR7vP?ub(yK9po^wxC*JMS+DG<&R1MPlcjmtA3fvD zrlq9Ni>X2}gZgaM&pE`Ic+dfp_`(1lEsL&3Pr#wORE|wT{nbF1gv_ILj2}C$OxbPx z0$c!lbI1#ii&={;kN79%mT@gR%)e~>=gg1w(=4BPh;=ONpaXcMLeF61xzEW4r6C#} z-?A5dZV!}{!6zQq1QR>NCg&b!Z*lv@I1-}}F`3L<83#6{o91r99zEnpqirF^ z6FXINb$8oUK0jxquEs9@Nb_VpC)u)|0d}ce=TRX(3tblOHLwYbTo_x#+pI?iIT=cI zlme;e2ngtl#Xrhl=wBX8BIww4$03$5-OW7}3pzZNNkN4(`958<=YY=OV$NrADw}#_ zkUd^jto}U5TDU1OU%mww?t{AvdM|A5t zNf21+7*Qq}l4&jD@to;SEK;s4e8d=ugRWz{&|_3NRCqlq+!CkZJ5a4}L6EY3vdNpb+)^GtEltR?ZiIv2i(Z-e38eSU z%nOl$^^2e1M&rB>@9JW#pODMnZMF<8)k`AQ>mwsRGk9k8xU%3pg?~2{h@A6Xo#vom zfO}20mE>OZTk`yIoW>7sMPg+FFp3U#aG{tPYdt2myD1K7Sc?9Lfk>RI>8Gk5`~i?- z(LD1?eQP()4q_T!!Iy%qEOm^2_P7RhlQ+NKwsj}-54O;?V;j^#=!6C{s0!cprGoIs z1C;6UgXs4G7lQ~qWt?|-+W9ozVncN5U4*Orao*MS_JxXMo1WF#GIyzl7DvtA!#zJK znlqrS-qHKcTiQer`DG}~q)2w5UNjQvx}W>Iog?n!6tx~I`qMoJM}!7$@(sI~D9F0@ zga$Nta?(qYeIOc_`qsbu)2~kFfwag?1{=v#Xx?I=f1yZoNe8Q?a()H}uy0~3%0A|# z6#V6VxmZ+?4kaXeW!HTnqx899YIS7}VCc}gSG!y|n3+pY+sWeQ(D>?!{GyQxh`w&x z``QfN3s^oKd}3c=#D^-jJo~gGm}MWQjU7Td)*V93BTJMUl!04)taME>D;KY^xSrf6 z>^_Bn`-FoMishQX+O5Z#6xqv^>VeN_ZTPkk%6s4(=u9+fjkeT4c!zPmvd})3ksR4x zi_~g(|IbMvRh;r+f`6G>>3K41+4d9W{pqV~Blxhnd8;=p)onqGkG(2d`n_=~%SNLtae zM>rLJbHOMZ2iq*VmqmGfB7>US7z9)g7^j+SURAaI^IT0W26Q&84NoJV)NQ4|^MoDe znfk8`JNf)E78V2Hv<=fy$fTr$ms%Ysegl5XzYHKfBB_ft$zl#9CqYjfZwu`Q=7lzZ zQ>m%Nt&fQfxF43EY`V~Dw$$Ry{0ZsUE0qqvd)Vuwhe%+XWUoCxG2!HF=z=nlCO4Ps zHKj+}oLGBd#Ivxw!052|8BmYYc?IzawvaW&NjSkc7h?cn 
z4e@p-b&R=~!T&ng*b&HloHw#B8qWkTATz$k;pZzGpC6;eqGGhM)9`5YNIM8)7zi~| z@ma?A0XQ7?UH7+_GXbAXs&GxURE5H-3opqe!*TTvoi>|ka$Uv|Wj5b)32tP^*Xl;l zK@yDOt<;cBn)wjf;JT+DVZx7q_siAdqe5nWi?4=l!tYb>H-IyrDo(ghaEB8>`A~;8 z7(hK#{jN7(7_aytlW%JWWyN&F9h@F^J|d2&RVV=5K#HQ=@vA&~D)7sAWR_AU?tteK zX(2kn_Z_0c@r!z%P?Jx?d*;A?;70^*fLNEq;qu%su8dp25OLPNDk~;M;Oz7maEI8T zTh(EHbcdIV_uutP!p%_rvowz3#3DQXOa%{R`Vc~l-t=`r&4|d7udG;l;Bzc9Tt%p1?ULI9RUI}tn6J(BO zMsF=qjPtcywQN~{`vp4-9v7h|LA*>jvEwc-2Vw2PgTJxykT9sF=*X$+?;*36gI~6I`j&a>avwyZOJERMJ#A&x6YUt)463FgSaD>9zrZX%ABdI z55~ehspdCt`R>kj>sR%wTmAFkhr`UGsq zSmy&)c>*KXy_}+FpraRV5X=_vlM5dS6&bejlyl=U!G6DuFaq-I2)5Q&n>RSQ$J?oJ zLHCHyc%S6b^uIKn4lzy4N^M<|m_+zt?FqGEz2JW8@BjopD&?YCrr?=0XXvn=ZVg4M zTT#>qMLEYzg zkHUy06Wkecbm?#dwpJJ2U{Nx8VwU>q$gk6rLF``0c|7OoAxcb0&vui(ZbjEWT}3b; zt_Liw*{J;H=Pt;w+4@7tI&!*vHka%nOJmPM)_5_FQL%{-Gqle>H`|4M4$tYIGN71< zAE2bFvS@-(!J{G3ih@tue&4fE@ki$EBA`!#_&-w-W4y}hNC@so947XLyPkrS3cV%L6NH{v%S1d?`5QX_m^MsZ41ZT z6IQ5=Plahq&mbrfha=B5;s$1+CY4&_LpkE`(YBCl8p(c zC>2@uzhqsk9FPkuDd=x{-0}T+p&&LP3zq)gv5&n7o|D`EJe<+Amdb&xL z!@govV|ug*D0_{HzsJ|B!wAf1RDuj|!+>G?9uZj_ib@UiJ82}1Zhuh<;UZ8^3ThRx>A?BGycp{7ZtfhG&>lk;=MLv)>%aF9twdZl^3(FQ@cqz-El&y;WhN98g`OGHITUfFK;1}==%d}3~%h2m~JaN z1I8%4M7+YLJKF9y=YKKgG&orBV_0K2?D>rTud2O>RQhfaC4wFQENTDtQ2BN%lzhf` zeG&SNGvFp3Q?hhbc5}qUFN?H#u@DU7Tnlo zIpkMhB+Zzk_9Ra8-zi0O7WFEOw(Z(@OALJj|n1ez-B> zkE0BTN3PaS|7DoSP}ULFq z_lG1QLkScpgcb3@ywvB&>6>NQx^*%n@_n6D3hYb{7aDitDl3UoYrNa zhR;+`GoKSaN>TjaWxEkfW%?uvB0CC z=uh9imIeOKe}goFzylcMJyO`G3S{vgq3(*-n?tDpW!$(FIBxCsQO9WXL=eT~HCWHE z0tBRwB|sK+#}!ND&OsCSP{o`VL~DMf>P68)f-@7vZqsGDO<7%7f(nmu?krb;W~TW_ zvGSM$4BEc{IX8+?Cm*h4Hp8!U3Mn}_gSF0EN=P6`Cn|lh+GjcUuYl(-?=Uv{e64kn zTrz8Uo!w$7;D}lJn3(-pZ(k7&Bz0`|zUowogIKKrXLqzRETLa2noti{{gD?4uK+(D z&iSSb3>#D64ex=y%WgXY_T+|XCWWWI(mxaz1|e>A#6*sS(sr*)G-5U@4&~sWGm?JG z^jDs;h#$ptfgl{U({Y@(p*IgzC!dVc)Rw^fFJ+nB@qi)EmM~9F%DaA5BmTERz=4x_3kuvAK)Hwd`l2&H2|4yNj_bp zi1=!{c4D) zsp0Me`ZMbKl$L?`bohe%R^|vr>wK_h6VwZE=e=0{mWuU8`PnP%L~wDf?QEr3rsH># zpSOS+VhdgV>nODpHlpzNV>y0lK$Z&+?z513FPU4$qZ0r?zK^>wI^X^wF$x^bClo)8 z6wM>lI&H|&c8Z5xjfM3({rsl=MeF4`kp2MB&y7-00Ou&+rwf=$XB?QU0svmMSp&c{ za%zyR?L{ribTW_iwAB%Ai&$lzRK)JlG0Oe@CG6+f)E8~GTGL)ETKI{2CS0D~>9_V% zSzn&>ytj(c2gY9Bz+?8Y)a_`=gB*T;MG=7%awUAVjBp?eaPOTa1rFKp>6J8$V$P88 zL1jQLy-euMv1IOpSisYgj}O<?Iwxn$I*16oBkYcWaVX zLHTA23*I_h7^dOo@74*ayt)7&c>+|8VtE58Y>)(w7l+Wxr9Qwk+LCbR9i*^Q53&5= z*Qw|VI4osyuxI!MZtmVBNHH20S1Fq4pn8ziImyxg&zx$+=2r>H8p1^?r%#-7@bP}? z-TqoMmvP7klD-BA24I=UVDb?Ix+MzzrzQyg{k);*)HwUf6Td7r#VBRKNznJ56L{Ir zaIxc-lmLsG%=2#f!mUimYS;+avr}1A%*6{B*tjQsQU+cpQl12sZ@|j9Va(&_AA!{Q zh?l}FJP<{BP4k`Qed9I!is=RE{3&NBPuRQ@u?H$c29bjeV2EG_q^u35B!z$9ITvA_ zuy`b`DEtvM^datBG%DEE)r=aBdE~WUbV^j#Xk*be%a%!mV4Y@+*sXpZ=lZd4mbRzB zXa*A5Ofl5}hP`HyQf8uF;(K6E*i?e5=x?PZaeP^*m_Er^l#!Q?G~5*CLsk5III^<& zuu0x_p+Oasm|ZmY8pCf1$VzGDY3?|!)~ZoyxFiszAww|~Jy6DiqsRoOzl3H1Z0#+^ zu#cyD4*+d@yHIHIQP1I#gsWP*xudd_Q7_S0YNCMVAIa37*>h(@+=wU- zSi)CENRGieWI$Oqqs&m#;V6(UR0&I#l4Z|s0!YWTS7NCEbzh+up>c3)vl+7c3x|FC z;3G&r)EjI?6;X; zbRHct1Q5Ha8o>JPTxhD8GZ*vE_H9|YoluD($j^^4>(1R66-mQJ*u72zK8L_0hQ{g* zDA!C$!AX~Y6?ISKw2yk%ADvaEX-wM>Ii99XT1h zILpfsHX8|s_-g`^jE{6@*w@84w}?RB+?#yN_&96G?jq7MV#JroA?v0aLAYVAhYNGg z_hxCQElj?|0iY0d774x7lhVe)4pIw@PSzs+S_QH@n#V1LzuXf8x;G=J>t#^-t$FnO zG$4arb-0LJ5Qei;2;I^tXYF%CQ$6m574HbZA~M6v1kyp9;*e=4Q{v?&hn7}P!GP#w3RqXm5jKD1qo;LchxyLc$K}a zgXm8`ybo+C7HQin@Rd6uX~&@nB@kCnF+0+dfpY--=XXGgWBb8&SC@-!+2nRe*lv)w zHWJ~!P4NIPKf`>8gq{vCH~AoHiyXHj>;l>ez6cW^lN_T^ zx?gV+IJRBv7^!zFfA`J(0^(T8$$sqvCB~#y@4C^iQ3eFd|}u4$C*clG7V5|bH|vqRiU!! 
znxG~oD=eagy#Vo=jTtXSU>ZeBR6lJWAPLN*9X^r6zB%8?semO2+**dkV{;j=Fz!$Kfjknw>}%tZ*Rps(#R z!O^NG->1&zv9|sN>iJRtiuk*}brWGu$H6g%T_A<7)z=y&7OHqgRPE4Z9T=F5!(=CT zy`Df2??pB51{l|WqhvRyWRL7c6Gx@$wIyD^0`4}c2+`Ja3?fH)6=z_z-4T+Aa-7zI z{?wiXgZu@ztnf)U{w@Hxce7`ehT&QVQQiQ%dFF0^P!N0|e%e|rE*TX=RxTFgZ0wl; zK&>RRx4^TYk|so?06Wn+2pM?m{Q(FfMFDboXSeU@vJmWZ65yC zW(FLP#Mm4>><{3v!K9a@mz1$beTUT)Zb8oobs8LUmkhb{LAsx1O=CM1yUWp*SR0br znzpHEO|N=bVjx`W?0&}xhTW2>qGu`0PM|-_gs2q?VmbY`AKLP`pM!XdGG~(ED(WrD zXi~BpbLxGJCgI%hk3gb9g-#_wTR%g06g86WS$mHE8dh`wmHjExhKz151>8DRTpa|K zv@$s3!f3Dw_BMXfKtVh54|%aq86G%#(ci<@*?uFWmhssd$=oeWw;LB*1I{#4I~X{} zLe|qPWNYVu2ay%$@y}f8zk_@{>WOL2_(5c#*T>j5%c~J=A5L|y$qs>d`d6q1N?*~x zM_N%Ze57Bvbm$;gnrJGgxeMM9z`<55A{Mm=L$ZuebnJiR1&J$5{`#K(-y519@sa@a zC-!It*xD5V1pf*0hZfG9kQWHpF@JV5lo9~@(R@1$eZr22NkeAk-|1WwK2_Wodd#_@ z;3)F<^gJF643D1~Xoo2Oo}6Mu3xTlcKq6Pq{o@4r=Osf5oVwpRGc{2E8^HK;DR?IJ zk!P0){~k(ZC~bkNfzQJw%hCTHMuF4_8W22H;8$Nz{x2xu_YEk1_{3Y}FDU+Bgdmm? zIOPBDL;e5OQsEW@(VOQ!cCac5@7YUy%>o%*SDEzbQmL~-cW8ggIh)=$9mSMay5%~J zY6lCo@|qQIGyyBEwnw+CM~kteCVYBDt>3J=X}0bqYP6bAgx%76YYpun^xHj)8lU{v zm>S-U6ob4rv&9d@{BI6wlvQ0V)Np8kglT5c(t1 z24V*4$OBc0FV*RKZ;dKKZyqfiHir$1waN|3OEn*nixq`f0DhQ&&0vT%&Gu%rYHgA} z8&gA?{D)TN{6=K)MzH6k!Zmzdt@V_K>-(ALw{4#KeGN4w~?z0@E7;n~o1IJ@|s0Uab@f8>> zHn|h2$S1QVXv4PY5rpsc@WKKoOVkAqI4C!>AUFJvVG5D^8;GwcILmW$r&@JO>9@`3 z>4aQ)4eagFK=aW@tDkExmx@&-_=_#E=WLTR;gJrtCEAksoi_)z0$LL#K2^L$d3(6z zao`rZq{zF=0{F*-8ynZ3{SzF@;RXg%;%)Z(gX`mbxX&bTGph6ocM^w9mvE_kA6px@ zk0x!AYr$pKWjGUsqwJklr->y_m%qv5Ym@|a>vo!Izqp+CzO0{B;GJKmVE)-D6=BvH z7$Ox*JM()Xfs1Ww=5@x6;8A4Eq@pbE4sN&!e?gnJ@&(A?M|ECQ_^r>{F!m&pQ1Q!2 zY8G)E-fSOXnVXNt#!B$J1hs);;0_PB4Raz*qgGg}=b%yTjnKt97d&XDNmJhNVsBSt zfsvj(xtdLE?PSi+S>({l=i3Yl~t?}d8pMz+bO``M5`n$KoC{*)!?PAE6)C96x2n7QJCPW%p7 zf7i{Ey3bz?Il+6&vUUmR{^ersFF3A^a|e0n&PI~DqPnsZ=mvqv#zA4hC6IwggGuq) z_9#Bt?N&s&Nn_F_luu%Ht$wLfsY0iiwJIp}eqPXYQvr6bMEe!Ha!E0pqe}w4ooEN>>YNY8n-pPs`vPRzb}PIU zmsLO%{qf6Z#0SyKrE7{311Qy-t2RBzoTi#)(AZ)^RpyHR*xas6QiQ~L@%vxV(>n^G+V<5f?X~IQ zqtX-~iar<;5&2E5e_NAUg_D&&Yvi(`K11_3q$X)>A)i4a$>YC5x{#-uxvIQFVw^j z%(EHB5Hn&6-87Wdd?0Pyxv}22Xa)AW8l3P16{nxd0!o_BfY2f8Aq_{=6i~^ap*wNd zC557HuU@1$5wR5mZ%An0If>4SlWtcRMx7OlJR#Ary=pS;-leo;LeN_OZc#34_tq%r zAl0tjk#beb>a7-wa$TVL8|mCrV>IS;ZtbH}WBu*Ps(=$VaosDBVj5VtZH8|i^wmjG z$1&vF$!pTIOO1~X8Mhi>BBrS4XPG5U`t#?`!rjsc!k?6%deLPH+81l_#2vNXcIa3g zdM_LMeLj2$xkW>~U=?)bYBinEFvGsvM_&t>**cbBenF%T!v>GfWSlj;^NrE<&h&T? 
zBNB^z&ZXsZbwb~GF(8jLk?ix%FTBAM@RiiLGV1ACMJ=-nlzBH>=@)tRA@T0!_NY4b z!6J$6t(~2d#YkGYIeGl=1sCYiQC&7#w$IToz7XsD2-cs>6%;-*Jo z<#%bObx}V^8CeW`V@S2Mr>ES~Kg{}&)-=GiIijc>aSNVel0MY-M^7aw02{kc$)Yw$ zlYFq~L{$&%`6>y|IfS^AR?Mn>iLcTX{LvIE!0QlE9tSTlu9mama_g20 z@oAw-8lN?tj*!;K&JmBglVTb-p~4aPem~+AYVge#URiI_cn)m zO|YX-tCW5N0@ByZ`O1+L+6mvITua^SGty6&kSPY+zP2vtNUNY0rhdJfiLxl&J^U@i zfPB7dtcCv*VcE^;w}f}pwm`w9AHm3|W%kGZQ2MerLYG4m@nsqgyT5gi<^gve`rl2v zoUmh(%7V^L*E;xZViDw*U#%bFMJ$ zQcPeG?POB4=ZqlPz^e1ZEQYBj%Tm5~VC|Qx>1MxhyS6}H>LE#B1}m>5z2lpniUr)~ z^1bp`v8E;6Iuo~vg|O_@DC<&yqdpUxpU#bFN6bnikpYKW9{meXK&n05i{qvqVyU1X z`GV(-!w+ZrGKO34JH#4~_g`M}x*c)K&sV`I?FT90D2+m|s`(YhkP60Z`z-H2?tNpx zyHm1jUNbO#*FLE`O1}7cQ0uK+?e5h2ysGJXwC5oaN}dg#mM|3%ES{{gGglW*ZpkjK zMOHeyaE(%eX1X6r`IkuQl~qjlAf>1c@SJP;1(wjJv+MtV*!$|QD7(I2Qc7Xy?(ULq zq#FqVMLGqfK_rGo8tD#^MoJp#kd~BgB&0h8&gS-hc;7$ZoL|m*)|%xHIM>WIyRN-I z^{vsUo$w>${LzGl0l_Z)%%%%aC1k?$%$k(Fv89ES$)+xDC)L#ZavU3rVb< z3L5%wvtA6nCvR}0^;+=gAN1;q9W;Fl4MI1twW^#w6g6IB;Et%@>LxCfL$jE0{n1$3U>o-ch`82lx~*8ZAvC3U0O-%`c^)4MHoruLbdm?>rc^w$GM10bmmg+tj(-cmueA$5jy4l@=8Q!m;KcFf{6K;NIj*9Y!5(%3~%8o zu+=UsA{)*5krCktwo8)H$jWQ;IYby>^%AG@%jtk5tZ3Vir|H9R5xm38l_ z_aAH6K&z^E$F~lXt{W0GUNl0=y9>d?Az|@M6D7JMK7-q=fDeI)X1cQP?@cQ#*teFKNdA7io%1$`ida6M7)>05IadWd9_ieHBh zU9TP<2PqotQcZJ~r@ooT?BGv&tw7o8)>^iC6a9~<_03u2Aq{&f{1yi@;s&+Fcf)TQ z6zurYhmM^LT@#s4EYg0;6@MzmP7jt#l1?6Q6=BcetC(%<`X8udk@;>NKk0QZGTb~p192 zd^YgnkIAd|gZeh-US}m7T}y1MH#^Nj(gD^XNj@d2xA+%Bf~9^|k2|;9PLwrVLR8`* z&$eTIGYcU&UQNAUn(s;u8Ss?U%~NgWXgf_55^cy@WZQF@Q_F4$6ls)vBs~#6Cs})(#M9zk}!=~vhlTTbK2>m|$Ea=HOuIg4_{tU(s-S;2VvH$`%|}>5>M&XDU}l^Uj%g#7G4c`frYLZrBgwCw|t)0uQgVJ{hWzEl|B)>Ya#viV0D#(H8aS&||KD?{Jt9pIW#FT|=R+f1bP3n#GACQ%om4SZ+hk z>-63GBqrxt`2K05cMZ~+{Xqnh$lG$51&y*Z*+r8pULB_&$YDv2c2!*vur4?h(-7t?H$3`QizU@k)<^BxFEG5%WXRt7&{MD~7Lpu&h@$HC#5 zzda;r`!wv(^w{hQofEwnA7S>I+}qer*mZ5YQR$GA93@V~*Dyb(a$nC%2x=uPjBT6o zWuR?4PctUUR?+CRsL0P2+6^a3^UM3{hhYhU-yJer)>*w-$hPP0w8fg&)ldz5)Gn0$ z=ga-3e9lj=F%hIX$1&Rx!ZQzoow5TczAdG34}7$HIoaG1a}}6q!0jz4q1b1;P`-aa z_$)Q^XU&Q_W7&3xkC5wT7Jf~yvDZfm6^zLq_qC4)Ap{|n7GLd@dfJbI3RRTgjr!1s zX{Fj7@A}!BTa{MV97*Ed-E~9_GO6b`UDYp>RQ9wwGm`Oq+)|Lt&w%r6jc6~VC*3F! 
z725Dq{Om8-P^)f06M`>w7j%GhZLIu;QX9`R?ErY$QhrD9l$VD7A=eT%5}NVsE~M`b zN*DYUh-??$9g%5B(3;2@A1}a;WJ(L>L<|57wX(8Xxm-)q+V$OEc&RziMNZZ+tW02@)_rt6Vksl{%*dXrh@^Wbv78otJ{qV86 z@hF)&o`hwk8evIQ;`S9Ft)jOOh5S05)RpzOa#ciSA^fFt$$<7|=$%EnT}B|E=8Ie- zLb2C!_LZV1ORojHwp-k($a~I<;YWJkOcq$>wL2`#IN^(`@|VpoOaJM!sCt6OuM=+c zp>zM^2VklaMJU=N%~@DYV2il%Ivrgd6#UAjPfg`vszbU&@=daXy%kFi zMWFwbYDDT2v&kuLj&{C6(x~4ZehF;|jQ)UW2Ra1_4BN`qOp5KSVT7M5t}(fhU}=S3 zYf<4v#Cy1NLT-y>#>GjdGeuH!*qlcW1?3I6x9Wov9!L@?!w8GD$860?thUA({z<(N z&qbq#??thGCYYDZYvsXwXidkZa`aA;M0_ z4brfe_(06vId7i9%-m0A5%@haR-E=;=mp3c zYyG5)9-39*SA7SM)J@vYG|Av@7VqS)zKt!Wt!`?8CtqmXdg)QLxvCt+3;@V-L`2P| z1^Is%cpqMle$k(;v=Jj#hATui)S}UTMYYy*HtVX@b4#jWwjl~ALInk=%38d$;WX>N zD0T?8udz^!)0SG=5ZhV&1OCkpKKaNmCjJ5b?)(6LQkqPv7wvz5^zZjMI!%WE3~VsA z!OMXPME2y5m>WDG@<)zog7 zVIqVrrYy;0kU3Xi%i10^(9*N$mN{EP#ItOc6?g%2RR(=9%lEMK2ojwl@Aq zD~632kUiuqWqrgfCu+A2HFhoo_hR&ICNC2Wk3If#S~qid(lNo`mZVX$LKYVie@3CJ ztR6lT|4Lk|R8KmEx8%*0;G3a#8~Rde2`?5V$!86SyCbf1t~0E8-Lj#X!`m-)JZPig*!Lz*zMQw|HL|{h^ZxvCC6n ziQx2dk{GezP)Y6POR`(vdmoOjAN8CFbeXoAJZKo>2w^P|EgxATS%w(re@A9;_DruP zLh~~m)Gtq>Dw?u@i~5g5%}f$z1w{`w1$HJIr%*vsgAtv?c_2_QQ@~+nvGO@}>@%$z zs|B^~FDNb$&7j$b5yT`0dm(HzgPuSil7IS=C&oC4%^o{44Jx^s!19tZ*1Uvwa9K$B zH)o4(v5dn8pMwqE!@wHF5-)%&si{I=e>yRS1D@SXYG^uIX6Sg&2`YaM`BOpFDmP|NKJuU7?Jgcy>j( z%wWcK)b@!h^nT0wcZ`609gv~alwd4XvFnS$SF5Qg)}}RGXs3lEXB_Zids#7hAwPZN z`InBnA1x#ZON#+W@FqndusVd}^#Db3vKk99Y`nZh(u1HBiV!M6@!aTf=!nt(3?e8( zJE&iVsu;cg@a4v`V5+%38$y35U=~1opUp+l|L}*-NHB+Yg3?GI#F@y7;35`@goj%S zAqA6q>WP4m(tju|NCLP>$B*S9r~`7(VN?)1x+XoW7A)YRas=Ishd;Q2<-?e7ZHMDw zNa=u!j8H0tAO5g&zYy4cpEi2vuX1ovJC^CYhd)e#c(2g;=&Fu?Cxtki5g-EtX}=#m z{GsALC9ILsBKV;l{@XDBti}Irn7?D>za!@U;pF?@sr1jP;`{$>jmh#Td@ua|%;xfJ z_N_AaB&hMc1)`N^=#;Y&M^j+imX^p{QqF@6QI8H`C5z%3}$P zra?v>LA}6;mg~AZQwh|w_(y6bLI}R#A|Ogt%mx2jIqy0fSnxsm?@Gsn?aPFs{CVfc zT5qfwP?mBkPXT|o*;1>oRjN}TWBzxbL$o^ridxnCM65#fPcnUu_kmMs&ZF{l!=<$g zOh85>-S;POp?;8e_B+sIQ9~kp{v66?WAt1((UdQE{O-;4xn%^}ka!SiCD@~<6aLTg zo>hs-aEY@3e6y^nHkBWiuO?m?BsbnTMMQST>>aho?`p zIEC*=G={GOs6X#R=Xo&QLyP~%pA~)a!J3igG`db%gG#Rv}x<&(pT>Eoy{l~X%?<54BmsQ|GuJeedUk~^j)W= zLGp_kQAQ!u4|&F>YEX$#Y1xR0ouU5s*U2wgO>_UNTId#XoF^_Bkq(G;ng&Xgh*Olv z1~fdD=&RG;FD!-r%$E|(c>FG12U#=mXl!-jAjN=AhxJ~UjcQijme95Vw7SG^o3}PH zeG-6bv_}cEos$&3llW~@1qrCkoEOm?IBDa#SHEEj1eK#jTFe4KygD$6A$lQxFC|^2 ziJb6bZ@w@tV)8r(-RS1$5xvCW0|Ig60mtrnY@Ff|G2MeVk}m&*3NP5GPMkg z-OV{f0~PJDvM$LvQ0v7Q_}pkEu<0{_&&~R3J3Dz^=S_UGoyKhuDGc6q1msf$ZH>D% zvndY2kS}^XPORkSX-&2_o1dagfqNrpTJKFMt^MAjRepM|aw_ja38B{=kZG*}-zU%M zvhafCr-<+<@_@!~@^UXQ-9h!=nG%&QRIRjg!>Earx@p6|5t|_}uqEwbl2^k%xBe5* zU#B1l<(gNXok=BwxvYun5R6N~MfUnNtVOP3?NWt;#n|3_zjnUiT>04hs3f@pt^UH{ z8pM5Y1Ilx*v$oAjOHbLU*unu&nstV@+dAI3YLPNYgtfji}Ip z091){yN%;S1Mg(D>^Ns(w`<_(9u9atmHuY|!_eaRx3t(H=s13>KmsP|5M=8x2rVvEFX?6X5IJt$+#jloFM`~ zoE=FjMCUj3#!cS?K1OYMuODUi2XREe6--0e0rx*U9B~xnr9ECO`TeiyVckG!w>_H& zav^4WkEbm%lZDTWKjAShazQB1R@|2Z@wLCh8)Y%WrVA6HQ)X8b3&WX94ZY8p!M4WaJO!!@`omuOOq%#wgV$cC z69Ia>X{*b1KoPCzy!nID>Lmz?!zTB>$YxDA*~1g6`%YbE+oCs)MFh`?3AEuQxD`a_ zOxcHVSlUTbtI*)?Z3XMZYsKbn6qlSRi2T{qQsd>XRC?DbBP7{!Nu?GLzNR8H(wr4g zadd{1L!5)R`Zmh{9JG~aK}x5Fs!06P1OlWdyD@6Tv4rq1F}k%&(|u3v|HiF~FQZ5S zDB`&(;Diw+vj=!z6+C6IKr{xb`7$TiV?7uU85ajNYki#r zQsz2Q@#$RDRQsI9l=-yyAMxr)1cY(U_@Qw{=8tL9iCt}|@U~<^{i+Lq7&%WLLLxZ0 z#$T2YN!lg&Zj&s7;9J`Twfc?Ns zGM^BeKsJP;iTdpifkI~4*eiRWh^KcZ;;At1LLCcixxHe})~Jnyxi{Lt@u4jspy72B z^;iQBO^!i}H;N9tt)KnREiDU{VgA*}jbZ;DL})1>gs_3l9qs;T$Os`<6tv5&hzRW{ z*Y82V+KxZsquB$G-RhrdyJsCBB*F4b!m{8~{k+Wr=M&5R6%{c#ns6eXzKXt9rYvs> z`O~E^T&V(8>Q65$TWX<$`?zWi#vDeh29{MSA@JFs`cReHRtHzbDA)g%aeA!cy9$coX%flIBQ21;Xz-Fi|^r?ml$^RuRF=XfZi) 
zeegLd&DbjZl;xtQs!p9>f<_7qdv|7Q%QS{T2oSLjh8>mRU;q?V&;TCI>EB_ofGnjT zZiL$Ldd?ncPi1@a_s;tzU!*ksx{eq5)>ZpJtn6H~3SWX!o|mO^0b7sFf-t!fC`|NZ z?ptH~#@+3C%g|w2{jU_%>r#sLil&4wsL3w=gZp@DATIB{FQ+>G$2{$eF8J>K2=6!kl;`DZOQ=FbokoT$nXD{l3$@B-|r9UvU{ z?Qy7G1_6L*;Ac&3>@FumK_*~zDs$6&0j=-R9gj{~6?9RVC!))*lx{wJJo@F7X79a; zlULZHnknk%M6V?Xk}IFT9zliwPZq!~Z}EAaQnK+8iSrK$D{^|%9&iqXp`VxsYt6YW8&mBZL1KqkI!OfjN8Wkc1ZLHRb+%=uswyV5y}f(bA=hj!e`K6o}wLt~RTz<-2o4ub!#{ zii{oZLyAaeEkiZeD$)vUYl#<8mc@S zZI6g;mf}{C{%#a4At7;ysB$Ggd_yYn$ABkJD9YX8WhRT{IWP0q@?lhlS6Q`F#%QVY zO^u}-TWoPPSwiH-qU+Sl64fC>2xxK25-Z(%J&lbwAzEgEWv`0xk39;S@;AI#J-MkYKs?+ z)n}1lM3uCYI# z2R44~{K_m+-t*i$a=k(~D^-)%!YGMsz+&f6Z0<3vop;EbE+^hybv3c2{o zl(0Yh3RUp>!uRipe~LiG6NrRxR>5`<>pZQQRHmu6C9(|FKv2<5iNI> z*Tf?v$v0Ao?`M!TeD)Z>pd#s1!Cu-pCf3e6dIfhOU{;iYu&j{-SU8D5Z?HM-$?0G4 zD(-p~!xLRF9b8EBYKL2SFU%>=Od)=FP+*7OH(9wHhH})iC_rvyvZt3lvr+VksC7=# z_4@JbFMKQVONv|I;1L~3vfu)luGmK8BkMfqNd@rrhg)Oz#N9soz(;jloHF|t&IuK7 zz1w~j0FOipr$J|5(}mcf(xzLCl@`M}v7E>efdq?4j&;GH`y2PDA$`nN_xveg9vWrq zDDCg1$Lg!i?s?3Kc4D5d4Y%AuW-Owpn7e22BUId16#EUo*%nv!&VGlA9Dj!_hMP5y z%nz5ZB8btZswHNY*IXY)T0PZ~^#9msd+@=3@p&_r9jblvOC(Pn8lJplOF7h$1H_zV zYy?TB%nhUFnIVt+yQM%!;Np9U6BG2Uyln-FHXnjXJuougtg`T{?p(rN_Ye`u;XH6C z!Ko~+Bi*|pz+!c|-d@h#&VUy$Q`4p6$SnWE67xwn@!OC`&!0*g1|oZSll{rF@vAPa z%=3(l#iceh8!ptGdm8FhG~ewuXwGm#AkJW=rn&Jh>LAcjsAAUj;9t!qKLQnN78GU~ z-o7Vd4NuR>HlF{I|#0zLdUr3Iswb>M!H+*qg))}wHGGLpZkaky)H@Ug>`hv zolLIIM-|x#nGMt|D3gFWGbn4`T!>_3S0Ff8rZM0B2Guy_>|{(e5gc*GQ1YRk<&F~k znP8LdI3%!ff$LIJ|BP}Vm2Y@@2;=YZL7GnON=*~@B^0^;Or0*Y1A$;%=-_KiBBt8t zuiAZyVV@55!;mMb7{eXM?{OX<)x&!hsVQy-qNT=uG%y^Ktf<(@-kBUPs|)P)EKW5Xe9^E+*^zx69;l~1->1AfoT5(@w3n6DXJ7Glt6v*UzeHqq{HEsS|AmgH(ZUro+P}pS%|QSusB(?e;=M z@(~_&1pNBiVa=>}F!Ey$h$F8teQYzODzA$N#1B=gZuZ^h*`wz?RINjo5UJc1g;NzRW%b^sG(< zUawO&x#42br<=XrW1DKSs;VHkx;^60qdtMxKHI9AnrrOa=I-V-SPkGnKS0!w*ycZY zNkFg<`WP*(eJoI*+zbRKztM(nYnC~0jdt0&E-ViRso`4RU|+mOET!oR?FWa~g<5o} zZ?aNuUYq3w@tPc8?xl3#^_f}?QLofFIBFs0kUtV6m!`T4)j>gc?Fh;!;kQY;TI69> zjj*_ab*XVCV=@u{?`xBkA_8vjeENU#8}SeuOHyJ9Yrkn9Au*zCrxE^$WCDeLhSb?V z9o9aez$4feKXFd;58=gd==s}?kS72v9w9)g`Da;BkJR%i3g-nQxwjO zJMhKj4~xl9_u(9PEIJeb0f`0Xcd;tT0WAnz7(RQ#&)4q<$G-vPb02`cw4ztbN82$8 z1f2{1fH3%bh;AB$+*OJG0Ju~xI3pbaM`bTqr`cxTws=jtl;1bSI-u-vmmShTD*Qdl z=O>2^yw}l%8u>(&xXW}ew)5^|4IJkjdXhy!(#Qsmni>BL$jc2l*`0miy6APDhAyZsKB?IYYJC;AiF6Y&i^D_~>~%YgZ` zEDjjlQCV1()+L}O+7}LfxOE^%{y9J|1;B0s%}1OJY{V%lLBmi9J*65JJ`T6iFV(9a zXuNqHPRFkIRnE{MneXjuPV^p=HD8z>77WOkX2rC5YA-;OBY5gS#DbNh6Ce1AW*~zM zcVy%SWQwnWQzm!m`-8R#N|NT=t2dVb8l;*r<_$wwk;pP1Oc^aV>Tq{X_y(MI$TZ^Q z1>#RX&l{C0zkK_x`W|$41fWgt#o=m%)^@^6JNfClHh(0v5=>G-=V%b*fBh?6$nDEx za?!b+SrA+C8KB_5j=|=_6rkTnBeLYl`}PetAYCF|@NCXWwNP)u!0p*V1QCzIlI83@ zVho^3gk5&iE6|>1Ad3H}hur5ofT(RFL4J2*IJy9EHcH$ry_p25C-;cES*$W|%=Sdb z9ZUP}lz?=KPE|>*`6cqoc&8gqRM*s(-Vo=)xQQtcYKsZSYoN4H1oo1Z^Wic@wEb z1LX3?fxOH)o9=dh%4b1IsoAzQ)7|6yd_gtn=Z)7St*&XQ1+^gmeb2@>(@5eLun$iR zlzQBNXZ(b<9L5byI#XGg;r1|N!P_`)?7bHivzwy4qWIi}Q29+UbKy@M4q#)Q@(ft{ zCw^Un#Me|EfMxal+LZpj_H8+c81A_6JJ&n=@DJGZf|n;Nt2RBN`}bKaDzC34x)gKX zpYNf3Gt~axGNJvF_5{wj>esG*mw{xLQQ+!UAYXQIE|wud901D`_{8_C+$7X|izk z&m&ElK0{%npOYCR_NDrGL*OrhBXZqphr~Kz;U6X0O8B%R7k~w1A%yZ81AiYPd$x{C z=k#kpzM!k^>k~MI{RQj>M~;dy6(Iq9x)2EUekEB?%agb-`Jz-q1#CA#K@>^W9F%HDy6y2olWt;?36G66 zb+m~6z}NZtu}3_XKPW~(I?J)2{j(O%FGTGfWgpB_Q>(tHDT_m_bl^6+;n#0E^Kb>e z`*h*HNHQsO=?)PH5e?CNZ#M^c6Wqvu8u*#`^f1wOdw8ba8kS}L?ku23)U)jx+q}@H z7UT5bV>=0kP+Cp^+dvB@-RcP}9|C=byRjr|u*%|rkZ-FA;1`hZRSr-CMnoJox(nGb zACEnRH|RC1D=|!gm})f(;OH<+f(9hoTR8sp_SwpH-@T6q`Q&wtlG_8}uZ?I&I3CMi 
zJKf}HVCJbH^F;K-Y7T<++_n>!P1rjj7u?TCTvylQ1ejXsC))93A|6RMs*Z>BQHD3@!^K~8YLubAPQc%0FjB#;a*ssoF7h%U43W5L41 zE7Uy8EAA<&x%D({K0Qg~y(`?x#IJX!6ED)eE@0YhS{7&TYumu#J%K8{k0JA>6GB>*#F2UpD0!! z08{2A$a&hg4Z4g(BVc)K$(Lu-o(IwV7$%i|4t4iWz=w`sFP8g(OLU$h&$#$EHH3zS(;REDps zz-nF-tl|-T+U?k?SPiUJcN6;xKdqD7tr&y>aUU+wH-wcs#^fPHZOXz~t5=`$r% zM>XUUA>R&R#F_rIkYlJiMCprL?VdjEMO|zT79(jGkm_!lEOOf|pOtd3L_vX`pIsYoP4;`b;yoV7k zF8(jI?K=~=e2=JNZJsvK>dFsSogYwf-zGG9pJ({gI5u*?3=IhFl>5)`G!?JQmd}yP z-(;ojUi>Ifr&fnGB&oRRqryl_Fa5-$ozW00vN7IH1@=W%?-P#+9a6?Wn(!k^@5>Uq zFpV>=-a;Er={jxN;R0>2XUICyTd=Ell9?uqQ#DTkf%Qf*cr&hGD!x{paMH7W4aDct zOFjEZ9Gc5=c8+;6+e!rm9Et3BU!rED0oWH;(gTCMNR?kgdGWq8Q|~LAIiwCI?rQNQ zW!}v9KAd93tMyI}oqbK3y|-19Rt%RrRl{i|9z8bNxJG8`Kl)$tX}cQ*l4G&a07fB^ zS~x8fUUX_AKxaLkX&Ox^ph%D8xtYsZVCiha3j_M9lplpt#}cfMb8gx1lf3w;I$~+x z8dUTRuy36tPKQsJLtMOjzu;(uWh~k)+Cl={2T-+>^&AQ`bmZT$(w7|hBcm!n?3Zw! zPVz@R52X5GC8JTTvigXW%3esjfBN=4w^1OycaL1lpCiRlyIH=H$*S;Ju9C}~Ns+d9 z&^7coeih}+^oMs6tRa3{Y4-=`KuU=BH&>$>+mgjbP?d6AYSU(XpR3T~P577T@Fz zBZ_r!s)kxd)-Ggt``Ap7*&X0D)2qe(P~$zz7a)&} zDrS7#eig7*Ej|wzX}S_5Z{oUoXf;YU;lV8Q6pfFj=4}nzz1eZ?zL+DQ9U|j;4U>^a z+dkX)!pJIlqhM>FS)#snbWLvY_$|DwsE+b;S|uxSt4ha#m zLuL{R)F^u`vic&3BKshw(pzN<-8x_4`Fg+Xd_vgIoapVk^*RhurR)~Fwy@>~ffq6=la7dZ5V2&Mi@0~#-l+%Kqqw1T;tbxz2Ppnss-!#X zU&mCq_h0C{#Md&kfDSR003Cv+6F3kWCPL*--By_cME@8_%P_~n0dQ1LUgp1k2{ZOe zZc@*zws8@YQ&`DS_05(wpYNiKnjiTGTz?`OPKK8%v&%hjKAcTUCAT0CgkZyvf6Ul= zSSCVd-IANipraNgvhPyK=ywesE^=#M+SBf2Ox3yrlkK5>&(?asKpbzBv?U=;LIGX8 zOF=PC#NxxL0mT>8DR=mw8i|On|v@KR*En$(>rZN8D=_BBf)GPG6`tZ4qz=W4zMx5z|tkBd$Pj7 z-tx!s<5c@0#Un%I){hsO6}C3%TMHEmPcUk`0`GIIlp3JoJImSq+5>IVbLkv|mb1{X zr`iW?3TB8@-7sDg;%UED0w)3|>3j5}7B|XWIs}9H#ShT@%wTMLDqY`v@a5)UdE;8x zD3kW(N4QdViDWj_+bN4ouK`S>_yCP6AthMt6;i-Zp1sVQos4PzCoq`5dVabA3nk{N}od!?2K@ZFPI#aP7H&jC&*39pDCzY2Xov!Md^I|Ma9L1}RwG0|B zoMyfs^;evr?-eFiCDl!@boWFcvrmZ%a4ocS+Lmb?XYUq%MJJTD8~B}B)Pn;@Qn^UE-_{#@2gf67D3|N3G>!VG=3u1oVs0=0MEZ?& zmB?n)2IHBw^OgROw_!wAQU@F&IU@owYYzf5X-SMLB4geW$8Pu*pOQhxQP?7ZXG#+a z^A1N>@RkV8g@P|TmT~v;IR?3S?RwwN8abG=B`8U=10{;M*{vw~U%bgXU?)RLuw+N#97s3G!0pp75%sc@`@>Qm&MExP3Ug?3lQ*?-cgUX z2%X5$ttSJaJOvLx>|sRX<;4-5V^SZGK}VWa#wL=s3W5h9-xJ8kyWDoBogR=%5){qh zidSu6E(_(ce@>3YRGH6J3UQlYh5yOe{tH;@#>mL6bxToGp z$DQuJKOU6$2Cz160-OItPf0<=Oqu90F>8foHQl(aOVv7;`mX^a@L3I%)oQM zh$ZMl#6#9q2o`7O6s?$$H%9SO()X=hq8W_X($qnm(set z`5B$UQ>5skRq!uo38qDSSSxC}kzS2wAd6n*t6%JTOu%X7A~@CLAYuh9O~>Cf$*CB# z3|&&5nnWfS!4njyz!ZPZ`S4e@buX%TjgU~3_g84Nm%H??4<8+V1eG$E%|E{$*_RM86WoZwRLt)U#=vGd) z?bP^mgibvE0r`Q|r4*wx8N{zDr(-J{d}xd=7A5`9$^!{)?Te>kPWY|J zzHrfm%Kqf2q%E^6Kv$|j`U#A#v;i`LuGTJ|R`CbH{SeKciuR{O#bUwMQl4nfK$^dy z3)$Ds8CD|PCo;W=I8@=YW*?BXTK_V~R zmFyw(*M3kx_oBQ+gC^?&C>OiF?d#$M?@6~cqrN1Vukg+YuR}1jNF^kdi6=o`GwF)lBIyml14eCJ+N#x?(VRKNao!JOXK2(D=s|-r;pbn$Ew}&6P5j81=dK&(n<`k^3 zB^mw`Rzn8GtRe?qp~ij%*Dq&7wz|5NU%dQO{xPbM20^ZwX6l{VKNH(m4gzIJE}Kjj z@F}Y~kjhbKB!YE7T4d1q4Dl)Q?G`EOW&DQIiCPl|S7d0Z%lTL9mW$G=PTX$wMP;__ zyf6(*l4(Gj+!(pATY0?g)iZJq+;~9ut2-PU{(U6IP{14pY?aqDWl>1>aY~#R&0sR0 zr)4>b%WuVO{XgGmXiN{r6_FU@A_g4WyA9zd;^4Co z9z|G-tEtBJpE4tA)<=@e{@qGM6T)^XwO8#jr7miy?T@IQ;NO@t5!`=ZwprJIKCt{b zX%Bu8PhO?!{5y$HEt`B-!_bZIrS6hZ zC806j(fs1$|IS0us(*Y9Kj>nyV!|#zH9Imz8lL(6<+tb1Z=@6An=*|TxeD=3+Jk*k zmcA>xf7?Y(0A)#?K5PQR(#)A`Vg*+f@*J^BDtmF$r(R^EOle34Frvvt5o$Ad!m$*ZPXNy+yelQoV&L8QQv zHTnSn{THt@Z;I7Be$t%BWz2&l*RfwuUFgKtz)>x6=D7EY@8zlQB!2GWe;( zij+o?-F$-;uABd(CyxAnJ~5y^z+4FZ-M2Y~f7T^KFjb>ww8PG}m47uF4dXCPytESNkH|lb5}@hqX&j zoK?SJoZqkG-IsgQ*1fq32#XD+P&~uCBT1P-m{lAQ$XB<&Gh$V{HzEXj1x_7g-u;pF z2Ii~PaW9ZYEq}f@uR-<;bbS1Dm4zZ8qs@jz&L0n;6;B#RXubr1;uaVnhRs=|pa?`N z5Z7YBktGcww185{(pa*W8n3P1HuC7NTEIc)sRirFt5$Afn;toz{1C~={F>C 
zK5GU9{C!HHDRo5QJ#g#}8~_%BDYW*HvM*DgS{ZLoo5m{VdYtOqJth(T1LkM6kni`< ze3C}fz!+Smvw3ZNw?hQlnY1VV1}spGb$)%LkE1F=iM+^hjdKmzb@Xnu!Z zMx=ylK1Le{mL-V zpy~x7r*S|X`^6Ll0yXgOJEpD#z)x5 zyviCdi4vJaR)0}}O8&NCi|*ZaVgBP5P?;Jer`gh0zxnBUB(ZZ(>PEV^i~g~%&~yc< znU27AhKgX;5K!R_=5&O6J3$9kE%5UYD^hndZr`gd$Fw?bz~a9yl~vThx1_a%^X@R< z7XO2^ue$7)t0|Cl1K(DO01GQ|1XcpHyllgaPED*59Ee<^zN_w%q3#{MDFK&hNNeaw zjqS^6P!c5q7pWqa!kJDv1xH6VPv{4kP6voReJdrNw;qj=fUBgeG8wF;hnB#42S!L= zf%>O#OHZJu;5z%9QmpQ1r0WOu)gj_Agf5?f3Lq%24N*87o_|_s7sQD()NMGMd8175 z$g}%|rrR7y(Var?#U(P)#zeT8ArsvUzLrQ;K%ng`PP+HR@58}VQ>hqAtmUl~;xCW~tfG#2R@_ z_JGF^1A^7rRb19=9T4Ca`or}5!7v!y5#r)!fM(|Wz10p%{v{PYjSCL!3YrZL5hstK zFgCpDrPBu`Ub_{L+P;E14+IXz{m;scte>E?530D#-0Yom5Ai<({Yhp8ijlmpx_s;% z;uMH*#~#8P9;JNX<^d!F;q2IXn;>GX@%FULCs`lzp@+mNoLgq9Zc^_}?>+l5Yu8v> zp`sg>W}rU@Y9pwbBNP<|E1b5YG6=T}(FxC@M_E+#vdfvbtq4#1Kl0*kUI^BZ?t4yqIoI{dh$|P(}t(!#N4AX)`BG33rpc#5Sp0!OZGd=ry(1 z01S&_d8iT5FuO#wR&mW0`A_hu+fD4U@qeqK=2tFpZ^ewb`)DAlb7j|jY2Ko5PK>~n zCyx0TXK2eCCFkdravR~i%HY-dI`4hIQmK6lLS^v6rR1@kqG(t_xf%W#_<}_D&~%aY z`CHLO5w`tl`BAQXKeSwB34EWOo-XoR{-nNRMOhMufTi`sC4>lkZU43$Uwil@(T-0q zD&_4{f&vk3+niHzX{N`0n;?!1QMXON+M7oTBtPYa``}rwT1jU1vX)OQZ?`*w*n&m& zA{>D^#Xz#Le4CSDx&p3WGn?qSNcST#JDL(yrTCrta>F(_6CTCtkL+3^b09JULJ22m zYrq1#zUPt)B0r5FYD+N^N1;?@mhEmd*LM@mv%7>OIisc@;-Qse@<1i@^j4@)22*jJ zQ#$^QUl+3C@%8!sThF(Zm}=$by2al?k#);X)i1l^0!kXM9lCIj_I>VVEBN&tdug-D zVByKRF0ht+{OvJ7d7oHJ@}%a0r-&aU$8GaP4@+7!d-WPi44pQ*aHz1Zp4{bUt^X@3?MCAiH1V&)Z|<07-dEYk4DbZiG&b-cpTUeuL{ zvnwg*!WgqZ-ncs!S0TZs8u$7GbjudwIBzq#m^$xIVI$O8+IEGyZ>rr70@xX)Oy=8B zPFLGnY4KGpkvp;Zp|YWC-boyl5|2M)Nx5d?{vYn%GN_Jiiy95EaR~128Z@{&Ay{yC zcX#&?G`K@>cXxLP9^Bm_xV`4wd+w`xRqy{-U)8SuL9x4g_v*FQoMX&6hMe1eZ;;{# zhCA&lUXt!Zyx8{Y7CLHm-Qw(*6syp;D4D7vBhu`5`)2&oZ#KZn6bHPtt17X7qo{Wz z!*YzG1B+?H7&<{e;&{4if7*;h%=G7)No*k0A&3b~)U1YuUqWA#b)&?)zXa~~5&Dq8{uvi6m0Zrg~8 z<1#H}M3@ZHjKW@vHN{bzYj;4s9nLp!QwJvD-_o|=Y1s+(Yi&a7%qm4h0W8W=Y_iGW zB5IH@)mEyw1>BT(K(rhW>yPUZrzHW|2VO5B3UEZx@8hS799tsQ9@JrExwfC81x*yk1XC|CW98OYVxre(xZ zA|9cmvHjVoR!8>+TgTzQZOf?Pm~Ar?cwKZm0L3>v>-xbp9pY;aLhGSOL^durCyO(x zy!upShE*5AUtmDtpd6rpWgSvwuF-bA%lp=z1Lpbg*6|9{FCQV17Jvi%rNpztWi{uf zE?98X^k~&5Rb8Q9#NWC}dgFg!0D(OYJJs}}0Y$dnry2wB`J_-nkD{IpL3u&5yqfGk zW*c_D8cO@yFuHM#3fGY}N79(sZh-zUcMkX<*fNGg9hh9dAbDu^bDyVE)f|4_3aE+M zgVxBU$Ycde%P~~7%9==T5D^vmvD_XXahd6RZG@}i{JI?1E;{QjU}TSt1iJW3S!M(D zB675+Wt>-4bbliN28c$QD$d6*kk4z4c&Wxdz;&Vfy=@PLLjJ3mC|?H6P}g_vWkTW@ z6B6CU1}YkX#~(hqZ_f6ep%983wN1YUFT3%D6nY;^ik>yJ@ ztiJOex9%e-OI&a(Qnx}~;bahR4+4NqCnaqP2wpDM5ycsvW^Uf`5r$e>#ud z;#qQ10rZ6z$#04>4Yw}FGTw{Q9x7@x$-B5mOM509u)GU)4{hpY-5|zn zSaBGIODqui$R&Ea?(6Fa1M@}t#`tF!BBNaoI&lG2H(0c@zcZW)2k6L20&y}=r;`_1 z@M0C*R;`VAX+q%5i&ZCiUgU`g$PW#Tujrz!VVvNy7|A182cS`gcwA1*g0T7+9s@#Y zu@uHqp)QaB3c?&_R%Idb(IZzmuPZhGFz>kNfC?BmR*VS2vL0-;a5e8x3^Qbwszkl- zJRo_%J82BU-rhb~nN&HT_7>Uu;~L=MVNUL??s6%B`ixwKd``4dr}BL-X^;Q<-&M(YyUDyA$WxH$v-a zv`9#KRya0`m1a;j`o+71rdd7I>=RL_FJeL11uqv~46`zp{`UYqe+sCr5|vMpt+Axt zsxVuCb$Tm-e~|Dr-5Ae5oWwyi3q(&3_xB9Bgpw{S0PdhtDg28Mn}#ik1)fsv|0CGD1jQ>^+2aYUBPE~${V%xEG;L z08WZ4-T)a;Wp7jl3MRj2U&QDi)|hYDO(*ctK7RQ6yy+VFM=)#^wC_-`h4#U>FRte^ z6={Yc#deoJgfY-%dsWCl5OV!O=y|dk6fVdKC7s3cj)4eDfg|Ha;+FpToy6}`qCox^ z0=OD&8dJo@hyH13I2Cx_6%OSxs<2X39ru3TO!A&>)#9{}1Q!^^7}^+0Ob{`3N#wb5 zv452#Fnu4$s+0ovUtbn13LT^mf!M~)A}_I!QpS+zy7&*jd^NiKea1%j_qUgGUm2}z zAmi=tvlm97?!)SVV^sTWG5ebbqu?j1U+oJ}w`ndct(X2Dk9iF;)rVFKw^ac3-idXQ zyqv&Ir=a?UrRJ7i?MocCU2MTtxls^0&&Vt z5fU{-fkvV}5c073o>`CtGePhEMBX00=RG8An?;=@7sHZIFaqa%E*7~&&(vqO*xV_;{aocE{t#H7C%JU=OSDbFu-jbDi7(V$E5fLCdTVa)pa)T>w!-! 
z$p@5a`A=NQsA+{W?$l#*#im`F2`MP0c;CWG}#UmgAV`s zVnXPLJ{WqQ3-9v$EhGPMYPH;78+sol?CTJtP5tL!g`iG5812XM8-Aer7S~OC^a%k?71~C!NXy7@)L#Sst8PHP zLHS_{oCJb782Yb4LMFefBH5?XnCUy?(YZ~p{8mw$EEKM`0$GCj*=<4kO*b`TXiMx* zvdzq&glXW4NCo#u{sYdAB>UCFb+ahxsveM-po^lfC6$KtSmr}*e`dr`QtOANOYqKV z^A?IHG5UzVa2YU};9jg2;1*!^tG<|C789D<0qsaXrkzV0O$e60C7a$ZvsgM{_kh3JznBpEr1$wppKJ*$>e`Bal~M7)R_?f?{{M; z01TPlpPA=hs{I4Nv|r-?@BWGX>ECUfU?Ud)M+!=2d!(a0KQ5nNEuU`#8TwJp0%2>u z*d-7(kIHpE({?^Trp;C;f8};EW5jbgqDI8~EUDQLgYz!E)7$x=LMfL*YGw%7mgYH^ zCe|z#;C;O|83#vdce^xrIQh|yajROaA+}`paC^G+1@N1sJ!!DVpi0j=J2z)5SS1I5M40z6GBy*U8oEhs~@q+=tC8Iog`* zyl0VtfEYU9Vz_XfTh8^i`gC>a{uq&RbviT^&~Z|(RjOQ?XkWkhS}=7-MEf2THKpX3eu+jo0<@gu z%N)Q;jnWu2De8OAIieA6 zj^+|v9^Wd)m#p(rUj0DTwGz^Spx*9*d#O?)TAL>8MTX33Q`|Qk`Rd(%_v~g2SLaZif(5#*@Cm_qurSZnRqbazVb+|3ZM~BQP z>HSgQGZokkAm)#a!tHdwa8+5#@YVfN2k^b!gwEN)l)pKiOImnp|8y_0-aG>}T_9sQ zQ;;n{ca!d+Fj@Wpk2$o9m6a7g-e3yRF6agsznY}fb@L88?Gau0^LZ9wTWv1IJ=-5F zkQs%A{}S=IlcLK8jCf2rw_W|1+cuZz;^;6AUXtNNW$7W}1b~S3|Fw+Xl*_PM; z3KO)x+i9#IG}HOM^~y!~@x7(9PKN_nYC1U*?_-m68vCkt@OAU(yynPe=wI34wc+$F z2p?1LEH4f5=!Xdotbxri5hQjFN*|n)^I6-hRTYL#$LFlp?aU`g(rHQ<5Ax?|?wh=A z$hSKd8N-ez9UZp^wm9#RS@Rr7r9ag@HwV{3fsntj-U8deX;*DK3SRf_{&=hM5qa#B zQ?l+3c$LB$`4lLl8dMQ)3iCMb(O)B>zDdqdJhS?~=Bv*AlK*-h=F0k}XZrtZq%^YZ z^ZYy0;}-JTP$a)f!)dNH!9*r?sO{l;u7+LK*Ka);9_4JlqG<=bxY%{j2Nn}3vP-kG zPcOlOe2(imo*K+lBKbDAlC#MB&D~X(bE<&b`6$_!Uw@h7GFl*io#$mYCeXuJ8$G%-pg%(?2`wco|Ro8owLGVYZOr4%G~e9Iji z0jy?>XOpWP-u0dL(rmUg`O_kG5uXN{zkkU65?46H?Xi$nSqC^GJ8o9pAQH0Bm-~&} zav*kl+<7CVuEPXD_{vmpHo#=G@3$gXmz;n))$f;J{xLi>B*>+7@yu{eE zX$^!hv!GQUvE{2x3H!#A>3)_kU1JhYMPvy1bqnxpvh-Y z9|`6&3bQ|78p>&Akn&t zee8UTwW?k$n64IiDl1*K4?T_a>(Msi24fQy!&KnlBT|&<2eEb= z`Tl$&@=|(zsVvP)kR1>C0xY-|;oIQ1z0m^jXRr2+X_cQnPVNF)-C$C`#8Kg0hWdbH z#P+Vz2!AEYujuKeUf2AXj5Co*OWkXnp~;R^>eBJKaW)Ysb3mqF1JUgRZWc!L5!kG1 zd~MtFrGq{WlP^fa=i39L8F#$di7j*yO?UGNM?4ef5`?}ZVN`0&mCetnhL<*aZ{we?xm2_pmJ}|e5(0d#`EHk0D>@HV9IgM@ zuD2`C7P^Q?IwPEYl_OS(%V+)ju~zJQ&!KrO2Y+VYZhJ8P-4^{1Iu&7Dix$!?9yD@)r{gVnpm(g3%boAS9|!CEd;4<@)r$0+M? zWhRRS-nUD=Fd}MD8@j0ZbyCZpE$6MsLTp>kQq#*ggkJoK5a{ir`CsdrTj58r>bz^m z*}v)LJ+de+hyDQ2s*LTSZ@J@VHp{kH@t+D$kqP$G-BYWPH?=&ZKUM0ZmdW@&a}B4T z4-%{nD&5RgZJOZnO1GcYsAO+K>6LzJWa>mx!wOJf=sdi;`ABdWvj)w*H9Wr#_|zTd z`~n@mAPF?2#jRZ~_nu>_tj8(UdG@2R7@jb4zge-K!gWJ_HEK9i#1N}u=^niLbv;o+ zoGDLKQ2K70wWt-$U#6cNFh(Kv;2QP1@%-X&A-^5xzEv&3wKaT;(UA!}c?n#0E3*~Q z37|2ic53GcbLlm^8JcRPoF^rJTel31Q*uY|n|Fk11FYgrlea_5BBU`m>53U+oqI@nQN_Z^XlpNt%bpvsPAi_6EMU;=$UGsG$nm zxx%A%JI5NUllywtYl_I(x}J|kolUrAn!1{ll$N%xpQXG$0^mRqtHlYZllxk^%gw{1 z#YbxL@Q`S$qCLF7t9nd*_>n6`8UPofk1ZE$3&Tz?8sH#6I_~=QQe5$*o11ogAoElM=hCPKr0lq zvM3Fb-!BDwP@RmIJKbI~4LSB{!NRwXrpilS`5I1JUfuNHe6Aix-{MLuhtNf3o#jKI z0n?H7J?rZCV(e0KDBte@|H z;<4=%K`Q2nMvM-gW0QzajL)3BIV(D6#-?F{@{$x!# zjO=Z5Dqyk2yMDprF#G6XM95^`#u1MH$??`SU#$^iD&?M!>~D5hK~WBaHjbbV&d=j? z-8~9^#=-=L#el$|ul82gcG#UolpN&qx{Sn>Eg<0SC$+=p&;T()8(M2N&K_@k#{}!V z?Ko0CDkju+4)d-W{A{({p=~>|f*!bNV{=5xiE5H5?>Hl_qXjMZa&Yu=5jQypGgi5? 
[GIT binary patch data omitted]
literal 0
HcmV?d00001

From 0f264e2e6f9ddfc306ccad91c7d78e41c10e099d Mon Sep 17 00:00:00 2001
From: Huaiwei Sun
Date: Tue, 6 Feb 2024 15:22:09 -0800
Subject: [PATCH 20/40] Delete outdated one

---
 .../fine-tune-llama2/assets/edit_nodes.png | Bin 54987 -> 0 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)
 delete mode 100644 templates/fine-tune-llama2/assets/edit_nodes.png

diff --git a/templates/fine-tune-llama2/assets/edit_nodes.png b/templates/fine-tune-llama2/assets/edit_nodes.png
deleted file mode 100644
index 4ba8605a27d64be40c392b841490f1f3078aec03..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 54987
[GIT binary patch data for the deleted edit_nodes.png (54987 bytes) omitted]

z^MRgj|9@Ey&ui`PSzxgVpzwkI$HEmrSzk@Pgn|-+k`@#G0EXUgLQYXtOFe1`{y-MV z4A%>VtTM=l)M+X~C>1&-MwwCk)^hIa?3daFn9_^t%_Ib?v{{!OdcAr}w?Lsi_VO?S zW`*%uIk;K0ZYx-^ur@SVa{ol3c15_|;v>3;07s_-`{9y8ZPl%b>tf7+Cdlgt^WvjL zxBE-414nsww%g?wVnT3FuZ(`A^TLYDd~OOdn5P#Ol2nERdEi$h;PrtD8u^228b15` zzeR;$a?Jl3(IHD^6rNV)D?MA`&}hRxAE`HTJ$j>9kMMj{`28zfA)P|VYpTCJBl8|( zf9|7VEZiGKRTs?(nx~qvppj!r{_=qLa>UmDUVLpvK7WgoBsSb48IHxlg_o}x)mVO~yI{K5SsOVKFso)~E z8oK`V>4w$e{IAK;m{EUzx63-0vrX=oVXs(o4H|l71bjJUq|#9P4Jco6>1KD4&axSH zqt)tn_-E~}C-S>FDWwo&DY&}9FytRis3@RMp*xMuYWCNZ2bDHZ$;4IlTUp9pI_^#s zFeRpQO=rWGP+1gdRK8{WCE0BjTq>FNXLom^h0mqb&1|ZBFl=@s?*Xd-B==Cr`wP-% zH1ICLRy2Z;bC^-Po^9Poh|KC@caprh$eZW=XT8cc>;4#8tMl#OMsAo|xKnDn9@Q2) zIIqh31Z`KvYE6GS^5WIm9caq zsJ6lR2&1M?1tbik3NoorzWKHLE4_|7O*gjSvMmn{{Tho#K=9_m=MZ+yno-MXrQMI5 z*tP;X-aCw14uObpz5jKAj~Ien+OU4-vY_c`MsO3y%E)PFxCers?+D3@{IAf-Jkp}s zSRmBS9$4;^KzJWP%va?Z8i|cHRel^G^)j-FAnjRW+dikR3knA2L_x3SWOnn9}33ubUasv+XBJhsq>A6?C+n=2!)gITb(?1vD+t%Ob@^<7|A2nwV zc8uJW8ECjAeJ-nP6(E(VyaEqEb(~{prQN-HVW}reFG+%cd&)eThdvuqX-0aQ1n$0= z6#%@ErDlk`ZR)z5JYr%ogBD>Vb_Bz!*zL=rSH6{Sd*+&+VgZpk_bHrW;g?S0FWJ9* zP+|}KFnb-r`WtihRY-IWCEHsEd^IZ>&s~{@wEozi@m1ZfJNY*={BYpB53~tf<;<(z z?2ranIYf~tSh-u`pASs0oXn(gH_6{+yko|CseU~@ArmNj*OQ^A!Z59;n2n||fwp`y z&Xm;&)&5oNwTfb=QW|tO3uZMFTYKBw8@xn`&G=ndbg@vOO}NE0?T==W%8fU&pEIWc%}qDMAk~{Tq@4f;WM4Ks{qfCOMH7wA$r#OvezAT>0RlB8+eD-t^je zVJKQ~ox{qN7A!}E`I;+(H)*_ZSf|+F1vv~iHdnFNiF0B?#chAfee7JBy@&c;D_xV_ zsu-(0??zQcvHn-_6mV^OMnB?--f?w7Q3V3s8y3?Xx$6YeNO`@C@;WS~Rl(Dfpm?H6 zYahI>mqp-s!|Mc|&;292bo%w5*_@C9E#?VcDiNrBgxl|^WK?m>%y=wqf@N@=Mf98) z;vQ@t+qD{r<5}rvE7Rt*z?NEk7l)E_W9Fe3(t5T~`#JC|vin+3Gv3k}0k;4$e>Y6_ zagsuwISO$yRZRl3l<-8q&8JBvtKqG(E_mwE=0`v%@=?kdAGIj!mtO9gb7|~MTI)m=R2s!cj2yr

IBXQH(lc3Rf;R7b& z8vC{(EwgvXCARxueXruo`9t;oG((eINh}Sn#AUqmC>M6$kY=@x^TWP{ znf#;YQ#cbkFm61C42y_?b`UgPc{pBEw_wY?+e*KZnaSTaof{iAmma4>H#{7Dv2Z$5 z{dotU)cT;!yZ#L0&s5coiw)VExfEx@!oD=P!lYpdOp5`z_-0P9QeO$Va%?l{#S&99 zl}6FzJ5Dx*trNXA0Tp(%Bx3Z&Wc{+hJXG#(nIBdN^dbfB(n@-L>>DQ^S6gwLCWYq` zJu3_crlP!qF2~~P_^0z28$sk`o{gA6f`OFKs|BOk+Kmju@L9?G=BHd5HDl$esD{SZd?vB+2X^LhmCtYG|Lej6c(BiZ{CSE6V6#1XRs;a-+K#VA(8+Ie6vb@@f zEBh8=Npz?8a&?VM@XDWS|JGYzLY(-aNiC&#=F~->p~|P`x-E-_lp~Om^wxs=5>}Ty zeWrqE{PYEj$#jrf=qu{ob06)QG3Gbvq(lRol)pi(NEX|bllu1&w-P$5J1TKlwW%yA zlxxDbmhhkOwQNZ$f0K0U6m_uULnimJ33LHG@L@iDy;+-dI+KC4q&9ou^RG0p`?qwT z)IE?8JoYvl%vP$Ww_68$!&399Y%WiU>o^>wXX-&Zw~?>!2DtTkXV?gd8cyf~Pv?OkkG zyeQF&pe39QwdBJS#KPN8Wcs6cM-pBgRCLp$Aa+>W{stswB~z58i&I-0^pY_Ro0 zJv+nO4D=0{Ml)x3CcX36QumyE>*SoW>pKAv);IaIxZWrWtdCW?rVhP zP2a<+9Khpwm(1NEzb^i0p?Yl8x5astAz22OHdUF^74zfwTzGqwB%7praERs2RNwKt z{M2PV>NBbBzv+&*dffT#);{twhdm4)FS^I0cfnnHqoAqkG#D2eds_oQz5ILbf5fb@ z3QcsbW9Q0CHwXr9uStEI+8>GY?OI+c;S#G*iVMNnQIUD~N@Sx}QPzDFsk8BzylECa=F2f{CSfVWL_gf^CfJ@vn{NV4#Q#5=%MgWeTyBMi4Cp?LmCGTl zIv>o&hB}Q1J~qx(7|Hwui$CA=Kp|P6WVOSNvhz_Br2fc#i?>`3a+dzc*ZFv`^kAvP z_f33$!Fh=>ijLw3?DHLdZu10yL+2fW+0B-Ih-4BQ70yl3)%S=Yn zHp3l=OKIdXEM`HpVmYh4V+iyO|9R8CD9r}{C2A8i-WV>5=iG8BfeI+Nw!X?Ij0HEk z&#V(p6UkwlIEL)GWT21wlV0{jkRHV`XejsgV7RE2e0n92BUY6E7329)(0r;`WQ?bs zFYA~67Q)FZe%GosR?|_kfB-g+8;4q(mG(dI4P*!k!*J2}Ur=o_J-{&s=(eOAQ8n>d z{Xs%TFqjX=OY|nE+|3?Kkoj&okWv3!lq^m@+WB#ch2E!f13#@6Pminp#ilvAWDZ7k z=97LsCI|!C@+6JC!wcJyA0Eb{BxZ;45op0eWXA;xkA4jY5f9TI*Cp3RL1gmjHEXGJst9;zgmqf<=t4GG7J+evR z3F2$W3Ubdtx zpacZrB9k^BKo&B*VKq1U*xNC&hWIxscQ!1C_G&Gxhgrei%WeiMYy5d1#yx{eXE67N zbW%iku8Tq;bs|JsAu_Wk?Jvd}3Gh!(mv$$ACf_e>(ij@`nQs)Q*XwbnMDl@3v3lDrboz1b_pcWj|KC|Rw_3gsp-4IYUw z*bv#zyz*vX|4F)>N=imN?(X7Oppu6G^38MeR1U_xSi>7kct`nx)$75%rw3+{$g-T- z?Q(zlenotFCTbILU(d%`1Prom<+$(SB`4h&gCPmLz-u^Pm=z{W!0AG`NOb|l2+C=7$PRf)8TUpYYWENqUAN0a;w-vUP^C0 zCg;bPej8SgWV?9Pmj*sPeTPX|LSIAJbL%ZQUHy2sOtRbILmp{mDYIG4a`GqPs*OL~ z>QV%E)*oWfcKY_(4CNM?Ii{y zW8&26TJfZuOWbrawLe7cwz0QsMg?+`)~E<~6nJ|6`zBG7EI`VfQ?~d>Cj~{;@R0ih zH7C7gl6?&Gb}Kz}pSMM`*7w2)DwuLag{LX+ssOR2nxF}X}BUtks6bpU$ z11nQgnj5|7Q5|8u_rt!S#U7-gv|TmRVq%G!AuU?|y^bG+xbz3wahqEtnq+g{w)Fhb zxELrwnqY(8)Trk&#nXjfzML2m8tDmNGe84ppRk3;M@t^77z)Gfo29oA>92?;A*<)q zo0huB_BB@3HhH(DQC*@P2IhL0lBOw9S^K(QGNDHq`cWLTk(i}oFAFtr=@vEm(%!n8 zdfk=$WZjLI!_LQpIpNI%sDgX|fvT0GuE~WA$p`ofhSVCxez!Jzy)vUwV4u)qHDy_5 zb?mNs3GFR{HQ=W%g&kd`@G**B71JP8`|~ur6{tq$c2MQ# zL`~hIx>QiaOy{+QV2xZV{{F3!Q>gO`#736c!73dK*z#J%0;_?%;-^(DZCf$vdcs=( z2F7b|y1$R&`pqNl7#kIVc`=b0i?xgeX;!G$?GlpkvVR+PW{08PQZC{eOCTyN zwZi%OMndCTy6IKXKtwPfLF3C=5uP-dH0T;rUNehVMdk5#$`;HCD}wtf1Wx6mw^&H~ zI0+?{%wHz*%Ve6){hU^QwV^mwVG?p&81ohLY4yM4-OXX@OafOLT9ONL8XAA z-WUhc8QX7hQwgkf5TwE1zY(s{3urXEzz(?Qx@xQMwMsb@FQggT4V&ACuZyIv(Xv_! 
zks{)`+t@u?VA==`g&_w%zxmUpFy_ zX6kEP&(i_ZKf$J}^}NT@IMDV-_4igXqaYGzxwAw5QYX3!j7M>8FCR6z?ckRDf$?8W zKbH&U8Vg>+ka%yb6r{N0gTGOSF`9v)h*Y{U(^d>!W>)p1;`OdOgaL}YqOn}QoZ97m zI-$SQ#}RGiIGR>OZWdp0PX3C4bnY{0XkK}?1+7zGWB25K&%F=z$$I86R|4^>)S6+< zs{1T~w<~3<0TRCW2bA@O{jrdR{3sK({@F|i|>B}f8hG_549fPY^yfNsSN;-8Grz;EC6ay z5NOy2czB2&j4cPAyQY+|0tlp~D~Dey-Nq8B^dY!)-uo zakZpwHTam+F6+bIo4}aF7{~Rorx&TgZ{~2;Z6P zmA?Q*7p7wkXX9x<{b6m(eKGG*Z$PlXFxoG;q8R{A}#tcmCbo4C%*n?X>)yk zEp-|=Yj3H=3oCcTeUtTYkaYx2@hVS zi#(#DNzVT%)*@c~{S_K^UIx_k!q=}VJN~n}KPKs6N&PMzFT*SeADB7DWFX!aa?KKMN0*8JCI!J(KY@F2-R&zte|deVpEfWzu39n_<0 z1Pr2WtNNX{&TCck<3QI9n_6TpJ#B<10_c=@xDtA!^(8QR%;qyiu2J6G8DbB0SoC zBB+>xcV!KmN#^sCf;UReCtU;qw9S|v+oCH+EM1o&bW`5Xi#CFM^nC`n=M0#)xm3@a z9X#H?bk5~_MIxq#eHKVFqzM#GH{0XW&T8NduMY^nZRVnA8qmEC%K?ISD`f6qyH0Yc z^`og$-2@5-mi&LPoJ2Y!R_Nq`dT*)w6(86pp(3PjT30b_ZS~%Mm^+V5zOyp$+oX%& z&F_eLYtOS}X2*aYF{0J%e_@q$I8EBGE*u&*$DV~LwK2w&W~5QK(Sp~wPh?=8|NeY@ z#IqlQPKSOR4TvIAV%N?dA%iUf`auHKpPNkl~ShjR{7al_b z`0k`9^$~YrJ^(7c!&Q zIAu*EB4zUY7Zc3rKnTPb5*6Xzc|y$aZ&OH5o5H_WR2huV@}@yS zN5DO(cK(YX9~m5yE%`EX`4u#%59a4(cR2B$dU=m?ih8!~LG_gXk-`j)GhC>hyLUo` zwd;7Y@{(MWXGR~f=UO1)L`!<&)W_#Zkw9j#jMw{r-Jt4)mb;5n5xs?_mt`^_rZ%}X?KnEm z(&jac-=rVPL$v-)dpN+Ut9@1qw>(!LfUrf~md>@@&#|pkm#Et&`%-5>T5eWNqIm93 z1yI(=1dlJLRNyrBgFXvuAbvV}gO0;^$z7eeGq^#&1EI$R;5x)LvBr8c6-HL`b+%9O zBeT9Itpv-Pi8{ks@l_OS7z7BN(a_767f7bL^xi9gj9vTyzQ`>AfbN}0X)&vf{#bF2 zz+@iB!sMq2SmR$Zn86Fo6w8>G-i60ujy56drd3~!)410!()*h>j#%k|&^SQu;v&J2a7)_TXfjo@-op|=WY zM7dYrRUC|go#`J3wq5Iqz|`st`-=WR@lFfomeqI==@x)j;;jNZeXfHN3}O?nrg}V6 zNYHi#pIS6U1Qr*9X|j{GX)NlyV4on|fk9Grd#~ROg<+nwu5Pd)1W2qV!)T~)5YS)8 z@C*fjm-PW|J5;ROvRJIs#KYj4AmHJO^%^c5TmyG0Bezqo{>?QGq)T!->lLWo7zraB zvIh`j@!tLhmcOfHo93y(Z@fBlIx%~HYUP}DlkQZV|12w!3((37s^2$d-SOVN(07B) zYVB|O{X-HF7YQa!&aT{74weO8O2_x}?<{d)Qz=oKKogljh^W{(sS6gzS&dXXeU^^z zcfB;?>mvEtzUB?|R{$dg+0Cz-KbEEL| zD9*|VE&22dS*dx{gIaMxZ ziWG|VHk77BjBqmNyw4-{w`5SpKpkMW>cG-1PPy<`+$Ig z_c*=ywHT5as)~PK>*;y+R@~qZ!@oBi(_80&_Jg&NRr7k>1QVt5;}k?d<6{a~aFWEk z-Co!i%WrdYGb=*wM_0&8tir0CjiL$jBLW-CBuoViYdU<2xv392yE&Osz%xh$$bc+D zd@~Ba7g!Eq3%G%VGO4iIUT2*<63Octgxq5+czp6VuqQ}3Auk8bMP!fdEHooo_M=Vq zr^}^rh3_=Q-%_ZX>R*saO_;aAp&l_koH8H$QMUh)4Y&^>$*0=KZd+7dhFU@|Q4e%e=Dgq`@Am22qvp1c6d<8_RzX=m~1! 
zk1*yY?41SB9XQt*+WYAoN_LlF`Biqge>zg_n7N;00p>%1_ek7uGh#go8jyK#A z+uE|+y^{*|B2h7}kX1vX=dY3&1S~$B%$W1zEsncVQNCLCjSyM@344=T#PIV|V2FfQg^>y96Rfn~V3?4LB+Y4Gi+yAeUVUrzs;s73M!J z?ilh6aU7#%8tG?mbPLQ_eMHk2{J&m=Mikgg5W>Gd@3-|4Kv!<3e;Pk~3Kqa`{694m zT5Iz`Y$&})_Uk;eM{;kYaRtVq!wuaZBeY>W5_GkpKKaf5x+4n!+_b&)hRV z$bRj{PUIx{DdPHIPCn~R<=AhdgUy_N1vpaae%d}kn*N!)VP1)9fI;9GgP8+n|s78i0$UCyJ& zl*vFLtNyEIv&*(dJT?Kb+@8ym9#UuB>wdNQddI!sS(~e4edq1b%&*mq2VX{S^IkW< znEWeg(!9m`cw)KKyg+=SVwjg=7fi_VS?Y??Q8I$W5eP;(obVRj-$EL+8=ScDKdLX{ z%5!Dvom@--$o^`eun1$m$*q=mCp&t6Cof&6w*#X_8lVU1Ye!Vgm$T+pb2XN2#x0)r z!KVh+Tf?cD#CM4&pLr4?hYN%jO-{$?Q+d{NkBzQMe*YkIJ_<}&`s3YJ$1MqN_7hmN z0(85>R&UtDaNa|kKo|;dOxIDr0y{l!sCo%HG3Q4%`dNbJv@w^=sR0s099ZIO5B zTHbjlx#t%&EaETW`;exxtbWIT=olC7HGsS^c?Hm}z!FdVZs~55Gvsv(kesrN?r~_J zjtbLNwFHEkn4Gs#o$C=vz^zTU3r9{Ssg56Md}{p&{5gG1{9aPQj(Z+~dwA!MJR!W0 z^=O6q+NJ|b3~y=wrN{dvOp=#Q2^gR0Fi*s25Yb*@<=&d|8ez8)yXiRJV%9V^yq_H_ ze_sBU*xPt3^aE;U&dF7Re@I4P92H{G!?gQjQG_=V@H*sg-O))P@byzX0Z;xUAxc;; zBK2hFwDKBe#tbq zI^(gYwPg3inHvDQyI@(nuqhGB-7>JNCjPWBkin$NWSju}CzBU4Z;v|Aca4XL&y-+# z(i})Dm^;eUe6?EkeK@I8*$!XkSRlU%I&)<~O$&D1onh=HAGqvxEWY_gE>Z2lY@V~C zC49l%Q=N)AaIa5lr1rQjRvhij5z>kqlRQYL zv{58^+7P!3;9hHNw<@3ltVE@a_oh z`($I0!TWCFagFqm>#oALX{mlSn7+L#bB0|DX;zMN;l;-nIFOSFuX$~U-{McOIa3by z-Z|r6BB3jSw|}X}f%yey&~V;Iy26HMs!jc6r(0T>U@;O>@e?m_zQ?DbhrU zkim#pA)}K^fM3|cEk-?TKoa0zMdy@1xjbtzg^@y(g6}%3h{qkk3U|R=GDCUtsjtQ8 zXEukgF3!SHi0RRh(a~?dTb;V^s6TnPFR?;vF|;b}x1^rKfoA-ki`#KpR><_Xvui^W zbV$`&&mOQIiS4SO+Bo;%N}KQFV$99YlG=2?72SM3P?AuNQOVD zXdPj~9*K{=J|nvn4$W0a*LlMgLkSWjjK!DwVf6&8x9~waM4ia^^FB{-9fJ7{Z^>t$ zQtC3J*Ml&RP-fMI>r<0!buz1oe4XH$`@9#*g6vmScYZ31g#e?PotK`rtS8v`397$sD?r`l~ORoGIWry zT84=aJL(>tFkQ=O#q4#j5j$;;rti6&59dYd4{UUMPNr+!qU_8djj@w8=19Fhr*w); z%h$(mfE|wW1>!?WQ%i40v^J6Mmvm|YnDMVqH^!1uuadx)_D~oGQUJf2(6$Bjzp4 z#e1kWDFocNo?NpcNyqA%jX7v4-WI<`%Khrd<&g+gPEi(Q0I1~M67;@+n%3>xPrbKq z&L|tBC|A)SL^<|QBSJisHq31o`%CdBAY))eZpXEUc~$}$tYn(6&& zZ^U58H}sVn?!Ip2)eaRN17WzFq9wqLs~d8k;S#|fzlnGsNpJJygAl!H(z zzT_9T>1@urmDDwMX{2$<-BKu812u!yIt_BxUvP^UL1f&1k;zYY<3o6K5}S12TCH`5 z9k4G@4xC7ur@QI<%$=U%LmHRt_}sx>&coa$>$AGZAK+6yr6A#godDc-4%G8rt(pN9!K(+TqDk&sY1;W^P1G>Q zbqklfDZ`WfB8Ez9dusXvg%l`TeaLiT76Jm1R@?8V7X!pzDy7{9Cq3UE{`0kfpx13sL7VHE<*cQt=D(2M749I^dru=Dnw2k5|P#~RZa8B3br1ke` z7 z;-`Eh9Qr`;7tw^eNbI63<_EK{8mUc?lh9uRkU62YhsvS_!FTp{Rf%K zsF)dhU=7?cyYIJ}8QIxiZpj?78n%0cY9Dq@o6Bs9&*X^Q9s_kKwIyL>AnmPl5^eR`qMufR|OUv*80*iGqt$PU;7qhAt{oZo7=%i z5kO0tgwe7x-jq>68bp-kuvw-e>Rz#Epg|!c)8OiY`9&x_s;A$NNafj(a`+9WES4`m~=H=cR1EVdvz;K8hvaR_sGDDLBYJ$c7!nPn|!TlJE zxUH>L1PNMe@X!~C_4l?`=NAqGm1z|z*h+n8HxY(fMieEDk;*f) zFT7o;%Ir4!ZI&3cY8ddKcjkTO^X5wd!OIzda%QpdZn1pxQ>X+?7O^FPhg74}O_Ng+ z2Z1OAT>3!L{gZb1v^QA+iQ(adpPzEmhg~~{grDTA=~Ge)QJwxG_40NgO_C`aJN*SC zeU<0EYd|rOVy_-kL!np;SEqoL2#<>Y;xLF-CAVr-SfDe=&Enzin%o1x1S_6ITs9e% zEYm9ws{(5R*@M2wxG0dfX1LVy7Dl?*+EN+65dDgIAsnSk75oqq7vV3PYoK+CIUsSB zEq_M0Q~YFIt~-jHQSiDq9Uq@jb7CLlXAClZ5<9)?DVPC2YSRL!QhA9Uhv<*3Q^DQ~ zDnQtRDZ4zqQLQh47*%H!<+2V^2Qnm#QPm*|5V~=1qzP_lV^E_W7l^KHoYv3BZ6GA#?AwRlb^#Jq_oc3#IBd)iKJX_U zq!0^r63d40uen!rWt;q1`lsuC1>A{mm!sLI(Z4&{f3D7Sr%;T5t5{@}{gnrw`+&uS zIzNttJ?=Wcl}%~T83jtqs%4pVVg!~c{Nh_0>7xRX@`=w@P<$*_FaUD#E}{IoZS%N- zr@;(Ofqt^K!0Zj_%n=*z|1!r8tlop=!Et;7TO#l)LctYsu_CNspJFb}dIvM2Vh_Mr z{!MEP&-k}Ml~ZBdD3;7DM@b8M2<)j z#qUu*+7?%6cap1p6&NY^>O;OrAMpI83PIT#>~KLWb0D4TRG1J3%#=Z=E7jUzi{msp zPp?Vjp<<2b01RTgxtb$~zs|PVFz>eYc5G>?HqQqqA7uiIB6k;D&!SYQm@U^Qi<;}A z`%G zi;_{s-fm1f>zxiH9LM>`xpkm>0%u0Iu7g9h+WICp2>xkP&5@pd3*cCJ9k#>TZGWXc zdDgx0yaFl51VYr!9_80*H6;Y2v9m4NO-9;wR?Zq<=`HZo==#Sxszb>P$4oucIr^ zDZQQ@RC*DfhDGe)Uh7R-yj_>LTUKT{B~bggOwQ?gHM(mWsYwnnV`sQalG$yQ@I&#eK?F1wkNl5 
zASAE$*H6;<^7&}`{h!a zYLZ9W&O^(vF9d%z*KH*4?wY%NGfBGL?-jQgb-A43PSUD{V$bZ<9+~ZOchX)*^yF6Q zKVtJfLL*9fX|@6rsL!Cl5ZLqT|8egB?LUz78?SiszQ%(IStD1#Ry$FkvhbZsravwX z2#wLy0{NU1?(}LUXgkyNu>s#~38&S7v{|Y$@QDGZ+1Q(&_|xmM7K@pR?_6~~KpohJ zl|YCLe`77}|6uPegW`&|sKJDw!6CRq0tA=fjcafT9z0lZZ#=jLg1dwSNP@e&Ly+Ju zjXRAs&fHGk`)0nWnX0Lp`7t#$Rs8AV`aSpTwfA0Y9d7-_^|P&EX89R$7`(5?j}qRw z^XJ1cYDWZ}{0f)LgjLcsp#`b!(h zYzCwvi})axf+f)n=}-M#!b0u`v$U@6{zm|(vIL0F!&yRg{aBsaM;#UYsaSw`1t?8t zcn@xG+BK!mILsaSPlDRY0WFY)O6#0ab4j#Uz;o-3x1rKegdx=t_zb+>P73s4X8sz% zpzjIntXCO@b1PrgB!$NJq|K+wTYOJAHMS97lU#7xhl-=`B_v z2Z_vXE6J6GS9fUrL3^K$&fdxB)O^%rW>$Q>CTj=cGXtn2UwBQI#@{TMfe-ZaLwO;8 z5p!Cp8SW1h1TdE&$6~ehL}a4^IaTvZ^o@&7d<&t}O-6!~?s&>^ptc*tYQb9XLC}~Y zW(LzTTBzaX5nIiRdB%qxxRKKtDCb1SV$=#c=)1Pf$%whz5HE(bpHn+62LB};)rX~q zDj*;6h*YG`KBI#Xz_51{xA|LR|Mm!X7!*@+D7gmuT4YEl*@hzYD=7qz$PrF+=LeFa z(h6DFz-(LmvqB6}IXpJWX01Zk)fVRUp7kVzEIO)*D6x^F06mnos1}HJxB=DPV-~bv zln{m8w^9w(iBC-VzHsz^JGUu(KI zz#H8JBGT&(c~G8@IK3%Q*RV#XI_Fj;|Cuvae*X>eu^bIcC`goQk_uW~lU zi)R^P+A9E{FNXe<^714GCqW(C5v)X5Z<#hwemenKXAP8Xr(7l)n!OGxC8XM(^81^P zM`z+bce%wyXLDtHkB`G`W{9U?G=ZRjF#uCc#x7%SjrhS(I&o^vjBug&$r}~dWREOv zkW}U~DM97w6Km`=tz>>gOcnI9=v%VmDDN*M_!i>vu*Rek*x^^nu*JXmw%r_FE!A}p z_ExAA!W34b;x5h0Me_Ik?rY9*E>3iB6xm-sQ!%$jr(vNoB;p+~b5G!l-<$%7sc}zW z#1oE-%oSlTs>)H?H@c{~34%8gK~5lBTJWL+OQqP8fj$&(fC>CEqmvWC8+PO}+E%~Z zvMLH7kMjzLdKyV(cLz!#7Ht{4AMTJ1(=FtP7DlNMvGfS7aQ;iGWdifx%#jnb+ZL<6 zDpwxRi(rNZw=X%O6OyKQVOsO6%ftFvqeU89Zh&MeU6x-V8p1e=k_O zx7?n;o+TUe7c=o+{qzvx>ke6e8UYYzm%yJ=f^k|-AW?>?y!?p2{2xVtc%`vaUzfQb zigCDob8he2;&m}nlLWsoDMcJo!$w-r;)~iJG)kF2LP3kDKJuF#XO(1V73%PoHBEc9 z0BuP7s`RTapc^l#=i|U7+}F}8EUSxOT+gzbSkdlE3FBF)v6%_2wDZ*QS>G{)?X}Ll zpZMBkG5)^;4L`F)d~fPSd}FY$D!#m$xPghXc`;l&1I?W9^|!4cPgdhtSA<)q^vw#^ zyz~8);;82DboS-2c(#TES9V3BhNt%`mBoGw2k9APi+{cusw;=*3Dq+LuxqS2ePi^y zYP7Wo+bZTmCxhdt8E zF8t-(PyQmoJHb0-*EwTP^27L`3)N|bF?J}((KJZY^fxu#_=9ewf7);h!$WK?y= z%zmeHR5#*qFDhEvY<2S3j2HC3jW?p z^fYz%_+>If^&FAUM~i2n4e0C*K!CSZ<*&_?jdcqW(J}Jk*pQOMB7}_-sjEmBYql*i~iorl~aY7z>)*Gt#k8j)xy7ONqEHvBRb!!?$Rp zM*DR6HxvErXHPo%u;>UXF{BrgMQBx@-9)p<@AiwgI?u!<|0w=jX?=n)JlxVER+HU{ zF3G=)PpO8=8syjLlScjGnLsw5ax82h2Sh*EckM-{V(ZF~J&WUf&YVi$hGZl;DbxI2 zwJ_Ff&R0SCfnhp~kv$C4bL8058;|h^ya*dEri^$46JTXf;G+Al&|Cno|5jKrMyW_Z zqvd01&&U7J4Gn-ye5%JmR~EvOOmVL@gQJf4u;J+BLlQdO4D&#{$pT-Nb=nBT=Dl)(dzwdLE8d zT3iYU|58f+A2{{@f4cu$4^u1&Z??jvQ2#QBO3QlxW0~&fnwgDKJ7^4rkYotZ2zssS z#5EdAbse$L>RXs`y6n*epp7J!!eCp(R+mTEqIDsr6W_9#>3#OOJ*W za+%&Ec-)U7#h~A^iwX@HfIF^BoX@(?8`u|dA&>TJE5T1=7#NY;fsSgsfyl#f$si-G zP=Hc~k+Z1Z2WiGHDPs4xUCfK8C=)r(WldX5eAmeyO(W#p<6aIkx?FZUUucf9_>>2g zG7~NjXG_g)jI51onu#*~vv2RVEixj0t&pxuKb~JG+wAv=2o5h@@SQDvi+yC7REsb# z1s`<7NzfHHm*`8^GTG62nzl?d5gYHJfN^OWug18!6>Cwx~!* z4hsvzwTxwaDSX*I+S%UOupaPT2mxXhPX5Yxq zy;kZM*DpnZm#PFD1|D`%Vw`q;i3VJt%PV}Ek9YM>A>W7kVp$Ig4_&v<<@A<#H#G-r zk+-pPdjo=)TBXDJizB^WTH)zO*%q z+D+geK)FX_SUW~MotaX(Tps#TD6Y5erw%Q+ z&XPr(j>_YF~;s+%?J8ycnr zl(xPZ4cMUGPBS#oN34slnN5hQPimE-39Y!R%-ttfi?_xOU!fK>vdDcu6)(~BA>ogq znxUmuDKXSfT(GA_ELPXqTJhx2>>74n2ou2(DeY+RZe2cgRrEyekF(4n2>v48gD?RK zcI&G>;69;IuHeASA-trTr9yTsV8%t-(>==R_2hE6g!EM7@x80BAo4Hj53Xr4TJifj zZ1Q}tjx4}<>@9M^K(<~L@1)scc@($cSeEo&~>rhRzKWNZV5cque z922;&dMTrojt%x53Y-JTu*HpY*U%O~$lW0f>zfa|(*+3toq|Z~sc_TLHMI!6RHAuK zM)WB!YM(|_{(O78`+8U@cAi)^O)D!4-VQ)dS$oB&Aq#extr3hrhn(Pzq5U?8J>6U!jDYpPTIg!q} z8|5X$d3)Mvy!Fl|!BQT|a|jYS5%?IoEZu``q*cJGzbADtkGDg#c`zS2Dye@kX=aG_ zF0^U{Ga++pz1OT=jGqZJG=tX;t;TV}`Ri!~hwI5Ity*sOCbl3|9UE#Q8tw!Qn3b2B z%NVB;%Lg?OsVFFb4U2q&VSa`-kTrYZ%E^HZkNoDh1~cD#bqtvpP!S{hCDhqijszJS z-4Q*g8K%h0SQ=Rj5C1T0zf~SgtqN!o*qO(or_$Lqt}%I#_gTrg|A{ z(;bDg-G`WB6a1K 
zo#1Jmz{*s)-&uOkGJ~^eh8xVIQF4o2y~s}97jQS=N8V;+A5nGV!-}0|ea|?kv-YHH zc<;v-9v%V0<4KH27C;ls)Twr}s^=fT4dxE%QW_Fj>-7NAQu(CiYEKtA2wKzRsRcfg zt;>4JQ%oqWbok1Y zBe}|FCq?5g^s~-)jyU%otFupDNY~iZ8MP(Mzl(XmhGL^4;%9H*3s_^wW%8w_kgcZn z+Ti|3CdlDkO6eKO&vrIBtFW=!`I3AYpWrSH*LfCnFl7RC)YInT%-E+3<7pg#%106DC^70ZZ%7NVd_$frW7!d zMP5#RtJf%9z#tguKn}_WULSSNiCnL&Qr%#oA|CT$?AopELES6M4El7WpFHDCXu5&GyK}4#(tLu$-@St zU{r%Rs6__CFum*Bk+a*)Vuf)BdoJe}i10o4=vTAM?TE}%Bfky57dnq%@LVRSd^vAh zTW7a9IM<-!CF(@d6l#TEg$UkJG@^BXf96PE!fNvP5M(!yEx>+nJ#_tDw1R&PFhpel zIz;`y%s({>f-0P=s9y}cj!`eLmH+G^G7yy!HIpq^0-}|nfZgvu6A!m#iUMbkkvu`G zy%`NEVg5b{-0q}%?_pP4NMo-h#aQfkW>jC@F4kX7gpLI< zrcA&LnR>a={Q&*gCB=4#r6g3@K$BS_n&9v;*{{84Df5J#(nqcm`F&8qvF6ZYYBgh3eMvLHD)QgfrNO zu(CF)wv*UlXx6N&pfes@){HUDYSliqLil8tSMAyLn0h0 z!Wookj_Dq&nsPzpJ&+WldsxGl)BUq$BXVdUh-pjhGCbpRKu1AcC7Wt4#)`n2aJ#V4 zqf9-GCv*r?inM3}B1XO(74a$GiJLoDHRik&b=p3={yKsh;bI~=R<@{fuqK4zQ7PkU zfL5!e1XE=ZgVZE_TxD%GSA9fA`}-f|f&}6h*N&~$WTG|Q>Neh;vc+;$PdV%}F#CJM z*K32TVXol4@|d3BLMY221Vm$d9!1~|Vp%h2r!?6K59wgBGiy?NXo%mx*5Jz$p}(F+!rTsP!mJzjbeIRPp)r zP{ilT@#_*5OoI@hYk~}wI<~IuXUn-``=}P+$}&lV7GdmGZ?{qhjVlDJ^PY9Bki2ko zc91AX&5wX*$kq1#A-p=ewT>HiPKFR1NVDC{SZ81WAL>rU8Sf(O^V8J&WZvppKsn6@ zgWOzncsCx5E0~^{!wx}zkVR5%%gCE=3p{RMy2oe;I7C~GgY#^*PT3}pja0=ZvD*4~ zpZ?&KPQA1&O0WsJtO-sAOs(cU>UYc6=f%oJP99#f&23-JvwL919S_zStYKZ<2Dy{k z?A4qRs4x)L4d~6E_84}jJUC~aQPhamU%-4Hy{MYl7cR#f7o9hSBQ(rq><0cgo~(-d z?_Wfh+Z_9O?401jYyH`OXfE>T(Pqo!yH{|34aEHTV@GcsWp_UEb9hG`-71ouAc|Hx z_Ym=_ikc;@4vD5`*?Qsdr_>>?@dbT$NsD$-3*|fQs8^~}ZX%QEq_5hmOYU`XmPzIU zqeu^9(wCkh;1y!EE4-L`VT2HlC7s)X2qsYpUrDH*NrubU_!+R}wF6K$q{KQC@#WHR z1I!T5g&7V0Q~Un^u9w{Zhiviwuic7O@u1evzh;4BN{H^h_O^Ue?{lWGTD|QCVupg4 zB4p6u)e~xjCBQy#D(hUc?jJqjEl@v+w@Uo`FWRvzptiE~*2oQB2?4n6F9B}*t3QkY zk4g-%h)DE~W0#pjt9E-D3Hotho22otrGgGrgBx*bfBi?tqzv#RvXY$tSxLCk00kMM zPA$!U=bi&Lt*la4{?}(=1LRP960^VlSszjh0h_vO4Zi+MsTd0ZuEYRUU-Mu8=x?MQ zkr(*KTx|RT6m!r`ikSW#=K$DrPWN@;zZ)U~BtdR8wwnJ@Ze;;P4I$=Dc%cX2he`l< z%NAVn>mS48p8zU9^WVi8$N>)~q+B!U-*Mc5O%I9_{||05;N{;0AI5e=RA<&V@_L0yoZC?MJ_X8I>B&7Mx>W^V7#srROH|{4GxI1>6BYugzWdq3-sP%*W|SOI{SZZcwQz)y z4zYRW8*ByG>e&NC3_HgBV)#8fW{Rn2rcUqAl;t#C?iB%53V&kIPo<1HSF2yKue<#H zuNeB6mFD0jmcs5@sU_GF$|6vG`0Z=kOVzMn+Q1~We0Je*^|L?dX!P+M|7*)D^zBsY zfRA*)0dtQ=fC<`Uv)IIB}Gam5XJwP=eU2QK=BKf)k+IwFaj^(pV~RT|*mBT4$-= zSbfRhGLHmO3B97zxis029Ct=w zSi52dI73{I^Bh8CIL6j%Hb_p!1W9i$_Q?up(oVUZI=9RWb#xbZ~{Ot?3%(YIrEP*`!4|fu-dWL&%BR>Z%FJMt`y`Hi6`pqr}%n(I@i-Q|py*h*I4&MMk+m40>Y{#2+a zYkWUyq%A;Majmkb0e$|RE!;GfoAagz@;n%&LhQ|SnUN7rK0tv%EbASl<_8kCismRn z*2;}M)@vLhgyXLACa;T+VJOn1fgheWh2t-eA1yT*xVX58Czgx*!Mi*obYoeN#C&Z< zc0aEz%O&5Hd&4WABXo~?p#1Z`D6g1fBZ#r)+*({b6>Mp4Xj~LX5pH!LHrL1!*%tuo zXdhXn@)qaBX(l1NGmAwI@V=PR4r-^ysa-|LKpe-F2vmah1WJM~@cP5XbaPq5Y26lF zX|qWKUJG}}iczo_FuW8l!uk0K@8Z&^VnWV=GEnUvIx~uHYR4~e{A4nR)QuJ2&zN~Z ztBL4zveBq3bB)Ij#|=vCc1}PY=b1@*+55E1?{9PGTABiW_C-}MQ-KD`iXFp{GXfUtz^ zY*GM}80vKLtBGdDv+UnC5k(P5L#Mi4dVUI3C1@`UQ#=imEfxS4Q>a1ri&%7;Dr_{c zW%#>ALgpg4rm7G1T@?SHsA_2O7%8Zd?1OE{bW_7qy(9x`4~=+O>kT~KN0%DR}c^!S<#H0 znlB{huy+=f9}7TB`wAum&1wu^KTK-AcJ0N3%DY*-1xOTM6XU}Ia>R3fawFe-AQp1B z4X=FNtb_Rb8SS$Py9(ntt%0Ji7;fouul8%r_UNhHs^kJaRD8mzxGfZC=c{v){s6*T zfZKXyh=Y!HqTZ;V$#n7EVI}!>BOpo?N)hj8ni~>o(OyR0A24jrrI~X;>;PrWc1e&Q z5Mzgni4eUA(K^aOgJ|Dn7YEa4=0jjLES%8AZ$6+s`&#EL=A&%vta1WZp9FULl) zA5b+tJe76R9fmr!%dD^QN|Q_?D_H}KQg@;O`fboVXx=!ozf>QHg;0Z<9xMZvLq)m$ zuFoO#o!c!u<%1_{tQ{-+1#P_Ij9Shn-@>dIbSFJ)Z_yqSI)AJ&YD;ba|fib_z6OMi+V zzTOg#G^j5UjK)c)L)IDa05=|chp_ZY*)`I9%bIGRLxSh@psF+ej7qe!(_g!Cb32)( zWzqfAdu4*w(J=aY?pw2D7tnO~b@7s!t?_6Ra=i*C6ZWy1azp*I7F5_(_`dwLNH>E* zf4ZnmD~lUbEZoMUNo3~qf&XY_wP~g|OxStFk0$U~wuWal06J2lK37`_z(@U2DWNk< 
zTzc*-&D8nywi5T@R(Qi1@Gf}e^jbEzO6#e7?x31>Iy^pB$32;_&R@Q*{*##DGt^|D z@8{NBa#&TVijk1PM))P(oe{o(Az!FnVp_TsuCul&N|9|yAbD;p^f}&3S z6=8aY_iv-JGQ7Nxe`z_cQ~|w!j?TmAP+-Pq)v-!wty@GuKk2_BKI$=WFNvkAY)h8< zc8qXY6Cl+P?5U0Z4}@Y3=!GlYY-|p?=SfLP1^+ZE{)1<(ub#5T@5s-?osfaYP0bAe zD2sQ4jQ;=>+`!r3t-jlT02G|e8ZA%q9{?3EW~QxG(D?T)5uzR3EY9lLx)0asQob9` zk#PG<)bXMR4mS!a+xv@$CgFBp|EK>8Kz@8yB)mnW1qgq(FR4HqfLom28G{T3oDLM= z#Q=P&%Vke4{<@9*N(8{0yh?QXUx0Q2ykgB`?N2vAn50I~1ibMy^BH7@|EffQ{Q`c$ zn8ohl#GU{qK%e<*9RHgI{D#C@0FKK=JF_t$CyG(w3DdJO<2lnEHMc7lWJHabi>adwlQEfL}?Os63MYFmEbw52><+o7b#S zsO$zv%-`D00F0i+X3sNnUgkL^ZZXgK8UU1>urb=xX(WFyM4Fdt0*58$FT*A79;O@H zOT?Q)A+sXauZ@1OZXPfKUV6E`XeVYB|C2rhV9?&>+UqvupRyP zu`Ph%!BE8q7`qi|lCg>|$YWUHU+?S#ph165I~aJ(qpwu`CQB;(JZi-N>Q7$7WZ2X6 zfHznetr(X+kN{$`4fB(l_EiOP&-H**=Yi%#-D)efaFxjZOaYhY?!VoCmdy2vT(YRP zT`-ie4m=o#-T6T7rbj>I;OOsdNa~`aW?t~rn$d>w!`s7?*Z~mSn%>)`IRx{dj}3Ow zV^S|E?^u_b3CY6exN`fR)oz-9Dg>NsEY+Aq&6$t})2UycK|10Hy`>3dg%UbIMR0C# z{NN_vF}r=+X6$}9u`N!U&KP=|jkJnf6%Bg7J@hhASRHCeQT?KEKq zh{3TCU}D${`_zgu6|tQT;{wW58a}Y_W0=kf`qNJPac0wY<=(hIWy|Q-X?oGmB;t}q zM<$l<)3_}|^PHL@#9o$AOj|+g6iqi$_hs;e#dq7x=ML zp3bn}viYPD9$T%p2iODl<0gNCF<0L_?JzN}rONMFHx=tOtHAMA2uJd~|J6W^v%#=5 zsGkO^I}hWfEAF-*=c(@Y(q!Et^k(+DWjf$ zzlg}H`Mro;xpw<@QzvPti_1mSG0c47q4K#x!M*XqAh|+mg(T$=AAssl@(l(VQj8LE zj>TV{;fH0(2b|8n^5+6S#jv(k7_|0Yq+%x>wpQ4!7{GI4e^WAVb-8Z7H68lhcP}sq z4k>(bt#X_EKJDY0{BImcH0P+AQ6%y`WOUDT85Yu42Hjj2roEt*&9fg$YfCP<$+TpE zp_5Jx%yAJD-(}!azt}zQj;6nXR_wny`(pqw=Mav>&;Om@-GF3OGmOE&04_8S(1Y(6 zQuUqp?|u|O20XR)Ryzg0V=f__Jh8^|^D{i-Qw;;6f*W{z%KR^R0tv`*_4pM&euIbf zNt%G)Z9T99zyYx^+iS4pEbG`+GR)#OKQzVAYXE4^{Y14fGy6s+;QU?auzF$1ZlM%T=t1RQ~)yP+sM;5Z{E}ccCZ62 zyGjnuaLxH*!*=$**F`*?HsV_p_!UwkuqBAy-^>6~;V~e4>?%`S@i`klO4LyP8Gig3 zXzK&EP1tRpd@tuL!|vdu?pV=>`rGSK|D)FPeD;Pizl$RK{x38HcCUl}>Ru^ytmRU7lDnKJ*G6Z&FOn` z;^aF=N!o_Hl;G6>YE=jS+l3%2&CZk9$E(<*jRgH=OiuG~V;V94n?irm&?f$7vCQDE3w3Bk1R!Rb<6#)H`ia6{s(0~Lro<3 z0T9;&3m`fbFhG(o>&(hyxX3cKQ(_5?#S4$aVB z>OfRXgx(a7G{Kjv()X(&IBJ zJtmQ02;fp+t@FLzBl${1{cdfB>WXr5YdE9J^}T-MaGA`a*M7;`(iGfX;Y1|z^cgY+ zs>K!7i)E5!T-t6}X?#ErZZpv>VBbZyXSo)dq_Q>jUu8a?906nN%b`Jjtzo!DScz9C zL$@g!AsYt|5&N}$lEACL%4P*q9;!Abaq1Y&z(>HN;-D39tPpH{c9C`L!+>fwj;P`9 z15I&1y>3~#6~2G3ju#*Ecf#yN%jB2|`#^Oy@&|C?qLM;9MiiG11;%b@4_-KuhOJ`D zt7i&*X%9`nHy;Hkq3eOIM2pA2VbJoMc88Y-XL=YRf@6eB2RXqtAwR%*b$imgJ8-UFU4*6MD>+-_y;0eM{4WFe5WQWfy z%L-@xRx|KLObk4h2nbc)1G@5#{k@S(_iB5@MRZT&)`x}$+KGxSf2YPpPyc!AX5Yu&nvi+MnhgM)ggy^P3#|iwWM@yo4o?8D2}m(H{Za#0p}6=Ob_3% zerm=jB6cj^EA!6)-mG1q6h-N=Jp{^XQO)@P_q%4H;Q8O^FMTL z>6MT=u++*{>)jZI{S4sYvu$0s;tyYgCEuoXzxB;^(b2~8;VqKeSvsJ(rVRcF81R}1 zma3{@3C(K#O)vC-h^FC_L|v(hQ1+9#JEHbw&ue?Cyo)K>Ry^4nYk#<293a529h-AX zTLvb*2`ubMNl$y&=t^7#i1!k_gN}gKJJ3(3A5v;j|MUywsS)l)y|)^8t@ zAcuG;gyRmKyp=33?^yZW`Ap(OCPf!ljsmRjB#as#fqwyUGI?fFDVBjF54cEZy!(q0 z$L0{d+}x2ajB})4*kQg^D;F2tU0SuKk$d|`Jaua{XJz_h&?i8hjTzuh{YgvIdNRTo z*MX`i=M7Lt1S%HK0qnq)s3Np^&h@h|K?c)41J*q#jOTrhEUJFBQ*{D(PJ76UiHPz!O&tOp`xPW0$YLKYL(?D z$dIKXM2EyT>b#7jPu9}<>r2<4^rWq4Mx%2&%R2*5P%QsknyYoN2|Mr>x4M*gHy&zq z*X}^a>kVbxfSM9Qidl9Ije#XZmJRyNHk;dn-zelp6OjwC*xs`%B4*eoJZ+jSq?{jQ zC>i-6Kmo55hym*`9QB#ri_IRX9*Po1d332ef?kQ_BP>S|C9iJkP=bga+UWqV;T@!x z2Yrugx)p*BK=AB9r~Q2M+br@-PrqE!z?w!O%X;(0-^lC^F-AI3$PsVcz*aRc=I)WJWS@Lw4- z;JtVoNR384_V&wbG&6fVgylLvw@T%`$)(#&U*#jmqs_ML?M&gxUz(ba^WG(Tnl(j! 
z;54D*BFiS+t}uddwP>Wx(1VynU6#$>dew~*X6?pgnjkX#vEYi*h9l&#j&SR%a^b}p z`fU7_LE)U6sD;Jim64zOb2^Q-y5TN^Z;gH@6+NM$MwCbG7jv6PIN53LTdZz6aucoJ z$g>|5_Z?7fx;2FT;!~GS^6(vR3iIBr+&n_hdCXk!*bs5EH16muT>N&nHAA6-8~h1} zG3FD-JE!8o%-E4EDiE4pS9LoICh3mMRWe^+s;X%)?098`xdS_?E23>Itftf!vt*Z_ zEt{YhJ@%D?edc9e4}0*2>0GkNGKn>v!;0E%^_dUE^(1n20PvC+Z2ZmaI`S5KQ<>CE z)Ll#=E0p0&0>K znYejscAB`%PV{4NfM;fDr#G7N(Bkgq;W^t0$s0#8X}<Qh6+%Ewib1e#%oip}VMwz2)dCL}&8|JWw zohIhrhwydtOTN`TSl{x>=R7>}_R$^_5Pv)Ta40F>QCC|EbcaM=ydagwGgt6{v;!ux zwGZi=7~dGP z?GTEI6Kt9#wufhCnK+S2#%_<@{$j1qo8+4$Tfvf>O8dvty4LKCU#=0I^dryh1cN+k`$DSE zc5X$R-w->31ft*G72|~6bgN1R0Xof4;*1O*B04t*D418yzk9xVjp05u?2KDQl*d@Q zUJmsQHv*R`<}_C1!S4xW%nCIoV!|^#{=9S5UyMqquYw^0XVvW9NYG6jWg??R^U%QK zw%vkFZ^l0n+y2}qlpoatfuG|AiuMOMstqUMi>~(nz@AM$z=N- zA(T$?G6%h5!xo+`6WgCFD7aOP_%Bn{&8X%2AD$I*v-8&+pebDsBpJRQ#gOU5z`8O6 zv6g+u3{ZIVyAOhH>L_^Pv%3y^(S4ClOhO*W@>n4PQGhx7lVjgtr2>_}#hLAyA(CKAlBq3`sEZFIn3}f9uLMv`Fe&SGGPt-H-#rbVy`RPVSIrO|H9KGrvlZTei zbG%Q!9VlXDQ^*81hunFZNe_{mX<&aA-B<-e9!gJ()d0GtoPRU|mpOf~>K`&3C1)P_I#1$A}3;p+V!`2>v&!rg;mKq;jF`qb0)HoMj^+ zV%)Sqxf%Z>lDW%Zgwc7)jJE_jp|pet&hr-1Bv2$-U>+$KzkhRHItB*8ZjY3o;@fy+ zWQXnk`nHfFBO$utChPH0FUidB;O?tEWkeRiLSv)+HfB$I!iCR|B1Ei;CYP;M`3G(ymYE<66@=^ zGcn7SUQbC*=M#DmeC6PDWi(Hu2SxXFOX<)_rQjy8>^PGO6}+jSNuHQ*{N?jjWC!_; z3Z_bsx09iXa@i1q;7PEcDTtn)L;R;D3B#2Yf43-E-`D+1s^$*9&UV%9XtnVv?Gn&$ zF^tf84`0%`VAQ4&!FjORdk1g@Cnoetot+bVRKthjZ2vv#8!`a;34WC!*7$lG4SoKC zf62az%Iim7RP40i;B`TU17;+Jx$vS#EGKecAlVMVtH>O-ak@*$KY1{`P8hW+i&3bF&r z=s2U&!l3P%-?&Nc!#yACx;P2vbJ6V63S zFnmWZZcQSkM@!|Y>2?pzAd4^mWzHTWFM5@Uy2j1NB{*eHglGKvMN|F z_;}z{?!8u)F+D}q2~d*A|MX0wwe2*PA?!5O2Lqy|Z@1Zo?mgdfK0aixZ8YM4GVwbO zy=Bd*C$+DVl}#3d|Ef1M$-Db;b=f;vcK|!}6Q&fu6L6f(FMJ%RREIa-GBxAM`&nS@ z$0J;n+R#_met>T~g5jiRy3rBz+lXhVNF(o2y2mo>b`Jlf$NZC7j}18(aNWUoXSR?I zt{|Ls1>M){{6?_M1on->(ZD24S(>qE)N|{z$!HRrmnvl}{zEmTdWWG${6FG$A}5|8 zR9~iTDbEl^PWKs2O%}D( zp;1S#)cWYeU>wV@R|G3;lMVVPHTw#gA|!Hca8-$nW*{Y#0Fc5#kjSM*NVhP=VXQ!V z3gEL~F+lS1Y^ny#%me=gK&^0K;t|j7ricH)K`@8{e*Vup_h{q%@S3&V49nx5UA6Qi zyPAmQe9LE$9!jGB)Aj?$1^)x#z^US0F+X9p$4f(jF+eseQd;bw(R zf@U?y8U)!+>iK|=BG0*%Jy4X~3Z0Zk@uUT=;uL^DgM9_$ft@Cn(+y`^6|yFtZ3SFi z^Z2!K#u0SUm>R|F<}T`nrEx=CqP1>;Btfn!dBsR(UGXx0-dN)SatW{O#<4BXZXaUV z&YIdv7g8d;rx=-yzIVx^7gP^051k=)?G+}vWSxGP4uc)kv(ytMEoL zlC$}&^fvHo%+PPVx4N!}Q`*4``7&LZwa?akZu=Yb#(4 zWZVs%9i&;7!VSv8Gko_uA=E7KwRR*rOx@E0r0!#y_B{+&VY3G`HKGx1ZDkBr5r87* zj4B0bqYzpID-jk=RLKh%x=F&1p%!L=0}fp`m_O<98J@%V-#0fztS74%h7$ zpN{PrrMGN=^&!==T=rU&_EyY#_qMCB>Bh&;EX*z1qpVlaWe>j>&HiBE>n^%) zg|Ebjk8IJ*XZy3md`-XclDfHw^V?i&-b>M>xBSf$N3CVHxg0uXf)yg;pdCHbb9Z1s z3;qa&Vn9j@pkd^Ayj_ws1qMXgvFm;_7I@~9gT>ad zN!}k7n16c>`rd3W)p*L-#6!Hg{~K9SkKO=1U^(U!{aaTtOfy@p8%(IFDaJR_n(D+_ zA=w5X;x-7ijDJR34?e9j`k46}wUX0FUf23N2+AW*Zf+A?*FMq?#^+3PGOb&lB#XE{ z$TaYbFuN8nP3k*!XqJv8ZqWYLyw&rKEQTr+Z8VG|ky9y$lN+p*=sSH7))|`>IzYGN zqZgdwR2AJ%^6l`kd;a|BU===@|{wo`h)xAHuyP z5R-Pl#2AdJER%N9fjtbIbNl;`{i1k|70R1IhoX{NTMyCJ3IMGHzcB)^LHa2KyenW{?IV19 zaAprQ>M+byoa$%|BHkx3M62D=QIXkIt1)Ekd1sn^Y?9tgMnu z5I6$C@}oPGV=cixYDp1VJC42ZoE_BRiq*fqE9`xCFavaVFwo6uK%3$^uS@*Cf|65o zC^ui;4<5CM1WdHPx0jjJ7x{a*zA49qN3#P*S| z{{75~h6DR&EKuMP0owz6bE$q_E71AR&jEP7Hvf1CSak!#!bJf z7R@59kWD+CPV+8IzTz);M@`4b4|U64Lw1e31xL@Qu0DAKeVfGvhc*g`<0$GfU}XKO zAI=3PTjgt6_>iE)QojU+dqC^-VP(5slb)V$WjKVs(e*;9Y);@+7^$`kx1C#j67%pE zKM0ctw7P7MxIJDvJZ^~q_R)=nIezoPH#^@<_QS=4kQ>?_Z`(Xj$Q?f!(T%XPZKDOb z0IPg73b<_5^DTqvh`-1B->r9?Ec(Khj*5)#f5A=0S9>^?;LTsa!q@jp*i)%#r#=cI z2vWgDtbql4{VD>C&;N|?D4Kf&6?sAjXbV2|Sg@;W06fY6e5MfMB_7YYKd?Mf@xRf) z{!YN~UoU`u^iZceT-k;oERo5)9manzOPDwS^F$snTfvE*T#pq!0=mmN#?;yV51z24 
zW4{?;KppcHT%=v*P8^(J-#E|$TlPNsMs@#V!92xaQ5d$kZePC^N*zS-lLnMd^?22H z1UTbDqkNB6Wp15N zgkhNrKvy*=#qHsGyXo?HR$O0;plh~7*LwHVd#yQBByb&{LPJ`571Z| z%p-Qa>VDu$tg;7c#P<6ob?zREPO>MNBG3pG#4B= zr=NyXIn4r{@))Cu4gnu?ljiLyZ6nP5E-|?0pD)mUb_FN`-aD6FCqD3XZ)AF?jV*OC zFqX&G!l(=>b4*$GaoR8imx93`b*np&$%2oBn)BwWEK!ncYk6T!EB9=~UC{MDp;9Wp zt2IaXT27uqP4N};ACqa=3b(29<-JBA^X3OIk@c;~7B zdCR#g@t=Ea!4IemkpCQ_6kTr0t&eTmYa~znEj4`TC3-XKnIq(ch0bzq!wgtLkHrAam63!NhOt9Uy~yr&k%62NX6-RaYh%zPuopHX-H`H4zOiJ zlVOf2asehR;8pBUpjIE74ZSAH9s}qEq!#;s-1?%M;r8^t+d?a7_%B#USLl4Nx~SZL z-vpt=4z-#z0z-WXAZd^_9!HDr%gro>iJa5GfTd1CB1njtEig#el%T_e8p~;OeFx%= z&y2upNM?W3{>_kH{0Je!jPx{_o?B^OZ7^daj`*Vgh@()vpqJ;^$L{RvSm>xq=e0|R z(@6rIzLI^}J^1}#7U{)fDuKjxb%f?AOX>L`)z!o)wY?)a0^s&H0ZT0PVN!`K!q>X8 z?_U14SkZTHWH#~N^Kl^f>Lvy3E+|TbM+~5OA2k$`3A$Qc^H$xSeELfdnKAMg{+(&| zerH+JJ!f7*Ott@p#5^E`K4_jTPj^dWJ?KJY38WZWS2^#YTYb1liu z6v1<_2^-5JwO;_YAz>mQk^>&u?3&JWEdi|g<;pKdG&W~zGUosgec0-*&Z3W6lUX9@HnXHZ$a0MUwKq%W=l+t=E=$VXAFBk6B0TKOEP!FWqqCk z>X8B_7Q8{WZoE}r;A^&W%V^MINc?HDPczqIUr*a&Y3Dxz^&tPdcylA=*Z5_NauCng z_>gf^z$7e280K_WI={)D>}lqBN=HGAd8f-)%M6g}2fUHGb<9<{Qx zJlM}SNb8u%H}U7u|6zyZF_=q!fW*BEEX0*YY{wmfubma*`_(9MAn@cpC#3v3iCG$2 z$Km?-bLwE-3-1UmRVp$J|C7whW0#8q4s@R&|NcMwXUjApiWvj((`YCD`&7ZicS5@B zuB2%Bz0R#^q}{31jp?Htovylw z0tI<79oBqQ;U$2&hD?^yCZ|0EJxrlG>TohodiHgbZoqe~J~t13^d2O>HySGc9OHaj z?ziMY%&^*8d0T(XmWEXZ(D~~IAU>Ij>46BnmI45#(opPx#l$xpssb<6AV*}MsgM@z zVNEeP|3&D#4IHI@}hrFCeDl_#_B$1Ys02;){;%jeGQb-YJp!M|kK@ z?n`>_`IoC)f7D>L@U`5}X}R#WyvkOT+GU)xKP#_NSiVKRJ}dr66bUQc3i*ww3Bb28 zf9M9I#w6_SG)p{bH-x6~Nd?{_7_kt2DiJfUs@g7adCKH~En+V1g9rddy}h~g0RHQj zL!H*7{`|^e^5X!tJ4EWBx@QSJ`ytJ{&iXkjEiG+TbFF!mzOUCP0ID*v;&)7cVb|>- zjOdBkcASatvRw2|du%a`e*r612HbhhA~46B&Xj(I`cL?AD27Ij8!$hyDXsn6c<#g{ z3w`}YP=WRv=Ub6P?a93BP4EArpQ_s$#ZolGgcS~$?Z z-%d34v-9^yt*2wvNG3}afFh$J$gL6B2`cOb>9&4iS_5&h!pIpwSp-G?yi&$i>V)ji z%tjdePr3wc`mFH9c))ul1-N~Ut^X5zj3&reV)c1Zb=+itd&U|`IPB8Ti>A!7xtY{Y6l*P|algzL) z)+4x9FBqC2lMnE1w!b~ACh5jtX)|_eW&_DuB@$v5bE3U)S15WamvOb3!Z*d0hIazo zO8)=8>Mi!!rtyERSt%zjfKZLHU8ZIA4IW+H&bNphWFE`1>nCs}Foh40yCUhylj#Nh z0a{p?9}qU-4O{6w=!j$tl+c(3$@qHc>;3&|KoCF|LQo4Mv$i1Zp|;H?X&12Yy#j)> zX)|)rF*RVOm21}G`m?z26~qGChA-3FaxxynH5Jm+le;VEnJ3G^cl*4||Lf35>m(}} z7W~D|t)^D|?_qmLI6s>+PY+%Tg||jAmxezvHZ}%DTG%kw-JO0yHnUE432e6JFMFkB zOFr8kNSX8s4mZ`Jo! z%J3A0{%$1ld*S%cL{g$4;r^l;-mKq^k*>~@PF*$w9jHGpmiXQ|vrb8Dm26T@PaIaC zQINm-Ltwyl{}8F&-20X*DJ;Jy6!(DV_Q1L)0}t&636<&miy)4P?w3~nhx`>RU--}9 z+dPuTETwi(_8~=|fqj<)qcHMMHoR)FnL z5(DHp&!O4UK&X#_<)>5l=#tc`-=*ea{HTVa?eX78U{4iz5lkCL5u(g4A+MIylTt&8 z^lA^@fh*N`Iy>F}ad1+^i6NzTDKR4+ydGTzeerpQ9_-RY5ouX2Xof}ap%dlcD$(8F zgoAfn%f42kTd)KWP8sXM++p7VYr76|_`oxHwJ4+_9u*2+FGL8Z@!RREr?(vJB%*{mAbg=m3S0;vQpfqV_P$qnu;plX)3;SxO z%a5R~^uc1UbWv=-cj<@%r8$&c3>-7+_2Y5e9w%%Qh^cJR%3 zpF=v+*zGTm7CEU@zc~AQ{kPQJ$q5fRO<9e;Zf>Y`V{Ek)zBcQX{HDhq7?F-7i-2XM z@AI0*78Ib-+tA?scUCRzsbj)AMJF1W1=YG1HqUgNI5i4Vz904U2t0pVG!jtu-4KJw z?nIqJP@^V@&K~ahS2nynH2lz0PpeZ-puI+N>A+2hTbM;9TzD8ZPmthJiLObsTM~ z(LT^6ZYoSes2}&+dWJ3@%;EQ{dTKG(mSH~Jm6zaZL;@{mwZYkjJ{@3B;L5Zmr6o?0 z?eN9EhPaOC^heB1X|#c-)LjAKcwk-pI1;J?0fxC}3l6FHV8;BAFPbSv?gL>nfNf7$ zQpTRERWA#ZziC$^ zt$2(NYK*#oMdtW{MAgV*jnFdv8xH!$8kC@xm9-+A_A$ax)$2+Eo)^--2O6a}9a;)?LQeT3fH0zjzp;G{qJeYH*+rvy3YoK3Kf z*R76+22Ui%^OB`9mEHsj_lGMIFwsFQS4|{*EzPN(DB?R55htORo=xlJYT-4G(#Xg$ zHXO%(P9{q59uo0(Na7D6ot#7ngE2u1aGnt%TDZj9)6s}T>ieA<1u z@WDOq&AFw_n*N>wb%$p&1RT%l5NMa3!EsEt86zHN9wKFDf?d|#Uih zqp3>+3XT$r(Jv0`v8{a~kQ(cVj?YaZJ0nb^U+lbun=0_=wQSxFh>zEE=`j|@UpG=!PZlqf_$-CX) z;OTB+a-7x__cYCqV7+X8`v-_Y&%eRG9Lio)!1VtpZ9A5oDAqb6fzCwkCHxdRjcYZ2 z@#W>=V>(nOJbxc1y1Vi*TV;d96!j!dj1boVkQ^e! 
z<2>Zyrsiho_jDklA-*{2A;L1&Eflk0Dg!i~Z6W zr=9pvDTS6t|3XItmiJFcO`;p7{(8QUZU8Q$h){$5af8egsc7_!Z9(lH6o-pOXhGaU z|If()5|{dlhwrb4;zT2H<|V!V@@tc5;LzYh#pge!-FzK=Ehvoqm_VPdys{_iDya(+_$7F9Dy=Nh|6qJxMI-udA=dCYB+piz{ z=+R60S|wTW>{fEWZ%>Z^p~TUvNrFLEfmCyOX4JYk`@2aEbM+|2nA1f`L_m$63wc?Aperq#D{7F^w4MrURZdX$&d_`6D8r`A* zNHB~{kg&|#7tpnEFUXEz8NMkq`P*r-jFJQuLA7C(pH?%|Xe?o)_|_yqbA?wRC4Jw9 zE!PKSxM8w3A0b{8Uovh8sBleA2|>9V2G8UGg1~LEQS7U`nz`WlWgrfWpB3d}Hu{px zu35UM9%bzMQC*!70%%?ABx%nZK38`9^4)&fJ)u}G?j$)Wr9I_l=rKGD43#+0Pp8W> zz9_%BJ`UVU;|*G=@V70bt-fXw_rGadvQ?$um#x0Wn&lzd;xh$=68U4a6cNRuPD>Ko z6dk8AWxQJp4pjsUM%OCh*(Z*_55NC32C)HoUYqQ??Ai zh-()=veul=elhMwkb0ZG1#@g?yK7BQv=vh2pH7tWLoiFFWb?}`0WfaOTtpv6PQ)C8 zXZ>_sDWfk|cd*5I*PQA4;Q}z7E}DqqP)byZj>Y-%C{G)khb4S}OJN_!CrFab>44k#@52 z;bY&g2KvixjHRB0JF>R87Bgh3XW~vYgvD8GNruE=Th6-245#-Yj|~ zYjStbZNGuvNeO?#*Ia*69>G@ImAxH)d8a36>Iuh;n%z5jIo631{oHs-J_p6dAH?GgH z!*m|{{wU;FE?Y^QQ8!DNeCRLfMi3#?m2MGNYiqWGA2Y*70t7K*DY7xca2^dGGyIW6m$C7ID?VA(qs)6E?PpOeDgdWTFZYZlbEA=` zjRSQOCa$Br<0Gf?T!31uC{f20RQyd{p(vGp1m8hWG!&D_kS6zQdJb6{>26Ydp|*tG zJI6{P|ADn@dCDR3(1EsZ!w4T{#+fKz1cXBtmp=1jf?NElj;2a|wL!YRvpjLS5Er=O zoOwtSQqIuXyO*E#;^e;SigbMM1L+gk{uk`r`YAoAE4oHbSH{1&VU5^)|A3B64VMK> zfOmJ25#CKsFTf9^GLs=e7jKMzN$GerMqBv0Ig(3kt0I_k-#UAS>&4>STDD7+FG$*D z)D&xm;eodU=5t1@j&3_kEexwL>DFPK7<|iE1W^O@@A1 zS-F%bWG4PAEd#*{jBE6pj$uzvw*^EwFF{m`F2Vn)*HBBl=&MDAqfFbpDsG!(O)*y` z%wxmUVtq4HnXfUbP*nuvKTYIU6|>L<-s6{qLZ9hRGS$0osjfRW;fhHIELa2 zFzd3wlfxqT)vJ^2zFjl<&cz_e)KR*4dulp|)uxPt&*iC=_44nN0U}9!oreo^r{w|< z7eLh>rzfy}&p3nrN(FRsIW_~g&;fmRkq#B~oh^;sG>&afo3l~D@1sm!PB^X@mqeWi z+7t9vY17QCJ2fC}jt)Y{hac*NVjUc4Nvx*Wvk3QeQXkaf-%(*ahDWvBi~P)k-J|D# znKSq};bRvO^+q4O3zi@%l=kWFld8Cmg(Q!GAewYP`pK(q3HmdnP_qi|xZT;Wo9Dw8 z`PHqykB3v)@bSx$B#~6nw5R*6p(G{07gx68CLuget|;xUES5X&ylzjO-6D9J#rX0S zbTa~^@G>X!WzQ*_)b0suz`(c~_jf$;*;R^9L)CqKtGb3a`{2)o2 zA#iHhqr#kFeFA03jC*1}yGges;`-fO z+S;vSA|QRr9$Hye2QX-i&N7^c5n006eMP-lh$ZLma|J(lP?OM_UN5M2Ac+wjqupic zfN&kpi-eki`p5lWP#aKJ(dK$M{GkEXg zqA@m@Q@bAZ)M<#QNNA?CtVy=~W~K$RZc54Yg@LP4b{4p->KSIOk5kN7ZQfWnk}Akx zt_&(;ON1W_)HB@HR5@k4GDO|~V3>T|=-k}xAgWdLHQscM7!ak_3z0_t3tBPm(RwJo zQ`7s4wuH%RqWMXR|6%?OfOiOi!xjwBLLgm4n1>w61BcQ&2Jilc-a2Fe?(~j0TH)`lS}=gaBY6|Mf1&sPc4Mip@}s#j zi#b3f=mbc&GOc|OapK(<lu&Ooy#ylZ+vGx=|Wo&|AYZsXunm>+Ux?#1wg$rs-5YX5L8DWZ^e0~60wwBVT< z|1LcErxqYUe%XxfeFZ2_y-kN4*etV;+Hah^uQ$`BTkZh0Zp%iXN0T>D>}@&@IQDbO z3%J0^29$GNzmbf;*8ttsF))0L99}K>p`S6xe@7Udk1bWpTqcF=ozXn~g9C=349sEk003X>FHnF|?Bd*Zp0@wJ zBS_K_5TPCYnH~*@@D8*952NiY5zC}Fe!GTtWO@t2mp7GIm! 
zDzReX7I^>udD%EDgxXc114{dn2WUP(j|@ypT|G9`7Wf(-=(*X;Kuwsw<;Vi#q3_u% z?_LtsCVkZ~b}haj;-!ZyWym)_73aDg z+#hcmWAHoBHP{t|Vln!=oJ37UK_vma+wWS;E}8O?cwU^i>rY?|b{H58K+6=y52q_^ z;W+IadH!!hEn#~}s=rocMctk?&F1}GTxX8=f2>bVu%(km1P@}JlVZlW7H9*S>BA5F zFWbRTeGd#p4#{T!JmryNW7|+0=;-lR5-#xm z?loNE!2{Gxor$is{(rB@r#~pG`!`zIkv>k6SRSl1GasBje2m{x3$6>4?$`Wsv*Pu{ z)bE_)@MxX@fBa3j_801ob~(E7VJPO&nQ-y4XO#nWYx4efzdn`h`Df{)Z{y7XTg=hI zo|@U5HPXCsN39q~v5u>xq>dt8C8E7zp^_>utNtwe4#aR94#e zn{{UN(n#R&KggKOzlp>r&P(&!ADVqXBB6&C!<{jgSc1UjkZ@CnP-=B?BEu*GFr5v$M0QYjhB_oJ)0k%v%-Uz*y_Ic0=m6 zp}g>GzW_zawVd;bY#oNFdGC$L1umyyp3T>G)Otw+v)ST2&6v(b|KyroSF1+U*G$!}kAE1s&hk@6hSDfKS*A$dHHGO890rR#NL4|wK75LfXg0lE%QmP+Z2%yQq z?<2u%bjv)y!+HZ^XIH&>s5Y>X-7?N?EsgC0rGEHEh%-G6bW1gW6G@_Eynk8d(-JFN ziSB-gZ9{Q}hc16~4iW$n;EhXtz~;ye3c1wJ!*xr)p1t`P+rHgDJL`Ej{Bu9dKqY1ueN*m}RbS*Lvx z*H5r4K62y9_Y!O|ZHI}@yKA!jpBVglaFnq4MX^y)w@+>M2wMQMAGaXRN@KbLy^_AO z8IFL2d{{E&`D=2-XdAgR0$@dmVgQ7Q%n*Lj*>NR>6FtZF>n$gMUdK&KS`;p_NxJ|5 zKklb8fwxzy$PMI4&Wv}*&9b0$f+*~9o>0l0sUM$)FVkzQULT-KT=|#tS>v8YBg*l3 zsGq-5{YGJ-SJ`v|Ys4{0i`QdpB5Vw+3Y!!X6>C5MX6iKm;nAx@;Rq!M8n!XevxAWL zqL=i|+7SL)ZMppya(Co9K0JtbD=@}U)Q{e-nb1?AE?m~ypy+Y=#*9!@_uU(9#C$+K zoB{nMuH+!+v>SQ9ER0`vSb)!MgTqqE{UDDI_!C- zyASTn=ty!GSs-iG$kMngma_TI!P&z$H_%=%F|40KGn+Ew3g>e?k{_`C0Xtivm$21_YbmX>S9+9n4(gk7G=Un6X7HC+{|In&L@5b@^a z!%~~p(!!8pD+ZG^f7G{*-!_$=GkfK$gdM7lvAVy16;hVfTNy{-BYk$aQ&(jaK24BT z6~0QchBJdCzAIZjnlsXvt=-Bke7zP+*+#5|IATrt4|g1z=^EkIpf5v$c^xsh-lwji zk&kfsUfszmarDhnF`X=;d&unGNb>4C{|UB+&T$%RFg8LZ_X=XFGn7IFDSEG|KTDEe z2c441q&RM^$|bXzhkz`VIUQgP)XBWp=Sw}7=InXYuCc=p;k4r=9pYkkO-R>|3}}+7 zP9RL+v*WcC5$DoL&_GSCi$K{*5=HWwX3j^bak}|Pl{@pqGImP4*WuBNv)VJ=Ca5-N zr$glzAx>I+mHIRh;)(UXY3dzXyv13e=aLZ#mG#erNSsUw?d0nM(m@>=8R7aw=CG4T zI$lMq7KNE?Wxs8Hh8AmqBBB^qlr=W;{Ab@je>-V}UH174A!~?hpJ6{7Jf%X+I8KG?~_noP#xDnU_0|oD)75X_z@_fIqMBAm_i|g07}& zDblhzn7rb2HkzwwTr%>cq{bvbJi=Re6~dTYnyz!inQaKkRJDNP5!nY&h{VjbTvOg; zW>~h8R4T(BoEu9+LauHDT?UowKWs)kRXM3pS?y}l)b~L?noS4IW$w3iH5v1#J^2Bn zxA>3xlv>FDGNvWkN9Y2o<(kfBk}I~xwiP#CL1))yaN==ir3RdX7c!>u-f}C}+l${X z!{25>h~KJ0kDLsl!lVDpYX(v_7lYNIiMf_bO@k-k_Hia|D(|r07^btT(O#VhasKsZTutrduRBRIF+Qy5oTC2aDMS`=!`UzNKn?!_TJ(Gf*uHOv}@-xQ~g1Nmdf5 zBBB!tZp*cYsJ3%z+A`Ek7YH_;__|MY-ls83hqDIFOmT^@aez3$lVHSsX#yfDc&a~8 zUK|VBUcO}0n>7)R&m@I+Q0f+c1v&azZInF+SZIp`=#xeKbO=?qd|!r+Sj()% zI2%vf6~nHX|B2e}P@G+pb|emH;9){ki{xf6Ldi7K$$7cb6%4x-{obbTGr(gfSSa^; zq}NrV(YKhEq!n4BM+f5w6w_;c+6>UPm!mr)sC@Uv@I9;+{$C}X^W%mZ|H<&r08JXp z+TO&5^W(DDGTUC!?$fTGOJmr0XNS$`7bl*MT4_rLq-ry)>SVFxSVi{8Yc1Pj6;fxT z8ReIqmvr9Mc8Om{jT5T4@a1s*__ZAb-&J=nO%4@7htK45{gos#{J5I;o_|=f?6sv{ zzg)J5|Kon>6ss^_2dBR}-gSZ;vN-0rH7ZswXU+KZ_jicx{pOFSz(w6S=zfRePIB%_ zRB0^7a~Qn`h;%Wcr`W7d0z2Q58&HdU)!_2-j8K$yYUUeBEKui#x%(h`Kcg;bg)UUmSxs`F zxuB9J+S8vAkwc3>r5kJJbidkIE!e#2w9-$}K+n#uV91ep%qpqL@6+1?%PJkXM#}pe zhlBnLqW*ZzeA~yCOOxzHX4TS*euva0oar(YfW^r*avy3$XBh9(S>M}jwY9!^4o0W! 
z^4iKk(p@~8jp9RR>^qx4x$7K-tB?GXvH$;_>ZvctXo`x{oIT*Q(W!Yxz+=?^ee-MP zqX*KvLOiiVFY7jp0=ZePV?~2TTr6?DeDk0iSw!H>y9NT!lO1Y#6Ciem8Jn@KCkn4; zKc9>8?cLRY%~?TOQi{*kN|zztvELN^v$N6@ zKKMg??hHS0FyJ0k$AIano$3*k*@KGR_S;mvW>3TDy6xK{g^1h^s7q6cVzFM{j%N1Z z0*8CxD}t+ZJ-5T=PJ6IvldLskEpfTf)X~S8$*QUS~04 zsB3u9nDB%z@KbAmVxR?eHW~cP^X5)^iTuG2?j_T|&e1v3=tdsf>;RpCE0S%wEy?rXu zC|8z%P7gXWC8@JB`&8t1ym*NF`JjDf7i{T;A0Dwff?5pT7q2Z+FMRO@Zx5J8+=;8W zopz@dRJtZ#MaNZzyW?XzY7FkU#F4!3i?0!VCxn<=!+0g4wX}V0N%Atc&)!FcYyBY{ z9^OI31`s4J+RkbR-30o(L4i0!r(XBN+ z{fF{Bti!D>Z!_f8cy-h+BYg~7qt1>KKs8Qt%MyeFgaVr79?)uC25>7ek6xXXrWz+@ zN_JbAU6V<&iY;lG_uLm4;VtKu{i0@~^N^VLAqbXw*YYuyc{vkr`D_wY6t|R%Iaxuf zkOHI@;nLM@%UiNX5~}&rQXZLTST4pklxmv+OZPAGq;*DNGa9E=xA?22kR1yN5LhLv zHgG<{DR}5JB|f#-Jm+J#p>lFJnZHHiAgeEa!g2Wx6-fui%sMG0?F{dV^-AK|spVUt zkiyign|}J6W-rs&!y&r<*zvf{w-6PufWta%X_|avT*hzoN*;W@7&-?Sk$@;q0Up zbRtK$tR7UXDQ;YG$iCX2ILY*$j$BXOG{rxXxtTCL@EaK{Mee1&^rmXXZcjwIg>=hd?zYQb0yC zgTxt1sstL9fY+H{UZs5Z80Shjy6L1} zN~}kDA*`!Qz7JPGJ&mhbMEk}KoBf#c#J9h?){3fGe3QcfvuX%BF5P(X2nV5wC>r;f zbq}|w=s$T&*@WT+I>F>lY~sS>;>V3vrddsL4|-I@-FAFW$y7nkp(V*FCF1+kgYS5g zQT=nUNNU{%dqm(#9BO&-L?M)t5IZ8pG{v`NKxyN0J`ev{9By$(c6w4|=67_{7trVU zVSyY2$FXB0Qs)^ha7!18;l=ugUY%}EN0=J8y<6T#Ms%%&watRh6UV51FZyLlI+fJM z+=-#I6A40BeU^(OP)LI%zib?)K!f!_Nt_WgMkw9a_@D+%gR30nG_MfYnN`!70n)A& zYP)LBv6N>vg5Sb)I{c^cVeblHt?t7xEsPSFX+Au_^_n@xgb+jhMiLc_yu$d$@4Aw&uZM<~v>zM?<6y?= zWq8ri+@H66Gr&e)-FZha&0OxJyq--8lUxZPXuf0qRE(G6A3Qo`tuD#;bU$V`riKFM z306^E*bo{2mc@kXts}8?a;?jyUI#u&>!%Om^LCe9$M>yYQ!_CCN%0u`x4f6?nb>{Tf!?#-639Lk= zY6Ph2{-!ykE%3q7QqoJy z6v}LK{p1evK)@(?Baezuaq_*y!NR)xBb66UNBO}>%ITZ1=d7)Qr%ZKMYPeA}3N$DA;s;`Mc&*+eb=*-5emN@VGj&m2wgzDR-XKWi6k<~55KFaF~aD7B5Hyga;|u% zhr%ZEM-5jJvEcm;1!__$%=@qAZGCXxsA7At--9w}baGAMO=Mmt9&9NB$wu>@QZvcl z>QX)St9Q)|5B}B;U#S7L!*{htDO`V#6GNs%fD8p)M$|+T>7MGx3jb5Q3hVe{{wpbR z0To93A$#4TzsK^JYUBT8JfdiRohF2DDCKWu$U0qIfPeqZh^+)nz+x9_*ufKBO+iMxQ~#^#4%$U#S3Jo}z@Jfjr84xGxm4 zCoT*#xU~`2hrO+^mpM;cd}C{|h*SMYZqe1u5f=GzJ{hk60&ZAsDPWwx*rpuVzW^F-?N^`P0RfTk!a7+i9%p7aca&5!NZdw=T@&0l^p_SLm%Y}Y3bD6a&;w&KGTLwR z3?g1~%WG5xQAyoVijHeZN9zu*o_~}G0FL|zw<%wAY&gE>|CN@$fH`#WSliNEIUZu% zKt_a!LY68!7KBjs>3?(I9(R5f+G1qK`2t;`o6yBR3jj(fQfm5*!*&j*`{&|!=flDU zpi0Ev+ZE2UBgqc0O`bF3R^cC)Q`VGmA&jSgMub73jw&2~xSygt_X1u9UT@lr8ZTcP zH5ZM(ZklBG?BCzP_PA^D@lXFXjK1>b57H?%n>ehG#Od8$IU6B2sDOn>@>;J#}{e!quX%kKAr?l_s^Uj4%@?#d)Ue^07+JM#Zp|2lREGsYWS0jw$y*Io^HXY;{^o^sG zy4Y91q@mRULrGs2nEE829`qFt&iuN=9gEiAO0*mX>V)S+=$`_j3$r zlLZVxA^f;jTr<$WyNecPX?$_8=kZl8r^)?>66_SlnDmEd?37Io9MOUG$^FDYB2 z%ZZ!@5sF?z=eflD58^eCL2Ec@Uv3PU+mf6Syv`>-r+nEhusTs7l7@>mMABvhbk z0DZWT=|7SqE?^bZt*iQDKeF3?f6H^l7GXHPN@z`UCJ{;=U3wz-o=4@A=+5r@n@no#?M zCtLI|qTB>7)f8O~P~sVbnp35?2665jU}gDv#6Y6qYM|V^FQS)1IDnva_G>5aBe0rupmBv7hzLcV2VZ#YS+O-}QyO7a$L4EK}N zYT2=9H0ye5oy{I_<41A*Wzc(C>omd(X!~wmUi`w<`U_Ymf^?kWuEF|q)@1d}uJ9}9 zg38A99tOkSw;`fy%inkIU`9Q8r0~ofjFu|uL<6Q%4J=$GNDO_v9Z>(du6CwU)s$0G{f)GLC{k$H3o{LjNpDxmQ@&nZyVd>+gIv(&l<=B_lYa2v` zv6nV0gmCgrm~KyKFqU>tYePUA3EHhM7qrt+awf?+2;3Mc zXOx&>LL?$UHItk$X7H%cHO3Us^Y=Dx@brI7jCSj7a?$eT)SK#+USA-m-!4RW2mU)InLczOja+Zx-pjX}}>@G4Vs!4^(994aO%52$3I3@{P zTD=(JFc1SlKUjMVM&?=$_JL<-27RQx*s=)X*Y*gtc-$qKee)Tf4fV%H5dHcV%a64K z5IRDH0n=j5iPHX7Lm6YdzzM=n=c>B>Hlw8f!!;2s9Ij!M+;M)snxoOS+3?=bE@bf= zo+XV=>4^oKy6JANW!Ja!QFqh8!@%Ex^^JMrE57kI&rV^cuxX$psq53UR^rw)Xls0> zPAfh5acee9*pj`K=i@nB4N)*!+9HZZNC*xbM|xUfVL7u>$qb-YZGuoFx#Yzuf`Z3@ z->yGKSga2;);1+x6`dxz?EUk$4G}NH4)bApPhK1Zp=5YN%|$MzHgDA#raGJSSyqC4 z5Q_{fsHI;dszs?~zSN&C0@Y#=Z(%}Y21E5g$M*MEh%F-_kLZPu$h`yfQK@26j({`t zF*X1m=b5s6@_(h>i0*Q6qi)WotA5Y}2Et{RL@t3ZTMTt$wVe9TG*a0KBPOC;xvea9 zm}~$6(!B@x;YTWY95!bPvkyQ)R^=`=^j 
zr;Thg+J?%4l5yG<5>>;BCF-x{W2$H{8KzTqEJi;x$tL3Iw)1`<DE({b2&3*^#G;l{T=8oy3_wm zZMtmGo7WN$4Ba}4ewP8+OxK6sl2jF>Zv4sCISmg^BPZht#0+(0Vc6GQH^Zzgd|hF; zx~5UWMY)c2K?nUReyoCZ^jaG@fQXqhnAbA>;~E=E?# za6&H8-B6FW7mmYfPj!!?%nKn=QT>LiHG+dZ_%AvB<4FS6*qP#e(I+3M^zFSVqn+xa z=S(Lmzv#g~NjoUp;f3U)?P4y(SuP&L8z`nEW&#Nu3dWo(}^~M8{5OZ4N8~?V-_S z5Ica1X#cKMN9Awe!K4BhEfuBhzxVmy@B9Bz@&k!V5qAm+`ntiyQ9ue3Gda=xH)atf z06vE1Q;MN~Y{6dO75ZN7{E1KjF)b$Wj76Vtt^aY#EP$NHLiB&+`kJA@GfqDX7Y2l} k=*JWSF7 Date: Tue, 6 Feb 2024 16:19:26 -0800 Subject: [PATCH 21/40] update image name to match the image url --- .../{edit_worker_nodes.png => edit_nodes.png} | Bin 1 file changed, 0 insertions(+), 0 deletions(-) rename templates/fine-tune-llama2/assets/{edit_worker_nodes.png => edit_nodes.png} (100%) diff --git a/templates/fine-tune-llama2/assets/edit_worker_nodes.png b/templates/fine-tune-llama2/assets/edit_nodes.png similarity index 100% rename from templates/fine-tune-llama2/assets/edit_worker_nodes.png rename to templates/fine-tune-llama2/assets/edit_nodes.png From 8d59c1d1bfa7fa8c98914a663881fe2488568396 Mon Sep 17 00:00:00 2001 From: shawn Date: Thu, 8 Feb 2024 03:09:21 +0000 Subject: [PATCH 22/40] Updated instructions --- templates/fine-tune-llm/train.py | 43 +++++++++++++++++++ .../llama-2-13b-4k-4xg5_12xlarge.yaml | 0 .../llama-2-7b-512-16xg5_4xlarge.yaml | 0 3 files changed, 43 insertions(+) create mode 100644 templates/fine-tune-llm/train.py rename templates/fine-tune-llm/training_configs/{ => full_param}/llama-2-13b-4k-4xg5_12xlarge.yaml (100%) rename templates/fine-tune-llm/training_configs/{ => full_param}/llama-2-7b-512-16xg5_4xlarge.yaml (100%) diff --git a/templates/fine-tune-llm/train.py b/templates/fine-tune-llm/train.py new file mode 100644 index 000000000..6a1c582a6 --- /dev/null +++ b/templates/fine-tune-llm/train.py @@ -0,0 +1,43 @@ +import sys +import os +import subprocess +import tempfile +from pathlib import Path + +import yaml + + +def read_yaml_file(file_path): + with open(file_path, "r") as file: + return yaml.safe_load(file) + + +def main(): + if len(sys.argv) < 2: + print("Usage: python train.py job_config.yaml train_config.yaml") + sys.exit(1) + job_config_path = sys.argv[1] + finetune_config_path = sys.argv[2] + + job_config = read_yaml_file(job_config_path) + + entrypoint = "llmforge dev finetune " + finetune_config_path + + job_config["entrypoint"] = entrypoint + job_config["name"] = Path(finetune_config_path).stem + + with tempfile.NamedTemporaryFile( + mode="w+", delete=False, dir=".", suffix=".yaml" + ) as temp_file: + yaml.safe_dump(job_config, temp_file) + temp_file_name = temp_file.name + + # Call `anyscale job submit` on the temporary YAML file + try: + subprocess.run(["anyscale", "job", "submit", temp_file_name], check=True) + finally: + # Clean up by deleting the temporary file + os.remove(temp_file_name) + + +main() diff --git a/templates/fine-tune-llm/training_configs/llama-2-13b-4k-4xg5_12xlarge.yaml b/templates/fine-tune-llm/training_configs/full_param/llama-2-13b-4k-4xg5_12xlarge.yaml similarity index 100% rename from templates/fine-tune-llm/training_configs/llama-2-13b-4k-4xg5_12xlarge.yaml rename to templates/fine-tune-llm/training_configs/full_param/llama-2-13b-4k-4xg5_12xlarge.yaml diff --git a/templates/fine-tune-llm/training_configs/llama-2-7b-512-16xg5_4xlarge.yaml b/templates/fine-tune-llm/training_configs/full_param/llama-2-7b-512-16xg5_4xlarge.yaml similarity index 100% rename from templates/fine-tune-llm/training_configs/llama-2-7b-512-16xg5_4xlarge.yaml rename to 

From d70f326b77f460ce61fd2e14a43f8b6880282632 Mon Sep 17 00:00:00 2001
From: Eric Liang
Date: Thu, 8 Feb 2024 14:24:12 -0800
Subject: [PATCH 23/40] fix up

Signed-off-by: Eric Liang
---
 templates/fine-tune-llama2/README.ipynb | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/templates/fine-tune-llama2/README.ipynb b/templates/fine-tune-llama2/README.ipynb
index 1988889bd..a6a704653 100644
--- a/templates/fine-tune-llama2/README.ipynb
+++ b/templates/fine-tune-llama2/README.ipynb
@@ -69,8 +69,8 @@
     "\n",
     "Voila! You have fine-tuned your own Llama-2 models. Want more than this? Check out advanced tutorials below \n",
     "\n",
-    "- [Walkthrough of this template](./tutorials/walkthrough.md)\n",
-    "- [Fine-tune Llama-2 with LoRA adapters](./tutorials/lora.md)"
+    "- Walkthrough of this template: navigate to `tutorials/walkthrough.md`\n",
+    "- Fine-tune Llama-2 with LoRA adapters: navigate to `tutorials/lora.md`"
    ]
   }
  ],

From c640b14460c1f02808a38b634b309fe511a8eb33 Mon Sep 17 00:00:00 2001
From: Shawn
Date: Thu, 8 Feb 2024 16:55:28 -0800
Subject: [PATCH 24/40] Updated instructions

---
 templates/fine-tune-llm/README.md                    | 6 +++---
 templates/fine-tune-llm/job_compute_configs/aws.yaml | 4 ++--
 templates/fine-tune-llm/train.py                     | 4 ++++
 .../full_param/llama-2-13b-4k-4xg5_12xlarge.yaml     | 1 +
 .../full_param/llama-2-7b-512-16xg5_4xlarge.yaml     | 1 +
 5 files changed, 11 insertions(+), 5 deletions(-)

diff --git a/templates/fine-tune-llm/README.md b/templates/fine-tune-llm/README.md
index e29bf8331..83cf2785a 100644
--- a/templates/fine-tune-llm/README.md
+++ b/templates/fine-tune-llm/README.md
@@ -21,16 +21,16 @@
 directory for different base models and instance types. You can use these
 as a starting point for your own fine-tuning jobs.

 First, please go to `job_compute_configs/aws.yaml` or `job_compute_configs/gcp.yaml`
-and specify your cloud name under the `cloud` field.
+and specify your cloud_id and region under the `cloud_id` and `region` fields.

-Then, please get an WandB API key from [WandB](https://wandb.ai/authorize).
+[Optional] you can get a WandB API key from [WandB](https://wandb.ai/authorize) to track the finetuning process.

 Next, you can launch a fine-tuning job where the WandB API key is passed
 as an environment variable.

 ```shell
 # Launch a fine-tuning job for Llama 7b with 16 g5.4xlarge instances
-WANDB_API_KEY={YOUR_WANDB_API_KEY} llmforge dev launch job_compute_configs/aws.yaml training_configs/llama-2-7b-512-16xg5_4xlarge.yaml
+WANDB_API_KEY={YOUR_WANDB_API_KEY} python train.py job_compute_configs/aws.yaml training_configs/full_param/llama-2-7b-512-16xg5_4xlarge.yaml
 ```
 Once you submit the command, you can monitor the progress of the job in
diff --git a/templates/fine-tune-llm/job_compute_configs/aws.yaml b/templates/fine-tune-llm/job_compute_configs/aws.yaml
index 19140cf72..bdeac9ad1 100644
--- a/templates/fine-tune-llm/job_compute_configs/aws.yaml
+++ b/templates/fine-tune-llm/job_compute_configs/aws.yaml
@@ -1,8 +1,8 @@
 compute_config:
   allowed_azs:
   - any
-  cloud: my-cloud # You may specify `cloud_id` instead
-  region: any
+  cloud_id: cld_id # You should specify your `cloud_id` from the `clouds` page
+  region: us-west-2 # You should specify your `region`
   head_node_type:
     instance_type: m5.4xlarge
     name: head_node
diff --git a/templates/fine-tune-llm/train.py b/templates/fine-tune-llm/train.py
index 6a1c582a6..298fbac25 100644
--- a/templates/fine-tune-llm/train.py
+++ b/templates/fine-tune-llm/train.py
@@ -26,6 +26,10 @@ def main():
     job_config["entrypoint"] = entrypoint
     job_config["name"] = Path(finetune_config_path).stem

+    api_key = os.environ.get("WANDB_API_KEY")
+    if api_key:
+        job_config["runtime_env"]["env_vars"]["WANDB_API_KEY"] = api_key
+
     with tempfile.NamedTemporaryFile(
diff --git a/templates/fine-tune-llm/training_configs/full_param/llama-2-13b-4k-4xg5_12xlarge.yaml b/templates/fine-tune-llm/training_configs/full_param/llama-2-13b-4k-4xg5_12xlarge.yaml
index 504ac144e..55fc3778b 100644
--- a/templates/fine-tune-llm/training_configs/full_param/llama-2-13b-4k-4xg5_12xlarge.yaml
+++ b/templates/fine-tune-llm/training_configs/full_param/llama-2-13b-4k-4xg5_12xlarge.yaml
@@ -11,6 +11,7 @@ num_checkpoints_to_keep: 1
 output_dir: /mnt/local_storage
 deepspeed:
   config_path: deepspeed_configs/zero_3_llama_2_13b.json
+dataset_size_scaling_factor: 10000
 flash_attention_2: true
 worker_resources:
   g5.12xlarge: 1 # <-- this maps to job_compute_configs file's custom_resources so the appropriate nodes can scale up
diff --git a/templates/fine-tune-llm/training_configs/full_param/llama-2-7b-512-16xg5_4xlarge.yaml b/templates/fine-tune-llm/training_configs/full_param/llama-2-7b-512-16xg5_4xlarge.yaml
index be8283040..eea93eb1c 100644
--- a/templates/fine-tune-llm/training_configs/full_param/llama-2-7b-512-16xg5_4xlarge.yaml
+++ b/templates/fine-tune-llm/training_configs/full_param/llama-2-7b-512-16xg5_4xlarge.yaml
@@ -11,6 +11,7 @@ num_checkpoints_to_keep: 1
 output_dir: /mnt/local_storage
 deepspeed:
   config_path: deepspeed_configs/zero_3_llama_2_7b.json
+dataset_size_scaling_factor: 10000
 flash_attention_2: true
 worker_resources:
   g5.4xlarge: 1 # <-- this maps to job_compute_configs file's custom_resources so the appropriate nodes can scale up
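A note on the `WANDB_API_KEY` handling added to `train.py` in PATCH 24: it writes into `job_config["runtime_env"]["env_vars"]` directly, which assumes the job YAML already defines both keys. A more defensive variant (a sketch, not part of this patch series) would create them on demand:

```python
import os


def inject_wandb_key(job_config: dict) -> None:
    """Copy WANDB_API_KEY from the environment into the job's runtime_env, if set."""
    api_key = os.environ.get("WANDB_API_KEY")
    if api_key:
        # setdefault creates runtime_env/env_vars when the job YAML omits them.
        env_vars = job_config.setdefault("runtime_env", {}).setdefault("env_vars", {})
        env_vars["WANDB_API_KEY"] = api_key
```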
templates/fine-tune-llm/training_configs/lora/llama-2-13b-4k-4xg5_12xlarge.yaml create mode 100644 templates/fine-tune-llm/training_configs/lora/llama-2-7b-512-16xg5_4xlarge.yaml diff --git a/templates/fine-tune-llm/training_configs/full_param/llama-2-13b-4k-4xg5_12xlarge.yaml b/templates/fine-tune-llm/training_configs/full_param/llama-2-13b-4k-4xg5_12xlarge.yaml index 55fc3778b..d75dc5a01 100644 --- a/templates/fine-tune-llm/training_configs/full_param/llama-2-13b-4k-4xg5_12xlarge.yaml +++ b/templates/fine-tune-llm/training_configs/full_param/llama-2-13b-4k-4xg5_12xlarge.yaml @@ -1,4 +1,4 @@ -model_id: meta-llama/Llama-2-13b-hf # <-- change this to the model you want to fine-tune +model_id: meta-llama/Llama-2-13b-chat-hf # <-- change this to the model you want to fine-tune train_path: s3://air-example-data/gsm8k/train.jsonl # <-- change this to the path to your training data valid_path: s3://air-example-data/gsm8k/test.jsonl # <-- change this to the path to your validation data. This is optional context_length: 4096 # <-- change this to the context length you want to use diff --git a/templates/fine-tune-llm/training_configs/full_param/llama-2-7b-512-16xg5_4xlarge.yaml b/templates/fine-tune-llm/training_configs/full_param/llama-2-7b-512-16xg5_4xlarge.yaml index eea93eb1c..7b764dcfd 100644 --- a/templates/fine-tune-llm/training_configs/full_param/llama-2-7b-512-16xg5_4xlarge.yaml +++ b/templates/fine-tune-llm/training_configs/full_param/llama-2-7b-512-16xg5_4xlarge.yaml @@ -1,4 +1,4 @@ -model_id: meta-llama/Llama-2-7b-hf # <-- change this to the model you want to fine-tune +model_id: meta-llama/Llama-2-7b-chat-hf # <-- change this to the model you want to fine-tune train_path: s3://air-example-data/gsm8k/train.jsonl # <-- change this to the path to your training data valid_path: s3://air-example-data/gsm8k/test.jsonl # <-- change this to the path to your validation data. This is optional context_length: 4096 # <-- change this to the context length you want to use diff --git a/templates/fine-tune-llm/training_configs/lora/llama-2-13b-4k-4xg5_12xlarge.yaml b/templates/fine-tune-llm/training_configs/lora/llama-2-13b-4k-4xg5_12xlarge.yaml new file mode 100644 index 000000000..3393528de --- /dev/null +++ b/templates/fine-tune-llm/training_configs/lora/llama-2-13b-4k-4xg5_12xlarge.yaml @@ -0,0 +1,36 @@ +model_id: meta-llama/Llama-2-13b-chat-hf # <-- change this to the model you want to fine-tune +train_path: s3://air-example-data/gsm8k/train.jsonl # <-- change this to the path to your training data +valid_path: s3://air-example-data/gsm8k/test.jsonl # <-- change this to the path to your validation data. 
This is optional +context_length: 4096 # <-- change this to the context length you want to use +num_devices: 16 # <-- change this to total number of GPUs that you want to use +num_epochs: 10 # <-- change this to the number of epochs that you want to train for +train_batch_size_per_device: 8 +eval_batch_size_per_device: 8 +learning_rate: 5e-6 +num_checkpoints_to_keep: 1 +output_dir: /mnt/local_storage +deepspeed: + config_path: deepspeed_configs/zero_3_llama_2_13b.json +dataset_size_scaling_factor: 10000 +flash_attention_2: true +worker_resources: + g5.12xlarge: 1 # <-- this maps to job_compute_configs file's custom_resources so the appropriate nodes can scale up +lora_config: + r: 8 + lora_alpha: 16 + lora_dropout: 0.05 + target_modules: + - q_proj + - v_proj + - k_proj + - o_proj + - gate_proj + - up_proj + - down_proj + - embed_tokens + - lm_head + task_type: "CAUSAL_LM" + modules_to_save: [] + bias: "none" + fan_in_fan_out: false + init_lora_weights: true \ No newline at end of file diff --git a/templates/fine-tune-llm/training_configs/lora/llama-2-7b-512-16xg5_4xlarge.yaml b/templates/fine-tune-llm/training_configs/lora/llama-2-7b-512-16xg5_4xlarge.yaml new file mode 100644 index 000000000..ec87c0839 --- /dev/null +++ b/templates/fine-tune-llm/training_configs/lora/llama-2-7b-512-16xg5_4xlarge.yaml @@ -0,0 +1,36 @@ +model_id: meta-llama/Llama-2-7b-chat-hf # <-- change this to the model you want to fine-tune +train_path: s3://air-example-data/gsm8k/train.jsonl # <-- change this to the path to your training data +valid_path: s3://air-example-data/gsm8k/test.jsonl # <-- change this to the path to your validation data. This is optional +context_length: 4096 # <-- change this to the context length you want to use +num_devices: 16 # <-- change this to total number of GPUs that you want to use +num_epochs: 10 # <-- change this to the number of epochs that you want to train for +train_batch_size_per_device: 16 +eval_batch_size_per_device: 16 +learning_rate: 5e-6 +num_checkpoints_to_keep: 1 +output_dir: /mnt/local_storage +deepspeed: + config_path: deepspeed_configs/zero_3_llama_2_7b.json +dataset_size_scaling_factor: 10000 +flash_attention_2: true +worker_resources: + g5.4xlarge: 1 # <-- this maps to job_compute_configs file's custom_resources so the appropriate nodes can scale up +lora_config: + r: 8 + lora_alpha: 16 + lora_dropout: 0.05 + target_modules: + - q_proj + - v_proj + - k_proj + - o_proj + - gate_proj + - up_proj + - down_proj + - embed_tokens + - lm_head + task_type: "CAUSAL_LM" + modules_to_save: [] + bias: "none" + fan_in_fan_out: false + init_lora_weights: true \ No newline at end of file From 9f3dc63158dac8825e0448aed54785160c9bc5c4 Mon Sep 17 00:00:00 2001 From: Shawn Date: Thu, 8 Feb 2024 17:37:52 -0800 Subject: [PATCH 26/40] Addressed comments --- templates/fine-tune-llm/train.py | 36 +++++++++---------- .../llama-2-13b-4k-4xg5_12xlarge.yaml | 2 +- .../llama-2-7b-512-16xg5_4xlarge.yaml | 2 +- .../lora/llama-2-13b-4k-4xg5_12xlarge.yaml | 2 +- .../lora/llama-2-7b-512-16xg5_4xlarge.yaml | 2 +- 5 files changed, 22 insertions(+), 22 deletions(-) diff --git a/templates/fine-tune-llm/train.py b/templates/fine-tune-llm/train.py index 298fbac25..984c533bc 100644 --- a/templates/fine-tune-llm/train.py +++ b/templates/fine-tune-llm/train.py @@ -1,23 +1,25 @@ -import sys +import argparse import os import subprocess import tempfile -from pathlib import Path - import yaml - +from pathlib import Path def read_yaml_file(file_path): - with open(file_path, "r") as file: - return 
yaml.safe_load(file) - + with open(file_path, 'r') as stream: + return yaml.safe_load(stream) def main(): - if len(sys.argv) < 2: - print("Usage: python train.py job_config.yaml train_config.yaml") - sys.exit(1) - job_config_path = sys.argv[1] - finetune_config_path = sys.argv[2] + # Set up the argument parser + parser = argparse.ArgumentParser(description='Submit a job with configuration files') + parser.add_argument('job_config', type=str, help='Path to the job configuration YAML file') + parser.add_argument('finetune_config', type=str, help='Path to the fine-tuning configuration YAML file') + + # Parse arguments + args = parser.parse_args() + + job_config_path = args.job_config + finetune_config_path = args.finetune_config job_config = read_yaml_file(job_config_path) @@ -28,11 +30,9 @@ def main(): api_key = os.environ.get("WANDB_API_KEY") if api_key: - job_config["runtime_env"]["env_vars"]["WANDB_API_KEY"] = api_key + job_config.setdefault("runtime_env", {}).setdefault("env_vars", {})["WANDB_API_KEY"] = api_key - with tempfile.NamedTemporaryFile( - mode="w+", delete=False, dir=".", suffix=".yaml" - ) as temp_file: + with tempfile.NamedTemporaryFile(mode="w+", delete=False, dir=".", suffix=".yaml") as temp_file: yaml.safe_dump(job_config, temp_file) temp_file_name = temp_file.name @@ -43,5 +43,5 @@ def main(): # Clean up by deleting the temporary file os.remove(temp_file_name) - -main() +if __name__ == "__main__": + main() diff --git a/templates/fine-tune-llm/training_configs/full_param/llama-2-13b-4k-4xg5_12xlarge.yaml b/templates/fine-tune-llm/training_configs/full_param/llama-2-13b-4k-4xg5_12xlarge.yaml index d75dc5a01..b46992e11 100644 --- a/templates/fine-tune-llm/training_configs/full_param/llama-2-13b-4k-4xg5_12xlarge.yaml +++ b/templates/fine-tune-llm/training_configs/full_param/llama-2-13b-4k-4xg5_12xlarge.yaml @@ -11,7 +11,7 @@ num_checkpoints_to_keep: 1 output_dir: /mnt/local_storage deepspeed: config_path: deepspeed_configs/zero_3_llama_2_13b.json -dataset_size_scaling_factor: 10000 +dataset_size_scaling_factor: 10000 # internal flag. No need to change flash_attention_2: true worker_resources: g5.12xlarge: 1 # <-- this maps to job_compute_configs file's custom_resources so the appropriate nodes can scale up diff --git a/templates/fine-tune-llm/training_configs/full_param/llama-2-7b-512-16xg5_4xlarge.yaml b/templates/fine-tune-llm/training_configs/full_param/llama-2-7b-512-16xg5_4xlarge.yaml index 7b764dcfd..6ad7b6f38 100644 --- a/templates/fine-tune-llm/training_configs/full_param/llama-2-7b-512-16xg5_4xlarge.yaml +++ b/templates/fine-tune-llm/training_configs/full_param/llama-2-7b-512-16xg5_4xlarge.yaml @@ -11,7 +11,7 @@ num_checkpoints_to_keep: 1 output_dir: /mnt/local_storage deepspeed: config_path: deepspeed_configs/zero_3_llama_2_7b.json -dataset_size_scaling_factor: 10000 +dataset_size_scaling_factor: 10000 # internal flag. 
No need to change flash_attention_2: true worker_resources: g5.4xlarge: 1 # <-- this maps to job_compute_configs file's custom_resources so the appropriate nodes can scale up diff --git a/templates/fine-tune-llm/training_configs/lora/llama-2-13b-4k-4xg5_12xlarge.yaml b/templates/fine-tune-llm/training_configs/lora/llama-2-13b-4k-4xg5_12xlarge.yaml index 3393528de..df6ea4ff0 100644 --- a/templates/fine-tune-llm/training_configs/lora/llama-2-13b-4k-4xg5_12xlarge.yaml +++ b/templates/fine-tune-llm/training_configs/lora/llama-2-13b-4k-4xg5_12xlarge.yaml @@ -11,7 +11,7 @@ num_checkpoints_to_keep: 1 output_dir: /mnt/local_storage deepspeed: config_path: deepspeed_configs/zero_3_llama_2_13b.json -dataset_size_scaling_factor: 10000 +dataset_size_scaling_factor: 10000 # internal flag. No need to change flash_attention_2: true worker_resources: g5.12xlarge: 1 # <-- this maps to job_compute_configs file's custom_resources so the appropriate nodes can scale up diff --git a/templates/fine-tune-llm/training_configs/lora/llama-2-7b-512-16xg5_4xlarge.yaml b/templates/fine-tune-llm/training_configs/lora/llama-2-7b-512-16xg5_4xlarge.yaml index ec87c0839..12269f27b 100644 --- a/templates/fine-tune-llm/training_configs/lora/llama-2-7b-512-16xg5_4xlarge.yaml +++ b/templates/fine-tune-llm/training_configs/lora/llama-2-7b-512-16xg5_4xlarge.yaml @@ -11,7 +11,7 @@ num_checkpoints_to_keep: 1 output_dir: /mnt/local_storage deepspeed: config_path: deepspeed_configs/zero_3_llama_2_7b.json -dataset_size_scaling_factor: 10000 +dataset_size_scaling_factor: 10000 # internal flag. No need to change flash_attention_2: true worker_resources: g5.4xlarge: 1 # <-- this maps to job_compute_configs file's custom_resources so the appropriate nodes can scale up From 5be8e264d50840b931d82da12527ce28d0167f5e Mon Sep 17 00:00:00 2001 From: Praveen Gorthy Date: Tue, 13 Feb 2024 11:28:09 -0800 Subject: [PATCH 27/40] fine_tune_llm: Provide required field in compute config --- configs/fine-tune-llm/aws.yaml | 1 + configs/fine-tune-llm/gce.yaml | 1 + 2 files changed, 2 insertions(+) diff --git a/configs/fine-tune-llm/aws.yaml b/configs/fine-tune-llm/aws.yaml index 952b7a686..943536b0c 100644 --- a/configs/fine-tune-llm/aws.yaml +++ b/configs/fine-tune-llm/aws.yaml @@ -1,3 +1,4 @@ head_node_type: name: head-node-type instance_type: m5.xlarge +worker_node_types: [] diff --git a/configs/fine-tune-llm/gce.yaml b/configs/fine-tune-llm/gce.yaml index 68de52efe..eeaf5d5bf 100644 --- a/configs/fine-tune-llm/gce.yaml +++ b/configs/fine-tune-llm/gce.yaml @@ -1,3 +1,4 @@ head_node_type: name: head_node_type instance_type: n1-standard-4 +worker_node_types: [] From ba834e9d707ccf4a40b761ce94bcd3d1362da65f Mon Sep 17 00:00:00 2001 From: shawn Date: Wed, 14 Feb 2024 01:04:32 +0000 Subject: [PATCH 28/40] Updated --- templates/fine-tune-llm/train.py | 50 +++++++++++++++++++++++++------- 1 file changed, 39 insertions(+), 11 deletions(-) diff --git a/templates/fine-tune-llm/train.py b/templates/fine-tune-llm/train.py index 984c533bc..05c1c16b9 100644 --- a/templates/fine-tune-llm/train.py +++ b/templates/fine-tune-llm/train.py @@ -5,34 +5,61 @@ import yaml from pathlib import Path -def read_yaml_file(file_path): - with open(file_path, 'r') as stream: + +def _read_yaml_file(file_path): + with open(file_path, "r") as stream: return yaml.safe_load(stream) + +def _get_lora_storage_uri() -> str: + artifact_storage = os.environ.get("ANYSCALE_CLOUD_STORAGE_BUCKET") + artifact_storage = artifact_storage.rstrp("/") + return f"{artifact_storage}/fine_tuning/" + + def 
main(): # Set up the argument parser - parser = argparse.ArgumentParser(description='Submit a job with configuration files') - parser.add_argument('job_config', type=str, help='Path to the job configuration YAML file') - parser.add_argument('finetune_config', type=str, help='Path to the fine-tuning configuration YAML file') - + parser = argparse.ArgumentParser( + description="Submit a job with configuration files" + ) + parser.add_argument( + "job_config", type=str, help="Path to the job configuration YAML file" + ) + parser.add_argument( + "finetune_config", + type=str, + help="Path to the fine-tuning configuration YAML file", + ) + # Parse arguments args = parser.parse_args() - + job_config_path = args.job_config finetune_config_path = args.finetune_config - job_config = read_yaml_file(job_config_path) + job_config = _read_yaml_file(job_config_path) + training_config = _read_yaml_file(finetune_config_path) - entrypoint = "llmforge dev finetune " + finetune_config_path + is_lora = "lora_config" in training_config + if is_lora: + storage_uri = _get_lora_storage_uri() + entrypoint = f"llmforge dev finetune {finetune_config_path} --forward-best-checkpoint-remote-uri={storage_uri}" + print(f"Note: Lora weights will also be stored under {storage_uri} to allow multi serving.") + else: + entrypoint = f"llmforge dev finetune {finetune_config_path}" job_config["entrypoint"] = entrypoint job_config["name"] = Path(finetune_config_path).stem api_key = os.environ.get("WANDB_API_KEY") if api_key: - job_config.setdefault("runtime_env", {}).setdefault("env_vars", {})["WANDB_API_KEY"] = api_key + job_config.setdefault("runtime_env", {}).setdefault("env_vars", {})[ + "WANDB_API_KEY" + ] = api_key - with tempfile.NamedTemporaryFile(mode="w+", delete=False, dir=".", suffix=".yaml") as temp_file: + with tempfile.NamedTemporaryFile( + mode="w+", delete=False, dir=".", suffix=".yaml" + ) as temp_file: yaml.safe_dump(job_config, temp_file) temp_file_name = temp_file.name @@ -43,5 +70,6 @@ def main(): # Clean up by deleting the temporary file os.remove(temp_file_name) + if __name__ == "__main__": main() From 77d5e9a4ff76cd54acc4f3858a7eece0d4f043ab Mon Sep 17 00:00:00 2001 From: shawn Date: Wed, 14 Feb 2024 01:40:50 +0000 Subject: [PATCH 29/40] Updated --- templates/fine-tune-llm/train.py | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/templates/fine-tune-llm/train.py b/templates/fine-tune-llm/train.py index 05c1c16b9..25d648bad 100644 --- a/templates/fine-tune-llm/train.py +++ b/templates/fine-tune-llm/train.py @@ -12,8 +12,8 @@ def _read_yaml_file(file_path): def _get_lora_storage_uri() -> str: - artifact_storage = os.environ.get("ANYSCALE_CLOUD_STORAGE_BUCKET") - artifact_storage = artifact_storage.rstrp("/") + artifact_storage = os.environ.get("ANYSCALE_ARTIFACT_STORAGE") + artifact_storage = artifact_storage.rstrip("/") return f"{artifact_storage}/fine_tuning/" @@ -41,12 +41,12 @@ def main(): training_config = _read_yaml_file(finetune_config_path) is_lora = "lora_config" in training_config + entrypoint = f"llmforge dev finetune {finetune_config_path}" if is_lora: - storage_uri = _get_lora_storage_uri() - entrypoint = f"llmforge dev finetune {finetune_config_path} --forward-best-checkpoint-remote-uri={storage_uri}" - print(f"Note: Lora weights will also be stored under {storage_uri} to allow multi serving.") + lora_storage_uri = _get_lora_storage_uri() + entrypoint += f" --forward-best-checkpoint-remote-uri={lora_storage_uri}" else: - entrypoint = f"llmforge dev finetune 
{finetune_config_path}" + lora_storage_uri = None job_config["entrypoint"] = entrypoint job_config["name"] = Path(finetune_config_path).stem @@ -66,6 +66,10 @@ def main(): # Call `anyscale job submit` on the temporary YAML file try: subprocess.run(["anyscale", "job", "submit", temp_file_name], check=True) + if lora_storage_uri: + print( + f"Note: Lora weights will also be stored under {lora_storage_uri} to allow multi serving." + ) finally: # Clean up by deleting the temporary file os.remove(temp_file_name) From 62fee46c240a2bc318e40cb53a33e65edd653756 Mon Sep 17 00:00:00 2001 From: shawn Date: Wed, 14 Feb 2024 01:55:08 +0000 Subject: [PATCH 30/40] Added --- templates/fine-tune-llm/README.md | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/templates/fine-tune-llm/README.md b/templates/fine-tune-llm/README.md index 83cf2785a..ee4382000 100644 --- a/templates/fine-tune-llm/README.md +++ b/templates/fine-tune-llm/README.md @@ -28,17 +28,30 @@ and specify your cloud_id and region under the `cloud_id` and `region` fields. Next, you can launch a fine-tuning job where the WandB API key is passed as an environment variable. ```shell -# Launch a fine-tuning job for Llama 7b with 16 g5.4xlarge instances +# Launch a full-param fine-tuning job for Llama 7b with 16 g5.4xlarge instances WANDB_API_KEY={YOUR_WANDB_API_KEY} python train.py job_compute_configs/aws.yaml training_configs/full_param/llama-2-7b-512-16xg5_4xlarge.yaml + +# Launch a lora fine-tuning job for Llama 7b with 16 g5.4xlarge instances +WANDB_API_KEY={YOUR_WANDB_API_KEY} python train.py job_compute_configs/aws.yaml training_configs/lora/llama-2-7b-512-16xg5_4xlarge.yaml ``` Once you submit the command, you can monitor the progress of the job in the provided job link. Generally a full-param fine-tuning job will take a few hours. -# Step 2 - Import the model +Depending on whether you are running lora or full-param fine-tuning, you can continue +with step 2(a) or step 2(b). + +# Step 2(a) - Serve the Lora finetuned model + +When you run the fine-tuning job, you should get a checkpoint uri +from the log `Note: Lora weights will also be stored under s3://anyscale-data-cld-id/org_id/cld_id/artifact_storage/fine_tuning/ to allow multi serving.`. +Given this URI, you can specify it as the `dynamic_lora_loading_path` in the serving +workspace template. + +# Step 2(b) - Serve the full-param finetuned model. Import the model -Once the fine-tuning job is complete, you can view the stored model weight at the very end of the job logs. Here is an example finetuning job output: +Once the fine-tuning job is complete, you can view the stored full-param fine-tuned model weight at the very end of the job logs. Here is an example finetuning job output: ```shell @@ -55,7 +68,7 @@ prefix `s3://` or `gs://`. For the generation config, you can reference example configs [here](https://docs.anyscale.com/endpoints/model-serving/import-model#generation-configuration-examples). -# Step 3 - Deploy the model on Endpoints +# Step 3 - Deploy the full-param finetuned model on Endpoints Once the model is imported, you can deploy it on Endpoints by creating a new endpoint or adding it to an existing endpoint. 
You can follow the From 95e0de82056ea94fec737e5f6858acc Mon Sep 17 00:00:00 2001 From: shawn Date: Thu, 15 Feb 2024 00:14:13 +0000 Subject: [PATCH 31/40] Updated --- templates/fine-tune-llm/README.md | 8 ++++++++ templates/fine-tune-llm/job_compute_configs/gcp.yaml | 11 ++++++++--- 2 files changed, 16 insertions(+), 3 deletions(-) diff --git a/templates/fine-tune-llm/README.md b/templates/fine-tune-llm/README.md index ee4382000..3c1cb505e 100644 --- a/templates/fine-tune-llm/README.md +++ b/templates/fine-tune-llm/README.md @@ -49,6 +49,9 @@ from the log `Note: Lora weights will also be stored under s3://anyscale-data-cl Given this URI, you can specify it as the `dynamic_lora_loading_path` in the serving workspace template. +The model id follows the convention of `{base_model_id}:{suffix}:{id}`. +For instance, the model id `meta-llama/Llama-2-7b-chat-hf:my_lora:csdu5` would be stored under `s3://anyscale-data-cld-id/org_id/cld_id/artifact_storage/fine_tuning/meta-llama/Llama-2-7b-chat-hf:my_lora:csdu5`. + # Step 2(b) - Serve the full-param finetuned model. Import the model Once the fine-tuning job is complete, you can view the stored full-param fine-tuned model weight at the very end of the job logs. Here is an example finetuning job output: @@ -76,6 +79,11 @@ endpoints page guide to query the endpoint ([docs](https://docs.anyscale.com/end # Frequently asked questions +### Where can I view the bucket where my lora weights are stored? + +All the lora weights are stored under the URI `${ANYSCALE_ARTIFACT_STORAGE}/fine_tuning` +where `ANYSCALE_ARTIFACT_STORAGE` is an environment variable. + ### How can I fine-tune using my own data? You can open the file under `training_configs` and update diff --git a/templates/fine-tune-llm/job_compute_configs/gcp.yaml b/templates/fine-tune-llm/job_compute_configs/gcp.yaml index 2eca37168..55194b1ec 100644 --- a/templates/fine-tune-llm/job_compute_configs/gcp.yaml +++ b/templates/fine-tune-llm/job_compute_configs/gcp.yaml @@ -1,8 +1,8 @@ compute_config: allowed_azs: - any - cloud: my-cloud # You may specify `cloud_id` instead - region: any + cloud_id: cld_id # You should specify your `cloud_id` from the `clouds` page + region: us-west1 head_node_type: instance_type: n2-standard-4 name: head_node @@ -22,10 +22,15 @@ compute_config: resources: custom_resources: a2-highgpu-8g-nvidia-a100-40gb-8: 8 - gcp: + gcp_advanced_configurations_json: instance_properties: labels: as-feature-multi-zone: "true" + disks: + - boot: true + auto_delete: true + initialize_params: + disk_size_gb: 250 runtime_env: env_vars: HF_HOME: /mnt/local_storage/.cache/huggingface From 2d881fe63bc0ecaf65f7ad146c7b943368c9472b Mon Sep 17 00:00:00 2001 From: Praveen Date: Thu, 15 Feb 2024 13:16:01 -0800 Subject: [PATCH 32/40] Create mixtral-8X7b-512-1xp4de_24xlarge-viggo.yaml --- ...ixtral-8X7b-512-1xp4de_24xlarge-viggo.yaml | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 templates/fine-tune-llm/training_configs/mixtral-8X7b-512-1xp4de_24xlarge-viggo.yaml diff --git a/templates/fine-tune-llm/training_configs/mixtral-8X7b-512-1xp4de_24xlarge-viggo.yaml b/templates/fine-tune-llm/training_configs/mixtral-8X7b-512-1xp4de_24xlarge-viggo.yaml new file mode 100644 index 000000000..dd2a18bd9 --- /dev/null +++ b/templates/fine-tune-llm/training_configs/mixtral-8X7b-512-1xp4de_24xlarge-viggo.yaml @@ -0,0 +1,34 @@ +model_id: mistralai/Mixtral-8x7B-Instruct-v0.1 +train_path: s3://air-example-data/viggo/train.jsonl +valid_path: s3://air-example-data/viggo/valid.jsonl +context_length: 4096
+num_devices: 8 +train_batch_size_per_device: 4 +eval_batch_size_per_device: 4 +learning_rate: 1e-4 +num_checkpoints_to_keep: 1 +no_gradient_checkpoint: False +output_dir: /mnt/local_storage +deepspeed: + config_path: deepspeed_configs/zero_3_llama_2_70b.json +worker_resources: + p4d.24xlarge: 1 +flash_attention_2: True +padding: "max_length" +lora_config: + r: 8 + lora_alpha: 16 + lora_dropout: 0.05 + target_modules: + - q_proj + - v_proj + - k_proj + - o_proj + - embed_tokens + - lm_head + task_type: "CAUSAL_LM" + modules_to_save: [] + bias: "none" + fan_in_fan_out: false + init_lora_weights: true +dataset_size_scaling_factor: 1000 From f7476d175064e1894a9e155ee5e79653437763c9 Mon Sep 17 00:00:00 2001 From: Praveen Date: Thu, 15 Feb 2024 13:18:34 -0800 Subject: [PATCH 33/40] Update aws.yaml fix case for multizone --- templates/fine-tune-llm/job_compute_configs/aws.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/templates/fine-tune-llm/job_compute_configs/aws.yaml b/templates/fine-tune-llm/job_compute_configs/aws.yaml index bdeac9ad1..e5b948322 100644 --- a/templates/fine-tune-llm/job_compute_configs/aws.yaml +++ b/templates/fine-tune-llm/job_compute_configs/aws.yaml @@ -48,7 +48,7 @@ compute_config: - ResourceType: instance Tags: - Key: as-feature-multi-zone - Value: True + Value: "true" runtime_env: env_vars: HF_HOME: /mnt/local_storage/.cache/huggingface From 474c38318925f1b222c192d634d657ab90a73820 Mon Sep 17 00:00:00 2001 From: Praveen Gorthy Date: Thu, 15 Feb 2024 17:00:48 -0800 Subject: [PATCH 34/40] Move lora config to sub directory --- .../{ => lora}/mixtral-8X7b-512-1xp4de_24xlarge-viggo.yaml | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename templates/fine-tune-llm/training_configs/{ => lora}/mixtral-8X7b-512-1xp4de_24xlarge-viggo.yaml (100%) diff --git a/templates/fine-tune-llm/training_configs/mixtral-8X7b-512-1xp4de_24xlarge-viggo.yaml b/templates/fine-tune-llm/training_configs/lora/mixtral-8X7b-512-1xp4de_24xlarge-viggo.yaml similarity index 100% rename from templates/fine-tune-llm/training_configs/mixtral-8X7b-512-1xp4de_24xlarge-viggo.yaml rename to templates/fine-tune-llm/training_configs/lora/mixtral-8X7b-512-1xp4de_24xlarge-viggo.yaml From dda730ebebbff538a7528d795b7ea722f866b68f Mon Sep 17 00:00:00 2001 From: Praveen Date: Fri, 16 Feb 2024 10:44:39 -0800 Subject: [PATCH 35/40] Instructions for fetch model id after finetuning --- templates/fine-tune-llm/README.md | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/templates/fine-tune-llm/README.md b/templates/fine-tune-llm/README.md index 3c1cb505e..9ca35514d 100644 --- a/templates/fine-tune-llm/README.md +++ b/templates/fine-tune-llm/README.md @@ -36,8 +36,11 @@ WANDB_API_KEY={YOUR_WANDB_API_KEY} python train.py job_compute_configs/aws.yaml WANDB_API_KEY={YOUR_WANDB_API_KEY} python train.py job_compute_configs/aws.yaml training_configs/lora/llama-2-7b-512-16xg5_4xlarge.yaml ``` -Once you submit the command, you can monitor the progress of the job in -the provided job link. Generally a full-param fine-tuning job will take a few hours. +Once you submit the command, in the terminal you will see the link to your fine-tuning job. Ex: +``` +View the job in the UI at https://console.anyscale.com/jobs/prodjob_62is21vur3fwl5y5xkc9u1t3ll +``` +You can now monitor the progress of the job in the provided job link. Generally a full-param fine-tuning job will take a few hours.
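+
+If you prefer the terminal, you can also tail the run with the Anyscale CLI. The sketch below is only an illustration: it assumes your CLI version provides the `anyscale job logs` command (flags vary between versions) and reuses the job ID from the link above.
+
+```shell
+# Assumed CLI usage: stream the logs of the fine-tuning job submitted above.
+anyscale job logs --id prodjob_62is21vur3fwl5y5xkc9u1t3ll --follow
+```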
Depending on whether you are running lora or full-param fine-tuning, you can continue with step 2(a) or step 2(b). @@ -46,6 +49,19 @@ with step 2(a) or step 2(b). # Step 2(a) - Serve the Lora finetuned model When you run the fine-tuning job, you should get a checkpoint uri from the log `Note: Lora weights will also be stored under s3://anyscale-data-cld-id/org_id/cld_id/artifact_storage/fine_tuning/ to allow multi serving.`. + +You can see the full path at the very end of the job logs in the job UI (see Step 1 for how to find the Jobs UI). Here is an example fine-tuning job output: + +```shell + +Successfully copied files to to bucket: anyscale-data-cld-id and path: org_id/cloud_id/artifact_storage/fine_tuning/mistralai/Mixtral-8x7B-Instruct-v0.1:abcde:fzmrr + +``` + Given this URI, you can specify it as the `dynamic_lora_loading_path` in the serving workspace template. -The model id follows the convention of `{base_model_id}:{suffix}:{id}`. -For instance, the model id `meta-llama/Llama-2-7b-chat-hf:my_lora:csdu5` would be stored under `s3://anyscale-data-cld-id/org_id/cld_id/artifact_storage/fine_tuning/meta-llama/Llama-2-7b-chat-hf:my_lora:csdu5`. +The last part of the above URI is the model id. The model id follows the convention of `{base_model_id}:{suffix}:{id}`. # Step 2(b) - Serve the full-param finetuned model. Import the model From 5d603574791247d0222e405cf760452dc61d9cb3 Mon Sep 17 00:00:00 2001 From: Sofian Hnaide Date: Fri, 16 Feb 2024 11:27:32 -0800 Subject: [PATCH 36/40] add automatic generation of readme and github actions --- .github/workflows/pre-commit.yaml | 14 ++ .pre-commit-config.yaml | 20 ++ README.md | 8 +- ci/auto-generate-readme.sh | 31 +++ templates/fine-tune-llama2/README.md | 28 +-- templates/intro-services/README.md | 172 ++++++++++++++ templates/intro-workspaces/README.md | 211 ++++++++++++++++++ .../serve-stable-diffusion-aica/README.md | 121 +++++++--- templates/serve-stable-diffusion/README.md | 136 ++++++++--- 9 files changed, 663 insertions(+), 78 deletions(-) create mode 100644 .github/workflows/pre-commit.yaml create mode 100644 .pre-commit-config.yaml create mode 100755 ci/auto-generate-readme.sh create mode 100644 templates/intro-services/README.md create mode 100644 templates/intro-workspaces/README.md diff --git a/.github/workflows/pre-commit.yaml b/.github/workflows/pre-commit.yaml new file mode 100644 index 000000000..2b11178bf --- /dev/null +++ b/.github/workflows/pre-commit.yaml @@ -0,0 +1,14 @@ +name: pre-commit + +on: + pull_request: + push: + branches: [main] + +jobs: + pre-commit: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v3 + - uses: pre-commit/action@v3.0.1 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 000000000..bf03c43ac --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,20 @@ +default_stages: [commit, push] +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v3.2.0 + hooks: + - id: check-added-large-files + - id: trailing-whitespace + # README might be auto-generated + exclude: templates/.+/README.md + - id: end-of-file-fixer + # README might be auto-generated + exclude: templates/.+/README.md + - repo: local + hooks: + - id: generate-readme + name: Auto generate README.md from README.ipynb + entry: ci/auto-generate-readme.sh + language: script + pass_filenames: false + additional_dependencies: [jupyter] diff --git a/README.md b/README.md index 2fac1dbbb..41d5368e1 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,10 @@ These templates are a set of minimal examples & tutorials for customers to
run o If the template is generic to Ray & Ray libraries, please consider adding the template in: https://github.com/ray-project/ray/tree/master/doc/source/templates +To set up the environment: +1. Install pre-commit: `pip install pre-commit` +2. Install the git hook scripts: `pre-commit install` + To add a template: @@ -23,10 +27,8 @@ To add a template: Your template does not need to be a Jupyter notebook. It can also be presented as a Python script. - All templates MUST have a `README.md` file. + All templates MUST have a `README.md` or a `README.ipynb` file. 2. Add your compute configuration under `configs/` (for both AWS and GCE). 3. Update the product repo `backend/workspace-templates.yaml` to point to the new template added here after being merged. - -4. Coming soon: update the integration tests in the product repo. \ No newline at end of file diff --git a/ci/auto-generate-readme.sh b/ci/auto-generate-readme.sh new file mode 100755 index 000000000..0b8043652 --- /dev/null +++ b/ci/auto-generate-readme.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +# Search for notebook files named "README.ipynb" in the ../templates directory +notebook_files=$(find ../templates -name "README.ipynb") + +# Loop through each notebook file +for notebook_file in $notebook_files; do + # Convert notebook file to README.md using nbconvert + jupyter nbconvert --to markdown "$notebook_file" --output-dir "$(dirname "$notebook_file")" +done + +# Define the repo prefix +REPO_PREFIX="https://raw.githubusercontent.com/anyscale/templates/main" + +# Search for README.md in the ../templates directory +readme_files=$(find ../templates -name "README.md") + +# Loop through each readme file +for readme_file in $readme_files; do + # Extract the path of the directory containing the README file, relative to the repository root + readme_dir=$(dirname "$readme_file" | sed "s|\.\./templates/||") + + # Check the operating system + if [[ "$OSTYPE" == "darwin"* ]]; then + # macOS system + sed -i '' "s|edit nodes + - Run the command below to kick off fine-tuning with new model size and worker nodes. - 13b: ~9 mins for test run. ~60 mins for a full run (3 epochs) - 70b: ~35 mins for test run. ~400 mins for a full run (3 epochs) -``` -python train.py --size=13b --as-test + + +```python +!python train.py --size=13b --as-test ``` @@ -44,5 +46,5 @@ Use the same command to train with your own data. Voila! You have fine-tuned your own Llama-2 models. Want more than this? Check out advanced tutorials below -- [Walkthrough of this template](./tutorials/walkthrough.md) -- [Fine-tune Llama-2 with LoRA adapters](./tutorials/lora.md) +- Walkthrough of this template: navigate to `tutorials/walkthrough.md` +- Fine-tune Llama-2 with LoRA adapters: navigate to `tutorials/lora.md` diff --git a/templates/intro-services/README.md b/templates/intro-services/README.md new file mode 100644 index 000000000..bfbfa4d03 --- /dev/null +++ b/templates/intro-services/README.md @@ -0,0 +1,172 @@ +# Introduction to Services +This tutorial shows you how to: +1. Develop a simple Ray Serve application locally. +2. Deploy the application to production as an Anyscale service. +3. Monitor the production application. +4. Configure service scaling. + +**Note**: This tutorial is run within a workspace. Please review the `Introduction to Workspaces` template before this tutorial. + +## Develop a Serve app locally + + The fastest way to develop a Ray Serve app is locally within the workspace.
A Serve app running within a workspace behaves identically to a Serve app running as a production service, only it does not have a stable DNS name or fault tolerance. + + To get started, create a file called `main.py` and fill it with the following skeleton code: + +```python +import requests +from fastapi import FastAPI +from ray import serve + +fastapi = FastAPI() + +@serve.deployment +@serve.ingress(fastapi) +class FastAPIDeployment: + # FastAPI will automatically parse the HTTP request for us. + # Check out https://docs.ray.io/en/latest/serve/http-guide.html + @fastapi.get("/hello") + def say_hello(self, name: str) -> str: + return f"Hello {name}!" + +my_app = FastAPIDeployment.bind() +``` + +### Run the app locally +Run the command below to run the serve app locally on `localhost:8000`. + +If you want to deploy again, just run the command again to update the deployment. + +**Tip**: to more easily view Serve backend logs, you may find it convenient to use `serve run main:my_app --blocking` in a new VSCode terminal. This will block and print out application logs (exceptions, etc.) in the terminal. + + +```python +!serve run main:my_app --non-blocking +``` + +### Send a test request +Run the following cell to query the local serve app. + + +```python +import requests + +print(requests.get("http://localhost:8000/hello", params={"name": "Theodore"}).json()) +``` + +## Deploy to production as a service + +In order to enable fault tolerance and expose your app to the public internet, you must "Deploy" the application, which will create an Anyscale Service backed by a public load balancer. This service will run in a separate Ray cluster from the workspace, and will be monitored by the Anyscale control plane to recover on node failures. You will also be able to deploy rolling updates to the service without incurring downtime. + +Use the following command to deploy your app as `my_service`. + + +```python
+!serve deploy main:my_app --name=my_service +``` + +**Tip**: if your app has PyPI dependencies added from the workspace, `serve deploy` will automatically compile these dependencies into a Docker image prior to deploying to optimize startup time. + +### Service UI Overview + +Navigate to your newly created service in the Anyscale UI (`Home > Services > my_service`). It should be in "Starting" state. Click into it and wait for the service to enter "Active" state. + +You should see the service state, key metrics, and system event logs on the overview page. + + + +### Query from the public Internet + +Once the service is up, you can query the service from the public Internet using the same logic as when testing it locally, with two changes: +1. Update the `HOST` to the service endpoint. +2. Add the authorization token as a header in the HTTP request. + +Both of these values are printed when you run `serve deploy`. You can also find them on the service page. For example, if the output looks like: +```bash +(anyscale +4.0s) You can query the service endpoint using the curl request below: +(anyscale +4.0s) curl -H 'Authorization: Bearer 26hTWi2kZwEz0Tdi1_CKRep4NLXbuuaSTDb3WMXK9DM' https://stable_diffusion_app-4rq8m.cld-ltw6mi8dxaebc3yf.s.anyscaleuserdata-staging.com +``` + +Then: +- The authorization token is `26hTWi2kZwEz0Tdi1_CKRep4NLXbuuaSTDb3WMXK9DM`. +- The service endpoint is `https://stable_diffusion_app-4rq8m.cld-ltw6mi8dxaebc3yf.s.anyscaleuserdata-staging.com`.
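+
+Before switching to Python, a quick shell check can confirm the endpoint and token work; a minimal sketch using placeholder values (substitute the host and token printed by `serve deploy`):
+
+```shell
+# Placeholders: replace $HOST and $TOKEN with the values from the `serve deploy` output.
+curl -H "Authorization: Bearer $TOKEN" "$HOST/hello?name=Theodore"
+```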
+ + +```python +import requests + +HOST = "TODO_INSERT_YOUR_SERVICE_HOST" +TOKEN = "TODO_INSERT_YOUR_SERVICE_TOKEN" + +def send_request(name: str) -> str: + response: requests.Response = requests.get( + f"{HOST}/hello", + params={"name": name}, + headers={ + "Authorization": f"Bearer {TOKEN}", + }, + ) + response.raise_for_status() + return response.content +``` + + +```python +print(send_request("Theodore")) +``` + +## Monitoring production services + +Along with the monitoring tools that come with workspaces, in services you also get a number of built-in metrics out of the box in the `Metrics` tab. This tab includes aggregated metrics across all rollouts for the service (possibly from multiple Ray clusters). + + + +## Configure Service Scaling + +By default, the service you created has a single replica. To change this, set the `num_replicas` argument in the [serve.deployment decorator](https://docs.ray.io/en/latest/serve/configure-serve-deployment.html) as follows in `main.py`. For more advanced scaling options, refer to [Serve Autoscaling](https://docs.ray.io/en/latest/serve/autoscaling-guide.html#serve-autoscaling). + +```python +@serve.deployment(num_replicas=4) +@serve.ingress(fastapi) +class FastAPIDeployment: + ... +``` + +Redeploy locally using `serve run`. + + +```python +!serve run main:my_app --non-blocking +``` + +You can check in the Ray Dashboard of the workspace that the number of replicas has been increased: + + + +We can also deploy the update to our production service. Make sure to include the `--name` option to specify which service to deploy to. This will trigger a staged rollout of the service: + + +```python +!serve deploy main:my_app --name=my_service +``` + +Monitor the status of the rollout in the service overview page. Once the new Ray cluster with the updated app config is running, the previous cluster will be shut down: + + + +### Understanding Ray Serve vs Ray cluster config + +When scaling your service, it is important to understand the interaction of the Serve scaling config (i.e., contents of `@serve.deployment`), vs the Ray cluster config (i.e., number of Ray worker nodes). In general, you can think of the Ray cluster config as an upper bound on service scaling, since Ray Serve runs inside the Ray cluster. + +For example, suppose the Ray cluster was configured to have at most 100 CPUs, then Serve would only be able to launch up to 100 replicas, no matter the deployment config. + +For this reason, we generally recommend using the "Auto-select machines" cluster config for services (this is the default). + +This concludes the services intro tutorial. To learn more, check out the model serving templates available in the template gallery, as well as the Ray Serve [documentation](https://docs.ray.io/en/latest/serve/index.html). + +## Summary + +This notebook: +- Developed and ran a simple serve app in the local workspace. +- Deployed the application to production as a service. +- Overviewed production monitoring. +- Scaled the service and covered Ray Serve vs Ray cluster config. diff --git a/templates/intro-workspaces/README.md b/templates/intro-workspaces/README.md new file mode 100644 index 000000000..588d4d00b --- /dev/null +++ b/templates/intro-workspaces/README.md @@ -0,0 +1,211 @@ +# Introduction to Workspaces + +Welcome! You are currently in a Workspace, which is a persistent cloud IDE connected to a Ray cluster. + +In this tutorial, you will learn: +1. 
Basic workspace features such as git repo persistence, NFS mounts, cloud storage, and SSH authentication. +2. Ray cluster management features, such as adding multiple worker nodes. +3. Ray monitoring features such as viewing tasks in the dashboard. +4. Dependency management. + +## "Hello world" in workspaces + +Let's start by checking that Ray is working properly in your workspace. You can do this by running the following cell to execute a simple parallel Ray program. + + +```python +import ray + +@ray.remote +def square(x): + return x ** 2 + +futures = [square.remote(x) for x in range(100)] +results = ray.get(futures) +print("Success!", results) +``` + +## Workspace Basics + +An Anyscale Workspace is a cloud IDE where you can develop and test Ray programs. Let's get started by creating a new git repo in this workspace. Workspaces will persist the tracked files in this git repo across restarts (as well as files not in git repos). + +We'll use the repo later on to author and run a simple Ray app. + + +```python +!mkdir my_repo && cd my_repo && git init +``` + +### Setting up SSH authentication (optional) + +Anyscale generates a unique SSH key per user, which is accessible at `~/.ssh/id_rsa.pub`. If you'd like, you can [add this key to GitHub](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/adding-a-new-ssh-key-to-your-github-account) in order to access private repositories from the workspace. + +The public key to add is outputted by the following command: + + +```python +!cat ~/.ssh/id_rsa.pub +``` + +### NFS Mounts + +Workspace local storage is limited to 1GB, so we recommend only using it to store git repos and smaller files. To persist larger files, you can save data to NFS mounts and cloud storage. + +Here are a few handy NFS mounts that are included: +- `/mnt/shared_storage` is a mount shared across all users of your organization +- `/mnt/user_storage` is a mount for your user account + +NFS storage can be read and written from the workspace, as well as from any node in the Ray cluster: + + +```python +!echo "hello world" > /mnt/user_storage/persisted_file.txt && cat /mnt/user_storage/persisted_file.txt +``` + +#### Cloud Storage + +Access built-in cloud storage using the `$ANYSCALE_ARTIFACT_STORAGE` URI as a prefix: + + +```python +!aws s3 cp /mnt/user_storage/persisted_file.txt $ANYSCALE_ARTIFACT_STORAGE/persisted_object.txt +``` + + +```python +!aws s3 cp $ANYSCALE_ARTIFACT_STORAGE/persisted_object.txt /tmp/object.txt && cat /tmp/object.txt +``` + +## Ray cluster management + +This workspace is connected to a Ray cluster. Click on the resources bar on the top right corner of the screen to open the cluster control panel. This panel shows a summary of Ray resource utilization, and you can use this panel to configure the cluster resources. + + + +### Configuring the Workspace node + +The workspace node is the machine this notebook is running inside. You may wish to change the instance type of the workspace node specifically, e.g., to increase the available memory or add a GPU. Click the pencil icon in order to change the workspace node. Note that changing the workspace node will restart the workspace IDE. + + + + +### Adding worker nodes + +To parallelize beyond the resources available to the workspace node, add additional worker nodes to the Ray cluster. Click "Add a node type" to add a number of nodes of a certain type to the cluster.
While most use cases only require a single worker node type, you can add multiple distinct node types (e.g., high-CPU and GPU nodes) to the workspace as well. + + + + +### Using "Auto" workers mode + +To let Ray automatically select what kind of worker nodes to add to the cluster, check the "Auto-select machines" box. Ray will try to autoscale cluster worker nodes to balance cost and performance. In auto mode, you cannot configure worker node types, but the resources panel will show which node types have been launched. + +We recommend using auto mode if you do not have specific cluster requirements, and are ok with waiting for the autoscaler to add nodes on-demand to the cluster. + +## Monitoring Ray applications + +In this section, we'll author a simple Ray python script and go over the tools available to monitor its execution. Let's take the opportunity to create a `my_app.py` file in the `my_repo` git repo you created earlier. + +You can click on the "File Explorer" in the left pane of VSCode to create the new file. Copy and paste the following program into the file: + +```python +import ray, time + +@ray.remote +def do_some_work(): + print("Doing work") + time.sleep(5) + return "Done" + +ray.get([do_some_work.remote() for _ in range(100)]) +``` + +Then, use the next cell or the VSCode terminal to run the file: + + +```python +!python my_repo/my_app.py +``` + +### Understanding Ray log output + +After running `my_app.py`, you should see output of the form `(do_some_work pid=29848) Doing work [repeated 4x across cluster]`. The prefix of the log message shows the function name, PID of the worker that ran the function, and if run on a remote worker, the node IP. + +The rest of the log message contains stdout and stderr from the function execution. Ray will also deduplicate repetitive logs from parallel execution of functions across the cluster. + +### Monitoring program execution + +Depending on the cluster size, the above script may take some time to run. Try playing around with the number of worker machines, increasing the sleep time, or the number of function calls. Use the tools overviewed below to understand how Ray parallelizes the program. + +Let's overview some of the tools available to monitor Ray program execution in workspaces. + +**Resources Panel** + +The resources panel provides basic stats about cluster utilization, as well as an indication of which worker nodes are being used. Use the resource panel as a quick overview of cluster status before diving deeper into the Ray dashboard. + + + +**Ray dashboard > Jobs** + +To see the status of an active or previously run Ray job, navigate to `Ray Dashboard > Jobs` in the UI. Here you will see an overview of job progress, logs, and the ability to drill down into individual tasks and actors. + + + +**Ray dashboard > Metrics** + +View the aggregate time-series metrics for the cluster in order to diagnose job execution efficiency. The `Ray Dashboard > Metrics` page offers metrics on Ray tasks, actors, as well as hardware resource utilization of the cluster. + + + +**Logs Tab** + +View and search over Ray cluster and application logs in the Logs tab. + + + +## Dependency Management + +In order to run code across a cluster, Ray ships code and other library dependencies to other machines in [runtime envs](https://docs.ray.io/en/latest/ray-core/handling-dependencies.html). In workspaces, the code and installed PyPI packages are automatically added to the runtime env to be used by Ray.
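+
+For context, outside of a workspace you would declare such dependencies yourself when initializing Ray; a minimal sketch of the equivalent runtime env (using the `emoji` package from the example below):
+
+```python
+import ray
+
+# Explicitly ship a pip dependency to every worker via the runtime env.
+# Workspaces register installed packages automatically, so this call is
+# only needed when running outside a workspace.
+ray.init(runtime_env={"pip": ["emoji"]})
+```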
+ +To try this out, run the following command to install the `emoji` package. You'll see a notification that the package has been registered with the cluster. + + +```python +!pip install emoji +``` + +Navigate to the `Dependencies` tab of the workspace, and you should see the `emoji` package in the list there. You can use this UI to edit the workspace runtime dependencies. + + + +Run the following cell to check that the `emoji` package is successfully installed on the cluster (to check this properly, make sure the cluster has at least one worker node added). + + +```python +import ray +import emoji +import time + +# Reset the Ray session in the notebook kernel to pick up new dependencies. +if ray.is_initialized(): + ray.shutdown() + +@ray.remote +def f(): + print(emoji.emojize('Dependencies are :thumbs_up:')) + time.sleep(5) + +ray.get([f.remote() for _ in range(100)]) +print("Done") +``` + +That's it! Now you know everything you need to build scalable Ray applications in Anyscale Workspaces. Check out the template gallery and Ray documentation to learn more about what you can do with Ray and Anyscale. + +## Summary + +This notebook: +- Set up a basic development project in a workspace. +- Showed how to use different types of persistent storage. +- Demoed how to build and debug a basic Ray application. + + diff --git a/templates/serve-stable-diffusion-aica/README.md b/templates/serve-stable-diffusion-aica/README.md index 15a2d71aa..a07bb7fb4 100644 --- a/templates/serve-stable-diffusion-aica/README.md +++ b/templates/serve-stable-diffusion-aica/README.md @@ -1,43 +1,80 @@ -# Serving a Stable Diffusion Model with Ray Serve -This template shows you how to develop and test the model locally and deploy it into production. +## Serving a Stable Diffusion Model with Ray Serve +This template shows you how to: +1. Develop and run a Ray Serve application running a stable diffusion model. +2. Send test requests to the application running locally. +3. Deploy the application to production as a service. +4. Send requests to the application running in production as a service. -## Step 1: Install python dependencies +### Step 1: Install python dependencies + +The application requires a few extra Python dependencies. Install them using `pip` and they'll be saved in the workspace and picked up when deploying to production. + + +```python +!pip install --user -q diffusers==0.25.0 transformers==4.36.2 accelerate==0.25.0 && echo 'Install complete!' ``` -pip install --user diffusers==0.25.0 transformers==4.36.2 accelerate==0.25.0 + +### Step 2: Run the model locally +- Run the command below in a VSCode terminal (Ctrl-`). +- The model will be available at http://localhost:8000. +- The command will block and print logs for the application. + +```bash +# Run the following in a VSCode terminal because it's a blocking command. +$ serve run main:stable_diffusion_app ``` -## Step 2: Deploy the model locally -- Open a new terminal. -- Run the command below to deploy your model at http://localhost:8000. +### Step 3: Send a test request to the model running locally -This template uses an A10G GPU by default. You can update the `accelerator_type` config in `main.py` to use the GPU desired. Note that A10G is not available on GCP and you would need to switch to L4. +The `generate_image` function sends an HTTP request to the model and saves the response as a local image.
-```bash -serve run main:stable_diffusion_app +As the request to generate the image runs, logs will be printed to the terminal that is running `serve run`. + + +```python +import requests + +HOST = "http://localhost:8000" + +def generate_image(prompt: str, image_size: int) -> bytes: + response: requests.Response = requests.get( + f"{HOST}/imagine", + params={"prompt": prompt, "img_size": image_size}, + ) + response.raise_for_status() + return response.content ``` -## Step 3: Send test requests to the running model -- Open a new terminal. Run the command below to send a request to your model. -- An image should be generated in the current directory -```bash -python query.py + +```python +image = generate_image("twin peaks sf in basquiat painting style", 640) + +filename = "image.png" +with open(filename, "wb") as f: + f.write(image) + +from IPython.display import Image +Image(filename=filename) ``` -## Step 4: Deploy the model as an Anyscale Service +### Step 4: Deploy the model to production as a service + Deploy the model to production using the `anyscale service rollout` command. This creates a long-running [service](https://docs.anyscale.com/services/get-started) with a stable endpoint to query the application. Note that we installed some pip packages in the workspace that had to be added in to the runtime environment. For faster setup of your deployments, you can build a new [cluster environment](https://docs.anyscale.com/configure/dependency-management/cluster-environments) with these packages. -```bash -anyscale service rollout -f service.yaml --name {ENTER_NAME_FOR_SERVICE} + +```python +!anyscale service rollout -f service.yaml --name {ENTER_NAME_FOR_SERVICE} ``` -## Step 5: Query your Anyscale Service -Query the service using the same `query.py` script as when testing it locally, with two changes: +### Step 5: Send a test request to the model running in the service + +Query the service using the same logic as when testing it locally, with two changes: 1. Update the `HOST` to the service endpoint. -2. Update the `TOKEN` field to add the authorization token as a header in the HTTP request. +2. Add the authorization token as a header in the HTTP request. Both of these values are printed when you run `anyscale service rollout`. You can also find them on the service page. For example, if the output looks like: ```bash @@ -49,23 +86,43 @@ Then: - The authorization token is `26hTWi2kZwEz0Tdi1_CKRep4NLXbuuaSTDb3WMXK9DM`. - The service endpoint is `https://stable_diffusion_app-4rq8m.cld-ltw6mi8dxaebc3yf.s.anyscaleuserdata-staging.com`. 
-In the services UI, click the **Query** button on the top-right side to get these two fields -![deploy-pop-up](https://github.com/anyscale/templates/blob/main/templates/serve-stable-diffusion-aica/assets/query_instructions.png?raw=true) +```python +import requests -After updating these fields, run: -```bash -python query.py +HOST = "TODO_INSERT_YOUR_SERVICE_HOST" +TOKEN = "TODO_INSERT_YOUR_SERVICE_TOKEN" + +def generate_image(prompt: str, image_size: int) -> bytes: + response: requests.Response = requests.get( + f"{HOST}/imagine", + params={"prompt": prompt, "img_size": image_size}, + headers={ + "Authorization": f"Bearer {TOKEN}", + }, + ) + response.raise_for_status() + return response.content ``` -Another way to query is through the FastAPI UI linked through the API docs on the Service page: -![deploy-pop-up](https://github.com/anyscale/templates/blob/main/templates/serve-stable-diffusion-aica/assets/fastapi_docs.png?raw=true) +```python +image = generate_image("twin peaks sf in basquiat painting style", 640) -## (Optional) Step 6: Iterate in the Workspace and update the Service without downtime -You can make code changes in the same Workspace and deploy an upgrade to your Service without downtime using the same command. +filename = "image.png" +with open(filename, "wb") as f: + f.write(image) -```bash -anyscale service rollout -f service.yaml --name {ENTER_NAME_FOR_SERVICE} +from IPython.display import Image +Image(filename=filename) ``` +## Summary + +This notebook: +- Developed and ran a model serving application locally. +- Sent a test request to the application locally. +- Deployed the application to production as a service. +- Sent another test request to the application running as a service. + + diff --git a/templates/serve-stable-diffusion/README.md b/templates/serve-stable-diffusion/README.md index d624aa94b..941a623f1 100644 --- a/templates/serve-stable-diffusion/README.md +++ b/templates/serve-stable-diffusion/README.md @@ -1,52 +1,128 @@ -# Serving a Stable Diffusion Model with Ray Serve -This template shows you how to develop and test the model locally and deploy it into production. +## Serving a Stable Diffusion Model with Ray Serve +This template shows you how to: +1. Develop and run a Ray Serve application running a stable diffusion model. +2. Send test requests to the application running locally. +3. Deploy the application to production as a service. +4. Send requests to the application running in production as a service. -## Step 1: Install python dependencies -``` -pip install diffusers==0.25.0 transformers==4.36.2 accelerate==0.25.0 +### Step 1: Install python dependencies + +The application requires a few extra Python dependencies. Install them using `pip` and they'll be saved in the workspace and picked up when deploying to production. + + +```python +!pip install -q diffusers==0.25.0 transformers==4.36.2 accelerate==0.25.0 && echo 'Install complete!' ``` -## Step 2: Deploy the model locally -- Open a new terminal (ctl+shift+`) in VS Code. -- Run the command below to deploy your model at http://localhost:8000. +### Step 2: Run the model locally +- Run the command below in a VSCode terminal (Ctrl-`). +- The model will be available at http://localhost:8000. +- The command will block and print logs for the application. ```bash -serve run main:stable_diffusion_app +# Run the following in a VSCode terminal because it's a blocking command. +$ serve run main:stable_diffusion_app ``` -## Step 3: Send test requests to the running model -- Open a new terminal. 
Run the command below to send a request to your model. -- An image should be generated in the current directory -```bash -python query.py +### Step 3: Send a test request to the model running locally + +The `generate_image` function sends an HTTP request to the model and saves the response as a local image. + +As the request to generate the image runs, logs will be printed to the terminal that is running `serve run`. + + +```python +import requests + +HOST = "http://localhost:8000" + +def generate_image(prompt: str, image_size: int) -> bytes: + response: requests.Response = requests.get( + f"{HOST}/imagine", + params={"prompt": prompt, "img_size": image_size}, + ) + response.raise_for_status() + return response.content ``` --------- -## Note: the following steps are still under implementation. We'll send a notice after they are ready for test. --------- -## Step 4: Deploy the model as an Anyscale Service -Deploy it as an Anyscale Service for staging or production traffic with `--publish` flag +```python +image = generate_image("twin peaks sf in basquiat painting style", 640) -```bash -serve run main:stable_diffusion_app --publish +filename = "image.png" +with open(filename, "wb") as f: + f.write(image) + +from IPython.display import Image +Image(filename=filename) ``` +### Step 4: Deploy the model to production as a service + +Deploy the model to production using the `serve deploy` command. + +This creates a long-running [service](https://docs.anyscale.com/services/get-started) with a stable endpoint to query the application. -## Step 5: Query your Anyscale Service -Navigate to Service UI with the URL generated from the previous step, click **Query** button to get detailed query instructions and integrate it into your own app. +Local files and dependencies installed in the workspace are automatically included when the service is deployed. -![deploy-pop-up](https://github.com/anyscale/templates/blob/main/templates/serve-stable-diffusion/assets/query_instructions.png?raw=true) -The benefits of using Anyscale Services for staging/production traffic: -- Zero-downtime upgrade -- Better fault tolerence (auto-recover from node failures, etc.) +```python +!serve deploy --name stable_diffusion_service main:stable_diffusion_app +``` +### Step 5: Send a test request to the model running in the service -## (Optional) Step 6: Iterate in the Workspace and update the Service without downtime -You can make code changes in the same Workspace and deploy an upgrade to your Service without downtime using the same command. +Query the service using the same logic as when testing it locally, with two changes: +1. Update the `HOST` to the service endpoint. +2. Add the authorization token as a header in the HTTP request. +Both of these values are printed when you run `serve deploy`. You can also find them on the service page. For example, if the output looks like: ```bash -serve run main:stable_diffusion_app --publish +(anyscale +4.0s) You can query the service endpoint using the curl request below: +(anyscale +4.0s) curl -H 'Authorization: Bearer 26hTWi2kZwEz0Tdi1_CKRep4NLXbuuaSTDb3WMXK9DM' https://stable_diffusion_app-4rq8m.cld-ltw6mi8dxaebc3yf.s.anyscaleuserdata-staging.com +``` + +Then: +- The authorization token is `26hTWi2kZwEz0Tdi1_CKRep4NLXbuuaSTDb3WMXK9DM`. +- The service endpoint is `https://stable_diffusion_app-4rq8m.cld-ltw6mi8dxaebc3yf.s.anyscaleuserdata-staging.com`. 
+ + +```python +import requests + +HOST = "TODO_INSERT_YOUR_SERVICE_HOST" +TOKEN = "TODO_INSERT_YOUR_SERVICE_TOKEN" + +def generate_image(prompt: str, image_size: int) -> bytes: + response: requests.Response = requests.get( + f"{HOST}/imagine", + params={"prompt": prompt, "img_size": image_size}, + headers={ + "Authorization": f"Bearer {TOKEN}", + }, + ) + response.raise_for_status() + return response.content ``` + +```python +image = generate_image("twin peaks sf in basquiat painting style", 640) + +filename = "image.png" +with open(filename, "wb") as f: + f.write(image) + +from IPython.display import Image +Image(filename=filename) +``` + +## Summary + +This notebook: +- Developed and ran a model serving application locally. +- Sent a test request to the application locally. +- Deployed the application to production as a service. +- Sent another test request to the application running as a service. + + From f7d2857021934dbc69934e2ef17aa0ce0139c847 Mon Sep 17 00:00:00 2001 From: Sofian Hnaide Date: Fri, 16 Feb 2024 11:39:38 -0800 Subject: [PATCH 37/40] fix pre-commit hooks --- configs/anyscale-ray-101/aws.yaml | 2 +- configs/anyscale-ray-101/gce.yaml | 2 +- configs/endpoints/gcp.yaml | 8 ++--- configs/endpoints_v2/gcp.yaml | 8 ++--- configs/fine-tune-GPTJ/aws.yaml | 2 +- configs/fine-tune-GPTJ/gce.yaml | 2 +- configs/fine-tune-llama2/aws.yaml | 2 +- configs/fine-tune-llama2/gce.yaml | 2 +- configs/serve-stable-diffusion-aica/gcp.yaml | 8 ++--- templates/endpoints/AdvancedModelConfigs.md | 12 ++++---- templates/endpoints/CustomModels.md | 6 ++-- templates/endpoints/DeployFunctionCalling.md | 4 +-- templates/endpoints/DeployLora.md | 4 +-- templates/endpoints/EmbeddingModels.md | 4 +-- templates/endpoints/OptimizeModels.md | 6 ++-- ...Mistral-7B-Instruct-v0.1_a100-40g_tp1.yaml | 1 - ...alai--Mistral-7B-Instruct-v0.1_l4_tp2.yaml | 2 +- ...xtral-8x7B-Instruct-v0.1_a100-40g_tp8.yaml | 2 +- ...xtral-8x7B-Instruct-v0.1_a100-80g_tp8.yaml | 2 +- .../endpoints_v2/AdvancedModelConfigs.md | 12 ++++---- templates/endpoints_v2/CustomModels.md | 6 ++-- .../endpoints_v2/DeployFunctionCalling.md | 4 +-- templates/endpoints_v2/DeployLora.md | 4 +-- templates/endpoints_v2/EmbeddingModels.md | 4 +-- templates/endpoints_v2/OptimizeModels.md | 6 ++-- ...Mistral-7B-Instruct-v0.1_a100-40g_tp1.yaml | 1 - ...alai--Mistral-7B-Instruct-v0.1_l4_tp2.yaml | 2 +- ...xtral-8x7B-Instruct-v0.1_a100-40g_tp8.yaml | 2 +- ...xtral-8x7B-Instruct-v0.1_a100-80g_tp8.yaml | 2 +- templates/fine-tune-GPTJ/cluster_env.yaml | 2 +- .../gptj_deepspeed_fine_tuning.py | 18 +++++------ templates/fine-tune-llama2/README.ipynb | 1 - templates/fine-tune-llama2/create_dataset.py | 2 +- templates/fine-tune-llama2/data/tokens.json | 2 +- .../deepspeed_configs/zero_3_llama_2_13b.json | 4 +-- .../deepspeed_configs/zero_3_llama_2_70b.json | 2 +- .../zero_3_llama_2_70b_nvme_offload.json | 4 +-- .../deepspeed_configs/zero_3_llama_2_7b.json | 4 +-- .../fine-tune-llama2/lora_configs/lora.json | 4 +-- .../fine-tune-llama2/merge_lora_weights.py | 2 +- templates/fine-tune-llama2/train.py | 26 ++++++++-------- templates/fine-tune-llama2/tutorials/lora.md | 4 +-- .../fine-tune-llama2/tutorials/walkthrough.md | 16 +++++----- templates/fine-tune-llama2/utils.py | 2 +- .../lora/llama-2-13b-4k-4xg5_12xlarge.yaml | 4 +-- .../lora/llama-2-7b-512-16xg5_4xlarge.yaml | 4 +-- ...ixtral-8X7b-512-1xp4de_24xlarge-viggo.yaml | 4 +-- .../cluster_env.yaml | 6 ++-- .../download_assets.sh | 2 +- .../imagenet_classes.txt | 2 +- .../onnx-resnet.py | 6 ++-- 
.../image-classification-service/pt-resnet.py | 4 +-- .../image-classification-service/query.py | 2 +- .../trt-resnet.py | 2 +- templates/inferentia-llama/llama-serve.py | 6 ++-- templates/inferentia-llama/setup.sh | 6 ++-- .../inferentia-stable-diffusion/compile.py | 30 +++++++++---------- .../serve/query.py | 2 +- .../serve/sd_serve.py | 26 +++++++--------- .../inferentia-stable-diffusion/setup.sh | 6 ++-- templates/serve-stable-diffusion-aica/main.py | 6 ++-- .../serve-stable-diffusion-aica/service.yaml | 2 +- templates/serve-stable-diffusion/main.py | 4 +-- 63 files changed, 165 insertions(+), 174 deletions(-) diff --git a/configs/anyscale-ray-101/aws.yaml b/configs/anyscale-ray-101/aws.yaml index 09144cb1d..82148de35 100644 --- a/configs/anyscale-ray-101/aws.yaml +++ b/configs/anyscale-ray-101/aws.yaml @@ -7,4 +7,4 @@ worker_node_types: instance_type: m5.2xlarge min_workers: 0 max_workers: 2 - use_spot: false \ No newline at end of file + use_spot: false diff --git a/configs/anyscale-ray-101/gce.yaml b/configs/anyscale-ray-101/gce.yaml index 49bfccb86..9e539380f 100644 --- a/configs/anyscale-ray-101/gce.yaml +++ b/configs/anyscale-ray-101/gce.yaml @@ -7,4 +7,4 @@ worker_node_types: instance_type: n2-standard-8 min_workers: 0 max_workers: 2 - use_spot: false \ No newline at end of file + use_spot: false diff --git a/configs/endpoints/gcp.yaml b/configs/endpoints/gcp.yaml index 77e345d42..69108266f 100644 --- a/configs/endpoints/gcp.yaml +++ b/configs/endpoints/gcp.yaml @@ -87,7 +87,7 @@ worker_node_types: max_workers: 100 use_spot: true fallback_to_ondemand: true -- name: gpu-worker-a100-40g-1 +- name: gpu-worker-a100-40g-1 instance_type: a2-highgpu-1g-nvidia-a100-40gb-1 resources: cpu: @@ -98,7 +98,7 @@ worker_node_types: "accelerator_type:A100-40G": 1 min_workers: 0 max_workers: 100 -- name: gpu-worker-a100-40g-2 +- name: gpu-worker-a100-40g-2 instance_type: a2-highgpu-2g-nvidia-a100-40gb-2 resources: cpu: @@ -109,7 +109,7 @@ worker_node_types: "accelerator_type:A100-40G": 1 min_workers: 0 max_workers: 100 -- name: gpu-worker-a100-40g-4 +- name: gpu-worker-a100-40g-4 instance_type: a2-highgpu-4g-nvidia-a100-40gb-4 resources: cpu: @@ -120,7 +120,7 @@ worker_node_types: "accelerator_type:A100-40G": 1 min_workers: 0 max_workers: 100 -- name: gpu-worker-a100-40g-8 +- name: gpu-worker-a100-40g-8 instance_type: a2-highgpu-8g-nvidia-a100-40gb-8 resources: cpu: diff --git a/configs/endpoints_v2/gcp.yaml b/configs/endpoints_v2/gcp.yaml index 128f34349..3c865a3f1 100644 --- a/configs/endpoints_v2/gcp.yaml +++ b/configs/endpoints_v2/gcp.yaml @@ -87,7 +87,7 @@ worker_node_types: max_workers: 100 use_spot: true fallback_to_ondemand: true -- name: gpu-worker-a100-40g-1 +- name: gpu-worker-a100-40g-1 instance_type: a2-highgpu-1g-nvidia-a100-40gb-1 resources: cpu: @@ -98,7 +98,7 @@ worker_node_types: "accelerator_type:A100-40G": 1 min_workers: 0 max_workers: 100 -- name: gpu-worker-a100-40g-2 +- name: gpu-worker-a100-40g-2 instance_type: a2-highgpu-2g-nvidia-a100-40gb-2 resources: cpu: @@ -109,7 +109,7 @@ worker_node_types: "accelerator_type:A100-40G": 1 min_workers: 0 max_workers: 100 -- name: gpu-worker-a100-40g-4 +- name: gpu-worker-a100-40g-4 instance_type: a2-highgpu-4g-nvidia-a100-40gb-4 resources: cpu: @@ -120,7 +120,7 @@ worker_node_types: "accelerator_type:A100-40G": 1 min_workers: 0 max_workers: 100 -- name: gpu-worker-a100-40g-8 +- name: gpu-worker-a100-40g-8 instance_type: a2-highgpu-8g-nvidia-a100-40gb-8 resources: cpu: diff --git a/configs/fine-tune-GPTJ/aws.yaml 
b/configs/fine-tune-GPTJ/aws.yaml index be08b393f..bf42a3c83 100644 --- a/configs/fine-tune-GPTJ/aws.yaml +++ b/configs/fine-tune-GPTJ/aws.yaml @@ -7,4 +7,4 @@ worker_node_types: instance_type: g4dn.4xlarge min_workers: 0 max_workers: 15 - use_spot: false \ No newline at end of file + use_spot: false diff --git a/configs/fine-tune-GPTJ/gce.yaml b/configs/fine-tune-GPTJ/gce.yaml index 84fffc436..5cacfd201 100644 --- a/configs/fine-tune-GPTJ/gce.yaml +++ b/configs/fine-tune-GPTJ/gce.yaml @@ -7,4 +7,4 @@ worker_node_types: instance_type: n1-standard-16-nvidia-t4-16gb-1 min_workers: 1 max_workers: 15 - use_spot: false \ No newline at end of file + use_spot: false diff --git a/configs/fine-tune-llama2/aws.yaml b/configs/fine-tune-llama2/aws.yaml index 0e849e65a..cb80513d4 100644 --- a/configs/fine-tune-llama2/aws.yaml +++ b/configs/fine-tune-llama2/aws.yaml @@ -7,4 +7,4 @@ worker_node_types: instance_type: g5.4xlarge min_workers: 0 max_workers: 100 - use_spot: false \ No newline at end of file + use_spot: false diff --git a/configs/fine-tune-llama2/gce.yaml b/configs/fine-tune-llama2/gce.yaml index 4d67b7a48..691a0b2ca 100644 --- a/configs/fine-tune-llama2/gce.yaml +++ b/configs/fine-tune-llama2/gce.yaml @@ -7,4 +7,4 @@ worker_node_types: instance_type: g2-standard-16-nvidia-l4-1 min_workers: 0 max_workers: 100 - use_spot: false \ No newline at end of file + use_spot: false diff --git a/configs/serve-stable-diffusion-aica/gcp.yaml b/configs/serve-stable-diffusion-aica/gcp.yaml index 128f34349..3c865a3f1 100644 --- a/configs/serve-stable-diffusion-aica/gcp.yaml +++ b/configs/serve-stable-diffusion-aica/gcp.yaml @@ -87,7 +87,7 @@ worker_node_types: max_workers: 100 use_spot: true fallback_to_ondemand: true -- name: gpu-worker-a100-40g-1 +- name: gpu-worker-a100-40g-1 instance_type: a2-highgpu-1g-nvidia-a100-40gb-1 resources: cpu: @@ -98,7 +98,7 @@ worker_node_types: "accelerator_type:A100-40G": 1 min_workers: 0 max_workers: 100 -- name: gpu-worker-a100-40g-2 +- name: gpu-worker-a100-40g-2 instance_type: a2-highgpu-2g-nvidia-a100-40gb-2 resources: cpu: @@ -109,7 +109,7 @@ worker_node_types: "accelerator_type:A100-40G": 1 min_workers: 0 max_workers: 100 -- name: gpu-worker-a100-40g-4 +- name: gpu-worker-a100-40g-4 instance_type: a2-highgpu-4g-nvidia-a100-40gb-4 resources: cpu: @@ -120,7 +120,7 @@ worker_node_types: "accelerator_type:A100-40G": 1 min_workers: 0 max_workers: 100 -- name: gpu-worker-a100-40g-8 +- name: gpu-worker-a100-40g-8 instance_type: a2-highgpu-8g-nvidia-a100-40gb-8 resources: cpu: diff --git a/templates/endpoints/AdvancedModelConfigs.md b/templates/endpoints/AdvancedModelConfigs.md index 9f3c4429a..f961e96ba 100644 --- a/templates/endpoints/AdvancedModelConfigs.md +++ b/templates/endpoints/AdvancedModelConfigs.md @@ -5,10 +5,10 @@ Each model is defined by a YAML configuration file in the `models` directory. ## Modify an existing model To modify an existing model, simply edit the YAML file for that model. -Each config file consists of three sections: +Each config file consists of three sections: -- `deployment_config`, -- `engine_config`, +- `deployment_config`, +- `engine_config`, - `scaling_config`. It's best to check out examples of existing models to see how they are configured. @@ -24,7 +24,7 @@ and specifies how to [auto-scale the model](https://docs.ray.io/en/latest/serve/ * `max_concurrent_queries` - Maximum number of queries that a Ray Serve replica can process at a time. Additional queries are queued at the proxy. 
* `target_num_ongoing_requests_per_replica` - Guides the auto-scaling behavior. If the average number of ongoing requests across replicas is above this number, Ray Serve attempts to scale up the number of replicas, and vice-versa for downscaling. We typically set this to ~40% of the `max_concurrent_queries`. * `ray_actor_options` - Similar to the `resources_per_worker` configuration in the `scaling_config`. Refer to the `scaling_config` section for more guidance. -* `smoothing_factor` - The multiplicative factor to amplify or moderate each upscaling or downscaling decision. A value less than 1.0 will slow down the scaling decision made in each step. See [advanced auto-scaling guide](https://docs.ray.io/en/latest/serve/advanced-guides/advanced-autoscaling.html#optional-define-how-the-system-reacts-to-changing-traffic) for more details. +* `smoothing_factor` - The multiplicative factor to amplify or moderate each upscaling or downscaling decision. A value less than 1.0 will slow down the scaling decision made in each step. See [advanced auto-scaling guide](https://docs.ray.io/en/latest/serve/advanced-guides/advanced-autoscaling.html#optional-define-how-the-system-reacts-to-changing-traffic) for more details. ## Engine config @@ -36,7 +36,7 @@ RayLLM supports continuous batching, meaning incoming requests are processed as * `model_id` is the ID that refers to the model in the RayLLM or OpenAI API. * `type` is the type of inference engine. Only `VLLMEngine` is currently supported. -* `engine_kwargs` and `max_total_tokens` are configuration options for the inference engine (e.g. gpu memory utilization, quantization, max number of concurrent sequences). These options may vary depending on the hardware accelerator type and model size. We have tuned the parameters in the configuration files included in RayLLM for you to use as reference. +* `engine_kwargs` and `max_total_tokens` are configuration options for the inference engine (e.g. gpu memory utilization, quantization, max number of concurrent sequences). These options may vary depending on the hardware accelerator type and model size. We have tuned the parameters in the configuration files included in RayLLM for you to use as reference. * `generation` contains configurations related to default generation parameters such as `prompt_format` and `stopping_sequences`. * `hf_model_id` is the Hugging Face model ID. If not specified, defaults to `model_id`. * `runtime_env` is a dictionary that contains Ray runtime environment configuration. It allows you to set per-model pip packages and environment variables. See [Ray documentation on Runtime Environments](https://docs.ray.io/en/latest/ray-core/handling-dependencies.html#runtime-environments) for more information. @@ -51,7 +51,7 @@ Finally, the `scaling_config` section specifies what resources should be used to * `num_gpus_per_worker` - Number of GPUs to be allocated per worker. This should always be 1. * `num_cpus_per_worker` - Number of CPUs to be allocated per worker. Usually set to 8. * `placement_strategy` - Ray supports different [placement strategies](https://docs.ray.io/en/latest/ray-core/scheduling/placement-group.html#placement-strategy) for guiding the physical distribution of workers. To ensure all workers are on the same node, use "STRICT_PACK". -* `resources_per_worker` - we use `resources_per_worker` to set [Ray custom resources](https://docs.ray.io/en/latest/ray-core/scheduling/resources.html#id1) and place the models on specific node types. 
An example configuration of `resources_per_worker` involves setting `accelerator_type:L4` to 0.001 for a Llama-2-7b model to be deployed on an L4 GPU. This must always be set to 0.001. The `num_gpus_per_worker` configuration along with number of GPUs available on the node will determine the number of workers Ray schedules on the node. The supported accelerator types are: T4, L4, A10G, A100-40G and A100-80G. +* `resources_per_worker` - we use `resources_per_worker` to set [Ray custom resources](https://docs.ray.io/en/latest/ray-core/scheduling/resources.html#id1) and place the models on specific node types. An example configuration of `resources_per_worker` involves setting `accelerator_type:L4` to 0.001 for a Llama-2-7b model to be deployed on an L4 GPU. This must always be set to 0.001. The `num_gpus_per_worker` configuration along with number of GPUs available on the node will determine the number of workers Ray schedules on the node. The supported accelerator types are: T4, L4, A10G, A100-40G and A100-80G. ## My deployment isn't starting/working correctly, how can I debug? diff --git a/templates/endpoints/CustomModels.md b/templates/endpoints/CustomModels.md index 01fc1f112..5c3aa2486 100644 --- a/templates/endpoints/CustomModels.md +++ b/templates/endpoints/CustomModels.md @@ -1,11 +1,11 @@ # Adding a new model -RayLLM supports fine-tuned versions of models in the `models` directory as well as model architectures supported by [vLLM](https://docs.vllm.ai/en/latest/models/supported_models.html). You can either bring a model from HuggingFace or artifact storage like S3, GCS. +RayLLM supports fine-tuned versions of models in the `models` directory as well as model architectures supported by [vLLM](https://docs.vllm.ai/en/latest/models/supported_models.html). You can either bring a model from HuggingFace or artifact storage like S3, GCS. ## Configuring a new model To add an entirely new model to the zoo, you will need to create a new YAML file. -This file should follow the naming convention +This file should follow the naming convention `----.yaml`. We recommend using one of the existing models as a template (ideally, one that is the same architecture and number of parameters as the model you are adding). The examples in the `models` directory should help you get started. You can look at the [Advanced Model Configs](./AdvancedModelConfigs.md) for more details on these configurations. ```yaml @@ -75,7 +75,7 @@ scaling_config: ``` -## Adding a private model +## Adding a private model For loading a model from S3 or GCS, set `engine_config.s3_mirror_config.bucket_uri` or `engine_config.gcs_mirror_config.bucket_uri` to point to a folder containing your model and tokenizer files (`config.json`, `tokenizer_config.json`, `.bin`/`.safetensors` files, etc.) and set `engine_config.model_id` to any ID you desire in the `organization/model` format, eg. `myorganization/llama2-finetuned`. The model will be downloaded to a folder in the `/models----/snapshots/` directory on each node in the cluster. `` will be determined by the contents of `hash` file in the S3 folder, or default to `0000000000000000000000000000000000000000`. See the [HuggingFace transformers documentation](https://huggingface.co/docs/transformers/main/en/installation#cache-setup). 
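For example, the relevant fields might look like the sketch below. The bucket path is a placeholder for a folder you own, and `myorganization/llama2-finetuned` is the example ID from above:

```yaml
engine_config:
  # Any ID you choose, in organization/model format.
  model_id: myorganization/llama2-finetuned
  s3_mirror_config:
    # Folder containing config.json, tokenizer files, and .bin/.safetensors weights.
    bucket_uri: s3://my-bucket/models/llama2-finetuned/
```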
diff --git a/templates/endpoints/DeployFunctionCalling.md b/templates/endpoints/DeployFunctionCalling.md index b00e6ef48..d346b4b11 100644 --- a/templates/endpoints/DeployFunctionCalling.md +++ b/templates/endpoints/DeployFunctionCalling.md @@ -21,11 +21,11 @@ For Example, you can see `models/mistral/mistralai--Mistral-7B-Instruct-v0.1_a10 enable_json_logits_processors: true ``` -2. Set `standalone_function_calling_model: true` in top level configuration. +2. Set `standalone_function_calling_model: true` in top level configuration. # Step 2 - Deploying & Querying Function calling model -`func_calling-serve.yaml` and `func_calling-query.py` are provided for you in this template. +`func_calling-serve.yaml` and `func_calling-query.py` are provided for you in this template. In order to deploy a model in function calling mode you need to edit `func_calling-serve.yaml`: Under `function_calling_models` add path to the model you want to use. You can add multiple model diff --git a/templates/endpoints/DeployLora.md b/templates/endpoints/DeployLora.md index 66edd0d86..a46020c7c 100644 --- a/templates/endpoints/DeployLora.md +++ b/templates/endpoints/DeployLora.md @@ -1,10 +1,10 @@ # Serving LoRA Models -We support serving multiple LoRA adapters with a common base model in the same request batch which allows you to serve a wide variety of use-cases without increasing hardware spend. In addition, we use Serve multiplexing to reduce the number of swaps for LoRA adapters. There is a slight latency overhead to serving a LoRA model compared to the base model, typically 10-20%. +We support serving multiple LoRA adapters with a common base model in the same request batch which allows you to serve a wide variety of use-cases without increasing hardware spend. In addition, we use Serve multiplexing to reduce the number of swaps for LoRA adapters. There is a slight latency overhead to serving a LoRA model compared to the base model, typically 10-20%. # Setup LoRA Model Deployment -`lora-serve.yaml` and `lora-query.py` are provided for you in this template. +`lora-serve.yaml` and `lora-query.py` are provided for you in this template. In order to deploy LoRA adapters you would need to update `lora-serve.yaml`: 1. `dynamic_lora_loading_path` - The LoRA checkpoints are loaded from the artifact storage path specified in `dynamic_lora_loading_path`. The path to the checkpoints must be in the following format: `{dynamic_lora_loading_path}/{base_model_id}:{suffix}:{id}`, e.g. `s3://my-bucket/my-lora-checkouts/meta-llama/Llama-2-7b-chat-hf:lora-model:1234`. The models can be loaded from any accessible AWS S3 or Google Cloud Storage bucket. You can use an existing bucket where you have the LoRA models or can upload the models to `$ANYSCALE_ARTIFACT_STORAGE` already provided by Anyscale Workspace. New models can be uploaded to the `dynamic_lora_loading_path` dynamically before or after the Serve application is launched. diff --git a/templates/endpoints/EmbeddingModels.md b/templates/endpoints/EmbeddingModels.md index cc4bcf3c9..51d943f6d 100644 --- a/templates/endpoints/EmbeddingModels.md +++ b/templates/endpoints/EmbeddingModels.md @@ -4,7 +4,7 @@ We support serving embedding models available in HuggingFace as well as optimizi # Setting up Model -See an example for serving embedding models in `embedding-serve.yaml`. Notably the `args` field in the yaml file needs to contain the `embedding_models` field. This field contains a list of YAML files (in the `models` directory) for the embedding models you want to deploy. 
+See an example for serving embedding models in `embedding-serve.yaml`. Notably the `args` field in the yaml file needs to contain the `embedding_models` field. This field contains a list of YAML files (in the `models` directory) for the embedding models you want to deploy. In order to deploy an embedding model run: ```shell @@ -21,7 +21,7 @@ python embedding-query.py # Optimizing Embedding Models -We support optimizing embedding models with ONNX. In order to enable this, set the flag under `engine_config` in your model yaml file. See `models/embedding_models\BAAI--bge-large-en-v1.5.yaml` for an example. +We support optimizing embedding models with ONNX. In order to enable this, set the flag under `engine_config` in your model yaml file. See `models/embedding_models\BAAI--bge-large-en-v1.5.yaml` for an example. ```shell engine_config: diff --git a/templates/endpoints/OptimizeModels.md b/templates/endpoints/OptimizeModels.md index 4c2b67bc0..699792502 100644 --- a/templates/endpoints/OptimizeModels.md +++ b/templates/endpoints/OptimizeModels.md @@ -3,7 +3,7 @@ We have provided various model configurations for different accelerator types and tensor parallelism (or tp). The supported accelerator types are: T4, L4, A10G, A100-40G and A100-80G. -Tensor parallelism is a type of model parallelism in which specific model weights, gradients, and optimizer states are split across devices. This typically involves distributed computation of specific operations, modules, or layers of the model. +Tensor parallelism is a type of model parallelism in which specific model weights, gradients, and optimizer states are split across devices. This typically involves distributed computation of specific operations, modules, or layers of the model. ## Configurations to optimize @@ -11,9 +11,9 @@ Tensor parallelism is a type of model parallelism in which specific model weight These are some configurations you should consider changing when updating tensor parallelism or the accelerator type: 1. The `num_workers` configuration can be used to set the tensor parallelism for a model. A higher value for tensor parallelism will typically lead to lower latency at the cost of more GPUs per replica. 2. `resources_per_worker` in `scaling_config` and `resources` under `ray_actor_options` in the `deployment_config`. These determine the accelerator type used. It must follow the format of `"accelerator_type:T4":0.01`. -3. `engine_kwargs`: `max_num_batched_tokens` and `max_num_seqs`. These are the maximum number of batched tokens and sequences configured for each iteration in [vLLM](https://docs.vllm.ai/en/latest/models/engine_args.html). With increase in available GPU memory, you can increase these values. +3. `engine_kwargs`: `max_num_batched_tokens` and `max_num_seqs`. These are the maximum number of batched tokens and sequences configured for each iteration in [vLLM](https://docs.vllm.ai/en/latest/models/engine_args.html). With increase in available GPU memory, you can increase these values. 4. `autoscaling_config`: `max_concurrent_queries` - the maximum number of queries that will be handled concurrently by each replica of the model (should be set equal to `max_num_seqs`) and `target_num_ongoing_requests_per_replica` - the number of ongoing requests per replica that will trigger auto-scaling. Similar to the arguments above, these can be increased as the GPU memory changes. -You may consider optimizing for either latency or throughput. The per-request latency generally degrades as the number of concurrent requests increase. 
The provided configurations generally optimize for latency. We recommend starting with one of our configurations and running load tests if you would like to tune any of the above parameters. +You may consider optimizing for either latency or throughput. The per-request latency generally degrades as the number of concurrent requests increase. The provided configurations generally optimize for latency. We recommend starting with one of our configurations and running load tests if you would like to tune any of the above parameters. Note - You can only run one configuration for a model id at a time. diff --git a/templates/endpoints/models/mistral/mistralai--Mistral-7B-Instruct-v0.1_a100-40g_tp1.yaml b/templates/endpoints/models/mistral/mistralai--Mistral-7B-Instruct-v0.1_a100-40g_tp1.yaml index 27f82c0c2..f9088dc58 100644 --- a/templates/endpoints/models/mistral/mistralai--Mistral-7B-Instruct-v0.1_a100-40g_tp1.yaml +++ b/templates/endpoints/models/mistral/mistralai--Mistral-7B-Instruct-v0.1_a100-40g_tp1.yaml @@ -43,4 +43,3 @@ scaling_config: resources_per_worker: "accelerator_type:A100-40G": 0.001 standalone_function_calling_model: true - diff --git a/templates/endpoints/models/mistral/mistralai--Mistral-7B-Instruct-v0.1_l4_tp2.yaml b/templates/endpoints/models/mistral/mistralai--Mistral-7B-Instruct-v0.1_l4_tp2.yaml index 49760ce0d..e7847f52c 100644 --- a/templates/endpoints/models/mistral/mistralai--Mistral-7B-Instruct-v0.1_l4_tp2.yaml +++ b/templates/endpoints/models/mistral/mistralai--Mistral-7B-Instruct-v0.1_l4_tp2.yaml @@ -43,4 +43,4 @@ scaling_config: placement_strategy: "STRICT_PACK" resources_per_worker: "accelerator_type:L4": 0.001 -standalone_function_calling_model: true +standalone_function_calling_model: true diff --git a/templates/endpoints/models/mistral/mistralai--Mixtral-8x7B-Instruct-v0.1_a100-40g_tp8.yaml b/templates/endpoints/models/mistral/mistralai--Mixtral-8x7B-Instruct-v0.1_a100-40g_tp8.yaml index 6f483ca7d..d4bcdb82f 100644 --- a/templates/endpoints/models/mistral/mistralai--Mixtral-8x7B-Instruct-v0.1_a100-40g_tp8.yaml +++ b/templates/endpoints/models/mistral/mistralai--Mixtral-8x7B-Instruct-v0.1_a100-40g_tp8.yaml @@ -43,4 +43,4 @@ scaling_config: placement_strategy: "STRICT_PACK" resources_per_worker: "accelerator_type:A100-40G": 0.001 -standalone_function_calling_model: true \ No newline at end of file +standalone_function_calling_model: true diff --git a/templates/endpoints/models/mistral/mistralai--Mixtral-8x7B-Instruct-v0.1_a100-80g_tp8.yaml b/templates/endpoints/models/mistral/mistralai--Mixtral-8x7B-Instruct-v0.1_a100-80g_tp8.yaml index 4ff63485f..9bf040276 100644 --- a/templates/endpoints/models/mistral/mistralai--Mixtral-8x7B-Instruct-v0.1_a100-80g_tp8.yaml +++ b/templates/endpoints/models/mistral/mistralai--Mixtral-8x7B-Instruct-v0.1_a100-80g_tp8.yaml @@ -43,4 +43,4 @@ scaling_config: placement_strategy: "STRICT_PACK" resources_per_worker: "accelerator_type:A100-80G": 0.001 -standalone_function_calling_model: true \ No newline at end of file +standalone_function_calling_model: true diff --git a/templates/endpoints_v2/AdvancedModelConfigs.md b/templates/endpoints_v2/AdvancedModelConfigs.md index cf4ee28bc..f961e96ba 100644 --- a/templates/endpoints_v2/AdvancedModelConfigs.md +++ b/templates/endpoints_v2/AdvancedModelConfigs.md @@ -5,10 +5,10 @@ Each model is defined by a YAML configuration file in the `models` directory. ## Modify an existing model To modify an existing model, simply edit the YAML file for that model. 
-Each config file consists of three sections: +Each config file consists of three sections: -- `deployment_config`, -- `engine_config`, +- `deployment_config`, +- `engine_config`, - `scaling_config`. It's best to check out examples of existing models to see how they are configured. @@ -24,7 +24,7 @@ and specifies how to [auto-scale the model](https://docs.ray.io/en/latest/serve/ * `max_concurrent_queries` - Maximum number of queries that a Ray Serve replica can process at a time. Additional queries are queued at the proxy. * `target_num_ongoing_requests_per_replica` - Guides the auto-scaling behavior. If the average number of ongoing requests across replicas is above this number, Ray Serve attempts to scale up the number of replicas, and vice-versa for downscaling. We typically set this to ~40% of the `max_concurrent_queries`. * `ray_actor_options` - Similar to the `resources_per_worker` configuration in the `scaling_config`. Refer to the `scaling_config` section for more guidance. -* `smoothing_factor` - The multiplicative factor to amplify or moderate each upscaling or downscaling decision. A value less than 1.0 will slow down the scaling decision made in each step. See [advanced auto-scaling guide](https://docs.ray.io/en/latest/serve/advanced-guides/advanced-autoscaling.html#optional-define-how-the-system-reacts-to-changing-traffic) for more details. +* `smoothing_factor` - The multiplicative factor to amplify or moderate each upscaling or downscaling decision. A value less than 1.0 will slow down the scaling decision made in each step. See [advanced auto-scaling guide](https://docs.ray.io/en/latest/serve/advanced-guides/advanced-autoscaling.html#optional-define-how-the-system-reacts-to-changing-traffic) for more details. ## Engine config @@ -36,7 +36,7 @@ RayLLM supports continuous batching, meaning incoming requests are processed as * `model_id` is the ID that refers to the model in the RayLLM or OpenAI API. * `type` is the type of inference engine. Only `VLLMEngine` is currently supported. -* `engine_kwargs` and `max_total_tokens` are configuration options for the inference engine (e.g. gpu memory utilization, quantization, max number of concurrent sequences). These options may vary depending on the hardware accelerator type and model size. We have tuned the parameters in the configuration files included in RayLLM for you to use as reference. +* `engine_kwargs` and `max_total_tokens` are configuration options for the inference engine (e.g. gpu memory utilization, quantization, max number of concurrent sequences). These options may vary depending on the hardware accelerator type and model size. We have tuned the parameters in the configuration files included in RayLLM for you to use as reference. * `generation` contains configurations related to default generation parameters such as `prompt_format` and `stopping_sequences`. * `hf_model_id` is the Hugging Face model ID. If not specified, defaults to `model_id`. * `runtime_env` is a dictionary that contains Ray runtime environment configuration. It allows you to set per-model pip packages and environment variables. See [Ray documentation on Runtime Environments](https://docs.ray.io/en/latest/ray-core/handling-dependencies.html#runtime-environments) for more information. @@ -51,7 +51,7 @@ Finally, the `scaling_config` section specifies what resources should be used to * `num_gpus_per_worker` - Number of GPUs to be allocated per worker. This should always be 1. * `num_cpus_per_worker` - Number of CPUs to be allocated per worker. 
Usually set to 8. * `placement_strategy` - Ray supports different [placement strategies](https://docs.ray.io/en/latest/ray-core/scheduling/placement-group.html#placement-strategy) for guiding the physical distribution of workers. To ensure all workers are on the same node, use "STRICT_PACK". -* `resources_per_worker` - we use `resources_per_worker` to set [Ray custom resources](https://docs.ray.io/en/latest/ray-core/scheduling/resources.html#id1) and place the models on specific node types. An example configuration of `resources_per_worker` involves setting `accelerator_type:L4` to 0.001 for a Llama-2-7b model to be deployed on an L4 GPU. This must always be set to 0.001. The `num_gpus_per_worker` configuration along with number of GPUs available on the node will determine the number of workers Ray schedules on the node. The supported accelerator types are: T4, L4, A10G, A100-40G and A100-80G. +* `resources_per_worker` - we use `resources_per_worker` to set [Ray custom resources](https://docs.ray.io/en/latest/ray-core/scheduling/resources.html#id1) and place the models on specific node types. An example configuration of `resources_per_worker` involves setting `accelerator_type:L4` to 0.001 for a Llama-2-7b model to be deployed on an L4 GPU. This must always be set to 0.001. The `num_gpus_per_worker` configuration along with number of GPUs available on the node will determine the number of workers Ray schedules on the node. The supported accelerator types are: T4, L4, A10G, A100-40G and A100-80G. ## My deployment isn't starting/working correctly, how can I debug? diff --git a/templates/endpoints_v2/CustomModels.md b/templates/endpoints_v2/CustomModels.md index 01fc1f112..5c3aa2486 100644 --- a/templates/endpoints_v2/CustomModels.md +++ b/templates/endpoints_v2/CustomModels.md @@ -1,11 +1,11 @@ # Adding a new model -RayLLM supports fine-tuned versions of models in the `models` directory as well as model architectures supported by [vLLM](https://docs.vllm.ai/en/latest/models/supported_models.html). You can either bring a model from HuggingFace or artifact storage like S3, GCS. +RayLLM supports fine-tuned versions of models in the `models` directory as well as model architectures supported by [vLLM](https://docs.vllm.ai/en/latest/models/supported_models.html). You can either bring a model from HuggingFace or artifact storage like S3, GCS. ## Configuring a new model To add an entirely new model to the zoo, you will need to create a new YAML file. -This file should follow the naming convention +This file should follow the naming convention `----.yaml`. We recommend using one of the existing models as a template (ideally, one that is the same architecture and number of parameters as the model you are adding). The examples in the `models` directory should help you get started. You can look at the [Advanced Model Configs](./AdvancedModelConfigs.md) for more details on these configurations. ```yaml @@ -75,7 +75,7 @@ scaling_config: ``` -## Adding a private model +## Adding a private model For loading a model from S3 or GCS, set `engine_config.s3_mirror_config.bucket_uri` or `engine_config.gcs_mirror_config.bucket_uri` to point to a folder containing your model and tokenizer files (`config.json`, `tokenizer_config.json`, `.bin`/`.safetensors` files, etc.) and set `engine_config.model_id` to any ID you desire in the `organization/model` format, eg. `myorganization/llama2-finetuned`. The model will be downloaded to a folder in the `/models----/snapshots/` directory on each node in the cluster. 
`` will be determined by the contents of `hash` file in the S3 folder, or default to `0000000000000000000000000000000000000000`. See the [HuggingFace transformers documentation](https://huggingface.co/docs/transformers/main/en/installation#cache-setup). diff --git a/templates/endpoints_v2/DeployFunctionCalling.md b/templates/endpoints_v2/DeployFunctionCalling.md index b00e6ef48..d346b4b11 100644 --- a/templates/endpoints_v2/DeployFunctionCalling.md +++ b/templates/endpoints_v2/DeployFunctionCalling.md @@ -21,11 +21,11 @@ For Example, you can see `models/mistral/mistralai--Mistral-7B-Instruct-v0.1_a10 enable_json_logits_processors: true ``` -2. Set `standalone_function_calling_model: true` in top level configuration. +2. Set `standalone_function_calling_model: true` in top level configuration. # Step 2 - Deploying & Querying Function calling model -`func_calling-serve.yaml` and `func_calling-query.py` are provided for you in this template. +`func_calling-serve.yaml` and `func_calling-query.py` are provided for you in this template. In order to deploy a model in function calling mode you need to edit `func_calling-serve.yaml`: Under `function_calling_models` add path to the model you want to use. You can add multiple model diff --git a/templates/endpoints_v2/DeployLora.md b/templates/endpoints_v2/DeployLora.md index 66edd0d86..a46020c7c 100644 --- a/templates/endpoints_v2/DeployLora.md +++ b/templates/endpoints_v2/DeployLora.md @@ -1,10 +1,10 @@ # Serving LoRA Models -We support serving multiple LoRA adapters with a common base model in the same request batch which allows you to serve a wide variety of use-cases without increasing hardware spend. In addition, we use Serve multiplexing to reduce the number of swaps for LoRA adapters. There is a slight latency overhead to serving a LoRA model compared to the base model, typically 10-20%. +We support serving multiple LoRA adapters with a common base model in the same request batch which allows you to serve a wide variety of use-cases without increasing hardware spend. In addition, we use Serve multiplexing to reduce the number of swaps for LoRA adapters. There is a slight latency overhead to serving a LoRA model compared to the base model, typically 10-20%. # Setup LoRA Model Deployment -`lora-serve.yaml` and `lora-query.py` are provided for you in this template. +`lora-serve.yaml` and `lora-query.py` are provided for you in this template. In order to deploy LoRA adapters you would need to update `lora-serve.yaml`: 1. `dynamic_lora_loading_path` - The LoRA checkpoints are loaded from the artifact storage path specified in `dynamic_lora_loading_path`. The path to the checkpoints must be in the following format: `{dynamic_lora_loading_path}/{base_model_id}:{suffix}:{id}`, e.g. `s3://my-bucket/my-lora-checkouts/meta-llama/Llama-2-7b-chat-hf:lora-model:1234`. The models can be loaded from any accessible AWS S3 or Google Cloud Storage bucket. You can use an existing bucket where you have the LoRA models or can upload the models to `$ANYSCALE_ARTIFACT_STORAGE` already provided by Anyscale Workspace. New models can be uploaded to the `dynamic_lora_loading_path` dynamically before or after the Serve application is launched. 
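For instance, staging a local checkpoint with the AWS CLI might look like the sketch below. The bucket, suffix, and ID are placeholders; the key must follow the `{base_model_id}:{suffix}:{id}` layout described above:

```bash
# Hypothetical example: upload a LoRA checkpoint where the service can discover it.
aws s3 sync ./my-lora-checkpoint \
  "s3://my-bucket/my-lora-checkpoints/meta-llama/Llama-2-7b-chat-hf:my-suffix:1234"
```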
diff --git a/templates/endpoints_v2/EmbeddingModels.md b/templates/endpoints_v2/EmbeddingModels.md index cc4bcf3c9..51d943f6d 100644 --- a/templates/endpoints_v2/EmbeddingModels.md +++ b/templates/endpoints_v2/EmbeddingModels.md @@ -4,7 +4,7 @@ We support serving embedding models available in HuggingFace as well as optimizi # Setting up Model -See an example for serving embedding models in `embedding-serve.yaml`. Notably the `args` field in the yaml file needs to contain the `embedding_models` field. This field contains a list of YAML files (in the `models` directory) for the embedding models you want to deploy. +See an example for serving embedding models in `embedding-serve.yaml`. Notably the `args` field in the yaml file needs to contain the `embedding_models` field. This field contains a list of YAML files (in the `models` directory) for the embedding models you want to deploy. In order to deploy an embedding model run: ```shell @@ -21,7 +21,7 @@ python embedding-query.py # Optimizing Embedding Models -We support optimizing embedding models with ONNX. In order to enable this, set the flag under `engine_config` in your model yaml file. See `models/embedding_models\BAAI--bge-large-en-v1.5.yaml` for an example. +We support optimizing embedding models with ONNX. In order to enable this, set the flag under `engine_config` in your model yaml file. See `models/embedding_models\BAAI--bge-large-en-v1.5.yaml` for an example. ```shell engine_config: diff --git a/templates/endpoints_v2/OptimizeModels.md b/templates/endpoints_v2/OptimizeModels.md index 4c2b67bc0..699792502 100644 --- a/templates/endpoints_v2/OptimizeModels.md +++ b/templates/endpoints_v2/OptimizeModels.md @@ -3,7 +3,7 @@ We have provided various model configurations for different accelerator types and tensor parallelism (or tp). The supported accelerator types are: T4, L4, A10G, A100-40G and A100-80G. -Tensor parallelism is a type of model parallelism in which specific model weights, gradients, and optimizer states are split across devices. This typically involves distributed computation of specific operations, modules, or layers of the model. +Tensor parallelism is a type of model parallelism in which specific model weights, gradients, and optimizer states are split across devices. This typically involves distributed computation of specific operations, modules, or layers of the model. ## Configurations to optimize @@ -11,9 +11,9 @@ Tensor parallelism is a type of model parallelism in which specific model weight These are some configurations you should consider changing when updating tensor parallelism or the accelerator type: 1. The `num_workers` configuration can be used to set the tensor parallelism for a model. A higher value for tensor parallelism will typically lead to lower latency at the cost of more GPUs per replica. 2. `resources_per_worker` in `scaling_config` and `resources` under `ray_actor_options` in the `deployment_config`. These determine the accelerator type used. It must follow the format of `"accelerator_type:T4":0.01`. -3. `engine_kwargs`: `max_num_batched_tokens` and `max_num_seqs`. These are the maximum number of batched tokens and sequences configured for each iteration in [vLLM](https://docs.vllm.ai/en/latest/models/engine_args.html). With increase in available GPU memory, you can increase these values. +3. `engine_kwargs`: `max_num_batched_tokens` and `max_num_seqs`. 
These are the maximum number of batched tokens and sequences configured for each iteration in [vLLM](https://docs.vllm.ai/en/latest/models/engine_args.html). With increase in available GPU memory, you can increase these values. 4. `autoscaling_config`: `max_concurrent_queries` - the maximum number of queries that will be handled concurrently by each replica of the model (should be set equal to `max_num_seqs`) and `target_num_ongoing_requests_per_replica` - the number of ongoing requests per replica that will trigger auto-scaling. Similar to the arguments above, these can be increased as the GPU memory changes. -You may consider optimizing for either latency or throughput. The per-request latency generally degrades as the number of concurrent requests increase. The provided configurations generally optimize for latency. We recommend starting with one of our configurations and running load tests if you would like to tune any of the above parameters. +You may consider optimizing for either latency or throughput. The per-request latency generally degrades as the number of concurrent requests increase. The provided configurations generally optimize for latency. We recommend starting with one of our configurations and running load tests if you would like to tune any of the above parameters. Note - You can only run one configuration for a model id at a time. diff --git a/templates/endpoints_v2/models/mistral/mistralai--Mistral-7B-Instruct-v0.1_a100-40g_tp1.yaml b/templates/endpoints_v2/models/mistral/mistralai--Mistral-7B-Instruct-v0.1_a100-40g_tp1.yaml index 27f82c0c2..f9088dc58 100644 --- a/templates/endpoints_v2/models/mistral/mistralai--Mistral-7B-Instruct-v0.1_a100-40g_tp1.yaml +++ b/templates/endpoints_v2/models/mistral/mistralai--Mistral-7B-Instruct-v0.1_a100-40g_tp1.yaml @@ -43,4 +43,3 @@ scaling_config: resources_per_worker: "accelerator_type:A100-40G": 0.001 standalone_function_calling_model: true - diff --git a/templates/endpoints_v2/models/mistral/mistralai--Mistral-7B-Instruct-v0.1_l4_tp2.yaml b/templates/endpoints_v2/models/mistral/mistralai--Mistral-7B-Instruct-v0.1_l4_tp2.yaml index 49760ce0d..e7847f52c 100644 --- a/templates/endpoints_v2/models/mistral/mistralai--Mistral-7B-Instruct-v0.1_l4_tp2.yaml +++ b/templates/endpoints_v2/models/mistral/mistralai--Mistral-7B-Instruct-v0.1_l4_tp2.yaml @@ -43,4 +43,4 @@ scaling_config: placement_strategy: "STRICT_PACK" resources_per_worker: "accelerator_type:L4": 0.001 -standalone_function_calling_model: true +standalone_function_calling_model: true diff --git a/templates/endpoints_v2/models/mistral/mistralai--Mixtral-8x7B-Instruct-v0.1_a100-40g_tp8.yaml b/templates/endpoints_v2/models/mistral/mistralai--Mixtral-8x7B-Instruct-v0.1_a100-40g_tp8.yaml index 6f483ca7d..d4bcdb82f 100644 --- a/templates/endpoints_v2/models/mistral/mistralai--Mixtral-8x7B-Instruct-v0.1_a100-40g_tp8.yaml +++ b/templates/endpoints_v2/models/mistral/mistralai--Mixtral-8x7B-Instruct-v0.1_a100-40g_tp8.yaml @@ -43,4 +43,4 @@ scaling_config: placement_strategy: "STRICT_PACK" resources_per_worker: "accelerator_type:A100-40G": 0.001 -standalone_function_calling_model: true \ No newline at end of file +standalone_function_calling_model: true diff --git a/templates/endpoints_v2/models/mistral/mistralai--Mixtral-8x7B-Instruct-v0.1_a100-80g_tp8.yaml b/templates/endpoints_v2/models/mistral/mistralai--Mixtral-8x7B-Instruct-v0.1_a100-80g_tp8.yaml index 4ff63485f..9bf040276 100644 --- a/templates/endpoints_v2/models/mistral/mistralai--Mixtral-8x7B-Instruct-v0.1_a100-80g_tp8.yaml +++ 
b/templates/endpoints_v2/models/mistral/mistralai--Mixtral-8x7B-Instruct-v0.1_a100-80g_tp8.yaml @@ -43,4 +43,4 @@ scaling_config: placement_strategy: "STRICT_PACK" resources_per_worker: "accelerator_type:A100-80G": 0.001 -standalone_function_calling_model: true \ No newline at end of file +standalone_function_calling_model: true diff --git a/templates/fine-tune-GPTJ/cluster_env.yaml b/templates/fine-tune-GPTJ/cluster_env.yaml index 209838289..a69a8c97d 100644 --- a/templates/fine-tune-GPTJ/cluster_env.yaml +++ b/templates/fine-tune-GPTJ/cluster_env.yaml @@ -17,4 +17,4 @@ python: conda_packages: [] -post_build_cmds: [] \ No newline at end of file +post_build_cmds: [] diff --git a/templates/fine-tune-GPTJ/gptj_deepspeed_fine_tuning.py b/templates/fine-tune-GPTJ/gptj_deepspeed_fine_tuning.py index b1e439c25..0c17f21b0 100644 --- a/templates/fine-tune-GPTJ/gptj_deepspeed_fine_tuning.py +++ b/templates/fine-tune-GPTJ/gptj_deepspeed_fine_tuning.py @@ -41,9 +41,9 @@ #---------------------EDIT AND UPDATE WITH YOUR DATASET HERE---------------------# -# Because the dataset is represented by a single large string, we will need to do some preprocessing. +# Because the dataset is represented by a single large string, we will need to do some preprocessing. # For that, we will define two Ray AIR Preprocessors using the BatchMapper API, allowing us to define functions that will be applied on batches of data. -# The split_text function will take the single string and split it into separate lines, removing empty lines and character names ending with ‘:’ (eg. ‘ROMEO:’). +# The split_text function will take the single string and split it into separate lines, removing empty lines and character names ending with ‘:’ (eg. ‘ROMEO:’). # The tokenize function will take the lines and tokenize them using the 🤗 Tokenizer associated with the model, ensuring each entry has the same length (block_size) by padding and truncating. block_size = 512 @@ -75,12 +75,12 @@ def tokenize(batch: pd.DataFrame) -> dict: splitter = BatchMapper(split_text, batch_format="pandas") tokenizer = BatchMapper(tokenize, batch_format="pandas") -# We can now configure Ray AIR's ray.train.huggingface.TransformersTrainer to perform distributed fine-tuning of the model. -# In order to do that, we specify a trainer_init_per_worker function, which creates a 🤗 Transformers Trainer that will be distributed by Ray using Distributed Data Parallelism (using PyTorch Distributed backend internally). +# We can now configure Ray AIR's ray.train.huggingface.TransformersTrainer to perform distributed fine-tuning of the model. +# In order to do that, we specify a trainer_init_per_worker function, which creates a 🤗 Transformers Trainer that will be distributed by Ray using Distributed Data Parallelism (using PyTorch Distributed backend internally). # This means that each worker will have its own copy of the model, but operate on different data, At the end of each step, all the workers will sync gradients. # Because GPT-J is a relatively large model, it may not be possible to fit it on smaller GPU types (<=16 GB GRAM). -# To deal with that issue, we can use DeepSpeed, a library to optimize the training process and allow us to (among other things) offload and partition optimizer and parameter states, reducing GRAM usage. +# To deal with that issue, we can use DeepSpeed, a library to optimize the training process and allow us to (among other things) offload and partition optimizer and parameter states, reducing GRAM usage. 
# Furthermore, DeepSpeed ZeRO Stage 3 allows us to load large models without running out of memory. @@ -189,10 +189,10 @@ def compute_metrics(eval_pred): ) return trainer -# With our trainer_init_per_worker complete, we can now instantiate the ray.train.huggingface.TransformersTrainer. +# With our trainer_init_per_worker complete, we can now instantiate the ray.train.huggingface.TransformersTrainer. # Aside from the function, we set the scaling_config, controlling the amount of workers and resources used, and the datasets we will use for training and evaluation. -# We pass the preprocessors we have defined earlier as an argument, wrapped in a ray.data.preprocessors.chain.Chain. +# We pass the preprocessors we have defined earlier as an argument, wrapped in a ray.data.preprocessors.chain.Chain. # The preprocessor will be included with the returned ray.air.checkpoint.Checkpoint, meaning it will also be applied during inference. trainer = TransformersTrainer( @@ -215,6 +215,6 @@ def compute_metrics(eval_pred): preprocessor=Chain(splitter, tokenizer), ) -#Finally, we call the ray.train.huggingface.TransformersTrainer.fit method to start training with Ray AIR. +#Finally, we call the ray.train.huggingface.TransformersTrainer.fit method to start training with Ray AIR. # We will save the ray.air.Result object to a variable so we can access metrics and checkpoints. -results = trainer.fit() \ No newline at end of file +results = trainer.fit() diff --git a/templates/fine-tune-llama2/README.ipynb b/templates/fine-tune-llama2/README.ipynb index a6a704653..3d4521b3e 100644 --- a/templates/fine-tune-llama2/README.ipynb +++ b/templates/fine-tune-llama2/README.ipynb @@ -83,4 +83,3 @@ "nbformat": 4, "nbformat_minor": 2 } - diff --git a/templates/fine-tune-llama2/create_dataset.py b/templates/fine-tune-llama2/create_dataset.py index b2fabca5e..97d6c7e3f 100644 --- a/templates/fine-tune-llama2/create_dataset.py +++ b/templates/fine-tune-llama2/create_dataset.py @@ -30,4 +30,4 @@ def main(): if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/templates/fine-tune-llama2/data/tokens.json b/templates/fine-tune-llama2/data/tokens.json index 7091d9bcc..eaf1edf26 100644 --- a/templates/fine-tune-llama2/data/tokens.json +++ b/templates/fine-tune-llama2/data/tokens.json @@ -1 +1 @@ -{"tokens": ["", "", "", ""]} \ No newline at end of file +{"tokens": ["", "", "", ""]} diff --git a/templates/fine-tune-llama2/deepspeed_configs/zero_3_llama_2_13b.json b/templates/fine-tune-llama2/deepspeed_configs/zero_3_llama_2_13b.json index f1ddac17f..a851e88ca 100644 --- a/templates/fine-tune-llama2/deepspeed_configs/zero_3_llama_2_13b.json +++ b/templates/fine-tune-llama2/deepspeed_configs/zero_3_llama_2_13b.json @@ -1,4 +1,4 @@ -{ +{ "fp16": { "enabled": "auto" }, @@ -32,4 +32,4 @@ "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false -} \ No newline at end of file +} diff --git a/templates/fine-tune-llama2/deepspeed_configs/zero_3_llama_2_70b.json b/templates/fine-tune-llama2/deepspeed_configs/zero_3_llama_2_70b.json index 23c70b4f7..111654771 100644 --- a/templates/fine-tune-llama2/deepspeed_configs/zero_3_llama_2_70b.json +++ b/templates/fine-tune-llama2/deepspeed_configs/zero_3_llama_2_70b.json @@ -25,4 +25,4 @@ "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false -} \ No newline at end of file +} diff --git a/templates/fine-tune-llama2/deepspeed_configs/zero_3_llama_2_70b_nvme_offload.json 
b/templates/fine-tune-llama2/deepspeed_configs/zero_3_llama_2_70b_nvme_offload.json index 8543b4475..17cae7f41 100644 --- a/templates/fine-tune-llama2/deepspeed_configs/zero_3_llama_2_70b_nvme_offload.json +++ b/templates/fine-tune-llama2/deepspeed_configs/zero_3_llama_2_70b_nvme_offload.json @@ -1,4 +1,4 @@ -{ +{ "fp16": { "enabled": "auto" }, @@ -28,4 +28,4 @@ "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false -} \ No newline at end of file +} diff --git a/templates/fine-tune-llama2/deepspeed_configs/zero_3_llama_2_7b.json b/templates/fine-tune-llama2/deepspeed_configs/zero_3_llama_2_7b.json index f1ddac17f..a851e88ca 100644 --- a/templates/fine-tune-llama2/deepspeed_configs/zero_3_llama_2_7b.json +++ b/templates/fine-tune-llama2/deepspeed_configs/zero_3_llama_2_7b.json @@ -1,4 +1,4 @@ -{ +{ "fp16": { "enabled": "auto" }, @@ -32,4 +32,4 @@ "train_batch_size": "auto", "train_micro_batch_size_per_gpu": "auto", "wall_clock_breakdown": false -} \ No newline at end of file +} diff --git a/templates/fine-tune-llama2/lora_configs/lora.json b/templates/fine-tune-llama2/lora_configs/lora.json index 3e0e621cd..f66f9620e 100644 --- a/templates/fine-tune-llama2/lora_configs/lora.json +++ b/templates/fine-tune-llama2/lora_configs/lora.json @@ -1,4 +1,4 @@ -{ +{ "r": 8, "lora_alpha": 16, "lora_dropout": 0.05, @@ -8,4 +8,4 @@ "bias": "none", "fan_in_fan_out": false, "init_lora_weights": true -} \ No newline at end of file +} diff --git a/templates/fine-tune-llama2/merge_lora_weights.py b/templates/fine-tune-llama2/merge_lora_weights.py index 2f6e55f2d..be06cf94b 100644 --- a/templates/fine-tune-llama2/merge_lora_weights.py +++ b/templates/fine-tune-llama2/merge_lora_weights.py @@ -154,4 +154,4 @@ def main(): if __name__ == "__main__": - main() \ No newline at end of file + main() diff --git a/templates/fine-tune-llama2/train.py b/templates/fine-tune-llama2/train.py index c2e00047a..2d44d8eff 100644 --- a/templates/fine-tune-llama2/train.py +++ b/templates/fine-tune-llama2/train.py @@ -537,15 +537,15 @@ def parse_args(): ) parser.add_argument( - "--test_path", - type=str, + "--test_path", + type=str, default="./data/test.jsonl", help="Path to testing jsonl file" ) parser.add_argument( - "--special_token_path", - type=str, + "--special_token_path", + type=str, default="./data/tokens.json", help="Path to token json file" ) @@ -556,16 +556,16 @@ def parse_args(): help="If passed, will not use gradient checkpointing.", ) parser.add_argument( - "--output_dir", - type=str, + "--output_dir", + type=str, default="/mnt/local_storage", help="Path to output directory." ) parser.add_argument( - "--num-epochs", - type=int, - default=3, + "--num-epochs", + type=int, + default=3, help="Number of epochs to train for." ) @@ -581,9 +581,9 @@ def parse_args(): ) parser.add_argument( - "--lr", - type=float, - default=5e-6, + "--lr", + type=float, + default=5e-6, help="Learning rate to use." ) @@ -640,7 +640,7 @@ def main(): # Adjust batch size per device (BS) and number of devices (ND) according to model size # Number of devices (ND) is determined by a combination of factors, context length, accelerator type, whether LoRA is used, etc. - if size == "7b" or size == "13b": + if size == "7b" or size == "13b": # nd is set on the basis of using Nvidia A10 and conducting full parameter fine-tuning with default context length. # If Nvidia A100 is used. ND can be set to 8 instead. 
bs, nd = 16, 16
diff --git a/templates/fine-tune-llama2/tutorials/lora.md b/templates/fine-tune-llama2/tutorials/lora.md
index 6f304409f..25f3a5286 100644
--- a/templates/fine-tune-llama2/tutorials/lora.md
+++ b/templates/fine-tune-llama2/tutorials/lora.md
@@ -11,8 +11,8 @@ Fine-tuning a model with LoRA results in a checkpoint containing only the fine-t
 As an example, the default Llama 2 LoRA configuration should yield a 42/64/202MB checkpoint for 7B/13B/70B models.
 If we want to evaluate the model after training, we can merge the model weights with the original (non-fine-tuned) model. We provide a script to merge the fine-tuned weights with the original weights to produce a full-parameter checkpoint.
-The script has high CPU memory requirements because it requires us to load all parameters into memory at the same time,
-13GB/24GB/152GB for 7B/13B/70B models. Downloading and loading the original weights should take ~1min/~2min/~10min each
+The script has high CPU memory requirements because it requires us to load all parameters into memory at the same time,
+13GB/24GB/152GB for 7B/13B/70B models. Downloading and loading the original weights should take ~1min/~2min/~10min each
 on a p4de.24xlarge instance. You can run the script as follows:
 ```
diff --git a/templates/fine-tune-llama2/tutorials/walkthrough.md b/templates/fine-tune-llama2/tutorials/walkthrough.md
index e36e6a95d..bd0839cb9 100644
--- a/templates/fine-tune-llama2/tutorials/walkthrough.md
+++ b/templates/fine-tune-llama2/tutorials/walkthrough.md
@@ -1,8 +1,8 @@
 # Walkthrough of the template
-## Downloading the pre-trained checkpoint on to all GPU nodes
+## Downloading the pre-trained checkpoint onto all GPU nodes
-The pre-trained models for these models is quite large (12.8G for 7B model, 24.2G for 13B model, and 128.5G for 70B model). In order to make loading these models faster, we have mirrored the weights on to an AWS S3 bucket which can result in up 10GB/s download speed if the aws configs are setup correctly.
+The pre-trained checkpoints for these models are quite large (12.8G for the 7B model, 24.2G for the 13B model, and 128.5G for the 70B model). In order to make loading these models faster, we have mirrored the weights onto an AWS S3 bucket, which can result in up to 10GB/s download speed if the AWS configs are set up correctly.

 ## Saving fine-tuned checkpoint to cloud storage

@@ -34,7 +34,7 @@ After training we can use the Llama-2
 ## Dataset format

-The main fine-tuning script is written in a general format that would require you to provide a `jsonl` file for train and test datasets in addition to a `json` file listing the special tokens used in your dataset.
+The main fine-tuning script is written in a general format that requires you to provide a `jsonl` file for the train and test datasets, in addition to a `json` file listing the special tokens used in your dataset.

 For example, each row in your dataset might be formatted like the following:

@@ -48,9 +48,9 @@ And the special tokens can be:
 {"tokens": ["", "", "", ""]}
 ```

-Depending on the dataset you want to fine-tune on, the tokenization and dataset pre-processing will likely need to be adjusted. The current code is configured to train on the Grade School Math 8k (GSM8K) dataset.
+Depending on the dataset you want to fine-tune on, the tokenization and dataset pre-processing will likely need to be adjusted. The current code is configured to train on the Grade School Math 8k (GSM8K) dataset.

-The existing Grade School Math 8k (GSM8K) dataset is generated by running the code below. We create three files that are needed to launch the training script with.
+The existing Grade School Math 8k (GSM8K) dataset is generated by running the code below, which creates the three files needed to launch the training script.

 ```
 python create_dataset.py
@@ -65,9 +65,9 @@ This dataset includes excessive padding to keep all samples limited to 512 token
 ## Launching fine-tuning

-The script is written using Ray Train + Deepspeed integration via accelerate API. The script is general enough that it can be used to fine-tune all released sizes of Llama-2 models.
+The script is written using the Ray Train + DeepSpeed integration via the Accelerate API. The script is general enough that it can be used to fine-tune all released sizes of Llama-2 models.

-This script was tested across three model sizes on the following cluster configurations on Anyscale platform.
+This script was tested across three model sizes on the following cluster configurations on the Anyscale platform.

 | Model Size | Base HF Model ID | Batch size per device | GPUs | Time per epoch (min.) |
@@ -91,4 +91,4 @@ python train.py --size=7b
 python train.py --help
 ```

-- Refer to the comments in `train.py` for how to customize the parameters
\ No newline at end of file
+- Refer to the comments in `train.py` for how to customize the parameters
diff --git a/templates/fine-tune-llama2/utils.py b/templates/fine-tune-llama2/utils.py
index 246f30fdd..1a579d15a 100644
--- a/templates/fine-tune-llama2/utils.py
+++ b/templates/fine-tune-llama2/utils.py
@@ -81,4 +81,4 @@ def download_model(

 def get_mirror_link(model_id: str) -> str:
-    return f"s3://llama-2-weights/models--{model_id.replace('/', '--')}"
\ No newline at end of file
+    return f"s3://llama-2-weights/models--{model_id.replace('/', '--')}"
diff --git a/templates/fine-tune-llm/training_configs/lora/llama-2-13b-4k-4xg5_12xlarge.yaml b/templates/fine-tune-llm/training_configs/lora/llama-2-13b-4k-4xg5_12xlarge.yaml
index df6ea4ff0..e6a52eb61 100644
--- a/templates/fine-tune-llm/training_configs/lora/llama-2-13b-4k-4xg5_12xlarge.yaml
+++ b/templates/fine-tune-llm/training_configs/lora/llama-2-13b-4k-4xg5_12xlarge.yaml
@@ -19,7 +19,7 @@ lora_config:
   r: 8
   lora_alpha: 16
   lora_dropout: 0.05
-  target_modules:
+  target_modules:
   - q_proj
   - v_proj
   - k_proj
@@ -33,4 +33,4 @@ lora_config:
   modules_to_save: []
   bias: "none"
   fan_in_fan_out: false
-  init_lora_weights: true
\ No newline at end of file
+  init_lora_weights: true
diff --git a/templates/fine-tune-llm/training_configs/lora/llama-2-7b-512-16xg5_4xlarge.yaml b/templates/fine-tune-llm/training_configs/lora/llama-2-7b-512-16xg5_4xlarge.yaml
index 12269f27b..862dd6916 100644
--- a/templates/fine-tune-llm/training_configs/lora/llama-2-7b-512-16xg5_4xlarge.yaml
+++ b/templates/fine-tune-llm/training_configs/lora/llama-2-7b-512-16xg5_4xlarge.yaml
@@ -19,7 +19,7 @@ lora_config:
   r: 8
   lora_alpha: 16
   lora_dropout: 0.05
-  target_modules:
+  target_modules:
   - q_proj
   - v_proj
   - k_proj
@@ -33,4 +33,4 @@ lora_config:
   modules_to_save: []
   bias: "none"
   fan_in_fan_out: false
-  init_lora_weights: true
\ No newline at end of file
+  init_lora_weights: true
diff --git a/templates/fine-tune-llm/training_configs/lora/mixtral-8X7b-512-1xp4de_24xlarge-viggo.yaml b/templates/fine-tune-llm/training_configs/lora/mixtral-8X7b-512-1xp4de_24xlarge-viggo.yaml
index dd2a18bd9..795d99c30 100644
--- a/templates/fine-tune-llm/training_configs/lora/mixtral-8X7b-512-1xp4de_24xlarge-viggo.yaml
+++ b/templates/fine-tune-llm/training_configs/lora/mixtral-8X7b-512-1xp4de_24xlarge-viggo.yaml
@@ -10,7 +10,7 @@ num_checkpoints_to_keep: 1 no_gradient_checkpoint: False output_dir: /mnt/local_storage deepspeed: - config_path: deepspeed_configs/zero_3_llama_2_70b.json + config_path: deepspeed_configs/zero_3_llama_2_70b.json worker_resources: p4d.24xlarge: 1 flash_attention_2: True @@ -19,7 +19,7 @@ lora_config: r: 8 lora_alpha: 16 lora_dropout: 0.05 - target_modules: + target_modules: - q_proj - v_proj - k_proj diff --git a/templates/image-classification-service/cluster_env.yaml b/templates/image-classification-service/cluster_env.yaml index 0fb4db441..3d45860ac 100644 --- a/templates/image-classification-service/cluster_env.yaml +++ b/templates/image-classification-service/cluster_env.yaml @@ -10,10 +10,10 @@ python: pip_packages: [] conda_packages: [] -post_build_cmds: -- pip uninstall -y onnxruntime +post_build_cmds: +- pip uninstall -y onnxruntime - pip install -U torchvision - pip install -U onnxruntime-gpu - pip install -U torch_tensorrt -- pip install -U tensorrt +- pip install -U tensorrt - pip install -U python-multipart diff --git a/templates/image-classification-service/download_assets.sh b/templates/image-classification-service/download_assets.sh index d28076e0d..8e0a2fc72 100755 --- a/templates/image-classification-service/download_assets.sh +++ b/templates/image-classification-service/download_assets.sh @@ -1,2 +1,2 @@ wget imagenet_classes.txt https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt -wget -O img0.jpg "https://d17fnq9dkz9hgj.cloudfront.net/breed-uploads/2018/08/siberian-husky-detail.jpg?bust=1535566590&width=630" \ No newline at end of file +wget -O img0.jpg "https://d17fnq9dkz9hgj.cloudfront.net/breed-uploads/2018/08/siberian-husky-detail.jpg?bust=1535566590&width=630" diff --git a/templates/image-classification-service/imagenet_classes.txt b/templates/image-classification-service/imagenet_classes.txt index 888d6f51d..f40829ed0 100644 --- a/templates/image-classification-service/imagenet_classes.txt +++ b/templates/image-classification-service/imagenet_classes.txt @@ -997,4 +997,4 @@ earthstar hen-of-the-woods bolete ear -toilet tissue \ No newline at end of file +toilet tissue diff --git a/templates/image-classification-service/onnx-resnet.py b/templates/image-classification-service/onnx-resnet.py index 0bd359bd6..a88a70527 100644 --- a/templates/image-classification-service/onnx-resnet.py +++ b/templates/image-classification-service/onnx-resnet.py @@ -12,7 +12,7 @@ from torchvision.models import ResNet50_Weights from torchvision import transforms import io -import onnxruntime +import onnxruntime import numpy as np from fastapi import FastAPI from fastapi.responses import Response @@ -66,5 +66,5 @@ async def call(self, file: UploadFile) -> str: output = self.softmax(output) ind = np.argmax(output) return self.categories[ind] - -model = Classifier.bind() \ No newline at end of file + +model = Classifier.bind() diff --git a/templates/image-classification-service/pt-resnet.py b/templates/image-classification-service/pt-resnet.py index 0e53df675..ce08eb3c3 100644 --- a/templates/image-classification-service/pt-resnet.py +++ b/templates/image-classification-service/pt-resnet.py @@ -45,5 +45,5 @@ async def call(self, file: UploadFile) -> str: sm_output = torch.nn.functional.softmax(output[0], dim=0) ind = torch.argmax(sm_output) return self.categories[ind] - -model = Classifier.bind() \ No newline at end of file + +model = Classifier.bind() diff --git a/templates/image-classification-service/query.py 
b/templates/image-classification-service/query.py index c0ffd32b0..7f77df531 100644 --- a/templates/image-classification-service/query.py +++ b/templates/image-classification-service/query.py @@ -12,4 +12,4 @@ duration = time.time() - start print(resp.text) -print("Response took " + str(duration) + " seconds") \ No newline at end of file +print("Response took " + str(duration) + " seconds") diff --git a/templates/image-classification-service/trt-resnet.py b/templates/image-classification-service/trt-resnet.py index f9cc3d7ed..7a9c33d1c 100644 --- a/templates/image-classification-service/trt-resnet.py +++ b/templates/image-classification-service/trt-resnet.py @@ -51,5 +51,5 @@ async def call(self, file: UploadFile) -> str: sm_output = torch.nn.functional.softmax(output[0], dim=0) ind = torch.argmax(sm_output) return self.categories[ind] - + model = Classifier.bind() diff --git a/templates/inferentia-llama/llama-serve.py b/templates/inferentia-llama/llama-serve.py index 119d3ccc7..f94634dd8 100644 --- a/templates/inferentia-llama/llama-serve.py +++ b/templates/inferentia-llama/llama-serve.py @@ -6,7 +6,7 @@ from fastapi.responses import Response import time import copy -import ray +import ray app = FastAPI() @@ -16,7 +16,7 @@ class Llama: def __init__(self): self.model = NeuronModelForCausalLM.from_pretrained('aws-neuron/Llama-2-7b-hf-neuron-budget') self.tokenizer = AutoTokenizer.from_pretrained("aws-neuron/Llama-2-7b-hf-neuron-budget") - + @app.get("/") def generate(self, prompt): inputs = self.tokenizer(prompt, return_tensors="pt") @@ -28,6 +28,6 @@ def generate(self, prompt): top_p=0.9) return self.tokenizer.batch_decode(outputs, skip_special_tokens=True) - + lldep = Llama.bind() diff --git a/templates/inferentia-llama/setup.sh b/templates/inferentia-llama/setup.sh index 40641e5ad..84e2616eb 100644 --- a/templates/inferentia-llama/setup.sh +++ b/templates/inferentia-llama/setup.sh @@ -7,14 +7,14 @@ sudo tee /etc/apt/sources.list.d/neuron.list > /dev/null wget -qO - https://apt.repos.neuron.amazonaws.com/GPG-PUB-KEY-AMAZON-AWS-NEURON.PUB | sudo apt-key add - -# Update OS packages +# Update OS packages sudo apt-get update -y -# Install Neuron Runtime +# Install Neuron Runtime sudo apt-get install aws-neuronx-collectives=2.* -y sudo apt-get install aws-neuronx-runtime-lib=2.* -y -# Install Neuron Tools +# Install Neuron Tools sudo apt-get install aws-neuronx-tools=2.* -y # Install neuronx and torch_xla diff --git a/templates/inferentia-stable-diffusion/compile.py b/templates/inferentia-stable-diffusion/compile.py index 243f2241c..8e6207087 100644 --- a/templates/inferentia-stable-diffusion/compile.py +++ b/templates/inferentia-stable-diffusion/compile.py @@ -1,5 +1,5 @@ import os - + import numpy as np import torch import torch.nn as nn @@ -14,7 +14,7 @@ from diffusers import DiffusionPipeline from diffusers.models.unet_2d_condition import UNet2DConditionOutput from diffusers.models.attention_processor import Attention - + from matplotlib import pyplot as plt from matplotlib import image as mpimg import time @@ -23,7 +23,7 @@ clear_output(wait=False) -def get_attention_scores_neuron(self, query, key, attn_mask): +def get_attention_scores_neuron(self, query, key, attn_mask): if(query.size() == key.size()): attention_scores = custom_badbmm( key, @@ -39,21 +39,21 @@ def get_attention_scores_neuron(self, query, key, attn_mask): self.scale ) attention_probs = attention_scores.softmax(dim=-1) - + return attention_probs - + def custom_badbmm(a, b, scale): bmm = torch.bmm(a, b) scaled = bmm * scale 
return scaled - + class UNetWrap(nn.Module): def __init__(self, unet): super().__init__() self.unet = unet - + def forward(self, sample, timestep, encoder_hidden_states, text_embeds=None, time_ids=None): out_tuple = self.unet(sample, timestep, @@ -61,8 +61,8 @@ def forward(self, sample, timestep, encoder_hidden_states, text_embeds=None, tim added_cond_kwargs={"text_embeds": text_embeds, "time_ids": time_ids}, return_dict=False) return out_tuple - - + + class NeuronUNet(nn.Module): def __init__(self, unetwrap): super().__init__() @@ -71,7 +71,7 @@ def __init__(self, unetwrap): self.in_channels = unetwrap.unet.in_channels self.add_embedding = unetwrap.unet.add_embedding self.device = unetwrap.unet.device - + def forward(self, sample, timestep, encoder_hidden_states, added_cond_kwargs=None, return_dict=False, cross_attention_kwargs=None): sample = self.unetwrap(sample, timestep.float().expand((sample.shape[0],)), @@ -80,7 +80,7 @@ def forward(self, sample, timestep, encoder_hidden_states, added_cond_kwargs=Non added_cond_kwargs["time_ids"])[0] return UNet2DConditionOutput(sample=sample) - + COMPILER_WORKDIR_ROOT = 'sdxl_base_and_refiner_compile_dir_1024' # Model ID for SD XL version pipeline @@ -171,8 +171,8 @@ def forward(self, sample, timestep, encoder_hidden_states, added_cond_kwargs=Non # # Compile vae decoder decoder_in = torch.randn([1, 4, 128, 128]) decoder_neuron = torch_neuronx.trace( - decoder, - decoder_in, + decoder, + decoder_in, compiler_workdir=os.path.join(COMPILER_WORKDIR_ROOT, 'vae_decoder'), ) @@ -195,7 +195,7 @@ def forward(self, sample, timestep, encoder_hidden_states, added_cond_kwargs=Non # Compile vae post_quant_conv post_quant_conv_in = torch.randn([1, 4, 128, 128]) post_quant_conv_neuron = torch_neuronx.trace( - post_quant_conv, + post_quant_conv, post_quant_conv_in, compiler_workdir=os.path.join(COMPILER_WORKDIR_ROOT, 'vae_post_quant_conv'), ) @@ -207,5 +207,3 @@ def forward(self, sample, timestep, encoder_hidden_states, added_cond_kwargs=Non # delete unused objects del post_quant_conv del post_quant_conv_neuron - - diff --git a/templates/inferentia-stable-diffusion/serve/query.py b/templates/inferentia-stable-diffusion/serve/query.py index a40055fee..78212711d 100644 --- a/templates/inferentia-stable-diffusion/serve/query.py +++ b/templates/inferentia-stable-diffusion/serve/query.py @@ -8,7 +8,7 @@ def generate_image(prompt, image_size): resp = requests.post(endpoint, params=req) return resp.content -i = 0 +i = 0 while i < 10: i = i + 1 image = generate_image("a photo of an astronaut riding a horse on mars", 640) diff --git a/templates/inferentia-stable-diffusion/serve/sd_serve.py b/templates/inferentia-stable-diffusion/serve/sd_serve.py index f9312629b..5f9d53482 100644 --- a/templates/inferentia-stable-diffusion/serve/sd_serve.py +++ b/templates/inferentia-stable-diffusion/serve/sd_serve.py @@ -5,7 +5,7 @@ import torch from diffusers import EulerDiscreteScheduler, StableDiffusionPipeline import os - + import numpy as np import torch import torch.nn as nn @@ -13,11 +13,11 @@ from diffusers import DiffusionPipeline from diffusers.models.unet_2d_condition import UNet2DConditionOutput from diffusers.models.attention_processor import Attention - + import time import copy from IPython.display import clear_output -import ray +import ray app = FastAPI() @@ -26,7 +26,7 @@ class UNetWrap(nn.Module): def __init__(self, unet): super().__init__() self.unet = unet - + def forward(self, sample, timestep, encoder_hidden_states, text_embeds=None, time_ids=None): out_tuple = 
self.unet(sample, timestep, @@ -34,8 +34,8 @@ def forward(self, sample, timestep, encoder_hidden_states, text_embeds=None, tim added_cond_kwargs={"text_embeds": text_embeds, "time_ids": time_ids}, return_dict=False) return out_tuple - - + + class NeuronUNet(nn.Module): def __init__(self, unetwrap): super().__init__() @@ -44,7 +44,7 @@ def __init__(self, unetwrap): self.in_channels = unetwrap.unet.in_channels self.add_embedding = unetwrap.unet.add_embedding self.device = unetwrap.unet.device - + def forward(self, sample, timestep, encoder_hidden_states, added_cond_kwargs=None, return_dict=False, cross_attention_kwargs=None): sample = self.unetwrap(sample, timestep.float().expand((sample.shape[0],)), @@ -53,7 +53,7 @@ def forward(self, sample, timestep, encoder_hidden_states, added_cond_kwargs=Non added_cond_kwargs["time_ids"])[0] return UNet2DConditionOutput(sample=sample) - + @serve.deployment(ray_actor_options={"resources": {"neuron_cores": 2}}) @serve.ingress(app) class StableDiffusion: @@ -72,7 +72,7 @@ def __init__(self): # Load the compiled UNet (base) onto two neuron cores. self.pipe_base.unet = NeuronUNet(UNetWrap(self.pipe_base.unet)) ids = ray.get_runtime_context().get_resource_ids() - device_ids = [int(x) for x in ids["neuron_cores"]] + device_ids = [int(x) for x in ids["neuron_cores"]] self.pipe_base.unet.unetwrap = torch_neuronx.DataParallel(torch.jit.load(unet_base_filename), device_ids, set_dynamic_batching=False) @@ -105,7 +105,7 @@ def generate(self, prompt): file_stream = BytesIO() image.save(file_stream, "PNG") return Response(content=file_stream.getvalue(), media_type="image/png") - + def run_refiner_and_base(self, base, refiner, prompt, n_steps=40, high_noise_frac=0.8): image = base( prompt=prompt, @@ -122,9 +122,5 @@ def run_refiner_and_base(self, base, refiner, prompt, n_steps=40, high_noise_fra ).images[0] return image - -sd_app = StableDiffusion.bind() - - - +sd_app = StableDiffusion.bind() diff --git a/templates/inferentia-stable-diffusion/setup.sh b/templates/inferentia-stable-diffusion/setup.sh index ac38c0105..5fc5c1125 100644 --- a/templates/inferentia-stable-diffusion/setup.sh +++ b/templates/inferentia-stable-diffusion/setup.sh @@ -7,14 +7,14 @@ sudo tee /etc/apt/sources.list.d/neuron.list > /dev/null wget -qO - https://apt.repos.neuron.amazonaws.com/GPG-PUB-KEY-AMAZON-AWS-NEURON.PUB | sudo apt-key add - -# Update OS packages +# Update OS packages sudo apt-get update -y -# Install Neuron Runtime +# Install Neuron Runtime sudo apt-get install aws-neuronx-collectives=2.* -y sudo apt-get install aws-neuronx-runtime-lib=2.* -y -# Install Neuron Tools +# Install Neuron Tools sudo apt-get install aws-neuronx-tools=2.* -y # Install neuronx and torch_xla diff --git a/templates/serve-stable-diffusion-aica/main.py b/templates/serve-stable-diffusion-aica/main.py index 2e7b6adfa..c99e19312 100644 --- a/templates/serve-stable-diffusion-aica/main.py +++ b/templates/serve-stable-diffusion-aica/main.py @@ -49,7 +49,7 @@ async def generate(self, prompt: str, img_size: int = 512): @serve.deployment( - ray_actor_options={"num_gpus": 1, + ray_actor_options={"num_gpus": 1, "num_cpus": 1, # Set the number of GPUs and CPUs required for each model replica. "accelerator_type": "A10G"}, # Set accelerator type based on GPU type to use (T4, A10G, L4, V100, A100-40G or A100-80G) max_concurrent_queries=2, # Maximum number of queries that are sent to a replica of this deployment without receiving a response. 
@@ -71,7 +71,7 @@ def __init__(self): model_id, scheduler=scheduler, revision="fp16", torch_dtype=torch.float16 ) self.pipe = self.pipe.to("cuda") - + def generate(self, prompt: str, img_size: int = 512): assert len(prompt), "prompt parameter cannot be empty" @@ -81,6 +81,6 @@ def generate(self, prompt: str, img_size: int = 512): return image -# Bind the deployments to arguments that will be passed into its constructor. +# Bind the deployments to arguments that will be passed into its constructor. # This defines a Ray Serve application that we can run locally or deploy to production. stable_diffusion_app = APIIngress.bind(StableDiffusionV2.bind()) diff --git a/templates/serve-stable-diffusion-aica/service.yaml b/templates/serve-stable-diffusion-aica/service.yaml index b38ee4396..278c598dd 100644 --- a/templates/serve-stable-diffusion-aica/service.yaml +++ b/templates/serve-stable-diffusion-aica/service.yaml @@ -8,4 +8,4 @@ ray_serve_config: pip: - diffusers==0.25.0 - transformers==4.36.2 - - accelerate==0.25.0 \ No newline at end of file + - accelerate==0.25.0 diff --git a/templates/serve-stable-diffusion/main.py b/templates/serve-stable-diffusion/main.py index 1c66e0473..d503fdd8f 100644 --- a/templates/serve-stable-diffusion/main.py +++ b/templates/serve-stable-diffusion/main.py @@ -69,7 +69,7 @@ def __init__(self): model_id, scheduler=scheduler, revision="fp16", torch_dtype=torch.float16 ) self.pipe = self.pipe.to("cuda") - + def generate(self, prompt: str, img_size: int = 512): assert len(prompt), "prompt parameter cannot be empty" @@ -79,6 +79,6 @@ def generate(self, prompt: str, img_size: int = 512): return image -# Bind the deployments to arguments that will be passed into its constructor. +# Bind the deployments to arguments that will be passed into its constructor. # This defines a Ray Serve application that we can run locally or deploy to production. stable_diffusion_app = APIIngress.bind(StableDiffusionV2.bind()) From ca4ef18a99b7613352f016354b54036dec51f72e Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Tue, 20 Feb 2024 17:24:07 -0800 Subject: [PATCH 38/40] intro workspaces updates Signed-off-by: Eric Liang --- templates/intro-workspaces/README.ipynb | 39 +++++++------------------ 1 file changed, 10 insertions(+), 29 deletions(-) diff --git a/templates/intro-workspaces/README.ipynb b/templates/intro-workspaces/README.ipynb index e500a478d..8eaa6535f 100644 --- a/templates/intro-workspaces/README.ipynb +++ b/templates/intro-workspaces/README.ipynb @@ -1,4 +1,3 @@ - { "cells": [ { @@ -10,7 +9,7 @@ "Welcome! You are currently in a Workspace, which is a persistent cloud IDE connected to a Ray cluster.\n", "\n", "In this tutorial, you will learn:\n", - "1. Basic workspace features such as git repo persistence, NFS mounts, cloud storage, and SSH authentication.\n", + "1. Basic workspace features such as git repo persistence, cloud storage, and SSH authentication.\n", "2. Ray cluster management features, such as adding multiple worker nodes.\n", "3. Ray monitoring features such as viewing tasks in the dashboard.\n", "4. Dependency management.\n", @@ -81,31 +80,11 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "### NFS Mounts\n", + "### Cloud Storage\n", "\n", - "Workspace local storage is limited to 1GB, so we recommend only using it to store git repos and smaller files. To persist larger files, you can save data to NFS mounts and cloud storage.\n", + "Workspace local storage is limited to 1GB, so we recommend only using it to store git repos and smaller files. 
To persist larger files, you can save data to cloud storage.\n", "\n", - "Here are a few handy NFS mounts included:\n", - "- `/mnt/shared_storage` is a mount shared across all users of your organization\n", - "- `/mnt/user_storage` is a mount for your user account\n", - "\n", - "NFS storage can be read and written from the workspace, as well as from any node in the Ray cluster:" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "!echo \"hello world\" > /mnt/user_storage/persisted_file.txt && cat /mnt/user_storage/persisted_file.txt" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "#### Cloud Storage\n", + "Cloud storage can be read and written from the workspace, as well as from any node in the Ray cluster.\n", "\n", "Access built-in cloud storage using the `$ANYSCALE_ARTIFACT_STORAGE` URI as a prefix:" ] @@ -116,7 +95,8 @@ "metadata": {}, "outputs": [], "source": [ - "!aws s3 cp /mnt/user_storage/persisted_file.txt $ANYSCALE_ARTIFACT_STORAGE/persisted_object.txt" + "# Note: \"gsutil cp\" instead of \"aws s3 cp\" in GCP clouds.\n", + "!echo \"hello world\" > /tmp/input.txt && aws s3 cp /tmp/input.txt $ANYSCALE_ARTIFACT_STORAGE/saved.txt" ] }, { @@ -125,7 +105,8 @@ "metadata": {}, "outputs": [], "source": [ - "!aws s3 cp $ANYSCALE_ARTIFACT_STORAGE/persisted_object.txt /tmp/object.txt && cat /tmp/object.txt" + "# Note: \"gsutil cp\" instead of \"aws s3 cp\" in GCP clouds.\n", + "!aws s3 cp $ANYSCALE_ARTIFACT_STORAGE/saved.txt /tmp/output.txt && cat /tmp/output.txt" ] }, { @@ -157,9 +138,9 @@ "\n", "\n", "\n", - "### Using \"Auto\" workers mode\n", + "### Using \"Auto-select workers\" mode\n", "\n", - "To let Ray automatically select what kind of worker nodes to add to the cluster, check the \"Auto-select machines\" box. Ray will try to autoscale cluster worker nodes to balance cost and performance. In auto mode, you cannot configure worker node types, but the resources panel will show which node types have been launched.\n", + "To let Ray automatically select what kind of worker nodes to add to the cluster, check the \"Auto-select workers\" box. Ray will try to autoscale cluster worker nodes to balance cost and performance. In auto mode, you cannot configure worker node types, but the resources panel will show which node types have been launched.\n", "\n", "We recommend using auto mode if you do not have specific cluster requirements, and are ok with waiting for the autoscaler to add nodes on-demand to the cluster." ] From a19461c83a0cfeb04f1c60274736f60aa6fe9d34 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Tue, 20 Feb 2024 17:34:50 -0800 Subject: [PATCH 39/40] wip --- templates/endpoints_v2/README.ipynb | 168 +++++++++++++++++++++ templates/endpoints_v2/README.md | 102 ------------- templates/intro-workspaces/README.md | 211 --------------------------- 3 files changed, 168 insertions(+), 313 deletions(-) create mode 100644 templates/endpoints_v2/README.ipynb delete mode 100644 templates/endpoints_v2/README.md delete mode 100644 templates/intro-workspaces/README.md diff --git a/templates/endpoints_v2/README.ipynb b/templates/endpoints_v2/README.ipynb new file mode 100644 index 000000000..dcb9933db --- /dev/null +++ b/templates/endpoints_v2/README.ipynb @@ -0,0 +1,168 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Endpoints - Deploy, configure, and serve LLMs \n", + "\n", + "The guide below walks you through the steps required for deployment of LLM endpoints. 
Based on Ray Serve and RayLLM, the foundation for [Anyscale-Hosted Endpoints](http://anyscale.com/endpoints), the Endpoints template provides an easy-to-configure solution for ML Platform teams, Infrastructure engineers, and Developers who want to deploy optimized LLMs in production. We have provided a number of examples for popular open-source models (Llama2, Mistral, Mixtral, embedding models, and more) with different GPU accelerator and tensor-parallelism configurations in the `models` directory. \n",
+    "\n",
+    "# Step 1 - Run the model locally in the Workspace\n",
+    "\n",
+    "The llm-serve.yaml file in this example runs the Mistral-7B model. There are two important configurations you need to modify:\n",
+    "1. The `models` config in `llm-serve.yaml` contains a list of YAML files for the models you want to deploy. You can run any of the models in the `models` directory or define your own model YAML file and run that instead. All config files follow the naming convention `{model_name}_{accelerator_type}_{tensor_parallelism}`. Follow the CustomModels [guide](CustomModels.md) for bringing your own models.\n",
+    "2. `HUGGING_FACE_HUB_TOKEN` - The Meta Llama-2 family of models needs the HUGGING_FACE_HUB_TOKEN variable to be set to a Hugging Face Access Token for an account with permissions to download the model.\n",
+    "\n",
+    "From the terminal, use the Ray Serve CLI to deploy a model. It will be run locally in this workspace's cluster:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Deploy the Mistral-7b model locally in the workspace.\n",
+    "\n",
+    "!serve run --non-blocking llm-serve.yaml"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "\n",
+    "# Step 2 - Query the model\n",
+    "\n",
+    "Once deployed, you can use the OpenAI SDK to interact with the models, ensuring easy integration with your applications.\n",
+    "\n",
+    "Run the following command to query. You should get the following output:\n",
+    "```\n",
+    "The top rated restaurants in San Francisco include:\n",
+    " • Chez Panisse\n",
+    " • Momofuku Noodle Bar\n",
+    " • Nopa\n",
+    " • Saison\n",
+    " • Mission Chinese Food\n",
+    " • Sushi Nakazawa\n",
+    " • The French Laundry\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Query the local service we just deployed.\n",
+    "\n",
+    "!python llm-query.py"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Endpoints uses an OpenAI-compatible API, allowing us to use the OpenAI SDK to access Endpoint backends."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from openai import OpenAI\n", + "\n", + "client = OpenAI(\n", + " base_url=\"http://localhost:8000/v1\",\n", + " api_key=\"NOT A REAL KEY\",\n", + ")\n", + "\n", + "# List all models.\n", + "models = client.models.list()\n", + "print(models)\n", + "\n", + "# Note: not all arguments are currently supported and will be ignored by the backend.\n", + "chat_completion = client.chat.completions.create(\n", + " model=\"mistralai/Mistral-7B-Instruct-v0.1\",\n", + " messages=[\n", + " {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n", + " {\"role\": \"user\", \"content\": \"What are some of the highest rated restaurants in San Francisco?'.\"},\n", + " ],\n", + " temperature=0.01\n", + ")\n", + "\n", + "print(chat_completion)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Step 3 - Deploying a production service\n", + "\n", + "To deploy an application with one model as an Anyscale Service you can run:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Deploy the serve app to production with a given service name.\n", + "\n", + "!serve deploy --name=my_service_name service.yaml" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This is setup to run the Mistral-7B model, but can be easily modified to run any of the other models in this repo.\n", + "\n", + "# Step 4 - Query the service endpoint\n", + "\n", + "In order to query the endpoint, you can modify the `llm-query.py` script, replacing the query url with the Service URL found in the Service UI.\n", + "\n", + "Note: please make sure to include the path \"/v1\" at the end of the Service url.\n", + "\n", + "# More Guides\n", + "\n", + "Endpoints makes it easy for LLM Developers to interact with OpenAI compatible APIs for their applications by providing an easy to manage backend for serving OSS LLMs. It does this by:\n", + "\n", + "- Providing an extensive suite of pre-configured open source LLMs and embedding models, with defaults that work out of the box. 
\n", + "- Simplifying the addition of new LLMs.\n", + "- Simplifying the deployment of multiple LLMs\n", + "- Offering unique autoscaling support, including scale-to-zero.\n", + "- Fully supporting multi-GPU & multi-node model deployments.\n", + "- Offering high performance features like continuous batching, quantization and streaming.\n", + "- Providing a REST API that is similar to OpenAI's to make it easy to migrate and integrate with other tools.\n", + "\n", + "Look at the following guides for more advanced use-cases -\n", + "* [Deploy models for embedding generation](EmbeddingModels.md)\n", + "* [Learn how to bring your own models](CustomModels.md)\n", + "* [Deploy multiple LoRA fine-tuned models](DeployLora.md)\n", + "* [Deploy Function calling models](DeployFunctionCalling.md)\n", + "* [Learn how to leverage different configurations that can optimize the latency and throughput of your models](OptimizeModels.md)\n", + "* [Learn how to fully configure your deployment including auto-scaling, optimization parameters and tensor-parallelism](AdvancedModelConfigs.md)\n", + "\n", + "# Application Examples\n", + "See examples of building applications with your deployed endpoint on the [Anyscale Endpoints](https://docs.endpoints.anyscale.com/category/examples) page.\n", + "\n", + "Be sure to update the api_base and token for your private deployment. This can be found under the \"Serve deployments\" tab on the \"Query\" button when deploying on your Workspace.\n" + ] + } + ], + "metadata": { + "language_info": { + "name": "python" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/templates/endpoints_v2/README.md b/templates/endpoints_v2/README.md deleted file mode 100644 index 33042464b..000000000 --- a/templates/endpoints_v2/README.md +++ /dev/null @@ -1,102 +0,0 @@ -# Endpoints - Deploy, configure, and serve LLMs - -The guide below walks you through the steps required for deployment of LLM endpoints. Based on Ray Serve and RayLLM, the foundation for [Anyscale-Hosted Endpoints](http://anyscale.com/endpoints), the Endpoints template provides an easy to configure solution for ML Platform teams, Infrastructure engineers, and Developers who want to deploy optimized LLMs in production. We have provided a number of examples for popular open-source models (Llama2, Mistral, Mixtral, embedding models, and more) with different GPU accelerator and tensor-parallelism configurations in the `models` directory. - -# Step 1 - Deploy the model on Workspace - -The llm-serve.yaml file in this example runs the Mistral-7B model. There are 2 important configurations you would need to modify: -1. The `models` config in `llm-serve.yaml` contains a list of YAML files for the models you want to deploy. You can run any of the models in the `models` directory or define your own model YAML file and run that instead. All config files follow the naming convention `{model_name}_{accelerator_type}_{tensor_parallelism}`. Follow the CustomModels [guide](CustomModels.md) for bringing your own models. -2. `HUGGING_FACE_HUB_TOKEN` - The Meta Llama-2 family of models need the HUGGING_FACE_HUB_TOKEN variable to be set to a Hugging Face Access Token for an account with permissions to download the model. - -From the terminal use the Ray Serve CLI to deploy a model: - -```shell -# Deploy the Mistral-7b model. - -serve run llm-serve.yaml -``` - -# Step 2 - Query the model - -Once deployed you can use the OpenAI SDK to interact with the models, ensuring an easy integration for your applications. 
Run the following command in a separate terminal to query. - -```shell -python llm-query.py -``` -```text -Output: -The top rated restaurants in San Francisco include: - • Chez Panisse - • Momofuku Noodle Bar - • Nopa - • Saison - • Mission Chinese Food - • Sushi Nakazawa - • The French Laundry -``` - -Endpoints uses an OpenAI-compatible API, allowing us to use the OpenAI SDK to access Endpoint backends. - -```python -from openai import OpenAI - -client = OpenAI( - base_url="http://localhost:8000/v1", - api_key="NOT A REAL KEY", -) - -# List all models. -models = client.models.list() -print(models) - -# Note: not all arguments are currently supported and will be ignored by the backend. -chat_completion = client.chat.completions.create( - model="mistralai/Mistral-7B-Instruct-v0.1", - messages=[{"role": "system", "content": "You are a helpful assistant."}, {"role": "user", "content": "What are some of the highest rated restaurants in San Francisco?'."}], - temperature=0.01 -) - -print(chat_completion) - -``` - -# Step 3 - Deploying a production service - TODO : Update with new CLI - -To deploy an application with one model on an Anyscale Service you can run: - -```shell -anyscale service rollout -f service.yaml --name {ENTER_NAME_FOR_SERVICE_HERE} -``` - -This is setup to run the Mistral-7B model, but can be easily modified to run any of the other models in this repo. - -# Step 4 - Query the service endpoint - -In order to query the endpoint, you can modify the `llm-query.py` script, replacing the query url with the Service URL found in the Service UI. - -Note: please make sure to include the path "/v1" at the end of the Service url. - -# More Guides - -Endpoints makes it easy for LLM Developers to interact with OpenAI compatible APIs for their applications by providing an easy to manage backend for serving OSS LLMs. It does this by: - -- Providing an extensive suite of pre-configured open source LLMs and embedding models, with defaults that work out of the box. -- Simplifying the addition of new LLMs. -- Simplifying the deployment of multiple LLMs -- Offering unique autoscaling support, including scale-to-zero. -- Fully supporting multi-GPU & multi-node model deployments. -- Offering high performance features like continuous batching, quantization and streaming. -- Providing a REST API that is similar to OpenAI's to make it easy to migrate and integrate with other tools. - -Look at the following guides for more advanced use-cases - -* [Deploy models for embedding generation](EmbeddingModels.md) -* [Learn how to bring your own models](CustomModels.md) -* [Deploy multiple LoRA fine-tuned models](DeployLora.md) -* [Deploy Function calling models](DeployFunctionCalling.md) -* [Learn how to leverage different configurations that can optimize the latency and throughput of your models](OptimizeModels.md) -* [Learn how to fully configure your deployment including auto-scaling, optimization parameters and tensor-parallelism](AdvancedModelConfigs.md) - -# Application Examples -See examples of building applications with your deployed endpoint on the [Anyscale Endpoints](https://docs.endpoints.anyscale.com/category/examples) page. - -Be sure to update the api_base and token for your private deployment. This can be found under the "Serve deployments" tab on the "Query" button when deploying on your Workspace. 
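As a sketch of the Step 4 flow described above (querying the deployed service endpoint rather than the local deployment), only the base URL and key change. The URL and bearer token below are placeholders; copy the real values from the Service UI:

```python
from openai import OpenAI

# Placeholder values: substitute the Service URL from the Service UI (keep
# the trailing /v1) and the bearer token shown for your deployment.
client = OpenAI(
    base_url="https://my-endpoint-service.example.anyscale.com/v1",
    api_key="MY_SERVICE_BEARER_TOKEN",
)

chat_completion = client.chat.completions.create(
    model="mistralai/Mistral-7B-Instruct-v0.1",
    messages=[{"role": "user", "content": "Hello!"}],
    temperature=0.01,
)
print(chat_completion.choices[0].message.content)
```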
diff --git a/templates/intro-workspaces/README.md b/templates/intro-workspaces/README.md deleted file mode 100644 index 588d4d00b..000000000 --- a/templates/intro-workspaces/README.md +++ /dev/null @@ -1,211 +0,0 @@ -# Introduction to Workspaces - -Welcome! You are currently in a Workspace, which is a persistent cloud IDE connected to a Ray cluster. - -In this tutorial, you will learn: -1. Basic workspace features such as git repo persistence, NFS mounts, cloud storage, and SSH authentication. -2. Ray cluster management features, such as adding multiple worker nodes. -3. Ray monitoring features such as viewing tasks in the dashboard. -4. Dependency management. - -## "Hello world" in workspaces - -Let's start by checking that Ray is working properly in your workspace. You can do this by running the following cell to execute a simple parallel Ray program. - - -```python -import ray - -@ray.remote -def square(x): - return x ** 2 - -futures = [square.remote(x) for x in range(100)] -results = ray.get(futures) -print("Success!", results) -``` - -## Workspace Basics - -An Anyscale Workspace is a cloud IDE where you can develop and test Ray programs. Let's get started by creating a new git repo in this workspace. Workspaces will persist the tracked files in this git repo across restarts (as well as files not in a git repos). - -We'll use the repo later on to author and run a simple Ray app. - - -```python -!mkdir my_repo && cd my_repo && git init -``` - -### Setting up SSH authentication (optional) - -Anyscale generates a unique SSH key per user, which is accessible at `~/.ssh/id_rsa.pub`. If you'd like, you can [add this key to GitHub](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/adding-a-new-ssh-key-to-your-github-account) in order to access private repositories from the workspace. - -The public key to add is outputted by the following command: - - -```python -!cat ~/.ssh/id_rsa.pub -``` - -### NFS Mounts - -Workspace local storage is limited to 1GB, so we recommend only using it to store git repos and smaller files. To persist larger files, you can save data to NFS mounts and cloud storage. - -Here are a few handy NFS mounts included: -- `/mnt/shared_storage` is a mount shared across all users of your organization -- `/mnt/user_storage` is a mount for your user account - -NFS storage can be read and written from the workspace, as well as from any node in the Ray cluster: - - -```python -!echo "hello world" > /mnt/user_storage/persisted_file.txt && cat /mnt/user_storage/persisted_file.txt -``` - -#### Cloud Storage - -Access built-in cloud storage using the `$ANYSCALE_ARTIFACT_STORAGE` URI as a prefix: - - -```python -!aws s3 cp /mnt/user_storage/persisted_file.txt $ANYSCALE_ARTIFACT_STORAGE/persisted_object.txt -``` - - -```python -!aws s3 cp $ANYSCALE_ARTIFACT_STORAGE/persisted_object.txt /tmp/object.txt && cat /tmp/object.txt -``` - -## Ray cluster management - -This workspace is connected to a Ray cluster. Click on the resources bar on the top right corner of the screen to open the cluster control panel. This panel shows a summary of Ray resource utilization, and you can use this panel to configure the cluster resources. - - - -### Configuring the Workspace node - -The workspace node is the machine this notebook is running inside. You may wish to change the instance type of the workspace node specifically, e.g., to increase the available memory or add a GPU. Click the pencil icon in order to change the workspace node. 
Note that changing the workspace node will restart the workspace IDE. - - - - -### Adding worker nodes - -To parallelize beyond the resources available to the workspace node, add additional worker nodes to the Ray cluster. Click "Add a node type" to add a number of nodes of a certain type to the cluster. While most use cases only require a single worker node type, you can add multiple distinct node types (e.g., high-CPU and GPU nodes) to the workspace as well. - - - - -### Using "Auto" workers mode - -To let Ray automatically select what kind of worker nodes to add to the cluster, check the "Auto-select machines" box. Ray will try to autoscale cluster worker nodes to balance cost and performance. In auto mode, you cannot configure worker node types, but the resources panel will show which node types have been launched. - -We recommend using auto mode if you do not have specific cluster requirements, and are ok with waiting for the autoscaler to add nodes on-demand to the cluster. - -## Monitoring Ray applications - -In this section, we'll author a simple Ray python script and go over the tools available to monitor its execution. Let's take the opportunity to create a `my_app.py` file in the `my_repo` git repo you created earlier. - -You can click on the "File Explorer" in the left pane of VSCode to create the new file. Copy paste the following program into the file: - -```python -import ray, time - -@ray.remote -def do_some_work(): - print("Doing work") - time.sleep(5) - return "Done" - -ray.get([do_some_work.remote() for _ in range(100)]) -```` - -Then, use the next cell or the VSCode terminal to run the file: - - -```python -!python my_repo/my_app.py -``` - -### Understanding Ray log output - -After running `my_app.py`, you should see output of the form `(do_some_work pid=29848) Doing work [repeated 4x across cluster]`. The prefix of the log message shows the function name, PID of the worker that ran the function, and if run on a remote worker, the node IP. - -The result of the log message contains stdout and stderr from the function execution. Ray will also deduplicate repetitive logs from parallel execution of functions across the cluster. - -### Monitoring program execution - -Depending on the cluster size, the above script may take some time to run. Try playing around with the number of worker machines, increasing the sleep time, or the number of function calls. Use the tools overviewed below to understand how Ray parallelizes the program. - -Let's overview some of the tools available to monitor Ray program execution in workspaces. - -**Resources Panel** - -The resources panel provides basic stats about cluster utilization, as well as an indication of which worker nodes are being used. Use the resource panel as a quick overview of cluster status before diving deeper into the Ray dashboard. - - - -**Ray dashboard > Jobs** - -To see the status of an active or previously run Ray job, navigate to `Ray Dashboard > Jobs` in the UI. Here you will see an overview of job progress, logs, and the ability to drill down into individual task and actors. - - - -**Ray dashboard > Metrics** - -View the aggregate time-series metrics for the cluster in order to diagnose job execution efficiency. The `Ray Dashboard > Metrics` page offers metrics on Ray tasks, actors, as well as hardware resource utilization of the cluster. - - - -**Logs Tab** - -View and search over Ray cluster and application logs in the Logs tab. 
- - - -## Dependency Management - -In order to run code across a cluster, Ray ships code and other library dependencies to other machines in [runtime envs](https://docs.ray.io/en/latest/ray-core/handling-dependencies.html). In workspaces, the code and installed PyPI packages are automatically added to the runtime env to be used by Ray. - -To try this out, run the following command to install the `emoji` package. You'll see a notification that the package has been registered with the cluster. - - -```python -!pip install emoji -``` - -Navigate to the `Dependencies` tab of the workspace, and you should see the `emoji` package in the list there. You can use this UI to edit the workspace runtime dependencies, or the UI. - - - -Run the following cell to check that the `emoji` package is successfully installed on the cluster (to check this properly, make sure the cluster has at least one worker node added). - - -```python -import ray -import emoji -import time - -# Reset the Ray session in the notebook kernel to pick up new dependencies. -if ray.is_initialized(): - ray.shutdown() - -@ray.remote -def f(): - print(emoji.emojize('Dependencies are :thumbs_up:')) - time.sleep(5) - -ray.get([f.remote() for _ in range(100)]) -print("Done") -``` - -That's it! Now you know everything you need to build scalable Ray applications in Anyscale Workspaces. Check out the template gallery and Ray documentation to learn more about what you can do with Ray and Anyscale. - -## Summary - -This notebook: -- Set up a basic development project in a workspace. -- Showed how to use different types of persistent storage. -- Demoed how to build and debug basic Ray application. - - From b1c84cd8daa968e0174476c3d33c11b570695df2 Mon Sep 17 00:00:00 2001 From: Eric Liang Date: Tue, 20 Feb 2024 17:37:57 -0800 Subject: [PATCH 40/40] regen readmes Signed-off-by: Eric Liang --- templates/endpoints_v2/README.md | 115 +++++++++++++++ templates/intro-workspaces/README.md | 202 +++++++++++++++++++++++++++ 2 files changed, 317 insertions(+) create mode 100644 templates/endpoints_v2/README.md create mode 100644 templates/intro-workspaces/README.md diff --git a/templates/endpoints_v2/README.md b/templates/endpoints_v2/README.md new file mode 100644 index 000000000..1886238f0 --- /dev/null +++ b/templates/endpoints_v2/README.md @@ -0,0 +1,115 @@ +# Endpoints - Deploy, configure, and serve LLMs + +The guide below walks you through the steps required for deployment of LLM endpoints. Based on Ray Serve and RayLLM, the foundation for [Anyscale-Hosted Endpoints](http://anyscale.com/endpoints), the Endpoints template provides an easy to configure solution for ML Platform teams, Infrastructure engineers, and Developers who want to deploy optimized LLMs in production. We have provided a number of examples for popular open-source models (Llama2, Mistral, Mixtral, embedding models, and more) with different GPU accelerator and tensor-parallelism configurations in the `models` directory. + +# Step 1 - Run the model locally in the Workspace + +The llm-serve.yaml file in this example runs the Mistral-7B model. There are 2 important configurations you would need to modify: +1. The `models` config in `llm-serve.yaml` contains a list of YAML files for the models you want to deploy. You can run any of the models in the `models` directory or define your own model YAML file and run that instead. All config files follow the naming convention `{model_name}_{accelerator_type}_{tensor_parallelism}`. 
Follow the CustomModels [guide](CustomModels.md) for bringing your own models.
+2. `HUGGING_FACE_HUB_TOKEN` - The Meta Llama-2 family of models needs the HUGGING_FACE_HUB_TOKEN variable to be set to a Hugging Face Access Token for an account with permissions to download the model.
+
+From the terminal, use the Ray Serve CLI to deploy a model. It will be run locally in this workspace's cluster:
+
+
+```python
+# Deploy the Mistral-7b model locally in the workspace.
+
+!serve run --non-blocking llm-serve.yaml
+```
+
+
+# Step 2 - Query the model
+
+Once deployed, you can use the OpenAI SDK to interact with the models, ensuring easy integration with your applications.
+
+Run the following command to query. You should get the following output:
+```
+The top rated restaurants in San Francisco include:
+ • Chez Panisse
+ • Momofuku Noodle Bar
+ • Nopa
+ • Saison
+ • Mission Chinese Food
+ • Sushi Nakazawa
+ • The French Laundry
+```
+
+
+```python
+# Query the local service we just deployed.
+
+!python llm-query.py
+```
+
+Endpoints uses an OpenAI-compatible API, allowing us to use the OpenAI SDK to access Endpoint backends.
+
+
+```python
+from openai import OpenAI
+
+client = OpenAI(
+    base_url="http://localhost:8000/v1",
+    api_key="NOT A REAL KEY",
+)
+
+# List all models.
+models = client.models.list()
+print(models)
+
+# Note: not all arguments are currently supported and will be ignored by the backend.
+chat_completion = client.chat.completions.create(
+    model="mistralai/Mistral-7B-Instruct-v0.1",
+    messages=[
+        {"role": "system", "content": "You are a helpful assistant."},
+        {"role": "user", "content": "What are some of the highest rated restaurants in San Francisco?"},
+    ],
+    temperature=0.01
+)
+
+print(chat_completion)
+```
+
+# Step 3 - Deploying a production service
+
+To deploy an application with one model as an Anyscale Service, you can run:
+
+
+```python
+# Deploy the serve app to production with a given service name.
+
+!serve deploy --name=my_service_name service.yaml
+```
+
+This is set up to run the Mistral-7B model, but can be easily modified to run any of the other models in this repo.
+
+# Step 4 - Query the service endpoint
+
+In order to query the endpoint, you can modify the `llm-query.py` script, replacing the query URL with the Service URL found in the Service UI.
+
+Note: please make sure to include the path "/v1" at the end of the Service URL.
+
+# More Guides
+
+Endpoints makes it easy for LLM Developers to interact with OpenAI-compatible APIs for their applications by providing an easy-to-manage backend for serving OSS LLMs. It does this by:
+
+- Providing an extensive suite of pre-configured open-source LLMs and embedding models, with defaults that work out of the box.
+- Simplifying the addition of new LLMs.
+- Simplifying the deployment of multiple LLMs.
+- Offering unique autoscaling support, including scale-to-zero.
+- Fully supporting multi-GPU & multi-node model deployments.
+- Offering high-performance features such as continuous batching, quantization, and streaming.
+- Providing a REST API that is similar to OpenAI's to make it easy to migrate and integrate with other tools (see the sketch below).
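+
+As a quick illustration of that last point, the REST surface mirrors OpenAI's chat completions route. A minimal sketch against the local deployment from Step 1 (the endpoint and key are the same local-testing placeholders used above):
+
+```python
+import requests
+
+# Same placeholder endpoint/key as the local example above; swap in your
+# Service URL and bearer token for a production deployment.
+resp = requests.post(
+    "http://localhost:8000/v1/chat/completions",
+    headers={"Authorization": "Bearer NOT A REAL KEY"},
+    json={
+        "model": "mistralai/Mistral-7B-Instruct-v0.1",
+        "messages": [{"role": "user", "content": "Hello!"}],
+        "temperature": 0.01,
+    },
+)
+print(resp.json())
+```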
+
+Look at the following guides for more advanced use cases:
+* [Deploy models for embedding generation](EmbeddingModels.md)
+* [Learn how to bring your own models](CustomModels.md)
+* [Deploy multiple LoRA fine-tuned models](DeployLora.md)
+* [Deploy Function calling models](DeployFunctionCalling.md)
+* [Learn how to leverage different configurations that can optimize the latency and throughput of your models](OptimizeModels.md)
+* [Learn how to fully configure your deployment including auto-scaling, optimization parameters, and tensor-parallelism](AdvancedModelConfigs.md)
+
+# Application Examples
+See examples of building applications with your deployed endpoint on the [Anyscale Endpoints](https://docs.endpoints.anyscale.com/category/examples) page.
+
+Be sure to update the api_base and token for your private deployment. This can be found under the "Serve deployments" tab on the "Query" button when deploying on your Workspace.
+
diff --git a/templates/intro-workspaces/README.md b/templates/intro-workspaces/README.md
new file mode 100644
index 000000000..5d22c0c70
--- /dev/null
+++ b/templates/intro-workspaces/README.md
@@ -0,0 +1,202 @@
+# Introduction to Workspaces
+
+Welcome! You are currently in a Workspace, which is a persistent cloud IDE connected to a Ray cluster.
+
+In this tutorial, you will learn:
+1. Basic workspace features such as git repo persistence, cloud storage, and SSH authentication.
+2. Ray cluster management features, such as adding multiple worker nodes.
+3. Ray monitoring features such as viewing tasks in the dashboard.
+4. Dependency management.
+
+## "Hello world" in workspaces
+
+Let's start by checking that Ray is working properly in your workspace. You can do this by running the following cell to execute a simple parallel Ray program.
+
+
+```python
+import ray
+
+@ray.remote
+def square(x):
+    return x ** 2
+
+futures = [square.remote(x) for x in range(100)]
+results = ray.get(futures)
+print("Success!", results)
+```
+
+## Workspace Basics
+
+An Anyscale Workspace is a cloud IDE where you can develop and test Ray programs. Let's get started by creating a new git repo in this workspace. Workspaces will persist the tracked files in this git repo across restarts (as well as files not in a git repo).
+
+We'll use the repo later on to author and run a simple Ray app.
+
+
+```python
+!mkdir my_repo && cd my_repo && git init
+```
+
+### Setting up SSH authentication (optional)
+
+Anyscale generates a unique SSH key per user, which is accessible at `~/.ssh/id_rsa.pub`. If you'd like, you can [add this key to GitHub](https://docs.github.com/en/authentication/connecting-to-github-with-ssh/adding-a-new-ssh-key-to-your-github-account) in order to access private repositories from the workspace.
+
+The public key to add is printed by the following command:
+
+
+```python
+!cat ~/.ssh/id_rsa.pub
+```
+
+### Cloud Storage
+
+Workspace local storage is limited to 1GB, so we recommend only using it to store git repos and smaller files. To persist larger files, you can save data to cloud storage.
+
+Cloud storage can be read and written from the workspace, as well as from any node in the Ray cluster.
+
+Access built-in cloud storage using the `$ANYSCALE_ARTIFACT_STORAGE` URI as a prefix:
+
+
+```python
+# Note: "gsutil cp" instead of "aws s3 cp" in GCP clouds.
+!echo "hello world" > /tmp/input.txt && aws s3 cp /tmp/input.txt $ANYSCALE_ARTIFACT_STORAGE/saved.txt
+```
+
+
+```python
+# Note: "gsutil cp" instead of "aws s3 cp" in GCP clouds.
+!aws s3 cp $ANYSCALE_ARTIFACT_STORAGE/saved.txt /tmp/output.txt && cat /tmp/output.txt
+```
+
+## Ray cluster management
+
+This workspace is connected to a Ray cluster. Click on the resources bar in the top right corner of the screen to open the cluster control panel. This panel shows a summary of Ray resource utilization, and you can use this panel to configure the cluster resources.
+
+
+
+### Configuring the Workspace node
+
+The workspace node is the machine this notebook is running inside. You may wish to change the instance type of the workspace node specifically, e.g., to increase the available memory or add a GPU. Click the pencil icon in order to change the workspace node. Note that changing the workspace node will restart the workspace IDE.
+
+
+
+
+### Adding worker nodes
+
+To parallelize beyond the resources available to the workspace node, add additional worker nodes to the Ray cluster. Click "Add a node type" to add a number of nodes of a certain type to the cluster. While most use cases only require a single worker node type, you can add multiple distinct node types (e.g., high-CPU and GPU nodes) to the workspace as well.
+
+
+
+
+### Using "Auto-select workers" mode
+
+To let Ray automatically select what kind of worker nodes to add to the cluster, check the "Auto-select workers" box. Ray will try to autoscale cluster worker nodes to balance cost and performance. In auto mode, you cannot configure worker node types, but the resources panel will show which node types have been launched.
+
+We recommend using auto mode if you do not have specific cluster requirements, and are OK with waiting for the autoscaler to add nodes on-demand to the cluster.
+
+## Monitoring Ray applications
+
+In this section, we'll author a simple Ray Python script and go over the tools available to monitor its execution. Let's take the opportunity to create a `my_app.py` file in the `my_repo` git repo you created earlier.
+
+You can click on the "File Explorer" in the left pane of VSCode to create the new file. Copy and paste the following program into the file:
+
+```python
+import ray, time
+
+@ray.remote
+def do_some_work():
+    print("Doing work")
+    time.sleep(5)
+    return "Done"
+
+ray.get([do_some_work.remote() for _ in range(100)])
+```
+
+Then, use the next cell or the VSCode terminal to run the file:
+
+
+```python
+!python my_repo/my_app.py
+```
+
+### Understanding Ray log output
+
+After running `my_app.py`, you should see output of the form `(do_some_work pid=29848) Doing work [repeated 4x across cluster]`. The prefix of the log message shows the function name, PID of the worker that ran the function, and if run on a remote worker, the node IP.
+
+The rest of the log message contains stdout and stderr from the function execution. Ray will also deduplicate repetitive logs from parallel execution of functions across the cluster.
+
+### Monitoring program execution
+
+Depending on the cluster size, the above script may take some time to run. Try playing around with the number of worker machines, the sleep time, or the number of function calls. Use the tools described below to understand how Ray parallelizes the program.
+
+Let's overview some of the tools available to monitor Ray program execution in workspaces.
+
+**Resources Panel**
+
+The resources panel provides basic stats about cluster utilization, as well as an indication of which worker nodes are being used. Use the resource panel as a quick overview of cluster status before diving deeper into the Ray dashboard.
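+
+For a quick programmatic view of the same information, Ray's built-in introspection helpers can be used from this workspace (a minimal sketch):
+
+```python
+import ray
+
+# Connect to the running workspace cluster (auto-initializes in a fresh process).
+ray.init()
+
+# Total resources across all nodes vs. what is currently unclaimed.
+print(ray.cluster_resources())
+print(ray.available_resources())
+```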
+
+
+**Ray dashboard > Jobs**
+
+To see the status of an active or previously run Ray job, navigate to `Ray Dashboard > Jobs` in the UI. Here you will see an overview of job progress, logs, and the ability to drill down into individual tasks and actors.
+
+
+
+**Ray dashboard > Metrics**
+
+View the aggregate time-series metrics for the cluster in order to diagnose job execution efficiency. The `Ray Dashboard > Metrics` page offers metrics on Ray tasks and actors, as well as hardware resource utilization of the cluster.
+
+
+
+**Logs Tab**
+
+View and search over Ray cluster and application logs in the Logs tab.
+
+
+
+## Dependency Management
+
+In order to run code across a cluster, Ray ships code and other library dependencies to other machines in [runtime envs](https://docs.ray.io/en/latest/ray-core/handling-dependencies.html). In workspaces, the code and installed PyPI packages are automatically added to the runtime env to be used by Ray.
+
+To try this out, run the following command to install the `emoji` package. You'll see a notification that the package has been registered with the cluster.
+
+
+```python
+!pip install emoji
+```
+
+Navigate to the `Dependencies` tab of the workspace, and you should see the `emoji` package in the list there. You can use this UI to edit the workspace runtime dependencies.
+
+
+
+Run the following cell to check that the `emoji` package is successfully installed on the cluster (to check this properly, make sure the cluster has at least one worker node added).
+
+
+```python
+import ray
+import emoji
+import time
+
+# Reset the Ray session in the notebook kernel to pick up new dependencies.
+if ray.is_initialized():
+    ray.shutdown()
+
+@ray.remote
+def f():
+    print(emoji.emojize('Dependencies are :thumbs_up:'))
+    time.sleep(5)
+
+ray.get([f.remote() for _ in range(100)])
+print("Done")
+```
+
+That's it! Now you know everything you need to build scalable Ray applications in Anyscale Workspaces. Check out the template gallery and Ray documentation to learn more about what you can do with Ray and Anyscale.
+
+## Summary
+
+This notebook:
+- Set up a basic development project in a workspace.
+- Showed how to use different types of persistent storage.
+- Demoed how to build and debug a basic Ray application.
+
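+As a footnote to the dependency management section above: if you prefer to pin a job's dependencies explicitly rather than rely on the auto-tracked workspace environment, Ray also accepts a `runtime_env` at `ray.init()` time. A minimal sketch (the `emoji` pin is illustrative):
+
+```python
+import ray
+
+# Explicit per-job dependencies; workers install this pip package on demand.
+ray.init(runtime_env={"pip": ["emoji"]})
+
+@ray.remote
+def hello():
+    import emoji  # imported on the worker, inside the runtime env
+    return emoji.emojize("Explicit runtime envs :thumbs_up:")
+
+print(ray.get(hello.remote()))
+```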